forked from cerc-io/plugeth
swarm: Chunk refactor (#17659)
Co-authored-by: Janos Guljas <janos@resenje.org> Co-authored-by: Balint Gabor <balint.g@gmail.com> Co-authored-by: Anton Evangelatov <anton.evangelatov@gmail.com> Co-authored-by: Viktor Trón <viktor.tron@gmail.com>
This commit is contained in:
parent
ff3a5d24d2
commit
3ff2f75636
@ -39,7 +39,7 @@ func hash(ctx *cli.Context) {
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
stat, _ := f.Stat()
|
stat, _ := f.Stat()
|
||||||
fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
|
fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams())
|
||||||
addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
|
addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("%v\n", err)
|
utils.Fatalf("%v\n", err)
|
||||||
|
@ -48,7 +48,7 @@ func main() {
|
|||||||
cli.StringFlag{
|
cli.StringFlag{
|
||||||
Name: "cluster-endpoint",
|
Name: "cluster-endpoint",
|
||||||
Value: "testing",
|
Value: "testing",
|
||||||
Usage: "cluster to point to (open, or testing)",
|
Usage: "cluster to point to (local, open or testing)",
|
||||||
Destination: &cluster,
|
Destination: &cluster,
|
||||||
},
|
},
|
||||||
cli.IntFlag{
|
cli.IntFlag{
|
||||||
@ -76,8 +76,8 @@ func main() {
|
|||||||
},
|
},
|
||||||
cli.IntFlag{
|
cli.IntFlag{
|
||||||
Name: "filesize",
|
Name: "filesize",
|
||||||
Value: 1,
|
Value: 1024,
|
||||||
Usage: "file size for generated random file in MB",
|
Usage: "file size for generated random file in KB",
|
||||||
Destination: &filesize,
|
Destination: &filesize,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -39,6 +39,11 @@ import (
|
|||||||
func generateEndpoints(scheme string, cluster string, from int, to int) {
|
func generateEndpoints(scheme string, cluster string, from int, to int) {
|
||||||
if cluster == "prod" {
|
if cluster == "prod" {
|
||||||
cluster = ""
|
cluster = ""
|
||||||
|
} else if cluster == "local" {
|
||||||
|
for port := from; port <= to; port++ {
|
||||||
|
endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port))
|
||||||
|
}
|
||||||
|
return
|
||||||
} else {
|
} else {
|
||||||
cluster = cluster + "."
|
cluster = cluster + "."
|
||||||
}
|
}
|
||||||
@ -53,13 +58,13 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func cliUploadAndSync(c *cli.Context) error {
|
func cliUploadAndSync(c *cli.Context) error {
|
||||||
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())
|
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
|
||||||
|
|
||||||
generateEndpoints(scheme, cluster, from, to)
|
generateEndpoints(scheme, cluster, from, to)
|
||||||
|
|
||||||
log.Info("uploading to " + endpoints[0] + " and syncing")
|
log.Info("uploading to " + endpoints[0] + " and syncing")
|
||||||
|
|
||||||
f, cleanup := generateRandomFile(filesize * 1000000)
|
f, cleanup := generateRandomFile(filesize * 1000)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
hash, err := upload(f, endpoints[0])
|
hash, err := upload(f, endpoints[0])
|
||||||
@ -76,12 +81,7 @@ func cliUploadAndSync(c *cli.Context) error {
|
|||||||
|
|
||||||
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
|
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
|
||||||
|
|
||||||
if filesize < 10 {
|
time.Sleep(3 * time.Second)
|
||||||
time.Sleep(35 * time.Second)
|
|
||||||
} else {
|
|
||||||
time.Sleep(15 * time.Second)
|
|
||||||
time.Sleep(2 * time.Duration(filesize) * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
for _, endpoint := range endpoints {
|
for _, endpoint := range endpoints {
|
||||||
@ -109,7 +109,7 @@ func cliUploadAndSync(c *cli.Context) error {
|
|||||||
// fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
|
// fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file
|
||||||
func fetch(hash string, endpoint string, original []byte, ruid string) error {
|
func fetch(hash string, endpoint string, original []byte, ruid string) error {
|
||||||
log.Trace("sleeping", "ruid", ruid)
|
log.Trace("sleeping", "ruid", ruid)
|
||||||
time.Sleep(5 * time.Second)
|
time.Sleep(3 * time.Second)
|
||||||
|
|
||||||
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
|
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
|
||||||
res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
|
res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
|
||||||
|
@ -250,13 +250,6 @@ func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Han
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upload to be used only in TEST
|
|
||||||
func (a *API) Upload(ctx context.Context, uploadDir, index string, toEncrypt bool) (hash string, err error) {
|
|
||||||
fs := NewFileSystem(a)
|
|
||||||
hash, err = fs.Upload(uploadDir, index, toEncrypt)
|
|
||||||
return hash, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve FileStore reader API
|
// Retrieve FileStore reader API
|
||||||
func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
|
func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
|
||||||
return a.fileStore.Retrieve(ctx, addr)
|
return a.fileStore.Retrieve(ctx, addr)
|
||||||
|
@ -62,6 +62,7 @@ type Config struct {
|
|||||||
NetworkID uint64
|
NetworkID uint64
|
||||||
SwapEnabled bool
|
SwapEnabled bool
|
||||||
SyncEnabled bool
|
SyncEnabled bool
|
||||||
|
SyncingSkipCheck bool
|
||||||
DeliverySkipCheck bool
|
DeliverySkipCheck bool
|
||||||
LightNodeEnabled bool
|
LightNodeEnabled bool
|
||||||
SyncUpdateDelay time.Duration
|
SyncUpdateDelay time.Duration
|
||||||
@ -89,7 +90,8 @@ func NewConfig() (c *Config) {
|
|||||||
NetworkID: network.DefaultNetworkID,
|
NetworkID: network.DefaultNetworkID,
|
||||||
SwapEnabled: false,
|
SwapEnabled: false,
|
||||||
SyncEnabled: true,
|
SyncEnabled: true,
|
||||||
DeliverySkipCheck: false,
|
SyncingSkipCheck: false,
|
||||||
|
DeliverySkipCheck: true,
|
||||||
SyncUpdateDelay: 15 * time.Second,
|
SyncUpdateDelay: 15 * time.Second,
|
||||||
SwapAPI: "",
|
SwapAPI: "",
|
||||||
}
|
}
|
||||||
|
@ -477,12 +477,12 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
|
|||||||
var wait func(context.Context) error
|
var wait func(context.Context) error
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
addr[i], wait, err = srv.FileStore.Store(ctx, reader[i], int64(len(mf)), encrypted)
|
addr[i], wait, err = srv.FileStore.Store(ctx, reader[i], int64(len(mf)), encrypted)
|
||||||
for j := i + 1; j < len(testmanifest); j++ {
|
|
||||||
testmanifest[j] = strings.Replace(testmanifest[j], fmt.Sprintf("<key%v>", i), addr[i].Hex(), -1)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
for j := i + 1; j < len(testmanifest); j++ {
|
||||||
|
testmanifest[j] = strings.Replace(testmanifest[j], fmt.Sprintf("<key%v>", i), addr[i].Hex(), -1)
|
||||||
|
}
|
||||||
err = wait(ctx)
|
err = wait(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -69,9 +69,12 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
key, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt)
|
addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt)
|
||||||
wait(ctx)
|
if err != nil {
|
||||||
return key, err
|
return nil, err
|
||||||
|
}
|
||||||
|
err = wait(ctx)
|
||||||
|
return addr, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Manifest hack for supporting Mutable Resource Updates from the bzz: scheme
|
// Manifest hack for supporting Mutable Resource Updates from the bzz: scheme
|
||||||
@ -87,8 +90,12 @@ func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (sto
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
key, _, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false)
|
addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false)
|
||||||
return key, err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = wait(ctx)
|
||||||
|
return addr, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ManifestWriter is used to add and remove entries from an underlying manifest
|
// ManifestWriter is used to add and remove entries from an underlying manifest
|
||||||
@ -106,21 +113,26 @@ func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC
|
|||||||
return &ManifestWriter{a, trie, quitC}, nil
|
return &ManifestWriter{a, trie, quitC}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddEntry stores the given data and adds the resulting key to the manifest
|
// AddEntry stores the given data and adds the resulting address to the manifest
|
||||||
func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (key storage.Address, err error) {
|
func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (addr storage.Address, err error) {
|
||||||
entry := newManifestTrieEntry(e, nil)
|
entry := newManifestTrieEntry(e, nil)
|
||||||
if data != nil {
|
if data != nil {
|
||||||
key, _, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted)
|
var wait func(context.Context) error
|
||||||
|
addr, wait, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
entry.Hash = key.Hex()
|
err = wait(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.Hash = addr.Hex()
|
||||||
}
|
}
|
||||||
if entry.Hash == "" {
|
if entry.Hash == "" {
|
||||||
return key, errors.New("missing entry hash")
|
return addr, errors.New("missing entry hash")
|
||||||
}
|
}
|
||||||
m.trie.addEntry(entry, m.quitC)
|
m.trie.addEntry(entry, m.quitC)
|
||||||
return key, nil
|
return addr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveEntry removes the given path from the manifest
|
// RemoveEntry removes the given path from the manifest
|
||||||
@ -129,7 +141,7 @@ func (m *ManifestWriter) RemoveEntry(path string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Store stores the manifest, returning the resulting storage key
|
// Store stores the manifest, returning the resulting storage address
|
||||||
func (m *ManifestWriter) Store() (storage.Address, error) {
|
func (m *ManifestWriter) Store() (storage.Address, error) {
|
||||||
return m.trie.ref, m.trie.recalcAndStore()
|
return m.trie.ref, m.trie.recalcAndStore()
|
||||||
}
|
}
|
||||||
@ -211,51 +223,51 @@ type manifestTrieEntry struct {
|
|||||||
subtrie *manifestTrie
|
subtrie *manifestTrie
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
|
func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
|
||||||
log.Trace("manifest lookup", "key", hash)
|
log.Trace("manifest lookup", "addr", addr)
|
||||||
// retrieve manifest via FileStore
|
// retrieve manifest via FileStore
|
||||||
manifestReader, isEncrypted := fileStore.Retrieve(ctx, hash)
|
manifestReader, isEncrypted := fileStore.Retrieve(ctx, addr)
|
||||||
log.Trace("reader retrieved", "key", hash)
|
log.Trace("reader retrieved", "addr", addr)
|
||||||
return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC, decrypt)
|
return readManifest(manifestReader, addr, fileStore, isEncrypted, quitC, decrypt)
|
||||||
}
|
}
|
||||||
|
|
||||||
func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
|
func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
|
||||||
|
|
||||||
// TODO check size for oversized manifests
|
// TODO check size for oversized manifests
|
||||||
size, err := mr.Size(mr.Context(), quitC)
|
size, err := mr.Size(mr.Context(), quitC)
|
||||||
if err != nil { // size == 0
|
if err != nil { // size == 0
|
||||||
// can't determine size means we don't have the root chunk
|
// can't determine size means we don't have the root chunk
|
||||||
log.Trace("manifest not found", "key", hash)
|
log.Trace("manifest not found", "addr", addr)
|
||||||
err = fmt.Errorf("Manifest not Found")
|
err = fmt.Errorf("Manifest not Found")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if size > manifestSizeLimit {
|
if size > manifestSizeLimit {
|
||||||
log.Warn("manifest exceeds size limit", "key", hash, "size", size, "limit", manifestSizeLimit)
|
log.Warn("manifest exceeds size limit", "addr", addr, "size", size, "limit", manifestSizeLimit)
|
||||||
err = fmt.Errorf("Manifest size of %v bytes exceeds the %v byte limit", size, manifestSizeLimit)
|
err = fmt.Errorf("Manifest size of %v bytes exceeds the %v byte limit", size, manifestSizeLimit)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
manifestData := make([]byte, size)
|
manifestData := make([]byte, size)
|
||||||
read, err := mr.Read(manifestData)
|
read, err := mr.Read(manifestData)
|
||||||
if int64(read) < size {
|
if int64(read) < size {
|
||||||
log.Trace("manifest not found", "key", hash)
|
log.Trace("manifest not found", "addr", addr)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
|
err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debug("manifest retrieved", "key", hash)
|
log.Debug("manifest retrieved", "addr", addr)
|
||||||
var man struct {
|
var man struct {
|
||||||
Entries []*manifestTrieEntry `json:"entries"`
|
Entries []*manifestTrieEntry `json:"entries"`
|
||||||
}
|
}
|
||||||
err = json.Unmarshal(manifestData, &man)
|
err = json.Unmarshal(manifestData, &man)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err)
|
err = fmt.Errorf("Manifest %v is malformed: %v", addr.Log(), err)
|
||||||
log.Trace("malformed manifest", "key", hash)
|
log.Trace("malformed manifest", "addr", addr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Trace("manifest entries", "key", hash, "len", len(man.Entries))
|
log.Trace("manifest entries", "addr", addr, "len", len(man.Entries))
|
||||||
|
|
||||||
trie = &manifestTrie{
|
trie = &manifestTrie{
|
||||||
fileStore: fileStore,
|
fileStore: fileStore,
|
||||||
@ -406,12 +418,12 @@ func (mt *manifestTrie) recalcAndStore() error {
|
|||||||
|
|
||||||
sr := bytes.NewReader(manifest)
|
sr := bytes.NewReader(manifest)
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
key, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted)
|
addr, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted)
|
||||||
if err2 != nil {
|
if err2 != nil {
|
||||||
return err2
|
return err2
|
||||||
}
|
}
|
||||||
err2 = wait(ctx)
|
err2 = wait(ctx)
|
||||||
mt.ref = key
|
mt.ref = addr
|
||||||
return err2
|
return err2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -20,7 +20,6 @@ package fuse
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -111,7 +110,7 @@ func createTestFilesAndUploadToSwarm(t *testing.T, api *api.API, files map[strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
//upload directory to swarm and return hash
|
//upload directory to swarm and return hash
|
||||||
bzzhash, err := api.Upload(context.TODO(), uploadDir, "", toEncrypt)
|
bzzhash, err := Upload(uploadDir, "", api, toEncrypt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Error uploading directory %v: %vm encryption: %v", uploadDir, err, toEncrypt)
|
t.Fatalf("Error uploading directory %v: %vm encryption: %v", uploadDir, err, toEncrypt)
|
||||||
}
|
}
|
||||||
@ -1695,3 +1694,9 @@ func TestFUSE(t *testing.T) {
|
|||||||
t.Run("appendFileContentsToEndNonEncrypted", ta.appendFileContentsToEndNonEncrypted)
|
t.Run("appendFileContentsToEndNonEncrypted", ta.appendFileContentsToEndNonEncrypted)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Upload(uploadDir, index string, a *api.API, toEncrypt bool) (hash string, err error) {
|
||||||
|
fs := api.NewFileSystem(a)
|
||||||
|
hash, err = fs.Upload(uploadDir, index, toEncrypt)
|
||||||
|
return hash, err
|
||||||
|
}
|
||||||
|
305
swarm/network/fetcher.go
Normal file
305
swarm/network/fetcher.go
Normal file
@ -0,0 +1,305 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package network
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
|
)
|
||||||
|
|
||||||
|
var searchTimeout = 1 * time.Second
|
||||||
|
|
||||||
|
// Time to consider peer to be skipped.
|
||||||
|
// Also used in stream delivery.
|
||||||
|
var RequestTimeout = 10 * time.Second
|
||||||
|
|
||||||
|
type RequestFunc func(context.Context, *Request) (*discover.NodeID, chan struct{}, error)
|
||||||
|
|
||||||
|
// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
|
||||||
|
// keeps it alive until all active requests are completed. This can happen:
|
||||||
|
// 1. either because the chunk is delivered
|
||||||
|
// 2. or becuse the requestor cancelled/timed out
|
||||||
|
// Fetcher self destroys itself after it is completed.
|
||||||
|
// TODO: cancel all forward requests after termination
|
||||||
|
type Fetcher struct {
|
||||||
|
protoRequestFunc RequestFunc // request function fetcher calls to issue retrieve request for a chunk
|
||||||
|
addr storage.Address // the address of the chunk to be fetched
|
||||||
|
offerC chan *discover.NodeID // channel of sources (peer node id strings)
|
||||||
|
requestC chan struct{}
|
||||||
|
skipCheck bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type Request struct {
|
||||||
|
Addr storage.Address // chunk address
|
||||||
|
Source *discover.NodeID // nodeID of peer to request from (can be nil)
|
||||||
|
SkipCheck bool // whether to offer the chunk first or deliver directly
|
||||||
|
peersToSkip *sync.Map // peers not to request chunk from (only makes sense if source is nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequest returns a new instance of Request based on chunk address skip check and
|
||||||
|
// a map of peers to skip.
|
||||||
|
func NewRequest(addr storage.Address, skipCheck bool, peersToSkip *sync.Map) *Request {
|
||||||
|
return &Request{
|
||||||
|
Addr: addr,
|
||||||
|
SkipCheck: skipCheck,
|
||||||
|
peersToSkip: peersToSkip,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SkipPeer returns if the peer with nodeID should not be requested to deliver a chunk.
|
||||||
|
// Peers to skip are kept per Request and for a time period of RequestTimeout.
|
||||||
|
// This function is used in stream package in Delivery.RequestFromPeers to optimize
|
||||||
|
// requests for chunks.
|
||||||
|
func (r *Request) SkipPeer(nodeID string) bool {
|
||||||
|
val, ok := r.peersToSkip.Load(nodeID)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
t, ok := val.(time.Time)
|
||||||
|
if ok && time.Now().After(t.Add(RequestTimeout)) {
|
||||||
|
// deadine expired
|
||||||
|
r.peersToSkip.Delete(nodeID)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetcherFactory is initialised with a request function and can create fetchers
|
||||||
|
type FetcherFactory struct {
|
||||||
|
request RequestFunc
|
||||||
|
skipCheck bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFetcherFactory takes a request function and skip check parameter and creates a FetcherFactory
|
||||||
|
func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory {
|
||||||
|
return &FetcherFactory{
|
||||||
|
request: request,
|
||||||
|
skipCheck: skipCheck,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New contructs a new Fetcher, for the given chunk. All peers in peersToSkip are not requested to
|
||||||
|
// deliver the given chunk. peersToSkip should always contain the peers which are actively requesting
|
||||||
|
// this chunk, to make sure we don't request back the chunks from them.
|
||||||
|
// The created Fetcher is started and returned.
|
||||||
|
func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peersToSkip *sync.Map) storage.NetFetcher {
|
||||||
|
fetcher := NewFetcher(source, f.request, f.skipCheck)
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
return fetcher
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFetcher creates a new Fetcher for the given chunk address using the given request function.
|
||||||
|
func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
|
||||||
|
return &Fetcher{
|
||||||
|
addr: addr,
|
||||||
|
protoRequestFunc: rf,
|
||||||
|
offerC: make(chan *discover.NodeID),
|
||||||
|
requestC: make(chan struct{}),
|
||||||
|
skipCheck: skipCheck,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg` and the node does not have the chunk locally.
|
||||||
|
func (f *Fetcher) Offer(ctx context.Context, source *discover.NodeID) {
|
||||||
|
// First we need to have this select to make sure that we return if context is done
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// This select alone would not guarantee that we return of context is done, it could potentially
|
||||||
|
// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
|
||||||
|
select {
|
||||||
|
case f.offerC <- source:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request is called when an upstream peer request the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally.
|
||||||
|
func (f *Fetcher) Request(ctx context.Context) {
|
||||||
|
// First we need to have this select to make sure that we return if context is done
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// This select alone would not guarantee that we return of context is done, it could potentially
|
||||||
|
// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
|
||||||
|
select {
|
||||||
|
case f.requestC <- struct{}{}:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// start prepares the Fetcher
|
||||||
|
// it keeps the Fetcher alive within the lifecycle of the passed context
|
||||||
|
func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
|
||||||
|
var (
|
||||||
|
doRequest bool // determines if retrieval is initiated in the current iteration
|
||||||
|
wait *time.Timer // timer for search timeout
|
||||||
|
waitC <-chan time.Time // timer channel
|
||||||
|
sources []*discover.NodeID // known sources, ie. peers that offered the chunk
|
||||||
|
requested bool // true if the chunk was actually requested
|
||||||
|
)
|
||||||
|
gone := make(chan *discover.NodeID) // channel to signal that a peer we requested from disconnected
|
||||||
|
|
||||||
|
// loop that keeps the fetching process alive
|
||||||
|
// after every request a timer is set. If this goes off we request again from another peer
|
||||||
|
// note that the previous request is still alive and has the chance to deliver, so
|
||||||
|
// rerequesting extends the search. ie.,
|
||||||
|
// if a peer we requested from is gone we issue a new request, so the number of active
|
||||||
|
// requests never decreases
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
|
||||||
|
// incoming offer
|
||||||
|
case source := <-f.offerC:
|
||||||
|
log.Trace("new source", "peer addr", source, "request addr", f.addr)
|
||||||
|
// 1) the chunk is offered by a syncing peer
|
||||||
|
// add to known sources
|
||||||
|
sources = append(sources, source)
|
||||||
|
// launch a request to the source iff the chunk was requested (not just expected because its offered by a syncing peer)
|
||||||
|
doRequest = requested
|
||||||
|
|
||||||
|
// incoming request
|
||||||
|
case <-f.requestC:
|
||||||
|
log.Trace("new request", "request addr", f.addr)
|
||||||
|
// 2) chunk is requested, set requested flag
|
||||||
|
// launch a request iff none been launched yet
|
||||||
|
doRequest = !requested
|
||||||
|
requested = true
|
||||||
|
|
||||||
|
// peer we requested from is gone. fall back to another
|
||||||
|
// and remove the peer from the peers map
|
||||||
|
case id := <-gone:
|
||||||
|
log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr)
|
||||||
|
peers.Delete(id.String())
|
||||||
|
doRequest = requested
|
||||||
|
|
||||||
|
// search timeout: too much time passed since the last request,
|
||||||
|
// extend the search to a new peer if we can find one
|
||||||
|
case <-waitC:
|
||||||
|
log.Trace("search timed out: rerequesting", "request addr", f.addr)
|
||||||
|
doRequest = requested
|
||||||
|
|
||||||
|
// all Fetcher context closed, can quit
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Trace("terminate fetcher", "request addr", f.addr)
|
||||||
|
// TODO: send cancelations to all peers left over in peers map (i.e., those we requested from)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// need to issue a new request
|
||||||
|
if doRequest {
|
||||||
|
var err error
|
||||||
|
sources, err = f.doRequest(ctx, gone, peers, sources)
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("unable to request", "request addr", f.addr, "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if wait channel is not set, set it to a timer
|
||||||
|
if requested {
|
||||||
|
if wait == nil {
|
||||||
|
wait = time.NewTimer(searchTimeout)
|
||||||
|
defer wait.Stop()
|
||||||
|
waitC = wait.C
|
||||||
|
} else {
|
||||||
|
// stop the timer and drain the channel if it was not drained earlier
|
||||||
|
if !wait.Stop() {
|
||||||
|
select {
|
||||||
|
case <-wait.C:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// reset the timer to go off after searchTimeout
|
||||||
|
wait.Reset(searchTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
doRequest = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// doRequest attempts at finding a peer to request the chunk from
|
||||||
|
// * first it tries to request explicitly from peers that are known to have offered the chunk
|
||||||
|
// * if there are no such peers (available) it tries to request it from a peer closest to the chunk address
|
||||||
|
// excluding those in the peersToSkip map
|
||||||
|
// * if no such peer is found an error is returned
|
||||||
|
//
|
||||||
|
// if a request is successful,
|
||||||
|
// * the peer's address is added to the set of peers to skip
|
||||||
|
// * the peer's address is removed from prospective sources, and
|
||||||
|
// * a go routine is started that reports on the gone channel if the peer is disconnected (or terminated their streamer)
|
||||||
|
func (f *Fetcher) doRequest(ctx context.Context, gone chan *discover.NodeID, peersToSkip *sync.Map, sources []*discover.NodeID) ([]*discover.NodeID, error) {
|
||||||
|
var i int
|
||||||
|
var sourceID *discover.NodeID
|
||||||
|
var quit chan struct{}
|
||||||
|
|
||||||
|
req := &Request{
|
||||||
|
Addr: f.addr,
|
||||||
|
SkipCheck: f.skipCheck,
|
||||||
|
peersToSkip: peersToSkip,
|
||||||
|
}
|
||||||
|
|
||||||
|
foundSource := false
|
||||||
|
// iterate over known sources
|
||||||
|
for i = 0; i < len(sources); i++ {
|
||||||
|
req.Source = sources[i]
|
||||||
|
var err error
|
||||||
|
sourceID, quit, err = f.protoRequestFunc(ctx, req)
|
||||||
|
if err == nil {
|
||||||
|
// remove the peer from known sources
|
||||||
|
// Note: we can modify the source although we are looping on it, because we break from the loop immediately
|
||||||
|
sources = append(sources[:i], sources[i+1:]...)
|
||||||
|
foundSource = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if there are no known sources, or none available, we try request from a closest node
|
||||||
|
if !foundSource {
|
||||||
|
req.Source = nil
|
||||||
|
var err error
|
||||||
|
sourceID, quit, err = f.protoRequestFunc(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
// if no peers found to request from
|
||||||
|
return sources, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// add peer to the set of peers to skip from now
|
||||||
|
peersToSkip.Store(sourceID.String(), time.Now())
|
||||||
|
|
||||||
|
// if the quit channel is closed, it indicates that the source peer we requested from
|
||||||
|
// disconnected or terminated its streamer
|
||||||
|
// here start a go routine that watches this channel and reports the source peer on the gone channel
|
||||||
|
// this go routine quits if the fetcher global context is done to prevent process leak
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-quit:
|
||||||
|
gone <- sourceID
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return sources, nil
|
||||||
|
}
|
459
swarm/network/fetcher_test.go
Normal file
459
swarm/network/fetcher_test.go
Normal file
@ -0,0 +1,459 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package network
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
|
)
|
||||||
|
|
||||||
|
var requestedPeerID = discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
|
||||||
|
var sourcePeerID = discover.MustHexID("2dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
|
||||||
|
|
||||||
|
// mockRequester pushes every request to the requestC channel when its doRequest function is called
|
||||||
|
type mockRequester struct {
|
||||||
|
// requests []Request
|
||||||
|
requestC chan *Request // when a request is coming it is pushed to requestC
|
||||||
|
waitTimes []time.Duration // with waitTimes[i] you can define how much to wait on the ith request (optional)
|
||||||
|
ctr int //counts the number of requests
|
||||||
|
quitC chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMockRequester(waitTimes ...time.Duration) *mockRequester {
|
||||||
|
return &mockRequester{
|
||||||
|
requestC: make(chan *Request),
|
||||||
|
waitTimes: waitTimes,
|
||||||
|
quitC: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*discover.NodeID, chan struct{}, error) {
|
||||||
|
waitTime := time.Duration(0)
|
||||||
|
if m.ctr < len(m.waitTimes) {
|
||||||
|
waitTime = m.waitTimes[m.ctr]
|
||||||
|
m.ctr++
|
||||||
|
}
|
||||||
|
time.Sleep(waitTime)
|
||||||
|
m.requestC <- request
|
||||||
|
|
||||||
|
// if there is a Source in the request use that, if not use the global requestedPeerId
|
||||||
|
source := request.Source
|
||||||
|
if source == nil {
|
||||||
|
source = &requestedPeerID
|
||||||
|
}
|
||||||
|
return source, m.quitC, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFetcherSingleRequest creates a Fetcher using mockRequester, and run it with a sample set of peers to skip.
|
||||||
|
// mockRequester pushes a Request on a channel every time the request function is called. Using
|
||||||
|
// this channel we test if calling Fetcher.Request calls the request function, and whether it uses
|
||||||
|
// the correct peers to skip which we provided for the fetcher.run function.
|
||||||
|
func TestFetcherSingleRequest(t *testing.T) {
|
||||||
|
requester := newMockRequester()
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
peers := []string{"a", "b", "c", "d"}
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
for _, p := range peers {
|
||||||
|
peersToSkip.Store(p, time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
|
||||||
|
rctx := context.Background()
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case request := <-requester.requestC:
|
||||||
|
// request should contain all peers from peersToSkip provided to the fetcher
|
||||||
|
for _, p := range peers {
|
||||||
|
if _, ok := request.peersToSkip.Load(p); !ok {
|
||||||
|
t.Fatalf("request.peersToSkip misses peer")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// source peer should be also added to peersToSkip eventually
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
if _, ok := request.peersToSkip.Load(requestedPeerID.String()); !ok {
|
||||||
|
t.Fatalf("request.peersToSkip does not contain peer returned by the request function")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetch should trigger a request, if it doesn't happen in time, test should fail
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("fetch timeout")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCancelStopsFetcher tests that a cancelled fetcher does not initiate further requests even if its fetch function is called
|
||||||
|
func TestFetcherCancelStopsFetcher(t *testing.T) {
|
||||||
|
requester := newMockRequester()
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
// we start the fetcher, and then we immediately cancel the context
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
cancel()
|
||||||
|
|
||||||
|
rctx, rcancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||||
|
defer rcancel()
|
||||||
|
// we call Request with an active context
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
t.Fatalf("cancelled fetcher initiated request")
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFetchCancelStopsRequest tests that calling a Request function with a cancelled context does not initiate a request
|
||||||
|
func TestFetcherCancelStopsRequest(t *testing.T) {
|
||||||
|
requester := newMockRequester(100 * time.Millisecond)
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// we start the fetcher with an active context
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
|
||||||
|
rctx, rcancel := context.WithCancel(context.Background())
|
||||||
|
rcancel()
|
||||||
|
|
||||||
|
// we call Request with a cancelled context
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
t.Fatalf("cancelled fetch function initiated request")
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
// if there is another Request with active context, there should be a request, because the fetcher itself is not cancelled
|
||||||
|
rctx = context.Background()
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("expected request")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestOfferUsesSource tests Fetcher Offer behavior.
|
||||||
|
// In this case there should be 1 (and only one) request initiated from the source peer, and the
|
||||||
|
// source nodeid should appear in the peersToSkip map.
|
||||||
|
func TestFetcherOfferUsesSource(t *testing.T) {
|
||||||
|
requester := newMockRequester(100 * time.Millisecond)
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// start the fetcher
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
|
||||||
|
rctx := context.Background()
|
||||||
|
// call the Offer function with the source peer
|
||||||
|
fetcher.Offer(rctx, &sourcePeerID)
|
||||||
|
|
||||||
|
// fetcher should not initiate request
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
t.Fatalf("fetcher initiated request")
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
// call Request after the Offer
|
||||||
|
rctx = context.Background()
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
// there should be exactly 1 request coming from fetcher
|
||||||
|
var request *Request
|
||||||
|
select {
|
||||||
|
case request = <-requester.requestC:
|
||||||
|
if *request.Source != sourcePeerID {
|
||||||
|
t.Fatalf("Expected source id %v got %v", sourcePeerID, request.Source)
|
||||||
|
}
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("fetcher did not initiate request")
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
t.Fatalf("Fetcher number of requests expected 1 got 2")
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
}
|
||||||
|
|
||||||
|
// source peer should be added to peersToSkip eventually
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
|
||||||
|
t.Fatalf("SourcePeerId not added to peersToSkip")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
|
||||||
|
requester := newMockRequester(100 * time.Millisecond)
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// start the fetcher
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
|
||||||
|
// call Request first
|
||||||
|
rctx := context.Background()
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
// there should be a request coming from fetcher
|
||||||
|
var request *Request
|
||||||
|
select {
|
||||||
|
case request = <-requester.requestC:
|
||||||
|
if request.Source != nil {
|
||||||
|
t.Fatalf("Incorrect source peer id, expected nil got %v", request.Source)
|
||||||
|
}
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("fetcher did not initiate request")
|
||||||
|
}
|
||||||
|
|
||||||
|
// after the Request call Offer
|
||||||
|
fetcher.Offer(context.Background(), &sourcePeerID)
|
||||||
|
|
||||||
|
// there should be a request coming from fetcher
|
||||||
|
select {
|
||||||
|
case request = <-requester.requestC:
|
||||||
|
if *request.Source != sourcePeerID {
|
||||||
|
t.Fatalf("Incorrect source peer id, expected %v got %v", sourcePeerID, request.Source)
|
||||||
|
}
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("fetcher did not initiate request")
|
||||||
|
}
|
||||||
|
|
||||||
|
// source peer should be added to peersToSkip eventually
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
|
||||||
|
t.Fatalf("SourcePeerId not added to peersToSkip")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFetcherRetryOnTimeout tests that fetch retries after searchTimeOut has passed
|
||||||
|
func TestFetcherRetryOnTimeout(t *testing.T) {
|
||||||
|
requester := newMockRequester()
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
// set searchTimeOut to low value so the test is quicker
|
||||||
|
defer func(t time.Duration) {
|
||||||
|
searchTimeout = t
|
||||||
|
}(searchTimeout)
|
||||||
|
searchTimeout = 250 * time.Millisecond
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// start the fetcher
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
|
||||||
|
// call the fetch function with an active context
|
||||||
|
rctx := context.Background()
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
// after 100ms the first request should be initiated
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
default:
|
||||||
|
t.Fatalf("fetch did not initiate request")
|
||||||
|
}
|
||||||
|
|
||||||
|
// after another 100ms no new request should be initiated, because search timeout is 250ms
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
t.Fatalf("unexpected request from fetcher")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
// after another 300ms search timeout is over, there should be a new request
|
||||||
|
time.Sleep(300 * time.Millisecond)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
default:
|
||||||
|
t.Fatalf("fetch did not retry request")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFetcherFactory creates a FetcherFactory and checks if the factory really creates and starts
|
||||||
|
// a Fetcher when it return a fetch function. We test the fetching functionality just by checking if
|
||||||
|
// a request is initiated when the fetch function is called
|
||||||
|
func TestFetcherFactory(t *testing.T) {
|
||||||
|
requester := newMockRequester(100 * time.Millisecond)
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcherFactory := NewFetcherFactory(requester.doRequest, false)
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)
|
||||||
|
|
||||||
|
fetcher.Request(context.Background())
|
||||||
|
|
||||||
|
// check if the created fetchFunction really starts a fetcher and initiates a request
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("fetch timeout")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
|
||||||
|
requester := newMockRequester()
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||||
|
|
||||||
|
// make sure searchTimeout is long so it is sure the request is not retried because of timeout
|
||||||
|
defer func(t time.Duration) {
|
||||||
|
searchTimeout = t
|
||||||
|
}(searchTimeout)
|
||||||
|
searchTimeout = 10 * time.Second
|
||||||
|
|
||||||
|
peersToSkip := &sync.Map{}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
go fetcher.run(ctx, peersToSkip)
|
||||||
|
|
||||||
|
rctx := context.Background()
|
||||||
|
fetcher.Request(rctx)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("request is not initiated")
|
||||||
|
}
|
||||||
|
|
||||||
|
close(requester.quitC)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-requester.requestC:
|
||||||
|
case <-time.After(200 * time.Millisecond):
|
||||||
|
t.Fatalf("request is not initiated after failed request")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRequestSkipPeer checks if PeerSkip function will skip provided peer
|
||||||
|
// and not skip unknown one.
|
||||||
|
func TestRequestSkipPeer(t *testing.T) {
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
peers := []discover.NodeID{
|
||||||
|
discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
|
||||||
|
discover.MustHexID("2dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
|
||||||
|
}
|
||||||
|
|
||||||
|
peersToSkip := new(sync.Map)
|
||||||
|
peersToSkip.Store(peers[0].String(), time.Now())
|
||||||
|
r := NewRequest(addr, false, peersToSkip)
|
||||||
|
|
||||||
|
if !r.SkipPeer(peers[0].String()) {
|
||||||
|
t.Errorf("peer not skipped")
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.SkipPeer(peers[1].String()) {
|
||||||
|
t.Errorf("peer skipped")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRequestSkipPeerExpired checks if a peer to skip is not skipped
|
||||||
|
// after RequestTimeout has passed.
|
||||||
|
func TestRequestSkipPeerExpired(t *testing.T) {
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
peer := discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
|
||||||
|
|
||||||
|
// set RequestTimeout to a low value and reset it after the test
|
||||||
|
defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
|
||||||
|
RequestTimeout = 250 * time.Millisecond
|
||||||
|
|
||||||
|
peersToSkip := new(sync.Map)
|
||||||
|
peersToSkip.Store(peer.String(), time.Now())
|
||||||
|
r := NewRequest(addr, false, peersToSkip)
|
||||||
|
|
||||||
|
if !r.SkipPeer(peer.String()) {
|
||||||
|
t.Errorf("peer not skipped")
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
|
||||||
|
if r.SkipPeer(peer.String()) {
|
||||||
|
t.Errorf("peer skipped")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRequestSkipPeerPermanent checks if a peer to skip is not skipped
|
||||||
|
// after RequestTimeout is not skipped if it is set for a permanent skipping
|
||||||
|
// by value to peersToSkip map is not time.Duration.
|
||||||
|
func TestRequestSkipPeerPermanent(t *testing.T) {
|
||||||
|
addr := make([]byte, 32)
|
||||||
|
peer := discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
|
||||||
|
|
||||||
|
// set RequestTimeout to a low value and reset it after the test
|
||||||
|
defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
|
||||||
|
RequestTimeout = 250 * time.Millisecond
|
||||||
|
|
||||||
|
peersToSkip := new(sync.Map)
|
||||||
|
peersToSkip.Store(peer.String(), true)
|
||||||
|
r := NewRequest(addr, false, peersToSkip)
|
||||||
|
|
||||||
|
if !r.SkipPeer(peer.String()) {
|
||||||
|
t.Errorf("peer not skipped")
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
|
||||||
|
if !r.SkipPeer(peer.String()) {
|
||||||
|
t.Errorf("peer not skipped")
|
||||||
|
}
|
||||||
|
}
|
@ -28,10 +28,13 @@ package priorityqueue
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errContention = errors.New("queue contention")
|
ErrContention = errors.New("contention")
|
||||||
|
|
||||||
errBadPriority = errors.New("bad priority")
|
errBadPriority = errors.New("bad priority")
|
||||||
|
|
||||||
wakey = struct{}{}
|
wakey = struct{}{}
|
||||||
@ -39,7 +42,7 @@ var (
|
|||||||
|
|
||||||
// PriorityQueue is the basic structure
|
// PriorityQueue is the basic structure
|
||||||
type PriorityQueue struct {
|
type PriorityQueue struct {
|
||||||
queues []chan interface{}
|
Queues []chan interface{}
|
||||||
wakeup chan struct{}
|
wakeup chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,27 +53,29 @@ func New(n int, l int) *PriorityQueue {
|
|||||||
queues[i] = make(chan interface{}, l)
|
queues[i] = make(chan interface{}, l)
|
||||||
}
|
}
|
||||||
return &PriorityQueue{
|
return &PriorityQueue{
|
||||||
queues: queues,
|
Queues: queues,
|
||||||
wakeup: make(chan struct{}, 1),
|
wakeup: make(chan struct{}, 1),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run is a forever loop popping items from the queues
|
// Run is a forever loop popping items from the queues
|
||||||
func (pq *PriorityQueue) Run(ctx context.Context, f func(interface{})) {
|
func (pq *PriorityQueue) Run(ctx context.Context, f func(interface{})) {
|
||||||
top := len(pq.queues) - 1
|
top := len(pq.Queues) - 1
|
||||||
p := top
|
p := top
|
||||||
READ:
|
READ:
|
||||||
for {
|
for {
|
||||||
q := pq.queues[p]
|
q := pq.Queues[p]
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
case x := <-q:
|
case x := <-q:
|
||||||
|
log.Trace("priority.queue f(x)", "p", p, "len(Queues[p])", len(pq.Queues[p]))
|
||||||
f(x)
|
f(x)
|
||||||
p = top
|
p = top
|
||||||
default:
|
default:
|
||||||
if p > 0 {
|
if p > 0 {
|
||||||
p--
|
p--
|
||||||
|
log.Trace("priority.queue p > 0", "p", p)
|
||||||
continue READ
|
continue READ
|
||||||
}
|
}
|
||||||
p = top
|
p = top
|
||||||
@ -78,6 +83,7 @@ READ:
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
case <-pq.wakeup:
|
case <-pq.wakeup:
|
||||||
|
log.Trace("priority.queue wakeup", "p", p)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -85,23 +91,15 @@ READ:
|
|||||||
|
|
||||||
// Push pushes an item to the appropriate queue specified in the priority argument
|
// Push pushes an item to the appropriate queue specified in the priority argument
|
||||||
// if context is given it waits until either the item is pushed or the Context aborts
|
// if context is given it waits until either the item is pushed or the Context aborts
|
||||||
// otherwise returns errContention if the queue is full
|
func (pq *PriorityQueue) Push(x interface{}, p int) error {
|
||||||
func (pq *PriorityQueue) Push(ctx context.Context, x interface{}, p int) error {
|
if p < 0 || p >= len(pq.Queues) {
|
||||||
if p < 0 || p >= len(pq.queues) {
|
|
||||||
return errBadPriority
|
return errBadPriority
|
||||||
}
|
}
|
||||||
if ctx == nil {
|
log.Trace("priority.queue push", "p", p, "len(Queues[p])", len(pq.Queues[p]))
|
||||||
select {
|
select {
|
||||||
case pq.queues[p] <- x:
|
case pq.Queues[p] <- x:
|
||||||
default:
|
default:
|
||||||
return errContention
|
return ErrContention
|
||||||
}
|
|
||||||
} else {
|
|
||||||
select {
|
|
||||||
case pq.queues[p] <- x:
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
case pq.wakeup <- wakey:
|
case pq.wakeup <- wakey:
|
||||||
|
@ -30,7 +30,7 @@ func TestPriorityQueue(t *testing.T) {
|
|||||||
results = append(results, v.(string))
|
results = append(results, v.(string))
|
||||||
wg.Done()
|
wg.Done()
|
||||||
})
|
})
|
||||||
pq.Push(context.Background(), "2.0", 2)
|
pq.Push("2.0", 2)
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
if results[0] != "2.0" {
|
if results[0] != "2.0" {
|
||||||
t.Errorf("expected first result %q, got %q", "2.0", results[0])
|
t.Errorf("expected first result %q, got %q", "2.0", results[0])
|
||||||
@ -66,7 +66,7 @@ Loop:
|
|||||||
{
|
{
|
||||||
priorities: []int{0, 0, 0},
|
priorities: []int{0, 0, 0},
|
||||||
values: []string{"0.0", "0.0", "0.1"},
|
values: []string{"0.0", "0.0", "0.1"},
|
||||||
errors: []error{nil, nil, errContention},
|
errors: []error{nil, nil, ErrContention},
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
var results []string
|
var results []string
|
||||||
@ -74,7 +74,7 @@ Loop:
|
|||||||
pq := New(3, 2)
|
pq := New(3, 2)
|
||||||
wg.Add(len(tc.values))
|
wg.Add(len(tc.values))
|
||||||
for j, value := range tc.values {
|
for j, value := range tc.values {
|
||||||
err := pq.Push(nil, value, tc.priorities[j])
|
err := pq.Push(value, tc.priorities[j])
|
||||||
if tc.errors != nil && err != tc.errors[j] {
|
if tc.errors != nil && err != tc.errors[j] {
|
||||||
t.Errorf("expected push error %v, got %v", tc.errors[j], err)
|
t.Errorf("expected push error %v, got %v", tc.errors[j], err)
|
||||||
continue Loop
|
continue Loop
|
||||||
|
@ -94,7 +94,7 @@ func New(services map[string]ServiceFunc) (s *Simulation) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
s.Net = simulations.NewNetwork(
|
s.Net = simulations.NewNetwork(
|
||||||
adapters.NewSimAdapter(adapterServices),
|
adapters.NewTCPAdapter(adapterServices),
|
||||||
&simulations.NetworkConfig{ID: "0"},
|
&simulations.NetworkConfig{ID: "0"},
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -164,17 +164,6 @@ var maxParallelCleanups = 10
|
|||||||
func (s *Simulation) Close() {
|
func (s *Simulation) Close() {
|
||||||
close(s.done)
|
close(s.done)
|
||||||
|
|
||||||
// Close all connections before calling the Network Shutdown.
|
|
||||||
// It is possible that p2p.Server.Stop will block if there are
|
|
||||||
// existing connections.
|
|
||||||
for _, c := range s.Net.Conns {
|
|
||||||
if c.Up {
|
|
||||||
s.Net.Disconnect(c.One, c.Other)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.shutdownWG.Wait()
|
|
||||||
s.Net.Shutdown()
|
|
||||||
|
|
||||||
sem := make(chan struct{}, maxParallelCleanups)
|
sem := make(chan struct{}, maxParallelCleanups)
|
||||||
s.mu.RLock()
|
s.mu.RLock()
|
||||||
cleanupFuncs := make([]func(), len(s.cleanupFuncs))
|
cleanupFuncs := make([]func(), len(s.cleanupFuncs))
|
||||||
@ -206,6 +195,9 @@ func (s *Simulation) Close() {
|
|||||||
}
|
}
|
||||||
close(s.runC)
|
close(s.runC)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
s.shutdownWG.Wait()
|
||||||
|
s.Net.Shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Done returns a channel that is closed when the simulation
|
// Done returns a channel that is closed when the simulation
|
||||||
|
@ -107,9 +107,14 @@ func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *stora
|
|||||||
return nil, nil, nil, removeDataDir, err
|
return nil, nil, nil, removeDataDir, err
|
||||||
}
|
}
|
||||||
|
|
||||||
db := storage.NewDBAPI(localStore)
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
delivery := NewDelivery(to, db)
|
if err != nil {
|
||||||
streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
|
return nil, nil, nil, removeDataDir, err
|
||||||
|
}
|
||||||
|
|
||||||
|
delivery := NewDelivery(to, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
|
streamer := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), nil)
|
||||||
teardown := func() {
|
teardown := func() {
|
||||||
streamer.Close()
|
streamer.Close()
|
||||||
removeDataDir()
|
removeDataDir()
|
||||||
@ -150,14 +155,14 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (*storage.Chunk, error) {
|
func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (storage.Chunk, error) {
|
||||||
return nil, errors.New("get not well defined on round robin store")
|
return nil, errors.New("get not well defined on round robin store")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rrs *roundRobinStore) Put(ctx context.Context, chunk *storage.Chunk) {
|
func (rrs *roundRobinStore) Put(ctx context.Context, chunk storage.Chunk) error {
|
||||||
i := atomic.AddUint32(&rrs.index, 1)
|
i := atomic.AddUint32(&rrs.index, 1)
|
||||||
idx := int(i) % len(rrs.stores)
|
idx := int(i) % len(rrs.stores)
|
||||||
rrs.stores[idx].Put(ctx, chunk)
|
return rrs.stores[idx].Put(ctx, chunk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rrs *roundRobinStore) Close() {
|
func (rrs *roundRobinStore) Close() {
|
||||||
|
@ -19,12 +19,11 @@ package stream
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
cp "github.com/ethereum/go-ethereum/swarm/chunk"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/network"
|
"github.com/ethereum/go-ethereum/swarm/network"
|
||||||
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||||
@ -46,38 +45,33 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Delivery struct {
|
type Delivery struct {
|
||||||
db *storage.DBAPI
|
chunkStore storage.SyncChunkStore
|
||||||
kad *network.Kademlia
|
kad *network.Kademlia
|
||||||
receiveC chan *ChunkDeliveryMsg
|
|
||||||
getPeer func(discover.NodeID) *Peer
|
getPeer func(discover.NodeID) *Peer
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDelivery(kad *network.Kademlia, db *storage.DBAPI) *Delivery {
|
func NewDelivery(kad *network.Kademlia, chunkStore storage.SyncChunkStore) *Delivery {
|
||||||
d := &Delivery{
|
return &Delivery{
|
||||||
db: db,
|
chunkStore: chunkStore,
|
||||||
kad: kad,
|
kad: kad,
|
||||||
receiveC: make(chan *ChunkDeliveryMsg, deliveryCap),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
go d.processReceivedChunks()
|
|
||||||
return d
|
|
||||||
}
|
}
|
||||||
|
|
||||||
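With this change the constructor is passive: NewDelivery only records the Kademlia and the SyncChunkStore, and chunk fetching is wired up by the caller. The sketch below condenses the setup repeated in the test helpers later in this diff; the parameter types and the NewRegistry signature are my reading of those hunks, not definitions introduced here.

package stream

import (
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// newStreamerStack shows the wiring order used throughout this commit: build
// the NetStore without a fetcher, build the Delivery on top of it, then plug
// the fetcher factory (which needs delivery.RequestFromPeers) back into the
// NetStore before constructing the Registry.
func newStreamerStack(localStore *storage.LocalStore, kad *network.Kademlia, addr *network.BzzAddr) (*Registry, error) {
	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, err
	}
	delivery := NewDelivery(kad, netStore)
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
	return NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), nil), nil
}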
// SwarmChunkServer implements Server
|
// SwarmChunkServer implements Server
|
||||||
type SwarmChunkServer struct {
|
type SwarmChunkServer struct {
|
||||||
deliveryC chan []byte
|
deliveryC chan []byte
|
||||||
batchC chan []byte
|
batchC chan []byte
|
||||||
db *storage.DBAPI
|
chunkStore storage.ChunkStore
|
||||||
currentLen uint64
|
currentLen uint64
|
||||||
quit chan struct{}
|
quit chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSwarmChunkServer is SwarmChunkServer constructor
|
// NewSwarmChunkServer is SwarmChunkServer constructor
|
||||||
func NewSwarmChunkServer(db *storage.DBAPI) *SwarmChunkServer {
|
func NewSwarmChunkServer(chunkStore storage.ChunkStore) *SwarmChunkServer {
|
||||||
s := &SwarmChunkServer{
|
s := &SwarmChunkServer{
|
||||||
deliveryC: make(chan []byte, deliveryCap),
|
deliveryC: make(chan []byte, deliveryCap),
|
||||||
batchC: make(chan []byte),
|
batchC: make(chan []byte),
|
||||||
db: db,
|
chunkStore: chunkStore,
|
||||||
quit: make(chan struct{}),
|
quit: make(chan struct{}),
|
||||||
}
|
}
|
||||||
go s.processDeliveries()
|
go s.processDeliveries()
|
||||||
@ -123,13 +117,11 @@ func (s *SwarmChunkServer) Close() {
|
|||||||
|
|
||||||
// GetData retrieves chunk data from db store
|
// GetData retrieves chunk data from db store
|
||||||
func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
|
func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
|
||||||
chunk, err := s.db.Get(ctx, storage.Address(key))
|
chunk, err := s.chunkStore.Get(ctx, storage.Address(key))
|
||||||
if err == storage.ErrFetching {
|
if err != nil {
|
||||||
<-chunk.ReqC
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return chunk.SData, nil
|
return chunk.Data(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
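The GetData change above is the visible face of the chunk refactor: the old *storage.Chunk with public Addr and SData fields gives way to a Chunk value built by storage.NewChunk and read through accessors. A minimal standalone sketch, using only the calls this diff itself relies on:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// Chunks are now built through the constructor; before the refactor the
	// Addr and SData fields of *storage.Chunk were assigned directly.
	addr := storage.Address(make([]byte, 32))
	ch := storage.NewChunk(addr, []byte("chunk payload"))

	// The struct fields are gone; the chunk exposes accessors instead.
	fmt.Printf("addr: %x\n", ch.Address()) // was chunk.Addr
	fmt.Printf("data: %s\n", ch.Data())    // was chunk.SData
}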
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
|
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
|
||||||
@ -153,57 +145,39 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
streamer := s.Server.(*SwarmChunkServer)
|
streamer := s.Server.(*SwarmChunkServer)
|
||||||
chunk, created := d.db.GetOrCreateRequest(ctx, req.Addr)
|
|
||||||
if chunk.ReqC != nil {
|
var cancel func()
|
||||||
if created {
|
// TODO: do something with this hardcoded timeout, maybe use TTL in the future
|
||||||
if err := d.RequestFromPeers(ctx, chunk.Addr[:], true, sp.ID()); err != nil {
|
ctx, cancel = context.WithTimeout(context.WithValue(ctx, "peer", sp.ID().String()), network.RequestTimeout)
|
||||||
log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err)
|
|
||||||
chunk.SetErrored(storage.ErrChunkForward)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
go func() {
|
go func() {
|
||||||
var osp opentracing.Span
|
|
||||||
ctx, osp = spancontext.StartSpan(
|
|
||||||
ctx,
|
|
||||||
"waiting.delivery")
|
|
||||||
defer osp.Finish()
|
|
||||||
|
|
||||||
t := time.NewTimer(10 * time.Minute)
|
|
||||||
defer t.Stop()
|
|
||||||
|
|
||||||
log.Debug("waiting delivery", "peer", sp.ID(), "hash", req.Addr, "node", common.Bytes2Hex(d.kad.BaseAddr()), "created", created)
|
|
||||||
start := time.Now()
|
|
||||||
select {
|
select {
|
||||||
case <-chunk.ReqC:
|
case <-ctx.Done():
|
||||||
log.Debug("retrieve request ReqC closed", "peer", sp.ID(), "hash", req.Addr, "time", time.Since(start))
|
case <-streamer.quit:
|
||||||
case <-t.C:
|
}
|
||||||
log.Debug("retrieve request timeout", "peer", sp.ID(), "hash", req.Addr)
|
cancel()
|
||||||
chunk.SetErrored(storage.ErrChunkTimeout)
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
chunk, err := d.chunkStore.Get(ctx, req.Addr)
|
||||||
|
if err != nil {
|
||||||
|
log.Warn("ChunkStore.Get can not retrieve chunk", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
chunk.SetErrored(nil)
|
|
||||||
|
|
||||||
if req.SkipCheck {
|
if req.SkipCheck {
|
||||||
err := sp.Deliver(ctx, chunk, s.priority)
|
err = sp.Deliver(ctx, chunk, s.priority)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err)
|
log.Warn("ERROR in handleRetrieveRequestMsg", "err", err)
|
||||||
sp.Drop(err)
|
|
||||||
}
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
streamer.deliveryC <- chunk.Addr[:]
|
select {
|
||||||
|
case streamer.deliveryC <- chunk.Address()[:]:
|
||||||
|
case <-streamer.quit:
|
||||||
|
}
|
||||||
|
|
||||||
}()
|
}()
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// TODO: call the retrieve function of the outgoing syncer
|
|
||||||
if req.SkipCheck {
|
|
||||||
log.Trace("deliver", "peer", sp.ID(), "hash", chunk.Addr)
|
|
||||||
if length := len(chunk.SData); length < 9 {
|
|
||||||
log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr)
|
|
||||||
}
|
|
||||||
return sp.Deliver(ctx, chunk, s.priority)
|
|
||||||
}
|
|
||||||
streamer.deliveryC <- chunk.Addr[:]
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
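Stripped of the Swarm specifics, the reworked retrieve-request handler is a standard Go shape: bound the background fetch with context.WithTimeout and make every channel send race against a quit channel so the goroutine cannot leak when the server shuts down. A self-contained sketch of that shape (the names here are illustrative, not part of the package):

package main

import (
	"context"
	"fmt"
	"time"
)

// get stands in for chunkStore.Get in the handler above.
func get(ctx context.Context, key string) (string, error) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend lookup latency
		return "data-for-" + key, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	deliveryC := make(chan string, 1)
	quit := make(chan struct{})

	// Bound the whole request, as the handler does with network.RequestTimeout.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	go func() {
		data, err := get(ctx, "abc")
		if err != nil {
			fmt.Println("fetch failed:", err)
			return
		}
		// Never block forever on the outgoing channel: race it against quit.
		select {
		case deliveryC <- data:
		case <-quit:
		}
	}()

	fmt.Println("delivered:", <-deliveryC)
}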
@ -213,6 +187,7 @@ type ChunkDeliveryMsg struct {
|
|||||||
peer *Peer // set in handleChunkDeliveryMsg
|
peer *Peer // set in handleChunkDeliveryMsg
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: Fix context SNAFU
|
||||||
func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
|
func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
|
||||||
var osp opentracing.Span
|
var osp opentracing.Span
|
||||||
ctx, osp = spancontext.StartSpan(
|
ctx, osp = spancontext.StartSpan(
|
||||||
@ -220,81 +195,63 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
|
|||||||
"chunk.delivery")
|
"chunk.delivery")
|
||||||
defer osp.Finish()
|
defer osp.Finish()
|
||||||
|
|
||||||
req.peer = sp
|
|
||||||
d.receiveC <- req
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Delivery) processReceivedChunks() {
|
|
||||||
R:
|
|
||||||
for req := range d.receiveC {
|
|
||||||
processReceivedChunksCount.Inc(1)
|
processReceivedChunksCount.Inc(1)
|
||||||
|
|
||||||
if len(req.SData) > cp.DefaultSize+8 {
|
go func() {
|
||||||
log.Warn("received chunk is bigger than expected", "len", len(req.SData))
|
req.peer = sp
|
||||||
continue R
|
err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
|
||||||
}
|
if err != nil {
|
||||||
|
|
||||||
// this chunk should already be present locally
|
|
||||||
chunk, err := d.db.Get(context.TODO(), req.Addr)
|
|
||||||
if err == nil {
|
|
||||||
continue R
|
|
||||||
}
|
|
||||||
if err != storage.ErrFetching {
|
|
||||||
log.Error("processReceivedChunks db error", "addr", req.Addr, "err", err, "chunk", chunk)
|
|
||||||
continue R
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-chunk.ReqC:
|
|
||||||
log.Error("someone else delivered?", "hash", chunk.Addr.Hex())
|
|
||||||
continue R
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
chunk.SData = req.SData
|
|
||||||
d.db.Put(context.TODO(), chunk)
|
|
||||||
|
|
||||||
go func(req *ChunkDeliveryMsg) {
|
|
||||||
err := chunk.WaitToStore()
|
|
||||||
if err == storage.ErrChunkInvalid {
|
if err == storage.ErrChunkInvalid {
|
||||||
|
// we removed this log because it spams the logs
|
||||||
|
// TODO: Enable this log line
|
||||||
|
// log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", req.Addr, )
|
||||||
req.peer.Drop(err)
|
req.peer.Drop(err)
|
||||||
}
|
}
|
||||||
}(req)
|
|
||||||
}
|
}
|
||||||
|
}()
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestFromPeers sends a chunk retrieve request to
|
// RequestFromPeers sends a chunk retrieve request to
|
||||||
func (d *Delivery) RequestFromPeers(ctx context.Context, hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error {
|
func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (*discover.NodeID, chan struct{}, error) {
|
||||||
var success bool
|
|
||||||
var err error
|
|
||||||
requestFromPeersCount.Inc(1)
|
requestFromPeersCount.Inc(1)
|
||||||
|
var sp *Peer
|
||||||
|
spID := req.Source
|
||||||
|
|
||||||
d.kad.EachConn(hash, 255, func(p *network.Peer, po int, nn bool) bool {
|
if spID != nil {
|
||||||
spId := p.ID()
|
sp = d.getPeer(*spID)
|
||||||
for _, p := range peersToSkip {
|
|
||||||
if p == spId {
|
|
||||||
log.Trace("Delivery.RequestFromPeers: skip peer", "peer", spId)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sp := d.getPeer(spId)
|
|
||||||
if sp == nil {
|
if sp == nil {
|
||||||
log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId)
|
return nil, nil, fmt.Errorf("source peer %v not found", spID.String())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int, nn bool) bool {
|
||||||
|
id := p.ID()
|
||||||
|
// TODO: skip light nodes that do not accept retrieve requests
|
||||||
|
if req.SkipPeer(id.String()) {
|
||||||
|
log.Trace("Delivery.RequestFromPeers: skip peer", "peer id", id)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
err = sp.SendPriority(ctx, &RetrieveRequestMsg{
|
sp = d.getPeer(id)
|
||||||
Addr: hash,
|
if sp == nil {
|
||||||
SkipCheck: skipCheck,
|
log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
|
||||||
}, Top)
|
|
||||||
if err != nil {
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
requestFromPeersEachCount.Inc(1)
|
spID = &id
|
||||||
success = true
|
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
if success {
|
if sp == nil {
|
||||||
return nil
|
return nil, nil, errors.New("no peer found")
|
||||||
}
|
}
|
||||||
return errors.New("no peer found")
|
}
|
||||||
|
|
||||||
|
err := sp.SendPriority(ctx, &RetrieveRequestMsg{
|
||||||
|
Addr: req.Addr,
|
||||||
|
SkipCheck: req.SkipCheck,
|
||||||
|
}, Top)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
requestFromPeersEachCount.Inc(1)
|
||||||
|
|
||||||
|
return spID, sp.quit, nil
|
||||||
}
|
}
|
||||||
|
@ -47,7 +47,13 @@ func TestStreamerRetrieveRequest(t *testing.T) {
|
|||||||
|
|
||||||
peerID := tester.IDs[0]
|
peerID := tester.IDs[0]
|
||||||
|
|
||||||
streamer.delivery.RequestFromPeers(context.TODO(), hash0[:], true)
|
ctx := context.Background()
|
||||||
|
req := network.NewRequest(
|
||||||
|
storage.Address(hash0[:]),
|
||||||
|
true,
|
||||||
|
&sync.Map{},
|
||||||
|
)
|
||||||
|
streamer.delivery.RequestFromPeers(ctx, req)
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
err = tester.TestExchanges(p2ptest.Exchange{
|
||||||
Label: "RetrieveRequestMsg",
|
Label: "RetrieveRequestMsg",
|
||||||
@ -93,7 +99,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
|
|||||||
{
|
{
|
||||||
Code: 5,
|
Code: 5,
|
||||||
Msg: &RetrieveRequestMsg{
|
Msg: &RetrieveRequestMsg{
|
||||||
Addr: chunk.Addr[:],
|
Addr: chunk.Address()[:],
|
||||||
},
|
},
|
||||||
Peer: peerID,
|
Peer: peerID,
|
||||||
},
|
},
|
||||||
@ -139,10 +145,11 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
hash := storage.Address(hash0[:])
|
hash := storage.Address(hash0[:])
|
||||||
chunk := storage.NewChunk(hash, nil)
|
chunk := storage.NewChunk(hash, hash)
|
||||||
chunk.SData = hash
|
err = localStore.Put(context.TODO(), chunk)
|
||||||
localStore.Put(context.TODO(), chunk)
|
if err != nil {
|
||||||
chunk.WaitToStore()
|
t.Fatalf("Expected no err got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
err = tester.TestExchanges(p2ptest.Exchange{
|
||||||
Label: "RetrieveRequestMsg",
|
Label: "RetrieveRequestMsg",
|
||||||
@ -178,10 +185,11 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
hash = storage.Address(hash1[:])
|
hash = storage.Address(hash1[:])
|
||||||
chunk = storage.NewChunk(hash, nil)
|
chunk = storage.NewChunk(hash, hash1[:])
|
||||||
chunk.SData = hash1[:]
|
err = localStore.Put(context.TODO(), chunk)
|
||||||
localStore.Put(context.TODO(), chunk)
|
if err != nil {
|
||||||
chunk.WaitToStore()
|
t.Fatalf("Expected no err got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
err = tester.TestExchanges(p2ptest.Exchange{
|
||||||
Label: "RetrieveRequestMsg",
|
Label: "RetrieveRequestMsg",
|
||||||
@ -235,16 +243,6 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
|||||||
|
|
||||||
chunkKey := hash0[:]
|
chunkKey := hash0[:]
|
||||||
chunkData := hash1[:]
|
chunkData := hash1[:]
|
||||||
chunk, created := localStore.GetOrCreateRequest(context.TODO(), chunkKey)
|
|
||||||
|
|
||||||
if !created {
|
|
||||||
t.Fatal("chunk already exists")
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-chunk.ReqC:
|
|
||||||
t.Fatal("chunk is already received")
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
err = tester.TestExchanges(p2ptest.Exchange{
|
err = tester.TestExchanges(p2ptest.Exchange{
|
||||||
Label: "Subscribe message",
|
Label: "Subscribe message",
|
||||||
@ -261,7 +259,7 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
p2ptest.Exchange{
|
p2ptest.Exchange{
|
||||||
Label: "ChunkDeliveryRequest message",
|
Label: "ChunkDelivery message",
|
||||||
Triggers: []p2ptest.Trigger{
|
Triggers: []p2ptest.Trigger{
|
||||||
{
|
{
|
||||||
Code: 6,
|
Code: 6,
|
||||||
@ -277,21 +275,26 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error, got %v", err)
|
t.Fatalf("Expected no error, got %v", err)
|
||||||
}
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
timeout := time.NewTimer(1 * time.Second)
|
// wait for the chunk to get stored
|
||||||
|
storedChunk, err := localStore.Get(ctx, chunkKey)
|
||||||
|
for err != nil {
|
||||||
select {
|
select {
|
||||||
case <-timeout.C:
|
case <-ctx.Done():
|
||||||
t.Fatal("timeout receiving chunk")
|
t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
|
||||||
case <-chunk.ReqC:
|
default:
|
||||||
|
}
|
||||||
|
storedChunk, err = localStore.Get(ctx, chunkKey)
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
storedChunk, err := localStore.Get(context.TODO(), chunkKey)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error, got %v", err)
|
t.Fatalf("Expected no error, got %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(storedChunk.SData, chunkData) {
|
if !bytes.Equal(storedChunk.Data(), chunkData) {
|
||||||
t.Fatal("Retrieved chunk has different data than original")
|
t.Fatal("Retrieved chunk has different data than original")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -324,19 +327,20 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
|
|||||||
store.Close()
|
store.Close()
|
||||||
}
|
}
|
||||||
localStore := store.(*storage.LocalStore)
|
localStore := store.(*storage.LocalStore)
|
||||||
db := storage.NewDBAPI(localStore)
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
if err != nil {
|
||||||
delivery := NewDelivery(kad, db)
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
|
|
||||||
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
})
|
})
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
|
|
||||||
retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
|
|
||||||
return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
|
|
||||||
}
|
|
||||||
netStore := storage.NewNetStore(localStore, retrieveFunc)
|
|
||||||
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
||||||
bucket.Store(bucketKeyFileStore, fileStore)
|
bucket.Store(bucketKeyFileStore, fileStore)
|
||||||
|
|
||||||
@ -498,7 +502,6 @@ func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
|
|||||||
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
|
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
|
||||||
sim := simulation.New(map[string]simulation.ServiceFunc{
|
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||||
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
|
"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
|
||||||
|
|
||||||
id := ctx.Config.ID
|
id := ctx.Config.ID
|
||||||
addr := network.NewAddrFromNodeID(id)
|
addr := network.NewAddrFromNodeID(id)
|
||||||
store, datadir, err := createTestLocalStorageForID(id, addr)
|
store, datadir, err := createTestLocalStorageForID(id, addr)
|
||||||
@ -511,20 +514,20 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip
|
|||||||
store.Close()
|
store.Close()
|
||||||
}
|
}
|
||||||
localStore := store.(*storage.LocalStore)
|
localStore := store.(*storage.LocalStore)
|
||||||
db := storage.NewDBAPI(localStore)
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
delivery := NewDelivery(kad, db)
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
|
|
||||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
DoSync: true,
|
DoSync: true,
|
||||||
SyncUpdateDelay: 0,
|
SyncUpdateDelay: 0,
|
||||||
})
|
})
|
||||||
|
|
||||||
retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error {
|
|
||||||
return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck)
|
|
||||||
}
|
|
||||||
netStore := storage.NewNetStore(localStore, retrieveFunc)
|
|
||||||
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
||||||
bucket.Store(bucketKeyFileStore, fileStore)
|
bucket.Store(bucketKeyFileStore, fileStore)
|
||||||
|
|
||||||
|
@ -38,13 +38,18 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestIntervals(t *testing.T) {
|
func TestIntervalsLive(t *testing.T) {
|
||||||
testIntervals(t, true, nil, false)
|
testIntervals(t, true, nil, false)
|
||||||
testIntervals(t, false, NewRange(9, 26), false)
|
|
||||||
testIntervals(t, true, NewRange(9, 26), false)
|
|
||||||
|
|
||||||
testIntervals(t, true, nil, true)
|
testIntervals(t, true, nil, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIntervalsHistory(t *testing.T) {
|
||||||
|
testIntervals(t, false, NewRange(9, 26), false)
|
||||||
testIntervals(t, false, NewRange(9, 26), true)
|
testIntervals(t, false, NewRange(9, 26), true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIntervalsLiveAndHistory(t *testing.T) {
|
||||||
|
testIntervals(t, true, NewRange(9, 26), false)
|
||||||
testIntervals(t, true, NewRange(9, 26), true)
|
testIntervals(t, true, NewRange(9, 26), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -70,17 +75,21 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
os.RemoveAll(datadir)
|
os.RemoveAll(datadir)
|
||||||
}
|
}
|
||||||
localStore := store.(*storage.LocalStore)
|
localStore := store.(*storage.LocalStore)
|
||||||
db := storage.NewDBAPI(localStore)
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
delivery := NewDelivery(kad, db)
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
|
|
||||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
SkipCheck: skipCheck,
|
SkipCheck: skipCheck,
|
||||||
})
|
})
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
|
|
||||||
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
|
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
|
||||||
return newTestExternalClient(db), nil
|
return newTestExternalClient(netStore), nil
|
||||||
})
|
})
|
||||||
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
|
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
|
||||||
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
|
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
|
||||||
@ -101,9 +110,13 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
|
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
|
||||||
nodeIDs := sim.UpNodeIDs()
|
nodeIDs := sim.UpNodeIDs()
|
||||||
storer := nodeIDs[0]
|
storer := nodeIDs[0]
|
||||||
@ -136,11 +149,6 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
liveErrC := make(chan error)
|
liveErrC := make(chan error)
|
||||||
historyErrC := make(chan error)
|
historyErrC := make(chan error)
|
||||||
|
|
||||||
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
|
|
||||||
log.Error("WaitKademlia error: %v", "err", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debug("Watching for disconnections")
|
log.Debug("Watching for disconnections")
|
||||||
disconnections := sim.PeerEvents(
|
disconnections := sim.PeerEvents(
|
||||||
context.Background(),
|
context.Background(),
|
||||||
@ -148,6 +156,11 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
|
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
for d := range disconnections {
|
for d := range disconnections {
|
||||||
if d.Error != nil {
|
if d.Error != nil {
|
||||||
@ -172,7 +185,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
var liveHashesChan chan []byte
|
var liveHashesChan chan []byte
|
||||||
liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true))
|
liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Subscription error: %v", "err", err)
|
log.Error("get hashes", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
i := externalStreamSessionAt
|
i := externalStreamSessionAt
|
||||||
@ -216,6 +229,7 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
var historyHashesChan chan []byte
|
var historyHashesChan chan []byte
|
||||||
historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false))
|
historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Error("get hashes", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -252,10 +266,6 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := <-liveErrC; err != nil {
|
if err := <-liveErrC; err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -302,34 +312,32 @@ func enableNotifications(r *Registry, peerID discover.NodeID, s Stream) error {
|
|||||||
|
|
||||||
type testExternalClient struct {
|
type testExternalClient struct {
|
||||||
hashes chan []byte
|
hashes chan []byte
|
||||||
db *storage.DBAPI
|
store storage.SyncChunkStore
|
||||||
enableNotificationsC chan struct{}
|
enableNotificationsC chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestExternalClient(db *storage.DBAPI) *testExternalClient {
|
func newTestExternalClient(store storage.SyncChunkStore) *testExternalClient {
|
||||||
return &testExternalClient{
|
return &testExternalClient{
|
||||||
hashes: make(chan []byte),
|
hashes: make(chan []byte),
|
||||||
db: db,
|
store: store,
|
||||||
enableNotificationsC: make(chan struct{}),
|
enableNotificationsC: make(chan struct{}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func() {
|
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
|
||||||
chunk, _ := c.db.GetOrCreateRequest(ctx, hash)
|
wait := c.store.FetchFunc(ctx, storage.Address(hash))
|
||||||
if chunk.ReqC == nil {
|
if wait == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
c.hashes <- hash
|
select {
|
||||||
//NOTE: This was failing on go1.9.x with a deadlock.
|
case c.hashes <- hash:
|
||||||
//Sometimes this function would just block
|
case <-ctx.Done():
|
||||||
//It is commented now, but it may be well worth after the chunk refactor
|
log.Warn("testExternalClient NeedData context", "err", ctx.Err())
|
||||||
//to re-enable this and see if the problem has been addressed
|
return func(_ context.Context) error {
|
||||||
/*
|
return ctx.Err()
|
||||||
return func() {
|
|
||||||
return chunk.WaitToStore()
|
|
||||||
}
|
}
|
||||||
*/
|
}
|
||||||
return nil
|
return wait
|
||||||
}
|
}
|
||||||
|
|
||||||
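NeedData now leans on SyncChunkStore.FetchFunc: a nil return means the chunk is already stored locally, otherwise the returned function blocks until the chunk arrives or the context expires. A small helper written against that contract, exactly as the hunk above uses it (the helper itself is illustrative, not part of the commit):

package stream

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

// waitForChunk returns nil immediately if the chunk is already local,
// otherwise it waits for delivery or for ctx to expire.
func waitForChunk(ctx context.Context, store storage.SyncChunkStore, addr storage.Address) error {
	wait := store.FetchFunc(ctx, addr)
	if wait == nil {
		return nil // already local
	}
	return wait(ctx)
}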
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
|
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
|
||||||
|
@ -18,9 +18,7 @@ package stream
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
@ -31,6 +29,8 @@ import (
|
|||||||
opentracing "github.com/opentracing/opentracing-go"
|
opentracing "github.com/opentracing/opentracing-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var syncBatchTimeout = 30 * time.Second
|
||||||
|
|
||||||
// Stream defines a unique stream identifier.
|
// Stream defines a unique stream identifier.
|
||||||
type Stream struct {
|
type Stream struct {
|
||||||
// Name is used for Client and Server functions identification.
|
// Name is used for Client and Server functions identification.
|
||||||
@ -117,8 +117,7 @@ func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err e
|
|||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := p.SendOfferedHashes(os, from, to); err != nil {
|
if err := p.SendOfferedHashes(os, from, to); err != nil {
|
||||||
log.Warn("SendOfferedHashes dropping peer", "err", err)
|
log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
|
||||||
p.Drop(err)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@ -135,8 +134,7 @@ func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err e
|
|||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
|
if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
|
||||||
log.Warn("SendOfferedHashes dropping peer", "err", err)
|
log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
|
||||||
p.Drop(err)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@ -202,38 +200,52 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err)
|
return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err)
|
||||||
}
|
}
|
||||||
wg := sync.WaitGroup{}
|
|
||||||
|
ctr := 0
|
||||||
|
errC := make(chan error)
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
|
||||||
|
|
||||||
|
ctx = context.WithValue(ctx, "source", p.ID().String())
|
||||||
for i := 0; i < len(hashes); i += HashSize {
|
for i := 0; i < len(hashes); i += HashSize {
|
||||||
hash := hashes[i : i+HashSize]
|
hash := hashes[i : i+HashSize]
|
||||||
|
|
||||||
if wait := c.NeedData(ctx, hash); wait != nil {
|
if wait := c.NeedData(ctx, hash); wait != nil {
|
||||||
|
ctr++
|
||||||
want.Set(i/HashSize, true)
|
want.Set(i/HashSize, true)
|
||||||
wg.Add(1)
|
|
||||||
// create request and wait until the chunk data arrives and is stored
|
// create request and wait until the chunk data arrives and is stored
|
||||||
go func(w func()) {
|
go func(w func(context.Context) error) {
|
||||||
w()
|
select {
|
||||||
wg.Done()
|
case errC <- w(ctx):
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
}(wait)
|
}(wait)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// done := make(chan bool)
|
|
||||||
// go func() {
|
|
||||||
// wg.Wait()
|
|
||||||
// close(done)
|
|
||||||
// }()
|
|
||||||
// go func() {
|
|
||||||
// select {
|
|
||||||
// case <-done:
|
|
||||||
// s.next <- s.batchDone(p, req, hashes)
|
|
||||||
// case <-time.After(1 * time.Second):
|
|
||||||
// p.Drop(errors.New("timeout waiting for batch to be delivered"))
|
|
||||||
// }
|
|
||||||
// }()
|
|
||||||
go func() {
|
go func() {
|
||||||
wg.Wait()
|
defer cancel()
|
||||||
|
for i := 0; i < ctr; i++ {
|
||||||
|
select {
|
||||||
|
case err := <-errC:
|
||||||
|
if err != nil {
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
|
||||||
|
p.Drop(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
|
||||||
|
return
|
||||||
|
case <-c.quit:
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() quit")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
select {
|
select {
|
||||||
case c.next <- c.batchDone(p, req, hashes):
|
case c.next <- c.batchDone(p, req, hashes):
|
||||||
case <-c.quit:
|
case <-c.quit:
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() quit")
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
// only send wantedKeysMsg if all missing chunks of the previous batch arrived
|
// only send wantedKeysMsg if all missing chunks of the previous batch arrived
|
||||||
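The reworked wait logic above is a counted fan-in: one goroutine per wanted hash reports into a shared error channel, and a collector waits for exactly that many results under a batch-wide timeout (syncBatchTimeout) before signalling c.next. A self-contained sketch of the same shape with illustrative names:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	wanted := []string{"h1", "h2", "h3"}

	// syncBatchTimeout plays this role in the streamer.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	errC := make(chan error)
	next := make(chan error, 1) // stands in for c.next <- c.batchDone(...)

	// One goroutine per wanted hash, mirroring the wait(ctx) calls above.
	for _, h := range wanted {
		go func(h string) {
			time.Sleep(10 * time.Millisecond) // pretend the chunk arrives and is stored
			select {
			case errC <- nil:
				fmt.Println("stored", h)
			case <-ctx.Done():
			}
		}(h)
	}

	// Collector: wait for exactly len(wanted) results under the batch timeout,
	// then report the whole batch as done.
	go func() {
		defer cancel()
		for i := 0; i < len(wanted); i++ {
			select {
			case err := <-errC:
				if err != nil {
					next <- err // one failed chunk fails the batch
					return
				}
			case <-ctx.Done():
				next <- ctx.Err()
				return
			}
		}
		next <- nil
	}()

	fmt.Println("batch result:", <-next)
}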
@ -242,7 +254,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
c.sessionAt = req.From
|
c.sessionAt = req.From
|
||||||
}
|
}
|
||||||
from, to := c.nextBatch(req.To + 1)
|
from, to := c.nextBatch(req.To + 1)
|
||||||
log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
|
log.Trace("set next batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "addr", p.streamer.addr.ID())
|
||||||
if from == to {
|
if from == to {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -254,25 +266,25 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
|
|||||||
To: to,
|
To: to,
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
|
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
||||||
select {
|
select {
|
||||||
case <-time.After(120 * time.Second):
|
|
||||||
log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
|
|
||||||
p.Drop(errors.New("handle offered hashes timeout"))
|
|
||||||
return
|
|
||||||
case err := <-c.next:
|
case err := <-c.next:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("c.next dropping peer", "err", err)
|
log.Warn("c.next error dropping peer", "err", err)
|
||||||
p.Drop(err)
|
p.Drop(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
case <-c.quit:
|
case <-c.quit:
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() quit")
|
||||||
|
return
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
|
||||||
err := p.SendPriority(ctx, msg, c.priority)
|
err := p.SendPriority(ctx, msg, c.priority)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("SendPriority err, so dropping peer", "err", err)
|
log.Warn("SendPriority error", "err", err)
|
||||||
p.Drop(err)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
return nil
|
return nil
|
||||||
@ -306,8 +318,7 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg)
|
|||||||
// launch in goroutine since GetBatch blocks until new hashes arrive
|
// launch in goroutine since GetBatch blocks until new hashes arrive
|
||||||
go func() {
|
go func() {
|
||||||
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
|
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
|
||||||
log.Warn("SendOfferedHashes dropping peer", "err", err)
|
log.Warn("SendOfferedHashes error", "err", err)
|
||||||
p.Drop(err)
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
// go p.SendOfferedHashes(s, req.From, req.To)
|
// go p.SendOfferedHashes(s, req.From, req.To)
|
||||||
@ -327,11 +338,7 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
|
return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
|
||||||
}
|
}
|
||||||
chunk := storage.NewChunk(hash, nil)
|
chunk := storage.NewChunk(hash, data)
|
||||||
chunk.SData = data
|
|
||||||
if length := len(chunk.SData); length < 9 {
|
|
||||||
log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
|
|
||||||
}
|
|
||||||
if err := p.Deliver(ctx, chunk, s.priority); err != nil {
|
if err := p.Deliver(ctx, chunk, s.priority); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -33,8 +33,6 @@ import (
|
|||||||
opentracing "github.com/opentracing/opentracing-go"
|
opentracing "github.com/opentracing/opentracing-go"
|
||||||
)
|
)
|
||||||
|
|
||||||
var sendTimeout = 30 * time.Second
|
|
||||||
|
|
||||||
type notFoundError struct {
|
type notFoundError struct {
|
||||||
t string
|
t string
|
||||||
s Stream
|
s Stream
|
||||||
@ -83,8 +81,40 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
|
|||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
go p.pq.Run(ctx, func(i interface{}) {
|
go p.pq.Run(ctx, func(i interface{}) {
|
||||||
wmsg := i.(WrappedPriorityMsg)
|
wmsg := i.(WrappedPriorityMsg)
|
||||||
p.Send(wmsg.Context, wmsg.Msg)
|
err := p.Send(wmsg.Context, wmsg.Msg)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
|
||||||
|
p.Drop(err)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// basic monitoring for pq contention
|
||||||
|
go func(pq *pq.PriorityQueue) {
|
||||||
|
ticker := time.NewTicker(5 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
var len_maxi int
|
||||||
|
var cap_maxi int
|
||||||
|
for k := range pq.Queues {
|
||||||
|
if len_maxi < len(pq.Queues[k]) {
|
||||||
|
len_maxi = len(pq.Queues[k])
|
||||||
|
}
|
||||||
|
|
||||||
|
if cap_maxi < cap(pq.Queues[k]) {
|
||||||
|
cap_maxi = cap(pq.Queues[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics.GetOrRegisterGauge(fmt.Sprintf("pq_len_%s", p.ID().TerminalString()), nil).Update(int64(len_maxi))
|
||||||
|
metrics.GetOrRegisterGauge(fmt.Sprintf("pq_cap_%s", p.ID().TerminalString()), nil).Update(int64(cap_maxi))
|
||||||
|
case <-p.quit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(p.pq)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
<-p.quit
|
<-p.quit
|
||||||
cancel()
|
cancel()
|
||||||
@ -93,7 +123,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Deliver sends a storeRequestMsg protocol message to the peer
|
// Deliver sends a storeRequestMsg protocol message to the peer
|
||||||
func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8) error {
|
func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8) error {
|
||||||
var sp opentracing.Span
|
var sp opentracing.Span
|
||||||
ctx, sp = spancontext.StartSpan(
|
ctx, sp = spancontext.StartSpan(
|
||||||
ctx,
|
ctx,
|
||||||
@ -101,8 +131,8 @@ func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8
|
|||||||
defer sp.Finish()
|
defer sp.Finish()
|
||||||
|
|
||||||
msg := &ChunkDeliveryMsg{
|
msg := &ChunkDeliveryMsg{
|
||||||
Addr: chunk.Addr,
|
Addr: chunk.Address(),
|
||||||
SData: chunk.SData,
|
SData: chunk.Data(),
|
||||||
}
|
}
|
||||||
return p.SendPriority(ctx, msg, priority)
|
return p.SendPriority(ctx, msg, priority)
|
||||||
}
|
}
|
||||||
@ -111,13 +141,16 @@ func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8
|
|||||||
func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error {
|
func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error {
|
||||||
defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
|
defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
|
||||||
metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
|
metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
|
||||||
cctx, cancel := context.WithTimeout(context.Background(), sendTimeout)
|
|
||||||
defer cancel()
|
|
||||||
wmsg := WrappedPriorityMsg{
|
wmsg := WrappedPriorityMsg{
|
||||||
Context: ctx,
|
Context: ctx,
|
||||||
Msg: msg,
|
Msg: msg,
|
||||||
}
|
}
|
||||||
return p.pq.Push(cctx, wmsg, int(priority))
|
err := p.pq.Push(wmsg, int(priority))
|
||||||
|
if err == pq.ErrContention {
|
||||||
|
log.Warn("dropping peer on priority queue contention", "peer", p.ID())
|
||||||
|
p.Drop(err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendOfferedHashes sends OfferedHashesMsg protocol msg
|
// SendOfferedHashes sends OfferedHashesMsg protocol msg
|
||||||
|
@ -124,23 +124,30 @@ func runFileRetrievalTest(nodeCount int) error {
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
bucket.Store(bucketKeyStore, store)
|
bucket.Store(bucketKeyStore, store)
|
||||||
cleanup = func() {
|
|
||||||
os.RemoveAll(datadir)
|
|
||||||
store.Close()
|
|
||||||
}
|
|
||||||
localStore := store.(*storage.LocalStore)
|
|
||||||
db := storage.NewDBAPI(localStore)
|
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
|
||||||
delivery := NewDelivery(kad, db)
|
|
||||||
|
|
||||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
localStore := store.(*storage.LocalStore)
|
||||||
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
|
|
||||||
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
DoSync: true,
|
DoSync: true,
|
||||||
SyncUpdateDelay: 3 * time.Second,
|
SyncUpdateDelay: 3 * time.Second,
|
||||||
})
|
})
|
||||||
|
|
||||||
fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
|
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
||||||
bucket.Store(bucketKeyFileStore, fileStore)
|
bucket.Store(bucketKeyFileStore, fileStore)
|
||||||
|
|
||||||
|
cleanup = func() {
|
||||||
|
os.RemoveAll(datadir)
|
||||||
|
netStore.Close()
|
||||||
|
r.Close()
|
||||||
|
}
|
||||||
|
|
||||||
return r, cleanup, nil
|
return r, cleanup, nil
|
||||||
|
|
||||||
},
|
},
|
||||||
@ -267,24 +274,31 @@ func runRetrievalTest(chunkCount int, nodeCount int) error {
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
bucket.Store(bucketKeyStore, store)
|
bucket.Store(bucketKeyStore, store)
|
||||||
cleanup = func() {
|
|
||||||
os.RemoveAll(datadir)
|
|
||||||
store.Close()
|
|
||||||
}
|
|
||||||
localStore := store.(*storage.LocalStore)
|
|
||||||
db := storage.NewDBAPI(localStore)
|
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
|
||||||
delivery := NewDelivery(kad, db)
|
|
||||||
|
|
||||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
localStore := store.(*storage.LocalStore)
|
||||||
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||||
|
|
||||||
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
DoSync: true,
|
DoSync: true,
|
||||||
SyncUpdateDelay: 0,
|
SyncUpdateDelay: 0,
|
||||||
})
|
})
|
||||||
|
|
||||||
fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
|
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
||||||
bucketKeyFileStore = simulation.BucketKey("filestore")
|
bucketKeyFileStore = simulation.BucketKey("filestore")
|
||||||
bucket.Store(bucketKeyFileStore, fileStore)
|
bucket.Store(bucketKeyFileStore, fileStore)
|
||||||
|
|
||||||
|
cleanup = func() {
|
||||||
|
os.RemoveAll(datadir)
|
||||||
|
netStore.Close()
|
||||||
|
r.Close()
|
||||||
|
}
|
||||||
|
|
||||||
return r, cleanup, nil
|
return r, cleanup, nil
|
||||||
|
|
||||||
},
|
},
|
||||||
|
@ -21,6 +21,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -39,17 +40,22 @@ import (
|
|||||||
mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
|
mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
|
||||||
)
|
)
|
||||||
|
|
||||||
const testMinProxBinSize = 2
|
|
||||||
const MaxTimeout = 600
|
const MaxTimeout = 600
|
||||||
|
|
||||||
type synctestConfig struct {
|
type synctestConfig struct {
|
||||||
addrs [][]byte
|
addrs [][]byte
|
||||||
hashes []storage.Address
|
hashes []storage.Address
|
||||||
idToChunksMap map[discover.NodeID][]int
|
idToChunksMap map[discover.NodeID][]int
|
||||||
chunksToNodesMap map[string][]int
|
//chunksToNodesMap map[string][]int
|
||||||
addrToIDMap map[string]discover.NodeID
|
addrToIDMap map[string]discover.NodeID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Tests in this file should not request chunks from peers.
|
||||||
|
// This function will panic, indicating that there is a problem, if a request has been made.
|
||||||
|
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*discover.NodeID, chan struct{}, error) {
|
||||||
|
panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
|
||||||
|
}
|
||||||
|
|
||||||
//This test is a syncing test for nodes.
|
//This test is a syncing test for nodes.
|
||||||
//One node is randomly selected to be the pivot node.
|
//One node is randomly selected to be the pivot node.
|
||||||
//A configurable number of chunks and nodes can be
|
//A configurable number of chunks and nodes can be
|
||||||
@ -58,6 +64,9 @@ type synctestConfig struct {
|
|||||||
//they are expected to store based on the syncing protocol.
|
//they are expected to store based on the syncing protocol.
|
||||||
//Number of chunks and nodes can be provided via commandline too.
|
//Number of chunks and nodes can be provided via commandline too.
|
||||||
func TestSyncingViaGlobalSync(t *testing.T) {
|
func TestSyncingViaGlobalSync(t *testing.T) {
|
||||||
|
if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
|
||||||
|
t.Skip("Flaky on mac on travis")
|
||||||
|
}
|
||||||
//if nodes/chunks have been provided via commandline,
|
//if nodes/chunks have been provided via commandline,
|
||||||
//run the tests with these values
|
//run the tests with these values
|
||||||
if *nodes != 0 && *chunks != 0 {
|
if *nodes != 0 && *chunks != 0 {
|
||||||
@ -86,11 +95,14 @@ func TestSyncingViaGlobalSync(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestSyncingViaDirectSubscribe(t *testing.T) {
|
func TestSyncingViaDirectSubscribe(t *testing.T) {
|
||||||
|
if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
|
||||||
|
t.Skip("Flaky on mac on travis")
|
||||||
|
}
|
||||||
//if nodes/chunks have been provided via commandline,
|
//if nodes/chunks have been provided via commandline,
|
||||||
//run the tests with these values
|
//run the tests with these values
|
||||||
if *nodes != 0 && *chunks != 0 {
|
if *nodes != 0 && *chunks != 0 {
|
||||||
log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
|
log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
|
||||||
err := testSyncingViaDirectSubscribe(*chunks, *nodes)
|
err := testSyncingViaDirectSubscribe(t, *chunks, *nodes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -110,7 +122,7 @@ func TestSyncingViaDirectSubscribe(t *testing.T) {
|
|||||||
for _, chnk := range chnkCnt {
|
for _, chnk := range chnkCnt {
|
||||||
for _, n := range nodeCnt {
|
for _, n := range nodeCnt {
|
||||||
log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
|
log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chnk, n))
|
||||||
err := testSyncingViaDirectSubscribe(chnk, n)
|
err := testSyncingViaDirectSubscribe(t, chnk, n)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -130,21 +142,27 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
bucket.Store(bucketKeyStore, store)
|
bucket.Store(bucketKeyStore, store)
|
||||||
cleanup = func() {
|
|
||||||
os.RemoveAll(datadir)
|
|
||||||
store.Close()
|
|
||||||
}
|
|
||||||
localStore := store.(*storage.LocalStore)
|
localStore := store.(*storage.LocalStore)
|
||||||
db := storage.NewDBAPI(localStore)
|
netStore, err := storage.NewNetStore(localStore, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||||
delivery := NewDelivery(kad, db)
|
delivery := NewDelivery(kad, netStore)
|
||||||
|
netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New
|
||||||
|
|
||||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
|
r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
|
||||||
DoSync: true,
|
DoSync: true,
|
||||||
SyncUpdateDelay: 3 * time.Second,
|
SyncUpdateDelay: 3 * time.Second,
|
||||||
})
|
})
|
||||||
bucket.Store(bucketKeyRegistry, r)
|
bucket.Store(bucketKeyRegistry, r)
|
||||||
|
|
||||||
|
cleanup = func() {
|
||||||
|
os.RemoveAll(datadir)
|
||||||
|
netStore.Close()
|
||||||
|
r.Close()
|
||||||
|
}
|
||||||
|
|
||||||
return r, cleanup, nil
|
return r, cleanup, nil
|
||||||
|
|
||||||
},
|
},
|
||||||
@ -166,9 +184,27 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
|
ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||||
defer cancelSimRun()
|
defer cancelSimRun()
|
||||||
|
|
||||||
|
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
disconnections := sim.PeerEvents(
|
||||||
|
context.Background(),
|
||||||
|
sim.NodeIDs(),
|
||||||
|
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
|
||||||
|
)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for d := range disconnections {
|
||||||
|
log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
|
||||||
|
t.Fatal("unexpected disconnect")
|
||||||
|
cancelSimRun()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
|
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
|
||||||
nodeIDs := sim.UpNodeIDs()
|
nodeIDs := sim.UpNodeIDs()
|
||||||
for _, n := range nodeIDs {
|
for _, n := range nodeIDs {
|
||||||
@ -197,10 +233,6 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
 conf.hashes = append(conf.hashes, hashes...)
 mapKeysToNodes(conf)

-if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
-return err
-}
-
 // File retrieval check is repeated until all uploaded files are retrieved from all nodes
 // or until the timeout is reached.
 allSuccess := false
@ -220,6 +252,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
 }()
 }
 for !allSuccess {
+allSuccess = true
 for _, id := range nodeIDs {
 //for each expected chunk, check if it is in the local store
 localChunks := conf.idToChunksMap[id]
@ -252,7 +285,10 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
 log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
 }
 }
-allSuccess = localSuccess
+if !localSuccess {
+allSuccess = false
+break
+}
 }
 }
 if !allSuccess {
@ -264,6 +300,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
 if result.Error != nil {
 t.Fatal(result.Error)
 }
+log.Info("Simulation ended")
 }

 /*
@ -277,7 +314,7 @@ The test loads a snapshot file to construct the swarm network,
 assuming that the snapshot file identifies a healthy
 kademlia network. The snapshot should have 'streamer' in its service list.
 */
-func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
+func testSyncingViaDirectSubscribe(t *testing.T, chunkCount int, nodeCount int) error {
 sim := simulation.New(map[string]simulation.ServiceFunc{
 "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

@ -288,28 +325,34 @@ func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
 return nil, nil, err
 }
 bucket.Store(bucketKeyStore, store)
-cleanup = func() {
-os.RemoveAll(datadir)
-store.Close()
-}
 localStore := store.(*storage.LocalStore)
-db := storage.NewDBAPI(localStore)
+netStore, err := storage.NewNetStore(localStore, nil)
+if err != nil {
+return nil, nil, err
+}
 kad := network.NewKademlia(addr.Over(), network.NewKadParams())
-delivery := NewDelivery(kad, db)
+delivery := NewDelivery(kad, netStore)
+netStore.NewNetFetcherFunc = network.NewFetcherFactory(dummyRequestFromPeers, true).New

-r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), nil)
+r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), nil)
 bucket.Store(bucketKeyRegistry, r)

-fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
+fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
 bucket.Store(bucketKeyFileStore, fileStore)

+cleanup = func() {
+os.RemoveAll(datadir)
+netStore.Close()
+r.Close()
+}

 return r, cleanup, nil

 },
 })
 defer sim.Close()

-ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
+ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
 defer cancelSimRun()

 conf := &synctestConfig{}
@ -325,6 +368,24 @@ func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
 return err
 }

+if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+return err
+}
+
+disconnections := sim.PeerEvents(
+context.Background(),
+sim.NodeIDs(),
+simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+)
+
+go func() {
+for d := range disconnections {
+log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+t.Fatal("unexpected disconnect")
+cancelSimRun()
+}
+}()
+
 result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
 nodeIDs := sim.UpNodeIDs()
 for _, n := range nodeIDs {
@ -402,6 +463,7 @@ func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
 // or until the timeout is reached.
 allSuccess := false
 for !allSuccess {
+allSuccess = true
 for _, id := range nodeIDs {
 //for each expected chunk, check if it is in the local store
 localChunks := conf.idToChunksMap[id]
@ -434,7 +496,10 @@ func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
 log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
 }
 }
-allSuccess = localSuccess
+if !localSuccess {
+allSuccess = false
+break
+}
 }
 }
 if !allSuccess {
@ -447,7 +512,7 @@ func testSyncingViaDirectSubscribe(chunkCount int, nodeCount int) error {
 return result.Error
 }

-log.Info("Simulation terminated")
+log.Info("Simulation ended")
 return nil
 }

@ -462,10 +527,9 @@ func startSyncing(r *Registry, conf *synctestConfig) (int, error) {
 //iterate over each bin and solicit needed subscription to bins
 kad.EachBin(r.addr.Over(), pof, 0, func(conn *network.Peer, po int) bool {
 //identify begin and start index of the bin(s) we want to subscribe to
-histRange := &Range{}

 subCnt++
-err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), histRange, Top)
+err = r.RequestSubscription(conf.addrToIDMap[string(conn.Address())], NewStream("SYNC", FormatSyncBinKey(uint8(po)), true), NewRange(0, 0), High)
 if err != nil {
 log.Error(fmt.Sprintf("Error in RequestSubsciption! %v", err))
 return false
@ -478,7 +542,6 @@ func startSyncing(r *Registry, conf *synctestConfig) (int, error) {

 //map chunk keys to addresses which are responsible
 func mapKeysToNodes(conf *synctestConfig) {
-kmap := make(map[string][]int)
 nodemap := make(map[string][]int)
 //build a pot for chunk hashes
 np := pot.NewPot(nil, 0)
@ -487,36 +550,33 @@ func mapKeysToNodes(conf *synctestConfig) {
 indexmap[string(a)] = i
 np, _, _ = pot.Add(np, a, pof)
 }
+
+var kadMinProxSize = 2
+
+ppmap := network.NewPeerPotMap(kadMinProxSize, conf.addrs)
+
 //for each address, run EachNeighbour on the chunk hashes pot to identify closest nodes
 log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
 for i := 0; i < len(conf.hashes); i++ {
-pl := 256 //highest possible proximity
-var nns []int
+var a []byte
 np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
-a := val.([]byte)
-if pl < 256 && pl != po {
+// take the first address
+a = val.([]byte)
 return false
-}
-if pl == 256 || pl == po {
-log.Trace(fmt.Sprintf("appending %s", conf.addrToIDMap[string(a)]))
-nns = append(nns, indexmap[string(a)])
-nodemap[string(a)] = append(nodemap[string(a)], i)
-}
-if pl == 256 && len(nns) >= testMinProxBinSize {
-//maxProxBinSize has been reached at this po, so save it
-//we will add all other nodes at the same po
-pl = po
-}
-return true
 })
-kmap[string(conf.hashes[i])] = nns
+
+nns := ppmap[common.Bytes2Hex(a)].NNSet
+nns = append(nns, a)
+
+for _, p := range nns {
+nodemap[string(p)] = append(nodemap[string(p)], i)
+}
 }
 for addr, chunks := range nodemap {
 //this selects which chunks are expected to be found with the given node
 conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
 }
 log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
-conf.chunksToNodesMap = kmap
 }

 //upload a file(chunks) to a single local node store
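The mapKeysToNodes rewrite above stops tracking proximity bins by hand and instead asks the peer-pot map which nodes are nearest neighbours of the closest address. A fragment-style sketch of that logic, assuming conf, np and pof are set up as in the function; the value 2 for kadMinProxSize matches the hunk:

// Sketch: map each chunk hash to the closest node and its nearest-neighbour set.
nodemap := make(map[string][]int)
ppmap := network.NewPeerPotMap(2, conf.addrs)
for i := 0; i < len(conf.hashes); i++ {
	var closest []byte
	np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
		closest = val.([]byte) // first callback yields the closest address
		return false           // stop immediately
	})
	nns := append(ppmap[common.Bytes2Hex(closest)].NNSet, closest)
	for _, p := range nns {
		nodemap[string(p)] = append(nodemap[string(p)], i)
	}
}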
@ -32,10 +32,8 @@ import (
 "github.com/ethereum/go-ethereum/swarm/network"
 "github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
 "github.com/ethereum/go-ethereum/swarm/pot"
-"github.com/ethereum/go-ethereum/swarm/spancontext"
 "github.com/ethereum/go-ethereum/swarm/state"
 "github.com/ethereum/go-ethereum/swarm/storage"
-opentracing "github.com/opentracing/opentracing-go"
 )

 const (
@ -43,8 +41,8 @@ const (
 Mid
 High
 Top
-PriorityQueue // number of queues
-PriorityQueueCap = 32 // queue capacity
+PriorityQueue = 4 // number of priority queues - Low, Mid, High, Top
+PriorityQueueCap = 128 // queue capacity
 HashSize = 32
 )

@ -73,7 +71,7 @@ type RegistryOptions struct {
 }

 // NewRegistry is Streamer constructor
-func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, intervalsStore state.Store, options *RegistryOptions) *Registry {
+func NewRegistry(addr *network.BzzAddr, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions) *Registry {
 if options == nil {
 options = &RegistryOptions{}
 }
@ -93,13 +91,13 @@ func NewRegistry(addr *network.BzzAddr, delivery *Delivery, db *storage.DBAPI, i
 streamer.api = NewAPI(streamer)
 delivery.getPeer = streamer.getPeer
 streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) {
-return NewSwarmChunkServer(delivery.db), nil
+return NewSwarmChunkServer(delivery.chunkStore), nil
 })
 streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) {
-return NewSwarmSyncerClient(p, delivery.db, false, NewStream(swarmChunkServerStreamName, t, live))
+return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live))
 })
-RegisterSwarmSyncerServer(streamer, db)
-RegisterSwarmSyncerClient(streamer, db)
+RegisterSwarmSyncerServer(streamer, syncChunkStore)
+RegisterSwarmSyncerClient(streamer, syncChunkStore)

 if options.DoSync {
 // latestIntC function ensures that
@ -325,16 +323,6 @@ func (r *Registry) Quit(peerId discover.NodeID, s Stream) error {
 return peer.Send(context.TODO(), msg)
 }

-func (r *Registry) Retrieve(ctx context.Context, chunk *storage.Chunk) error {
-var sp opentracing.Span
-ctx, sp = spancontext.StartSpan(
-ctx,
-"registry.retrieve")
-defer sp.Finish()
-
-return r.delivery.RequestFromPeers(ctx, chunk.Addr[:], r.skipCheck)
-}
-
 func (r *Registry) NodeInfo() interface{} {
 return nil
 }
@ -557,7 +545,7 @@ func (c client) NextInterval() (start, end uint64, err error) {

 // Client interface for incoming peer Streamer
 type Client interface {
-NeedData(context.Context, []byte) func()
+NeedData(context.Context, []byte) func(context.Context) error
 BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error)
 Close()
 }
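With the signature change above, the Registry is constructed from any storage.SyncChunkStore (a NetStore in practice) rather than a *storage.DBAPI. A minimal sketch of a caller, assuming addr, delivery and netStore are already built as in the earlier hunks; only option fields that appear in this diff are used:

// newSyncingRegistry restates the new constructor call shape; the wrapper itself is hypothetical.
func newSyncingRegistry(addr *network.BzzAddr, delivery *Delivery, netStore *storage.NetStore) *Registry {
	return NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
		DoSync:          true,
		SyncUpdateDelay: 3 * time.Second, // illustrative value, as in the tests
		SkipCheck:       false,
	})
}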
@ -80,15 +80,17 @@ func newTestClient(t string) *testClient {
 }
 }

-func (self *testClient) NeedData(ctx context.Context, hash []byte) func() {
+func (self *testClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
 self.receivedHashes[string(hash)] = hash
 if bytes.Equal(hash, hash0[:]) {
-return func() {
+return func(context.Context) error {
 <-self.wait0
+return nil
 }
 } else if bytes.Equal(hash, hash2[:]) {
-return func() {
+return func(context.Context) error {
 <-self.wait2
+return nil
 }
 }
 return nil
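The NeedData contract now returns a context-aware wait function instead of a bare func(). A hedged sketch of a caller using the new shape; the helper name, timeout and error message are assumptions:

// waitForChunk blocks until the needed chunk arrives or the bound expires.
func waitForChunk(ctx context.Context, c Client, hash []byte) error {
	wait := c.NeedData(ctx, hash)
	if wait == nil {
		return nil // chunk already present locally
	}
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second) // illustrative bound
	defer cancel()
	return wait(ctx)
}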
@ -28,7 +28,6 @@ import (
 )

 const (
-// BatchSize = 2
 BatchSize = 128
 )

@ -38,35 +37,37 @@ const (
 // * (live/non-live historical) chunk syncing per proximity bin
 type SwarmSyncerServer struct {
 po uint8
-db *storage.DBAPI
+store storage.SyncChunkStore
 sessionAt uint64
 start uint64
+live bool
 quit chan struct{}
 }

 // NewSwarmSyncerServer is contructor for SwarmSyncerServer
-func NewSwarmSyncerServer(live bool, po uint8, db *storage.DBAPI) (*SwarmSyncerServer, error) {
-sessionAt := db.CurrentBucketStorageIndex(po)
+func NewSwarmSyncerServer(live bool, po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) {
+sessionAt := syncChunkStore.BinIndex(po)
 var start uint64
 if live {
 start = sessionAt
 }
 return &SwarmSyncerServer{
 po: po,
-db: db,
+store: syncChunkStore,
 sessionAt: sessionAt,
 start: start,
+live: live,
 quit: make(chan struct{}),
 }, nil
 }

-func RegisterSwarmSyncerServer(streamer *Registry, db *storage.DBAPI) {
+func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) {
 streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) {
 po, err := ParseSyncBinKey(t)
 if err != nil {
 return nil, err
 }
-return NewSwarmSyncerServer(live, po, db)
+return NewSwarmSyncerServer(live, po, syncChunkStore)
 })
 // streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
 // return NewOutgoingProvableSwarmSyncer(po, db)
@ -78,27 +79,35 @@ func (s *SwarmSyncerServer) Close() {
 close(s.quit)
 }

-// GetSection retrieves the actual chunk from localstore
+// GetData retrieves the actual chunk from netstore
 func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
-chunk, err := s.db.Get(ctx, storage.Address(key))
-if err == storage.ErrFetching {
-<-chunk.ReqC
-} else if err != nil {
+chunk, err := s.store.Get(ctx, storage.Address(key))
+if err != nil {
 return nil, err
 }
-return chunk.SData, nil
+return chunk.Data(), nil
 }

 // GetBatch retrieves the next batch of hashes from the dbstore
 func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
 var batch []byte
 i := 0
+if s.live {
 if from == 0 {
 from = s.start
 }
 if to <= from || from >= s.sessionAt {
 to = math.MaxUint64
 }
+} else {
+if (to < from && to != 0) || from > s.sessionAt {
+return nil, 0, 0, nil, nil
+}
+if to == 0 || to > s.sessionAt {
+to = s.sessionAt
+}
+}

 var ticker *time.Ticker
 defer func() {
 if ticker != nil {
@ -119,8 +128,8 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 }

 metrics.GetOrRegisterCounter("syncer.setnextbatch.iterator", nil).Inc(1)
-err := s.db.Iterator(from, to, s.po, func(addr storage.Address, idx uint64) bool {
-batch = append(batch, addr[:]...)
+err := s.store.Iterator(from, to, s.po, func(key storage.Address, idx uint64) bool {
+batch = append(batch, key[:]...)
 i++
 to = idx
 return i < BatchSize
@ -134,7 +143,7 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 wait = true
 }

-log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.db.CurrentBucketStorageIndex(s.po))
+log.Trace("Swarm syncer offer batch", "po", s.po, "len", i, "from", from, "to", to, "current store count", s.store.BinIndex(s.po))
 return batch, from, to, nil, nil
 }
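The new live/history split in SetNextBatch can be read as a small range-normalisation step: live streams are open-ended from the session start, history streams are clamped to the session start and may serve nothing at all. A hypothetical helper restating that logic outside the server (sessionAt is the bin index at session start, start the first live index):

// normalizeRange returns the effective (from, to) and whether there is anything to serve.
func normalizeRange(live bool, from, to, start, sessionAt uint64) (uint64, uint64, bool) {
	if live {
		if from == 0 {
			from = start
		}
		if to <= from || from >= sessionAt {
			to = math.MaxUint64 // open-ended live range
		}
		return from, to, true
	}
	// history stream: never serve past the session start
	if (to < from && to != 0) || from > sessionAt {
		return 0, 0, false // nothing to serve
	}
	if to == 0 || to > sessionAt {
		to = sessionAt
	}
	return from, to, true
}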
@ -146,28 +155,26 @@ type SwarmSyncerClient struct {
 sessionReader storage.LazySectionReader
 retrieveC chan *storage.Chunk
 storeC chan *storage.Chunk
-db *storage.DBAPI
+store storage.SyncChunkStore
 // chunker storage.Chunker
 currentRoot storage.Address
 requestFunc func(chunk *storage.Chunk)
 end, start uint64
 peer *Peer
-ignoreExistingRequest bool
 stream Stream
 }

 // NewSwarmSyncerClient is a contructor for provable data exchange syncer
-func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool, stream Stream) (*SwarmSyncerClient, error) {
+func NewSwarmSyncerClient(p *Peer, store storage.SyncChunkStore, stream Stream) (*SwarmSyncerClient, error) {
 return &SwarmSyncerClient{
-db: db,
+store: store,
 peer: p,
-ignoreExistingRequest: ignoreExistingRequest,
 stream: stream,
 }, nil
 }

 // // NewIncomingProvableSwarmSyncer is a contructor for provable data exchange syncer
-// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Key, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
+// func NewIncomingProvableSwarmSyncer(po int, priority int, index uint64, sessionAt uint64, intervals []uint64, sessionRoot storage.Address, chunker *storage.PyramidChunker, store storage.ChunkStore, p Peer) *SwarmSyncerClient {
 // retrieveC := make(storage.Chunk, chunksCap)
 // RunChunkRequestor(p, retrieveC)
 // storeC := make(storage.Chunk, chunksCap)
@ -204,26 +211,15 @@ func NewSwarmSyncerClient(p *Peer, db *storage.DBAPI, ignoreExistingRequest bool

 // RegisterSwarmSyncerClient registers the client constructor function for
 // to handle incoming sync streams
-func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) {
+func RegisterSwarmSyncerClient(streamer *Registry, store storage.SyncChunkStore) {
 streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
-return NewSwarmSyncerClient(p, db, true, NewStream("SYNC", t, live))
+return NewSwarmSyncerClient(p, store, NewStream("SYNC", t, live))
 })
 }

 // NeedData
-func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func()) {
-chunk, _ := s.db.GetOrCreateRequest(ctx, key)
-// TODO: we may want to request from this peer anyway even if the request exists
-
-// ignoreExistingRequest is temporary commented out until its functionality is verified.
-// For now, this optimization can be disabled.
-if chunk.ReqC == nil { //|| (s.ignoreExistingRequest && !created) {
-return nil
-}
-// create request and wait until the chunk data arrives and is stored
-return func() {
-chunk.WaitToStore()
-}
+func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func(context.Context) error) {
+return s.store.FetchFunc(ctx, key)
 }

 // BatchDone
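NeedData now delegates entirely to SyncChunkStore.FetchFunc, which returns nil when the chunk is already stored and otherwise a wait function that blocks until the chunk arrives or the context ends. A fragment-style sketch of a caller, assuming store, ctx and key (a raw []byte or storage.Address) are in scope:

// Sketch: request a chunk and wait for it, tolerating cancellation.
if wait := store.FetchFunc(ctx, key); wait != nil {
	if err := wait(ctx); err != nil {
		log.Warn("chunk not synced", "key", fmt.Sprintf("%x", key), "err", err)
	}
}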
@ -102,17 +102,22 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
 }
 }
 localStore := store.(*storage.LocalStore)
-db := storage.NewDBAPI(localStore)
-bucket.Store(bucketKeyDB, db)
+netStore, err := storage.NewNetStore(localStore, nil)
+if err != nil {
+return nil, nil, err
+}
+bucket.Store(bucketKeyDB, netStore)
 kad := network.NewKademlia(addr.Over(), network.NewKadParams())
-delivery := NewDelivery(kad, db)
+delivery := NewDelivery(kad, netStore)
+netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

 bucket.Store(bucketKeyDelivery, delivery)

-r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
+r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
 SkipCheck: skipCheck,
 })

-fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
+fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
 bucket.Store(bucketKeyFileStore, fileStore)

 return r, cleanup, nil
@ -197,8 +202,8 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
 if !ok {
 return fmt.Errorf("No DB")
 }
-db := item.(*storage.DBAPI)
-db.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
+netStore := item.(*storage.NetStore)
+netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
 hashes[i] = append(hashes[i], addr)
 totalHashes++
 hashCounts[i]++
@ -216,18 +221,13 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck
 if !ok {
 return fmt.Errorf("No DB")
 }
-db := item.(*storage.DBAPI)
-chunk, err := db.Get(ctx, key)
-if err == storage.ErrFetching {
-<-chunk.ReqC
-} else if err != nil {
-continue
-}
-// needed for leveldb not to be closed?
-// chunk.WaitToStore()
+db := item.(*storage.NetStore)
+_, err := db.Get(ctx, key)
+if err == nil {
 found++
 }
 }
+}
 log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
 }
 if total == found && total > 0 {
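The sync check now pulls the NetStore out of the test bucket and walks a proximity bin with Iterator. A small hypothetical helper showing the same bin-count pattern; netStore and po are assumed to come from the surrounding loop:

// countBin counts how many chunk addresses a node has stored in one proximity bin.
func countBin(netStore *storage.NetStore, po uint8) int {
	var count int
	netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
		count++
		return true // walk the whole bin
	})
	return count
}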
@ -87,10 +87,10 @@ func TestSwarmNetwork(t *testing.T) {
 },
 },
 {
-name: "100_nodes",
+name: "50_nodes",
 steps: []testSwarmNetworkStep{
 {
-nodeCount: 100,
+nodeCount: 50,
 },
 },
 options: &testSwarmNetworkOptions{
@ -99,10 +99,10 @@ func TestSwarmNetwork(t *testing.T) {
 disabled: !*longrunning,
 },
 {
-name: "100_nodes_skip_check",
+name: "50_nodes_skip_check",
 steps: []testSwarmNetworkStep{
 {
-nodeCount: 100,
+nodeCount: 50,
 },
 },
 options: &testSwarmNetworkOptions{
@ -287,6 +287,7 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa

 config.Init(privkey)
 config.DeliverySkipCheck = o.SkipCheck
+config.Port = ""

 swarm, err := NewSwarm(config, nil)
 if err != nil {
@ -22,10 +22,9 @@ import (
 "fmt"
 "io"
 "sync"
-"time"

 "github.com/ethereum/go-ethereum/metrics"
-"github.com/ethereum/go-ethereum/swarm/chunk"
+ch "github.com/ethereum/go-ethereum/swarm/chunk"
 "github.com/ethereum/go-ethereum/swarm/log"
 "github.com/ethereum/go-ethereum/swarm/spancontext"
 opentracing "github.com/opentracing/opentracing-go"
@ -67,7 +66,6 @@ The hashing itself does use extra copies and allocation though, since it does ne

 var (
 errAppendOppNotSuported = errors.New("Append operation not supported")
-errOperationTimedOut = errors.New("operation timed out")
 )

 type ChunkerParams struct {
@ -133,7 +131,7 @@ type TreeChunker struct {
 func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader {
 jp := &JoinerParams{
 ChunkerParams: ChunkerParams{
-chunkSize: chunk.DefaultSize,
+chunkSize: ch.DefaultSize,
 hashSize: int64(len(addr)),
 },
 addr: addr,
@ -153,7 +151,7 @@ func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (
 tsp := &TreeSplitterParams{
 SplitterParams: SplitterParams{
 ChunkerParams: ChunkerParams{
-chunkSize: chunk.DefaultSize,
+chunkSize: ch.DefaultSize,
 hashSize: putter.RefSize(),
 },
 reader: data,
@ -201,11 +199,6 @@ func NewTreeSplitter(params *TreeSplitterParams) *TreeChunker {
 return tc
 }

-// String() for pretty printing
-func (c *Chunk) String() string {
-return fmt.Sprintf("Key: %v TreeSize: %v Chunksize: %v", c.Addr.Log(), c.Size, len(c.SData))
-}
-
 type hashJob struct {
 key Address
 chunk []byte
@ -236,7 +229,7 @@ func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.
 panic("chunker must be initialised")
 }

-tc.runWorker()
+tc.runWorker(ctx)

 depth := 0
 treeSize := tc.chunkSize
@ -251,7 +244,7 @@ func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.
 // this waitgroup member is released after the root hash is calculated
 tc.wg.Add(1)
 //launch actual recursive function passing the waitgroups
-go tc.split(depth, treeSize/tc.branches, key, tc.dataSize, tc.wg)
+go tc.split(ctx, depth, treeSize/tc.branches, key, tc.dataSize, tc.wg)

 // closes internal error channel if all subprocesses in the workgroup finished
 go func() {
@ -267,14 +260,14 @@ func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.
 if err != nil {
 return nil, nil, err
 }
-case <-time.NewTimer(splitTimeout).C:
-return nil, nil, errOperationTimedOut
+case <-ctx.Done():
+return nil, nil, ctx.Err()
 }

 return key, tc.putter.Wait, nil
 }

-func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64, parentWg *sync.WaitGroup) {
+func (tc *TreeChunker) split(ctx context.Context, depth int, treeSize int64, addr Address, size int64, parentWg *sync.WaitGroup) {

 //

@ -321,10 +314,10 @@ func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64
 secSize = treeSize
 }
 // the hash of that data
-subTreeKey := chunk[8+i*tc.hashSize : 8+(i+1)*tc.hashSize]
+subTreeAddress := chunk[8+i*tc.hashSize : 8+(i+1)*tc.hashSize]

 childrenWg.Add(1)
-tc.split(depth-1, treeSize/tc.branches, subTreeKey, secSize, childrenWg)
+tc.split(ctx, depth-1, treeSize/tc.branches, subTreeAddress, secSize, childrenWg)

 i++
 pos += treeSize
@ -336,7 +329,7 @@ func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64

 worker := tc.getWorkerCount()
 if int64(len(tc.jobC)) > worker && worker < ChunkProcessors {
-tc.runWorker()
+tc.runWorker(ctx)

 }
 select {
@ -345,7 +338,7 @@ func (tc *TreeChunker) split(depth int, treeSize int64, addr Address, size int64
 }
 }

-func (tc *TreeChunker) runWorker() {
+func (tc *TreeChunker) runWorker(ctx context.Context) {
 tc.incrementWorkerCount()
 go func() {
 defer tc.decrementWorkerCount()
@ -357,7 +350,7 @@ func (tc *TreeChunker) runWorker() {
 return
 }

-h, err := tc.putter.Put(tc.ctx, job.chunk)
+h, err := tc.putter.Put(ctx, job.chunk)
 if err != nil {
 tc.errC <- err
 return
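The hunks above drop the package-level split timeout in favour of caller-supplied context cancellation, and thread that context through split and the workers. A hypothetical wrapper restating the select shape Split now uses; callers bound the operation with context.WithTimeout or cancel it explicitly:

// waitSplit races the chunker's error channel against the caller's context.
func waitSplit(ctx context.Context, errC <-chan error) error {
	select {
	case err := <-errC:
		return err // nil on success, first worker error otherwise
	case <-ctx.Done():
		return ctx.Err() // deadline exceeded or caller cancellation
	}
}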
@ -377,8 +370,8 @@ func (tc *TreeChunker) Append() (Address, func(), error) {

 // LazyChunkReader implements LazySectionReader
 type LazyChunkReader struct {
-Ctx context.Context
-key Address // root key
+ctx context.Context
+addr Address // root address
 chunkData ChunkData
 off int64 // offset
 chunkSize int64 // inherit from chunker
@ -390,18 +383,18 @@ type LazyChunkReader struct {

 func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader {
 return &LazyChunkReader{
-key: tc.addr,
+addr: tc.addr,
 chunkSize: tc.chunkSize,
 branches: tc.branches,
 hashSize: tc.hashSize,
 depth: tc.depth,
 getter: tc.getter,
-Ctx: tc.ctx,
+ctx: tc.ctx,
 }
 }

 func (r *LazyChunkReader) Context() context.Context {
-return r.Ctx
+return r.ctx
 }

 // Size is meant to be called on the LazySectionReader
@ -415,23 +408,24 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
 "lcr.size")
 defer sp.Finish()

-log.Debug("lazychunkreader.size", "key", r.key)
+log.Debug("lazychunkreader.size", "addr", r.addr)
 if r.chunkData == nil {
-chunkData, err := r.getter.Get(cctx, Reference(r.key))
+chunkData, err := r.getter.Get(cctx, Reference(r.addr))
 if err != nil {
 return 0, err
 }
-if chunkData == nil {
-select {
-case <-quitC:
-return 0, errors.New("aborted")
-default:
-return 0, fmt.Errorf("root chunk not found for %v", r.key.Hex())
-}
-}
 r.chunkData = chunkData
+s := r.chunkData.Size()
+log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
+if s < 0 {
+return 0, errors.New("corrupt size")
 }
-return r.chunkData.Size(), nil
+return int64(s), nil
+}
+s := r.chunkData.Size()
+log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
+
+return int64(s), nil
 }

 // read at can be called numerous times
@ -443,7 +437,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
 var sp opentracing.Span
 var cctx context.Context
 cctx, sp = spancontext.StartSpan(
-r.Ctx,
+r.ctx,
 "lcr.read")
 defer sp.Finish()

@ -460,7 +454,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
 quitC := make(chan bool)
 size, err := r.Size(cctx, quitC)
 if err != nil {
-log.Error("lazychunkreader.readat.size", "size", size, "err", err)
+log.Debug("lazychunkreader.readat.size", "size", size, "err", err)
 return 0, err
 }

@ -481,7 +475,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
 length *= r.chunkSize
 }
 wg.Add(1)
-go r.join(cctx, b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC)
+go r.join(b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC)
 go func() {
 wg.Wait()
 close(errC)
@ -489,20 +483,22 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {

 err = <-errC
 if err != nil {
-log.Error("lazychunkreader.readat.errc", "err", err)
+log.Debug("lazychunkreader.readat.errc", "err", err)
 close(quitC)
 return 0, err
 }
 if off+int64(len(b)) >= size {
+log.Debug("lazychunkreader.readat.return at end", "size", size, "off", off)
 return int(size - off), io.EOF
 }
+log.Debug("lazychunkreader.readat.errc", "buff", len(b))
 return len(b), nil
 }

-func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) {
+func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) {
 defer parentWg.Done()
 // find appropriate block level
-for chunkData.Size() < treeSize && depth > r.depth {
+for chunkData.Size() < uint64(treeSize) && depth > r.depth {
 treeSize /= r.branches
 depth--
 }
@ -545,19 +541,19 @@ func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff in
 }
 wg.Add(1)
 go func(j int64) {
-childKey := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize]
-chunkData, err := r.getter.Get(ctx, Reference(childKey))
+childAddress := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize]
+chunkData, err := r.getter.Get(r.ctx, Reference(childAddress))
 if err != nil {
-log.Error("lazychunkreader.join", "key", fmt.Sprintf("%x", childKey), "err", err)
+log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err)
 select {
-case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childKey)):
+case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)):
 case <-quitC:
 }
 return
 }
 if l := len(chunkData); l < 9 {
 select {
-case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childKey), l):
+case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childAddress), l):
 case <-quitC:
 }
 return
@ -565,26 +561,26 @@ func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff in
 if soff < off {
 soff = off
 }
-r.join(ctx, b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC)
+r.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC)
 }(i)
 } //for
 }

 // Read keeps a cursor so cannot be called simulateously, see ReadAt
 func (r *LazyChunkReader) Read(b []byte) (read int, err error) {
-log.Debug("lazychunkreader.read", "key", r.key)
+log.Debug("lazychunkreader.read", "key", r.addr)
 metrics.GetOrRegisterCounter("lazychunkreader.read", nil).Inc(1)

 read, err = r.ReadAt(b, r.off)
 if err != nil && err != io.EOF {
-log.Error("lazychunkreader.readat", "read", read, "err", err)
+log.Debug("lazychunkreader.readat", "read", read, "err", err)
 metrics.GetOrRegisterCounter("lazychunkreader.read.err", nil).Inc(1)
 }

 metrics.GetOrRegisterCounter("lazychunkreader.read.bytes", nil).Inc(int64(read))

 r.off += int64(read)
-return
+return read, err
 }

 // completely analogous to standard SectionReader implementation
@ -592,7 +588,7 @@ var errWhence = errors.New("Seek: invalid whence")
 var errOffset = errors.New("Seek: invalid offset")

 func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) {
-log.Debug("lazychunkreader.seek", "key", r.key, "offset", offset)
+log.Debug("lazychunkreader.seek", "key", r.addr, "offset", offset)
 switch whence {
 default:
 return 0, errWhence
@ -607,7 +603,7 @@ func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) {
 return 0, fmt.Errorf("can't get size: %v", err)
 }
 }
-offset += r.chunkData.Size()
+offset += int64(r.chunkData.Size())
 }

 if offset < 0 {
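After this refactor the reader's root address and context live in unexported fields, so external callers only deal with the Read/ReadAt/Seek/Size surface. A minimal sketch of reading a whole file through the reader, assuming a storage.Getter and a root address are available; the helper itself is hypothetical:

// readAll joins a tree rooted at rootAddr and reads it fully into memory.
func readAll(ctx context.Context, getter storage.Getter, rootAddr storage.Address) ([]byte, error) {
	reader := storage.TreeJoin(ctx, rootAddr, getter, 0)
	size, err := reader.Size(ctx, nil)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, size)
	if _, err := reader.ReadAt(buf, 0); err != nil && err != io.EOF {
		return nil, err
	}
	return buf, nil
}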
@ -21,7 +21,6 @@ import (
 "context"
 "crypto/rand"
 "encoding/binary"
-"errors"
 "fmt"
 "io"
 "testing"
@ -43,27 +42,8 @@ type chunkerTester struct {
 t test
 }

-// fakeChunkStore doesn't store anything, just implements the ChunkStore interface
-// It can be used to inject into a hasherStore if you don't want to actually store data just do the
-// hashing
-type fakeChunkStore struct {
-}
-
-// Put doesn't store anything it is just here to implement ChunkStore
-func (f *fakeChunkStore) Put(context.Context, *Chunk) {
-}
-
-// Gut doesn't store anything it is just here to implement ChunkStore
-func (f *fakeChunkStore) Get(context.Context, Address) (*Chunk, error) {
-return nil, errors.New("FakeChunkStore doesn't support Get")
-}
-
-// Close doesn't store anything it is just here to implement ChunkStore
-func (f *fakeChunkStore) Close() {
-}
-
-func newTestHasherStore(chunkStore ChunkStore, hash string) *hasherStore {
-return NewHasherStore(chunkStore, MakeHashFunc(hash), false)
+func newTestHasherStore(store ChunkStore, hash string) *hasherStore {
+return NewHasherStore(store, MakeHashFunc(hash), false)
 }

 func testRandomBrokenData(n int, tester *chunkerTester) {
@ -82,11 +62,12 @@ func testRandomBrokenData(n int, tester *chunkerTester) {
 putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)

 expectedError := fmt.Errorf("Broken reader")
-addr, _, err := TreeSplit(context.TODO(), brokendata, int64(n), putGetter)
+ctx := context.Background()
+key, _, err := TreeSplit(ctx, brokendata, int64(n), putGetter)
 if err == nil || err.Error() != expectedError.Error() {
 tester.t.Fatalf("Not receiving the correct error! Expected %v, received %v", expectedError, err)
 }
-tester.t.Logf(" Key = %v\n", addr)
+tester.t.Logf(" Address = %v\n", key)
 }

 func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester) Address {
@ -96,7 +77,7 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester)
 input, found := tester.inputs[uint64(n)]
 var data io.Reader
 if !found {
-data, input = generateRandomData(n)
+data, input = GenerateRandomData(n)
 tester.inputs[uint64(n)] = input
 } else {
 data = io.LimitReader(bytes.NewReader(input), int64(n))
@ -116,13 +97,13 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester)
 if err != nil {
 tester.t.Fatalf(err.Error())
 }
-tester.t.Logf(" Key = %v\n", addr)
+tester.t.Logf(" Address = %v\n", addr)
 err = wait(ctx)
 if err != nil {
 tester.t.Fatalf(err.Error())
 }

-reader := TreeJoin(context.TODO(), addr, putGetter, 0)
+reader := TreeJoin(ctx, addr, putGetter, 0)
 output := make([]byte, n)
 r, err := reader.Read(output)
 if r != n || err != io.EOF {
@ -196,14 +177,14 @@ func TestDataAppend(t *testing.T) {
 input, found := tester.inputs[uint64(n)]
 var data io.Reader
 if !found {
-data, input = generateRandomData(n)
+data, input = GenerateRandomData(n)
 tester.inputs[uint64(n)] = input
 } else {
 data = io.LimitReader(bytes.NewReader(input), int64(n))
 }

-chunkStore := NewMapChunkStore()
-putGetter := newTestHasherStore(chunkStore, SHA3Hash)
+store := NewMapChunkStore()
+putGetter := newTestHasherStore(store, SHA3Hash)

 ctx := context.TODO()
 addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
@ -214,18 +195,17 @@ func TestDataAppend(t *testing.T) {
 if err != nil {
 tester.t.Fatalf(err.Error())
 }

 //create a append data stream
 appendInput, found := tester.inputs[uint64(m)]
 var appendData io.Reader
 if !found {
-appendData, appendInput = generateRandomData(m)
+appendData, appendInput = GenerateRandomData(m)
 tester.inputs[uint64(m)] = appendInput
 } else {
 appendData = io.LimitReader(bytes.NewReader(appendInput), int64(m))
 }

-putGetter = newTestHasherStore(chunkStore, SHA3Hash)
+putGetter = newTestHasherStore(store, SHA3Hash)
 newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter)
 if err != nil {
 tester.t.Fatalf(err.Error())
@ -256,18 +236,18 @@ func TestRandomData(t *testing.T) {
 tester := &chunkerTester{t: t}

 for _, s := range sizes {
-treeChunkerKey := testRandomData(false, SHA3Hash, s, tester)
-pyramidChunkerKey := testRandomData(true, SHA3Hash, s, tester)
-if treeChunkerKey.String() != pyramidChunkerKey.String() {
-tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String())
+treeChunkerAddress := testRandomData(false, SHA3Hash, s, tester)
+pyramidChunkerAddress := testRandomData(true, SHA3Hash, s, tester)
+if treeChunkerAddress.String() != pyramidChunkerAddress.String() {
+tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerAddress.String(), pyramidChunkerAddress.String())
 }
 }

 for _, s := range sizes {
-treeChunkerKey := testRandomData(false, BMTHash, s, tester)
-pyramidChunkerKey := testRandomData(true, BMTHash, s, tester)
-if treeChunkerKey.String() != pyramidChunkerKey.String() {
-tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String())
+treeChunkerAddress := testRandomData(false, BMTHash, s, tester)
+pyramidChunkerAddress := testRandomData(true, BMTHash, s, tester)
+if treeChunkerAddress.String() != pyramidChunkerAddress.String() {
+tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerAddress.String(), pyramidChunkerAddress.String())
 }
 }
 }
@ -312,12 +292,18 @@ func benchmarkSplitTreeSHA3(n int, t *testing.B) {
 t.ReportAllocs()
 for i := 0; i < t.N; i++ {
 data := testDataReader(n)
-putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash)
+putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)

-_, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter)
+ctx := context.Background()
+_, wait, err := TreeSplit(ctx, data, int64(n), putGetter)
 if err != nil {
 t.Fatalf(err.Error())
 }
+err = wait(ctx)
+if err != nil {
+t.Fatalf(err.Error())
+}

 }
 }

@ -325,9 +311,32 @@ func benchmarkSplitTreeBMT(n int, t *testing.B) {
 t.ReportAllocs()
 for i := 0; i < t.N; i++ {
 data := testDataReader(n)
-putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash)
+putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)

-_, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter)
+ctx := context.Background()
+_, wait, err := TreeSplit(ctx, data, int64(n), putGetter)
+if err != nil {
+t.Fatalf(err.Error())
+}
+err = wait(ctx)
+if err != nil {
+t.Fatalf(err.Error())
+}
+}
+}
+
+func benchmarkSplitPyramidBMT(n int, t *testing.B) {
+t.ReportAllocs()
+for i := 0; i < t.N; i++ {
+data := testDataReader(n)
+putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)
+
+ctx := context.Background()
+_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
+if err != nil {
+t.Fatalf(err.Error())
+}
+err = wait(ctx)
 if err != nil {
 t.Fatalf(err.Error())
 }
@ -338,23 +347,14 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
 t.ReportAllocs()
 for i := 0; i < t.N; i++ {
 data := testDataReader(n)
-putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash)
+putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)

-_, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter)
+ctx := context.Background()
+_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
 if err != nil {
 t.Fatalf(err.Error())
 }
-}
-}
-
-func benchmarkSplitPyramidBMT(n int, t *testing.B) {
-t.ReportAllocs()
-for i := 0; i < t.N; i++ {
-data := testDataReader(n)
-putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash)
-
-_, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter)
+err = wait(ctx)
 if err != nil {
 t.Fatalf(err.Error())
 }
@ -367,10 +367,10 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
 data := testDataReader(n)
 data1 := testDataReader(m)

-chunkStore := NewMapChunkStore()
-putGetter := newTestHasherStore(chunkStore, SHA3Hash)
+store := NewMapChunkStore()
+putGetter := newTestHasherStore(store, SHA3Hash)

-ctx := context.TODO()
+ctx := context.TODO()
 key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
 if err != nil {
 t.Fatalf(err.Error())
@ -380,7 +380,7 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
 t.Fatalf(err.Error())
 }

-putGetter = newTestHasherStore(chunkStore, SHA3Hash)
|
putGetter = newTestHasherStore(store, SHA3Hash)
|
||||||
_, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter)
|
_, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(err.Error())
|
t.Fatalf(err.Error())
|
||||||
|
@ -1,69 +0,0 @@
|
|||||||
// Copyright 2016 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
ChunkStore interface is implemented by :
|
|
||||||
|
|
||||||
- MemStore: a memory cache
|
|
||||||
- DbStore: local disk/db store
|
|
||||||
- LocalStore: a combination (sequence of) memStore and dbStore
|
|
||||||
- NetStore: cloud storage abstraction layer
|
|
||||||
- FakeChunkStore: dummy store which doesn't store anything, it just implements the interface
|
|
||||||
*/
|
|
||||||
type ChunkStore interface {
|
|
||||||
Put(context.Context, *Chunk) // effectively there is no error return even if an error occurs
|
|
||||||
Get(context.Context, Address) (*Chunk, error)
|
|
||||||
Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapChunkStore is a very simple ChunkStore implementation to store chunks in a map in memory.
|
|
||||||
type MapChunkStore struct {
|
|
||||||
chunks map[string]*Chunk
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewMapChunkStore() *MapChunkStore {
|
|
||||||
return &MapChunkStore{
|
|
||||||
chunks: make(map[string]*Chunk),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapChunkStore) Put(ctx context.Context, chunk *Chunk) {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
m.chunks[chunk.Addr.Hex()] = chunk
|
|
||||||
chunk.markAsStored()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapChunkStore) Get(ctx context.Context, addr Address) (*Chunk, error) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
chunk := m.chunks[addr.Hex()]
|
|
||||||
if chunk == nil {
|
|
||||||
return nil, ErrChunkNotFound
|
|
||||||
}
|
|
||||||
return chunk, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapChunkStore) Close() {
|
|
||||||
}
|
|
@ -1,44 +0,0 @@
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PutChunks adds chunks to the localstore
|
|
||||||
// It waits for a receive on the stored channel
|
|
||||||
// It logs but does not fail on delivery errors
|
|
||||||
func PutChunks(store *LocalStore, chunks ...*Chunk) {
|
|
||||||
wg := sync.WaitGroup{}
|
|
||||||
wg.Add(len(chunks))
|
|
||||||
go func() {
|
|
||||||
for _, c := range chunks {
|
|
||||||
<-c.dbStoredC
|
|
||||||
if err := c.GetErrored(); err != nil {
|
|
||||||
log.Error("chunk store fail", "err", err, "key", c.Addr)
|
|
||||||
}
|
|
||||||
wg.Done()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
for _, c := range chunks {
|
|
||||||
go store.Put(context.TODO(), c)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
@ -23,16 +23,20 @@ import (
|
|||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
ch "github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
colorable "github.com/mattn/go-colorable"
|
colorable "github.com/mattn/go-colorable"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
loglevel = flag.Int("loglevel", 3, "verbosity of logs")
|
loglevel = flag.Int("loglevel", 3, "verbosity of logs")
|
||||||
|
getTimeout = 30 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -56,47 +60,73 @@ func brokenLimitReader(data io.Reader, size int, errAt int) *brokenLimitedReader
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func mputRandomChunks(store ChunkStore, processors int, n int, chunksize int64) (hs []Address) {
|
func newLDBStore(t *testing.T) (*LDBStore, func()) {
|
||||||
return mput(store, processors, n, GenerateRandomChunk)
|
dir, err := ioutil.TempDir("", "bzz-storage-test")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
log.Trace("memstore.tempdir", "dir", dir)
|
||||||
|
|
||||||
|
ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir)
|
||||||
|
db, err := NewLDBStore(ldbparams)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func mput(store ChunkStore, processors int, n int, f func(i int64) *Chunk) (hs []Address) {
|
cleanup := func() {
|
||||||
wg := sync.WaitGroup{}
|
db.Close()
|
||||||
wg.Add(processors)
|
err := os.RemoveAll(dir)
|
||||||
c := make(chan *Chunk)
|
if err != nil {
|
||||||
for i := 0; i < processors; i++ {
|
t.Fatal(err)
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
for chunk := range c {
|
|
||||||
wg.Add(1)
|
|
||||||
chunk := chunk
|
|
||||||
store.Put(context.TODO(), chunk)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
<-chunk.dbStoredC
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
fa := f
|
|
||||||
if _, ok := store.(*MemStore); ok {
|
return db, cleanup
|
||||||
fa = func(i int64) *Chunk {
|
}
|
||||||
chunk := f(i)
|
|
||||||
chunk.markAsStored()
|
func mputRandomChunks(store ChunkStore, n int, chunksize int64) ([]Chunk, error) {
|
||||||
|
return mput(store, n, GenerateRandomChunk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mputChunks(store ChunkStore, chunks ...Chunk) error {
|
||||||
|
i := 0
|
||||||
|
f := func(n int64) Chunk {
|
||||||
|
chunk := chunks[i]
|
||||||
|
i++
|
||||||
return chunk
|
return chunk
|
||||||
}
|
}
|
||||||
}
|
_, err := mput(store, len(chunks), f)
|
||||||
for i := 0; i < n; i++ {
|
return err
|
||||||
chunk := fa(int64(i))
|
|
||||||
hs = append(hs, chunk.Addr)
|
|
||||||
c <- chunk
|
|
||||||
}
|
|
||||||
close(c)
|
|
||||||
wg.Wait()
|
|
||||||
return hs
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func mget(store ChunkStore, hs []Address, f func(h Address, chunk *Chunk) error) error {
|
func mput(store ChunkStore, n int, f func(i int64) Chunk) (hs []Chunk, err error) {
|
||||||
|
// put to localstore and wait for stored channel
|
||||||
|
// does not check delivery error state
|
||||||
|
errc := make(chan error)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
for i := int64(0); i < int64(n); i++ {
|
||||||
|
chunk := f(ch.DefaultSize)
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case errc <- store.Put(ctx, chunk):
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
hs = append(hs, chunk)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for all chunks to be stored
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
err := <-errc
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mget(store ChunkStore, hs []Address, f func(h Address, chunk Chunk) error) error {
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
wg.Add(len(hs))
|
wg.Add(len(hs))
|
||||||
errc := make(chan error)
|
errc := make(chan error)
|
||||||
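A rough sketch of how these helpers combine, assuming a *testing.T in scope; newLDBStore, mputRandomChunks, chunkAddresses and mget are the helpers defined in this file, and ch is the swarm/chunk import alias:

// sketch only: put n random chunks into a fresh LDBStore and read them all back
ldb, cleanup := newLDBStore(t)
defer cleanup()

chunks, err := mputRandomChunks(ldb, 100, ch.DefaultSize)
if err != nil {
	t.Fatal(err)
}
// a nil check function means mget only verifies that every address can be retrieved
if err := mget(ldb, chunkAddresses(chunks), nil); err != nil {
	t.Fatal(err)
}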
@ -104,6 +134,7 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk *Chunk) error)
|
|||||||
for _, k := range hs {
|
for _, k := range hs {
|
||||||
go func(h Address) {
|
go func(h Address) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
// TODO: write timeout with context
|
||||||
chunk, err := store.Get(context.TODO(), h)
|
chunk, err := store.Get(context.TODO(), h)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errc <- err
|
errc <- err
|
||||||
@ -143,57 +174,54 @@ func (r *brokenLimitedReader) Read(buf []byte) (int, error) {
|
|||||||
return r.lr.Read(buf)
|
return r.lr.Read(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateRandomData(l int) (r io.Reader, slice []byte) {
|
func testStoreRandom(m ChunkStore, n int, chunksize int64, t *testing.T) {
|
||||||
slice = make([]byte, l)
|
chunks, err := mputRandomChunks(m, n, chunksize)
|
||||||
if _, err := rand.Read(slice); err != nil {
|
if err != nil {
|
||||||
panic("rand error")
|
t.Fatalf("expected no error, got %v", err)
|
||||||
}
|
}
|
||||||
r = io.LimitReader(bytes.NewReader(slice), int64(l))
|
err = mget(m, chunkAddresses(chunks), nil)
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func testStoreRandom(m ChunkStore, processors int, n int, chunksize int64, t *testing.T) {
|
|
||||||
hs := mputRandomChunks(m, processors, n, chunksize)
|
|
||||||
err := mget(m, hs, nil)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("testStore failed: %v", err)
|
t.Fatalf("testStore failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testStoreCorrect(m ChunkStore, processors int, n int, chunksize int64, t *testing.T) {
|
func testStoreCorrect(m ChunkStore, n int, chunksize int64, t *testing.T) {
|
||||||
hs := mputRandomChunks(m, processors, n, chunksize)
|
chunks, err := mputRandomChunks(m, n, chunksize)
|
||||||
f := func(h Address, chunk *Chunk) error {
|
if err != nil {
|
||||||
if !bytes.Equal(h, chunk.Addr) {
|
t.Fatalf("expected no error, got %v", err)
|
||||||
return fmt.Errorf("key does not match retrieved chunk Key")
|
}
|
||||||
|
f := func(h Address, chunk Chunk) error {
|
||||||
|
if !bytes.Equal(h, chunk.Address()) {
|
||||||
|
return fmt.Errorf("key does not match retrieved chunk Address")
|
||||||
}
|
}
|
||||||
hasher := MakeHashFunc(DefaultHash)()
|
hasher := MakeHashFunc(DefaultHash)()
|
||||||
hasher.ResetWithLength(chunk.SData[:8])
|
hasher.ResetWithLength(chunk.SpanBytes())
|
||||||
hasher.Write(chunk.SData[8:])
|
hasher.Write(chunk.Payload())
|
||||||
exp := hasher.Sum(nil)
|
exp := hasher.Sum(nil)
|
||||||
if !bytes.Equal(h, exp) {
|
if !bytes.Equal(h, exp) {
|
||||||
return fmt.Errorf("key is not hash of chunk data")
|
return fmt.Errorf("key is not hash of chunk data")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
err := mget(m, hs, f)
|
err = mget(m, chunkAddresses(chunks), f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("testStore failed: %v", err)
|
t.Fatalf("testStore failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func benchmarkStorePut(store ChunkStore, processors int, n int, chunksize int64, b *testing.B) {
|
func benchmarkStorePut(store ChunkStore, n int, chunksize int64, b *testing.B) {
|
||||||
chunks := make([]*Chunk, n)
|
chunks := make([]Chunk, n)
|
||||||
i := 0
|
i := 0
|
||||||
f := func(dataSize int64) *Chunk {
|
f := func(dataSize int64) Chunk {
|
||||||
chunk := GenerateRandomChunk(dataSize)
|
chunk := GenerateRandomChunk(dataSize)
|
||||||
chunks[i] = chunk
|
chunks[i] = chunk
|
||||||
i++
|
i++
|
||||||
return chunk
|
return chunk
|
||||||
}
|
}
|
||||||
|
|
||||||
mput(store, processors, n, f)
|
mput(store, n, f)
|
||||||
|
|
||||||
f = func(dataSize int64) *Chunk {
|
f = func(dataSize int64) Chunk {
|
||||||
chunk := chunks[i]
|
chunk := chunks[i]
|
||||||
i++
|
i++
|
||||||
return chunk
|
return chunk
|
||||||
@ -204,18 +232,62 @@ func benchmarkStorePut(store ChunkStore, processors int, n int, chunksize int64,
|
|||||||
|
|
||||||
for j := 0; j < b.N; j++ {
|
for j := 0; j < b.N; j++ {
|
||||||
i = 0
|
i = 0
|
||||||
mput(store, processors, n, f)
|
mput(store, n, f)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func benchmarkStoreGet(store ChunkStore, processors int, n int, chunksize int64, b *testing.B) {
|
func benchmarkStoreGet(store ChunkStore, n int, chunksize int64, b *testing.B) {
|
||||||
hs := mputRandomChunks(store, processors, n, chunksize)
|
chunks, err := mputRandomChunks(store, n, chunksize)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("expected no error, got %v", err)
|
||||||
|
}
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
|
addrs := chunkAddresses(chunks)
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
err := mget(store, hs, nil)
|
err := mget(store, addrs, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("mget failed: %v", err)
|
b.Fatalf("mget failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MapChunkStore is a very simple ChunkStore implementation to store chunks in a map in memory.
|
||||||
|
type MapChunkStore struct {
|
||||||
|
chunks map[string]Chunk
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMapChunkStore() *MapChunkStore {
|
||||||
|
return &MapChunkStore{
|
||||||
|
chunks: make(map[string]Chunk),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapChunkStore) Put(_ context.Context, ch Chunk) error {
|
||||||
|
m.mu.Lock()
|
||||||
|
defer m.mu.Unlock()
|
||||||
|
m.chunks[ch.Address().Hex()] = ch
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
|
||||||
|
m.mu.RLock()
|
||||||
|
defer m.mu.RUnlock()
|
||||||
|
chunk := m.chunks[ref.Hex()]
|
||||||
|
if chunk == nil {
|
||||||
|
return nil, ErrChunkNotFound
|
||||||
|
}
|
||||||
|
return chunk, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MapChunkStore) Close() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func chunkAddresses(chunks []Chunk) []Address {
|
||||||
|
addrs := make([]Address, len(chunks))
|
||||||
|
for i, ch := range chunks {
|
||||||
|
addrs[i] = ch.Address()
|
||||||
|
}
|
||||||
|
return addrs
|
||||||
|
}
|
||||||
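MapChunkStore is just a map behind a sync.RWMutex, which makes it a convenient ChunkStore for tests that need no disk or network I/O. A small usage sketch, assuming a *testing.T in scope and GenerateRandomChunk and ch.DefaultSize as used elsewhere in this file:

// sketch only: round-trip a random chunk through the in-memory store
store := NewMapChunkStore()
chunk := GenerateRandomChunk(ch.DefaultSize)

if err := store.Put(context.Background(), chunk); err != nil {
	t.Fatal(err)
}
got, err := store.Get(context.Background(), chunk.Address())
if err != nil {
	t.Fatal(err)
}
if !bytes.Equal(got.Data(), chunk.Data()) {
	t.Fatal("data mismatch")
}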
|
@ -1,54 +0,0 @@
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import "context"
|
|
||||||
|
|
||||||
// wrapper of db-s to provide mockable custom local chunk store access to syncer
|
|
||||||
type DBAPI struct {
|
|
||||||
db *LDBStore
|
|
||||||
loc *LocalStore
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDBAPI(loc *LocalStore) *DBAPI {
|
|
||||||
return &DBAPI{loc.DbStore, loc}
|
|
||||||
}
|
|
||||||
|
|
||||||
// to obtain the chunks from address or request db entry only
|
|
||||||
func (d *DBAPI) Get(ctx context.Context, addr Address) (*Chunk, error) {
|
|
||||||
return d.loc.Get(ctx, addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// current storage counter of chunk db
|
|
||||||
func (d *DBAPI) CurrentBucketStorageIndex(po uint8) uint64 {
|
|
||||||
return d.db.CurrentBucketStorageIndex(po)
|
|
||||||
}
|
|
||||||
|
|
||||||
// iteration storage counter and proximity order
|
|
||||||
func (d *DBAPI) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
|
|
||||||
return d.db.SyncIterator(from, to, po, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// to obtain the chunks from address or request db entry only
|
|
||||||
func (d *DBAPI) GetOrCreateRequest(ctx context.Context, addr Address) (*Chunk, bool) {
|
|
||||||
return d.loc.GetOrCreateRequest(ctx, addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// to obtain the chunks from key or request db entry only
|
|
||||||
func (d *DBAPI) Put(ctx context.Context, chunk *Chunk) {
|
|
||||||
d.loc.Put(ctx, chunk)
|
|
||||||
}
|
|
@ -49,11 +49,11 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
|
|||||||
fileStore := NewFileStore(localStore, NewFileStoreParams())
|
fileStore := NewFileStore(localStore, NewFileStoreParams())
|
||||||
defer os.RemoveAll("/tmp/bzz")
|
defer os.RemoveAll("/tmp/bzz")
|
||||||
|
|
||||||
reader, slice := generateRandomData(testDataSize)
|
reader, slice := GenerateRandomData(testDataSize)
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
|
key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Store error: %v", err)
|
t.Fatalf("Store error: %v", err)
|
||||||
}
|
}
|
||||||
err = wait(ctx)
|
err = wait(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -66,13 +66,13 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
|
|||||||
resultSlice := make([]byte, len(slice))
|
resultSlice := make([]byte, len(slice))
|
||||||
n, err := resultReader.ReadAt(resultSlice, 0)
|
n, err := resultReader.ReadAt(resultSlice, 0)
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
t.Errorf("Retrieve error: %v", err)
|
t.Fatalf("Retrieve error: %v", err)
|
||||||
}
|
}
|
||||||
if n != len(slice) {
|
if n != len(slice) {
|
||||||
t.Errorf("Slice size error got %d, expected %d.", n, len(slice))
|
t.Fatalf("Slice size error got %d, expected %d.", n, len(slice))
|
||||||
}
|
}
|
||||||
if !bytes.Equal(slice, resultSlice) {
|
if !bytes.Equal(slice, resultSlice) {
|
||||||
t.Errorf("Comparison error.")
|
t.Fatalf("Comparison error.")
|
||||||
}
|
}
|
||||||
ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
|
ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
|
||||||
ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
|
ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
|
||||||
@ -86,13 +86,13 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
|
|||||||
}
|
}
|
||||||
n, err = resultReader.ReadAt(resultSlice, 0)
|
n, err = resultReader.ReadAt(resultSlice, 0)
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
t.Errorf("Retrieve error after removing memStore: %v", err)
|
t.Fatalf("Retrieve error after removing memStore: %v", err)
|
||||||
}
|
}
|
||||||
if n != len(slice) {
|
if n != len(slice) {
|
||||||
t.Errorf("Slice size error after removing memStore got %d, expected %d.", n, len(slice))
|
t.Fatalf("Slice size error after removing memStore got %d, expected %d.", n, len(slice))
|
||||||
}
|
}
|
||||||
if !bytes.Equal(slice, resultSlice) {
|
if !bytes.Equal(slice, resultSlice) {
|
||||||
t.Errorf("Comparison error after removing memStore.")
|
t.Fatalf("Comparison error after removing memStore.")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -114,7 +114,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
|
|||||||
DbStore: db,
|
DbStore: db,
|
||||||
}
|
}
|
||||||
fileStore := NewFileStore(localStore, NewFileStoreParams())
|
fileStore := NewFileStore(localStore, NewFileStoreParams())
|
||||||
reader, slice := generateRandomData(testDataSize)
|
reader, slice := GenerateRandomData(testDataSize)
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
|
key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -122,7 +122,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
|
|||||||
}
|
}
|
||||||
err = wait(ctx)
|
err = wait(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Store error: %v", err)
|
t.Fatalf("Store error: %v", err)
|
||||||
}
|
}
|
||||||
resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key)
|
resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key)
|
||||||
if isEncrypted != toEncrypt {
|
if isEncrypted != toEncrypt {
|
||||||
@ -131,13 +131,13 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
|
|||||||
resultSlice := make([]byte, len(slice))
|
resultSlice := make([]byte, len(slice))
|
||||||
n, err := resultReader.ReadAt(resultSlice, 0)
|
n, err := resultReader.ReadAt(resultSlice, 0)
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
t.Errorf("Retrieve error: %v", err)
|
t.Fatalf("Retrieve error: %v", err)
|
||||||
}
|
}
|
||||||
if n != len(slice) {
|
if n != len(slice) {
|
||||||
t.Errorf("Slice size error got %d, expected %d.", n, len(slice))
|
t.Fatalf("Slice size error got %d, expected %d.", n, len(slice))
|
||||||
}
|
}
|
||||||
if !bytes.Equal(slice, resultSlice) {
|
if !bytes.Equal(slice, resultSlice) {
|
||||||
t.Errorf("Comparison error.")
|
t.Fatalf("Comparison error.")
|
||||||
}
|
}
|
||||||
// Clear memStore
|
// Clear memStore
|
||||||
memStore.setCapacity(0)
|
memStore.setCapacity(0)
|
||||||
@ -148,7 +148,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
|
|||||||
t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
|
t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
|
||||||
}
|
}
|
||||||
if _, err = resultReader.ReadAt(resultSlice, 0); err == nil {
|
if _, err = resultReader.ReadAt(resultSlice, 0); err == nil {
|
||||||
t.Errorf("Was able to read %d bytes from an empty memStore.", len(slice))
|
t.Fatalf("Was able to read %d bytes from an empty memStore.", len(slice))
|
||||||
}
|
}
|
||||||
// check how it works with localStore
|
// check how it works with localStore
|
||||||
fileStore.ChunkStore = localStore
|
fileStore.ChunkStore = localStore
|
||||||
@ -162,12 +162,12 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
|
|||||||
}
|
}
|
||||||
n, err = resultReader.ReadAt(resultSlice, 0)
|
n, err = resultReader.ReadAt(resultSlice, 0)
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
t.Errorf("Retrieve error after clearing memStore: %v", err)
|
t.Fatalf("Retrieve error after clearing memStore: %v", err)
|
||||||
}
|
}
|
||||||
if n != len(slice) {
|
if n != len(slice) {
|
||||||
t.Errorf("Slice size error after clearing memStore got %d, expected %d.", n, len(slice))
|
t.Fatalf("Slice size error after clearing memStore got %d, expected %d.", n, len(slice))
|
||||||
}
|
}
|
||||||
if !bytes.Equal(slice, resultSlice) {
|
if !bytes.Equal(slice, resultSlice) {
|
||||||
t.Errorf("Comparison error after clearing memStore.")
|
t.Fatalf("Comparison error after clearing memStore.")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -19,10 +19,10 @@ package storage
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
ch "github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
|
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -32,29 +32,34 @@ type hasherStore struct {
|
|||||||
hashFunc SwarmHasher
|
hashFunc SwarmHasher
|
||||||
hashSize int // content hash size
|
hashSize int // content hash size
|
||||||
refSize int64 // reference size (content hash + possibly encryption key)
|
refSize int64 // reference size (content hash + possibly encryption key)
|
||||||
wg *sync.WaitGroup
|
nrChunks uint64 // number of chunks to store
|
||||||
closed chan struct{}
|
errC chan error // global error channel
|
||||||
|
doneC chan struct{} // closed by Close() call to indicate that count is the final number of chunks
|
||||||
|
quitC chan struct{} // closed to quit unterminated routines
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewHasherStore creates a hasherStore object, which implements Putter and Getter interfaces.
|
// NewHasherStore creates a hasherStore object, which implements Putter and Getter interfaces.
|
||||||
// With the HasherStore you can put and get chunk data (which is just []byte) into a ChunkStore
|
// With the HasherStore you can put and get chunk data (which is just []byte) into a ChunkStore
|
||||||
// and the hasherStore will take care of encryption/decryption of data if necessary
|
// and the hasherStore will take care of encryption/decryption of data if necessary
|
||||||
func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore {
|
func NewHasherStore(store ChunkStore, hashFunc SwarmHasher, toEncrypt bool) *hasherStore {
|
||||||
hashSize := hashFunc().Size()
|
hashSize := hashFunc().Size()
|
||||||
refSize := int64(hashSize)
|
refSize := int64(hashSize)
|
||||||
if toEncrypt {
|
if toEncrypt {
|
||||||
refSize += encryption.KeyLength
|
refSize += encryption.KeyLength
|
||||||
}
|
}
|
||||||
|
|
||||||
return &hasherStore{
|
h := &hasherStore{
|
||||||
store: chunkStore,
|
store: store,
|
||||||
toEncrypt: toEncrypt,
|
toEncrypt: toEncrypt,
|
||||||
hashFunc: hashFunc,
|
hashFunc: hashFunc,
|
||||||
hashSize: hashSize,
|
hashSize: hashSize,
|
||||||
refSize: refSize,
|
refSize: refSize,
|
||||||
wg: &sync.WaitGroup{},
|
errC: make(chan error),
|
||||||
closed: make(chan struct{}),
|
doneC: make(chan struct{}),
|
||||||
|
quitC: make(chan struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return h
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put stores the chunkData into the ChunkStore of the hasherStore and returns the reference.
|
// Put stores the chunkData into the ChunkStore of the hasherStore and returns the reference.
|
||||||
@ -62,7 +67,6 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool)
|
|||||||
// Asynchronous function, the data will not necessarily be stored when it returns.
|
// Asynchronous function, the data will not necessarily be stored when it returns.
|
||||||
func (h *hasherStore) Put(ctx context.Context, chunkData ChunkData) (Reference, error) {
|
func (h *hasherStore) Put(ctx context.Context, chunkData ChunkData) (Reference, error) {
|
||||||
c := chunkData
|
c := chunkData
|
||||||
size := chunkData.Size()
|
|
||||||
var encryptionKey encryption.Key
|
var encryptionKey encryption.Key
|
||||||
if h.toEncrypt {
|
if h.toEncrypt {
|
||||||
var err error
|
var err error
|
||||||
@ -71,29 +75,28 @@ func (h *hasherStore) Put(ctx context.Context, chunkData ChunkData) (Reference,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
chunk := h.createChunk(c, size)
|
chunk := h.createChunk(c)
|
||||||
|
|
||||||
h.storeChunk(ctx, chunk)
|
h.storeChunk(ctx, chunk)
|
||||||
|
|
||||||
return Reference(append(chunk.Addr, encryptionKey...)), nil
|
return Reference(append(chunk.Address(), encryptionKey...)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns data of the chunk with the given reference (retrieved from the ChunkStore of hasherStore).
|
// Get returns data of the chunk with the given reference (retrieved from the ChunkStore of hasherStore).
|
||||||
// If the data is encrypted and the reference contains an encryption key, it will be decrypted before
|
// If the data is encrypted and the reference contains an encryption key, it will be decrypted before
|
||||||
// return.
|
// return.
|
||||||
func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error) {
|
func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error) {
|
||||||
key, encryptionKey, err := parseReference(ref, h.hashSize)
|
addr, encryptionKey, err := parseReference(ref, h.hashSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
chunk, err := h.store.Get(ctx, addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkData := ChunkData(chunk.Data())
|
||||||
toDecrypt := (encryptionKey != nil)
|
toDecrypt := (encryptionKey != nil)
|
||||||
|
|
||||||
chunk, err := h.store.Get(ctx, key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
chunkData := chunk.SData
|
|
||||||
if toDecrypt {
|
if toDecrypt {
|
||||||
var err error
|
var err error
|
||||||
chunkData, err = h.decryptChunkData(chunkData, encryptionKey)
|
chunkData, err = h.decryptChunkData(chunkData, encryptionKey)
|
||||||
@ -107,17 +110,41 @@ func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error)
|
|||||||
// Close indicates that no more chunks will be put with the hasherStore, so the Wait
|
// Close indicates that no more chunks will be put with the hasherStore, so the Wait
|
||||||
// function can return when all the previously put chunks have been stored.
|
// function can return when all the previously put chunks have been stored.
|
||||||
func (h *hasherStore) Close() {
|
func (h *hasherStore) Close() {
|
||||||
close(h.closed)
|
close(h.doneC)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait returns when
|
// Wait returns when
|
||||||
// 1) the Close() function has been called and
|
// 1) the Close() function has been called and
|
||||||
// 2) all the chunks which have been Put have been stored
|
// 2) all the chunks which have been Put have been stored
|
||||||
func (h *hasherStore) Wait(ctx context.Context) error {
|
func (h *hasherStore) Wait(ctx context.Context) error {
|
||||||
<-h.closed
|
defer close(h.quitC)
|
||||||
h.wg.Wait()
|
var nrStoredChunks uint64 // number of stored chunks
|
||||||
|
var done bool
|
||||||
|
doneC := h.doneC
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
// if context is done earlier, just return with the error
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
// doneC is closed once all chunks have been submitted; from then on we just wait until all of them are also stored
|
||||||
|
case <-doneC:
|
||||||
|
done = true
|
||||||
|
doneC = nil
|
||||||
|
// a chunk has been stored; if err is nil it was stored successfully, so increase the stored chunk counter
|
||||||
|
case err := <-h.errC:
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
nrStoredChunks++
|
||||||
|
}
|
||||||
|
// if all the chunks have been submitted and all of them are stored, then we can return
|
||||||
|
if done {
|
||||||
|
if nrStoredChunks >= atomic.LoadUint64(&h.nrChunks) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
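The doneC/errC/nrChunks bookkeeping above lets Wait pair every submitted chunk with a store acknowledgement. From a caller's point of view the intended lifecycle is Put, Close, Wait, then Get; a hedged sketch of that sequence inside some function returning error, where store and chunkData are assumed to be in scope:

// sketch only: the intended hasherStore lifecycle
hs := NewHasherStore(store, MakeHashFunc(DefaultHash), false /* toEncrypt */)

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

ref, err := hs.Put(ctx, chunkData) // asynchronous: the chunk may not be stored yet
if err != nil {
	return err
}
hs.Close() // no more Puts will follow

// Wait returns once every submitted chunk has been stored, with the first storage error,
// or with ctx.Err() if the context expires first.
if err := hs.Wait(ctx); err != nil {
	return err
}

retrieved, err := hs.Get(ctx, ref)
if err != nil {
	return err
}
_ = retrieved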
|
|
||||||
func (h *hasherStore) createHash(chunkData ChunkData) Address {
|
func (h *hasherStore) createHash(chunkData ChunkData) Address {
|
||||||
hasher := h.hashFunc()
|
hasher := h.hashFunc()
|
||||||
@ -126,12 +153,9 @@ func (h *hasherStore) createHash(chunkData ChunkData) Address {
|
|||||||
return hasher.Sum(nil)
|
return hasher.Sum(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *hasherStore) createChunk(chunkData ChunkData, chunkSize int64) *Chunk {
|
func (h *hasherStore) createChunk(chunkData ChunkData) *chunk {
|
||||||
hash := h.createHash(chunkData)
|
hash := h.createHash(chunkData)
|
||||||
chunk := NewChunk(hash, nil)
|
chunk := NewChunk(hash, chunkData)
|
||||||
chunk.SData = chunkData
|
|
||||||
chunk.Size = chunkSize
|
|
||||||
|
|
||||||
return chunk
|
return chunk
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -162,10 +186,10 @@ func (h *hasherStore) decryptChunkData(chunkData ChunkData, encryptionKey encryp
|
|||||||
|
|
||||||
// removing extra bytes which were just added for padding
|
// removing extra bytes which were just added for padding
|
||||||
length := ChunkData(decryptedSpan).Size()
|
length := ChunkData(decryptedSpan).Size()
|
||||||
for length > chunk.DefaultSize {
|
for length > ch.DefaultSize {
|
||||||
length = length + (chunk.DefaultSize - 1)
|
length = length + (ch.DefaultSize - 1)
|
||||||
length = length / chunk.DefaultSize
|
length = length / ch.DefaultSize
|
||||||
length *= h.refSize
|
length *= uint64(h.refSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
c := make(ChunkData, length+8)
|
c := make(ChunkData, length+8)
|
||||||
@ -205,32 +229,32 @@ func (h *hasherStore) decrypt(chunkData ChunkData, key encryption.Key) ([]byte,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h *hasherStore) newSpanEncryption(key encryption.Key) encryption.Encryption {
|
func (h *hasherStore) newSpanEncryption(key encryption.Key) encryption.Encryption {
|
||||||
return encryption.New(key, 0, uint32(chunk.DefaultSize/h.refSize), sha3.NewKeccak256)
|
return encryption.New(key, 0, uint32(ch.DefaultSize/h.refSize), sha3.NewKeccak256)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryption {
|
func (h *hasherStore) newDataEncryption(key encryption.Key) encryption.Encryption {
|
||||||
return encryption.New(key, int(chunk.DefaultSize), 0, sha3.NewKeccak256)
|
return encryption.New(key, int(ch.DefaultSize), 0, sha3.NewKeccak256)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *hasherStore) storeChunk(ctx context.Context, chunk *Chunk) {
|
func (h *hasherStore) storeChunk(ctx context.Context, chunk *chunk) {
|
||||||
h.wg.Add(1)
|
atomic.AddUint64(&h.nrChunks, 1)
|
||||||
go func() {
|
go func() {
|
||||||
<-chunk.dbStoredC
|
select {
|
||||||
h.wg.Done()
|
case h.errC <- h.store.Put(ctx, chunk):
|
||||||
|
case <-h.quitC:
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
h.store.Put(ctx, chunk)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseReference(ref Reference, hashSize int) (Address, encryption.Key, error) {
|
func parseReference(ref Reference, hashSize int) (Address, encryption.Key, error) {
|
||||||
encryptedKeyLength := hashSize + encryption.KeyLength
|
encryptedRefLength := hashSize + encryption.KeyLength
|
||||||
switch len(ref) {
|
switch len(ref) {
|
||||||
case KeyLength:
|
case AddressLength:
|
||||||
return Address(ref), nil, nil
|
return Address(ref), nil, nil
|
||||||
case encryptedKeyLength:
|
case encryptedRefLength:
|
||||||
encKeyIdx := len(ref) - encryption.KeyLength
|
encKeyIdx := len(ref) - encryption.KeyLength
|
||||||
return Address(ref[:encKeyIdx]), encryption.Key(ref[encKeyIdx:]), nil
|
return Address(ref[:encKeyIdx]), encryption.Key(ref[encKeyIdx:]), nil
|
||||||
default:
|
default:
|
||||||
return nil, nil, fmt.Errorf("Invalid reference length, expected %v or %v got %v", hashSize, encryptedKeyLength, len(ref))
|
return nil, nil, fmt.Errorf("Invalid reference length, expected %v or %v got %v", hashSize, encryptedRefLength, len(ref))
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
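parseReference only discriminates on length: a plain reference is just the chunk address, an encrypted one is the address followed by the decryption key. A tiny sketch, assuming addr (Address) and encKey (encryption.Key) values of the usual 32 bytes each:

// sketch only: splitting an encrypted reference back into address and encryption key
ref := Reference(append(append([]byte{}, addr...), encKey...)) // 32-byte address + 32-byte key
a, k, err := parseReference(ref, len(addr))                    // a == addr, k == encKey
if err != nil {
	return err
}
_, _ = a, k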
|
@ -46,14 +46,16 @@ func TestHasherStore(t *testing.T) {
|
|||||||
hasherStore := NewHasherStore(chunkStore, MakeHashFunc(DefaultHash), tt.toEncrypt)
|
hasherStore := NewHasherStore(chunkStore, MakeHashFunc(DefaultHash), tt.toEncrypt)
|
||||||
|
|
||||||
// Put two random chunks into the hasherStore
|
// Put two random chunks into the hasherStore
|
||||||
chunkData1 := GenerateRandomChunk(int64(tt.chunkLength)).SData
|
chunkData1 := GenerateRandomChunk(int64(tt.chunkLength)).Data()
|
||||||
key1, err := hasherStore.Put(context.TODO(), chunkData1)
|
ctx, cancel := context.WithTimeout(context.Background(), getTimeout)
|
||||||
|
defer cancel()
|
||||||
|
key1, err := hasherStore.Put(ctx, chunkData1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error got \"%v\"", err)
|
t.Fatalf("Expected no error got \"%v\"", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkData2 := GenerateRandomChunk(int64(tt.chunkLength)).SData
|
chunkData2 := GenerateRandomChunk(int64(tt.chunkLength)).Data()
|
||||||
key2, err := hasherStore.Put(context.TODO(), chunkData2)
|
key2, err := hasherStore.Put(ctx, chunkData2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error got \"%v\"", err)
|
t.Fatalf("Expected no error got \"%v\"", err)
|
||||||
}
|
}
|
||||||
@ -61,13 +63,13 @@ func TestHasherStore(t *testing.T) {
|
|||||||
hasherStore.Close()
|
hasherStore.Close()
|
||||||
|
|
||||||
// Wait until chunks are really stored
|
// Wait until chunks are really stored
|
||||||
err = hasherStore.Wait(context.TODO())
|
err = hasherStore.Wait(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error got \"%v\"", err)
|
t.Fatalf("Expected no error got \"%v\"", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the first chunk
|
// Get the first chunk
|
||||||
retrievedChunkData1, err := hasherStore.Get(context.TODO(), key1)
|
retrievedChunkData1, err := hasherStore.Get(ctx, key1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error, got \"%v\"", err)
|
t.Fatalf("Expected no error, got \"%v\"", err)
|
||||||
}
|
}
|
||||||
@ -78,7 +80,7 @@ func TestHasherStore(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get the second chunk
|
// Get the second chunk
|
||||||
retrievedChunkData2, err := hasherStore.Get(context.TODO(), key2)
|
retrievedChunkData2, err := hasherStore.Get(ctx, key2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error, got \"%v\"", err)
|
t.Fatalf("Expected no error, got \"%v\"", err)
|
||||||
}
|
}
|
||||||
@ -105,12 +107,12 @@ func TestHasherStore(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check if chunk data in store is encrypted or not
|
// Check if chunk data in store is encrypted or not
|
||||||
chunkInStore, err := chunkStore.Get(context.TODO(), hash1)
|
chunkInStore, err := chunkStore.Get(ctx, hash1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error got \"%v\"", err)
|
t.Fatalf("Expected no error got \"%v\"", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkDataInStore := chunkInStore.SData
|
chunkDataInStore := chunkInStore.Data()
|
||||||
|
|
||||||
if tt.toEncrypt && bytes.Equal(chunkData1, chunkDataInStore) {
|
if tt.toEncrypt && bytes.Equal(chunkData1, chunkDataInStore) {
|
||||||
t.Fatalf("Chunk expected to be encrypted but it is stored without encryption")
|
t.Fatalf("Chunk expected to be encrypted but it is stored without encryption")
|
||||||
|
@ -28,6 +28,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@ -36,7 +37,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
ch "github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage/mock"
|
"github.com/ethereum/go-ethereum/swarm/storage/mock"
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
@ -62,6 +63,10 @@ var (
|
|||||||
keyDistanceCnt = byte(7)
|
keyDistanceCnt = byte(7)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrDBClosed = errors.New("LDBStore closed")
|
||||||
|
)
|
||||||
|
|
||||||
type gcItem struct {
|
type gcItem struct {
|
||||||
idx uint64
|
idx uint64
|
||||||
value uint64
|
value uint64
|
||||||
@ -99,18 +104,29 @@ type LDBStore struct {
|
|||||||
|
|
||||||
batchC chan bool
|
batchC chan bool
|
||||||
batchesC chan struct{}
|
batchesC chan struct{}
|
||||||
batch *leveldb.Batch
|
closed bool
|
||||||
|
batch *dbBatch
|
||||||
lock sync.RWMutex
|
lock sync.RWMutex
|
||||||
quit chan struct{}
|
quit chan struct{}
|
||||||
|
|
||||||
// The encodeDataFunc function is used to bypass
|
// The encodeDataFunc function is used to bypass
|
||||||
// the default functionality of DbStore with
|
// the default functionality of DbStore with
|
||||||
// mock.NodeStore for testing purposes.
|
// mock.NodeStore for testing purposes.
|
||||||
encodeDataFunc func(chunk *Chunk) []byte
|
encodeDataFunc func(chunk Chunk) []byte
|
||||||
// If getDataFunc is defined, it will be used for
|
// If getDataFunc is defined, it will be used for
|
||||||
// retrieving the chunk data instead from the local
|
// retrieving the chunk data instead from the local
|
||||||
// LevelDB database.
|
// LevelDB database.
|
||||||
getDataFunc func(addr Address) (data []byte, err error)
|
getDataFunc func(key Address) (data []byte, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type dbBatch struct {
|
||||||
|
*leveldb.Batch
|
||||||
|
err error
|
||||||
|
c chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBatch() *dbBatch {
|
||||||
|
return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Instead of passing the distance function, just pass the address from which distances are calculated
|
// TODO: Instead of passing the distance function, just pass the address from which distances are calculated
|
||||||
@ -121,10 +137,9 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
|
|||||||
s.hashfunc = params.Hash
|
s.hashfunc = params.Hash
|
||||||
s.quit = make(chan struct{})
|
s.quit = make(chan struct{})
|
||||||
|
|
||||||
s.batchC = make(chan bool)
|
|
||||||
s.batchesC = make(chan struct{}, 1)
|
s.batchesC = make(chan struct{}, 1)
|
||||||
go s.writeBatches()
|
go s.writeBatches()
|
||||||
s.batch = new(leveldb.Batch)
|
s.batch = newBatch()
|
||||||
// associate encodeData with default functionality
|
// associate encodeData with default functionality
|
||||||
s.encodeDataFunc = encodeData
|
s.encodeDataFunc = encodeData
|
||||||
|
|
||||||
@ -143,7 +158,6 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
|
|||||||
k[1] = uint8(i)
|
k[1] = uint8(i)
|
||||||
cnt, _ := s.db.Get(k)
|
cnt, _ := s.db.Get(k)
|
||||||
s.bucketCnt[i] = BytesToU64(cnt)
|
s.bucketCnt[i] = BytesToU64(cnt)
|
||||||
s.bucketCnt[i]++
|
|
||||||
}
|
}
|
||||||
data, _ := s.db.Get(keyEntryCnt)
|
data, _ := s.db.Get(keyEntryCnt)
|
||||||
s.entryCnt = BytesToU64(data)
|
s.entryCnt = BytesToU64(data)
|
||||||
@ -202,14 +216,6 @@ func getIndexKey(hash Address) []byte {
|
|||||||
return key
|
return key
|
||||||
}
|
}
|
||||||
|
|
||||||
func getOldDataKey(idx uint64) []byte {
|
|
||||||
key := make([]byte, 9)
|
|
||||||
key[0] = keyOldData
|
|
||||||
binary.BigEndian.PutUint64(key[1:9], idx)
|
|
||||||
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
|
|
||||||
func getDataKey(idx uint64, po uint8) []byte {
|
func getDataKey(idx uint64, po uint8) []byte {
|
||||||
key := make([]byte, 10)
|
key := make([]byte, 10)
|
||||||
key[0] = keyData
|
key[0] = keyData
|
||||||
@ -224,12 +230,12 @@ func encodeIndex(index *dpaDBIndex) []byte {
|
|||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeData(chunk *Chunk) []byte {
|
func encodeData(chunk Chunk) []byte {
|
||||||
// Always create a new underlying array for the returned byte slice.
|
// Always create a new underlying array for the returned byte slice.
|
||||||
// The chunk.Key array may be used in the returned slice which
|
// The chunk.Address array may be used in the returned slice which
|
||||||
// may be changed later in the code or by the LevelDB, resulting
|
// may be changed later in the code or by the LevelDB, resulting
|
||||||
// in the Key being changed as well.
|
// in the Address being changed as well.
|
||||||
return append(append([]byte{}, chunk.Addr[:]...), chunk.SData...)
|
return append(append([]byte{}, chunk.Address()[:]...), chunk.Data()...)
|
||||||
}
|
}
|
||||||
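The record produced by encodeData is therefore the 32-byte chunk address followed by the raw chunk data, whose own first 8 bytes carry the span (as Cleanup below reads from sdata[:8]); decodeData strips the address prefix again. A small sketch of the round trip, assuming a chunk value in scope:

// sketch only: the on-disk record layout implied by encodeData/decodeData
rec := encodeData(chunk)                     // [32-byte address | chunk data (span + payload)]
c, err := decodeData(Address(rec[:32]), rec) // rebuilds the chunk, dropping the address prefix
if err != nil {
	return err
}
_ = c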
|
|
||||||
func decodeIndex(data []byte, index *dpaDBIndex) error {
|
func decodeIndex(data []byte, index *dpaDBIndex) error {
|
||||||
@ -237,14 +243,8 @@ func decodeIndex(data []byte, index *dpaDBIndex) error {
|
|||||||
return dec.Decode(index)
|
return dec.Decode(index)
|
||||||
}
|
}
|
||||||
|
|
||||||
func decodeData(data []byte, chunk *Chunk) {
|
func decodeData(addr Address, data []byte) (*chunk, error) {
|
||||||
chunk.SData = data[32:]
|
return NewChunk(addr, data[32:]), nil
|
||||||
chunk.Size = int64(binary.BigEndian.Uint64(data[32:40]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeOldData(data []byte, chunk *Chunk) {
|
|
||||||
chunk.SData = data
|
|
||||||
chunk.Size = int64(binary.BigEndian.Uint64(data[0:8]))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *LDBStore) collectGarbage(ratio float32) {
|
func (s *LDBStore) collectGarbage(ratio float32) {
|
||||||
@ -347,14 +347,22 @@ func (s *LDBStore) Export(out io.Writer) (int64, error) {
|
|||||||
func (s *LDBStore) Import(in io.Reader) (int64, error) {
|
func (s *LDBStore) Import(in io.Reader) (int64, error) {
|
||||||
tr := tar.NewReader(in)
|
tr := tar.NewReader(in)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
countC := make(chan int64)
|
||||||
|
errC := make(chan error)
|
||||||
var count int64
|
var count int64
|
||||||
var wg sync.WaitGroup
|
go func() {
|
||||||
for {
|
for {
|
||||||
hdr, err := tr.Next()
|
hdr, err := tr.Next()
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
break
|
break
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return count, err
|
select {
|
||||||
|
case errC <- err:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(hdr.Name) != 64 {
|
if len(hdr.Name) != 64 {
|
||||||
@ -370,21 +378,44 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) {
|
|||||||
|
|
||||||
data, err := ioutil.ReadAll(tr)
|
data, err := ioutil.ReadAll(tr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return count, err
|
select {
|
||||||
|
case errC <- err:
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
}
|
}
|
||||||
key := Address(keybytes)
|
key := Address(keybytes)
|
||||||
chunk := NewChunk(key, nil)
|
chunk := NewChunk(key, data[32:])
|
||||||
chunk.SData = data[32:]
|
|
||||||
s.Put(context.TODO(), chunk)
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
select {
|
||||||
<-chunk.dbStoredC
|
case errC <- s.Put(ctx, chunk):
|
||||||
|
case <-ctx.Done():
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
count++
|
count++
|
||||||
}
|
}
|
||||||
wg.Wait()
|
countC <- count
|
||||||
return count, nil
|
}()
|
||||||
|
|
||||||
|
// wait for all chunks to be stored
|
||||||
|
i := int64(0)
|
||||||
|
var total int64
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case err := <-errC:
|
||||||
|
if err != nil {
|
||||||
|
return count, err
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
case total = <-countC:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return i, ctx.Err()
|
||||||
|
}
|
||||||
|
if total > 0 && i == total {
|
||||||
|
return total, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
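Import now fans the chunk writes out to goroutines and returns only once the stored count matches the number of tar entries read, or the context is cancelled. A hedged sketch of pairing it with Export to copy one store into another, where src and dst are assumed *LDBStore values:

// sketch only: stream all chunks from src into dst via Export/Import
pr, pw := io.Pipe()
go func() {
	_, err := src.Export(pw)
	pw.CloseWithError(err) // propagate the export error (or nil for a clean EOF) to the reader side
}()
n, err := dst.Import(pr)
if err != nil {
	return err
}
log.Info("chunks copied", "count", n)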
|
|
||||||
func (s *LDBStore) Cleanup() {
|
func (s *LDBStore) Cleanup() {
|
||||||
@ -430,15 +461,18 @@ func (s *LDBStore) Cleanup() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c := &Chunk{}
|
|
||||||
ck := data[:32]
|
ck := data[:32]
|
||||||
decodeData(data, c)
|
c, err := decodeData(ck, data)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("decodeData error", "err", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
cs := int64(binary.LittleEndian.Uint64(c.SData[:8]))
|
cs := int64(binary.LittleEndian.Uint64(c.sdata[:8]))
|
||||||
log.Trace("chunk", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs)
|
log.Trace("chunk", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)
|
||||||
|
|
||||||
if len(c.SData) > chunk.DefaultSize+8 {
|
if len(c.sdata) > ch.DefaultSize+8 {
|
||||||
log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs)
|
log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs)
|
||||||
s.delete(index.Idx, getIndexKey(key[1:]), po)
|
s.delete(index.Idx, getIndexKey(key[1:]), po)
|
||||||
removed++
|
removed++
|
||||||
errorsFound++
|
errorsFound++
|
||||||
@ -511,7 +545,6 @@ func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
|
|||||||
batch.Delete(getDataKey(idx, po))
|
batch.Delete(getDataKey(idx, po))
|
||||||
s.entryCnt--
|
s.entryCnt--
|
||||||
dbEntryCount.Dec(1)
|
dbEntryCount.Dec(1)
|
||||||
s.bucketCnt[po]--
|
|
||||||
cntKey := make([]byte, 2)
|
cntKey := make([]byte, 2)
|
||||||
cntKey[0] = keyDistanceCnt
|
cntKey[0] = keyDistanceCnt
|
||||||
cntKey[1] = po
|
cntKey[1] = po
|
||||||
@ -520,10 +553,9 @@ func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
|
|||||||
s.db.Write(batch)
|
s.db.Write(batch)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *LDBStore) CurrentBucketStorageIndex(po uint8) uint64 {
|
func (s *LDBStore) BinIndex(po uint8) uint64 {
|
||||||
s.lock.RLock()
|
s.lock.RLock()
|
||||||
defer s.lock.RUnlock()
|
defer s.lock.RUnlock()
|
||||||
|
|
||||||
return s.bucketCnt[po]
|
return s.bucketCnt[po]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -539,43 +571,53 @@ func (s *LDBStore) CurrentStorageIndex() uint64 {
|
|||||||
return s.dataIdx
|
return s.dataIdx
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *LDBStore) Put(ctx context.Context, chunk *Chunk) {
|
func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
|
||||||
metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
|
metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
|
||||||
log.Trace("ldbstore.put", "key", chunk.Addr)
|
log.Trace("ldbstore.put", "key", chunk.Address())
|
||||||
|
|
||||||
ikey := getIndexKey(chunk.Addr)
|
ikey := getIndexKey(chunk.Address())
|
||||||
var index dpaDBIndex
|
var index dpaDBIndex
|
||||||
|
|
||||||
po := s.po(chunk.Addr)
|
po := s.po(chunk.Address())
|
||||||
s.lock.Lock()
|
|
||||||
defer s.lock.Unlock()
|
|
||||||
|
|
||||||
log.Trace("ldbstore.put: s.db.Get", "key", chunk.Addr, "ikey", fmt.Sprintf("%x", ikey))
|
s.lock.Lock()
|
||||||
|
|
||||||
|
if s.closed {
|
||||||
|
s.lock.Unlock()
|
||||||
|
return ErrDBClosed
|
||||||
|
}
|
||||||
|
batch := s.batch
|
||||||
|
|
||||||
|
log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
|
||||||
idata, err := s.db.Get(ikey)
|
idata, err := s.db.Get(ikey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.doPut(chunk, &index, po)
|
s.doPut(chunk, &index, po)
|
||||||
batchC := s.batchC
|
|
||||||
go func() {
|
|
||||||
<-batchC
|
|
||||||
chunk.markAsStored()
|
|
||||||
}()
|
|
||||||
} else {
|
} else {
|
||||||
log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Addr)
|
log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Address)
|
||||||
decodeIndex(idata, &index)
|
decodeIndex(idata, &index)
|
||||||
chunk.markAsStored()
|
|
||||||
}
|
}
|
||||||
index.Access = s.accessCnt
|
index.Access = s.accessCnt
|
||||||
s.accessCnt++
|
s.accessCnt++
|
||||||
idata = encodeIndex(&index)
|
idata = encodeIndex(&index)
|
||||||
s.batch.Put(ikey, idata)
|
s.batch.Put(ikey, idata)
|
||||||
|
|
||||||
|
s.lock.Unlock()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case s.batchesC <- struct{}{}:
|
case s.batchesC <- struct{}{}:
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-batch.c:
|
||||||
|
return batch.err
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
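Put no longer signals completion through a per-chunk dbStoredC channel; the caller blocks on the batch its index update was queued into. A minimal sketch of that hand-off, assuming dbBatch (defined elsewhere in this change) pairs a leveldb.Batch with a done channel c and an err field filled in by the writer goroutine; the field names are inferred from the calls above, and the sketch assumes "context" and "github.com/syndtr/goleveldb/leveldb" are imported:

	// sketch of the batch type as inferred from newBatch, b.c and b.err above
	type dbBatch struct {
		*leveldb.Batch
		c   chan struct{} // closed by the writer once the batch has been flushed
		err error         // flush error, only meaningful after c is closed
	}

	func newBatch() *dbBatch {
		return &dbBatch{Batch: new(leveldb.Batch), c: make(chan struct{})}
	}

	// waitFlushed mirrors the tail of Put: wait for the flush, or give up
	// when the caller's context expires.
	func waitFlushed(ctx context.Context, b *dbBatch) error {
		select {
		case <-b.c:
			return b.err
		case <-ctx.Done():
			return ctx.Err()
		}
	}

Every Put queued into the same batch observes the same err, which is why write errors now reach callers instead of only being logged.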
|
|
||||||
// force putting into db, does not check access index
|
// force putting into db, does not check access index
|
||||||
func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) {
|
func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
|
||||||
data := s.encodeDataFunc(chunk)
|
data := s.encodeDataFunc(chunk)
|
||||||
dkey := getDataKey(s.dataIdx, po)
|
dkey := getDataKey(s.dataIdx, po)
|
||||||
s.batch.Put(dkey, data)
|
s.batch.Put(dkey, data)
|
||||||
@ -592,26 +634,36 @@ func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *LDBStore) writeBatches() {
|
func (s *LDBStore) writeBatches() {
|
||||||
mainLoop:
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-s.quit:
|
case <-s.quit:
|
||||||
break mainLoop
|
log.Debug("DbStore: quit batch write loop")
|
||||||
|
return
|
||||||
case <-s.batchesC:
|
case <-s.batchesC:
|
||||||
|
err := s.writeCurrentBatch()
|
||||||
|
if err != nil {
|
||||||
|
log.Debug("DbStore: quit batch write loop", "err", err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *LDBStore) writeCurrentBatch() error {
|
||||||
s.lock.Lock()
|
s.lock.Lock()
|
||||||
|
defer s.lock.Unlock()
|
||||||
b := s.batch
|
b := s.batch
|
||||||
|
l := b.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
e := s.entryCnt
|
e := s.entryCnt
|
||||||
d := s.dataIdx
|
d := s.dataIdx
|
||||||
a := s.accessCnt
|
a := s.accessCnt
|
||||||
c := s.batchC
|
s.batch = newBatch()
|
||||||
s.batchC = make(chan bool)
|
b.err = s.writeBatch(b, e, d, a)
|
||||||
s.batch = new(leveldb.Batch)
|
close(b.c)
|
||||||
err := s.writeBatch(b, e, d, a)
|
|
||||||
// TODO: set this error on the batch, then tell the chunk
|
|
||||||
if err != nil {
|
|
||||||
log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", b.Len(), err))
|
|
||||||
}
|
|
||||||
close(c)
|
|
||||||
for e > s.capacity {
|
for e > s.capacity {
|
||||||
log.Trace("for >", "e", e, "s.capacity", s.capacity)
|
log.Trace("for >", "e", e, "s.capacity", s.capacity)
|
||||||
// Collect garbage in a separate goroutine
|
// Collect garbage in a separate goroutine
|
||||||
@ -625,25 +677,21 @@ mainLoop:
|
|||||||
|
|
||||||
select {
|
select {
|
||||||
case <-s.quit:
|
case <-s.quit:
|
||||||
s.lock.Unlock()
|
return errors.New("CollectGarbage terminated due to quit")
|
||||||
break mainLoop
|
|
||||||
case <-done:
|
case <-done:
|
||||||
}
|
}
|
||||||
e = s.entryCnt
|
e = s.entryCnt
|
||||||
}
|
}
|
||||||
s.lock.Unlock()
|
return nil
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Trace(fmt.Sprintf("DbStore: quit batch write loop"))
|
|
||||||
}
|
}
|
||||||
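Put and the writer goroutine communicate through a non-blocking send on batchesC: duplicate pokes collapse into one and the writer flushes whatever has accumulated since its last pass. A generic sketch of that coalescing-notification idiom, not the literal code (which also swaps s.batch under the lock inside writeCurrentBatch):

	// coalescingWriter starts a consumer that flushes once per wake-up and
	// stops on quit or on the first flush error, as writeBatches does above.
	func coalescingWriter(quit <-chan struct{}, flush func() error) chan struct{} {
		notify := make(chan struct{}, 1)
		go func() {
			for {
				select {
				case <-quit:
					return
				case <-notify:
					if err := flush(); err != nil {
						return
					}
				}
			}
		}()
		return notify
	}

On the producer side the send is guarded exactly as in Put: a select with a default case, so a Put never blocks just because the writer is busy.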
|
|
||||||
// must be called non concurrently
|
// must be called non concurrently
|
||||||
func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uint64) error {
|
func (s *LDBStore) writeBatch(b *dbBatch, entryCnt, dataIdx, accessCnt uint64) error {
|
||||||
b.Put(keyEntryCnt, U64ToBytes(entryCnt))
|
b.Put(keyEntryCnt, U64ToBytes(entryCnt))
|
||||||
b.Put(keyDataIdx, U64ToBytes(dataIdx))
|
b.Put(keyDataIdx, U64ToBytes(dataIdx))
|
||||||
b.Put(keyAccessCnt, U64ToBytes(accessCnt))
|
b.Put(keyAccessCnt, U64ToBytes(accessCnt))
|
||||||
l := b.Len()
|
l := b.Len()
|
||||||
if err := s.db.Write(b); err != nil {
|
if err := s.db.Write(b.Batch); err != nil {
|
||||||
return fmt.Errorf("unable to write batch: %v", err)
|
return fmt.Errorf("unable to write batch: %v", err)
|
||||||
}
|
}
|
||||||
log.Trace(fmt.Sprintf("batch write (%d entries)", l))
|
log.Trace(fmt.Sprintf("batch write (%d entries)", l))
|
||||||
@ -654,12 +702,12 @@ func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uin
|
|||||||
// to a mock store to bypass the default functionality encodeData.
|
// to a mock store to bypass the default functionality encodeData.
|
||||||
// The constructed function always returns the nil data, as DbStore does
|
// The constructed function always returns the nil data, as DbStore does
|
||||||
// not need to store the data, but still need to create the index.
|
// not need to store the data, but still need to create the index.
|
||||||
func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk *Chunk) []byte {
|
func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte {
|
||||||
return func(chunk *Chunk) []byte {
|
return func(chunk Chunk) []byte {
|
||||||
if err := mockStore.Put(chunk.Addr, encodeData(chunk)); err != nil {
|
if err := mockStore.Put(chunk.Address(), encodeData(chunk)); err != nil {
|
||||||
log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Addr.Log(), err))
|
log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Address().Log(), err))
|
||||||
}
|
}
|
||||||
return chunk.Addr[:]
|
return chunk.Address()[:]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
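newMockEncodeDataFunc keeps the index in LevelDB while the chunk payload goes to a per-node mock store; the closure stores the data out of band and returns only the address bytes for the data bucket. The wiring of the hook is not part of this hunk; a hedged sketch of how a mock-backed store could be assembled (constructor name and parameter type are hypothetical):

	func newMockLDBStore(params *LDBStoreParams, mockStore *mock.NodeStore) (*LDBStore, error) {
		s, err := NewLDBStore(params)
		if err != nil {
			return nil, err
		}
		// every doPut now hands the payload to mockStore instead of LevelDB;
		// a matching getDataFunc hook would be needed on the read path
		s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
		return s, nil
	}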
|
|
||||||
@ -682,7 +730,7 @@ func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) {
|
func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) {
|
||||||
metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
|
metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
|
||||||
log.Trace("ldbstore.get", "key", addr)
|
log.Trace("ldbstore.get", "key", addr)
|
||||||
|
|
||||||
@ -691,9 +739,11 @@ func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err err
|
|||||||
return s.get(addr)
|
return s.get(addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *LDBStore) get(addr Address) (chunk *Chunk, err error) {
|
func (s *LDBStore) get(addr Address) (chunk *chunk, err error) {
|
||||||
var indx dpaDBIndex
|
var indx dpaDBIndex
|
||||||
|
if s.closed {
|
||||||
|
return nil, ErrDBClosed
|
||||||
|
}
|
||||||
if s.tryAccessIdx(getIndexKey(addr), &indx) {
|
if s.tryAccessIdx(getIndexKey(addr), &indx) {
|
||||||
var data []byte
|
var data []byte
|
||||||
if s.getDataFunc != nil {
|
if s.getDataFunc != nil {
|
||||||
@ -716,9 +766,7 @@ func (s *LDBStore) get(addr Address) (chunk *Chunk, err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
chunk = NewChunk(addr, nil)
|
return decodeData(addr, data)
|
||||||
chunk.markAsStored()
|
|
||||||
decodeData(data, chunk)
|
|
||||||
} else {
|
} else {
|
||||||
err = ErrChunkNotFound
|
err = ErrChunkNotFound
|
||||||
}
|
}
|
||||||
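Get now returns a chunk decoded straight from the stored record instead of filling in a caller-allocated *Chunk and signalling readiness separately. A caller-side sketch, assuming the storage package context; the accessors replace the old exported fields:

	func fetchPayload(s *LDBStore, addr Address) ([]byte, error) {
		c, err := s.Get(context.TODO(), addr)
		if err != nil {
			// ErrChunkNotFound, ErrDBClosed, or an underlying leveldb error
			return nil, err
		}
		// Data() replaces direct reads of SData, Address() replaces Addr
		return c.Data(), nil
	}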
@ -772,6 +820,12 @@ func (s *LDBStore) setCapacity(c uint64) {
|
|||||||
|
|
||||||
func (s *LDBStore) Close() {
|
func (s *LDBStore) Close() {
|
||||||
close(s.quit)
|
close(s.quit)
|
||||||
|
s.lock.Lock()
|
||||||
|
s.closed = true
|
||||||
|
s.lock.Unlock()
|
||||||
|
// force writing out current batch
|
||||||
|
s.writeCurrentBatch()
|
||||||
|
close(s.batchesC)
|
||||||
s.db.Close()
|
s.db.Close()
|
||||||
}
|
}
|
||||||
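Close now flags the store as closed under the lock, forces a last flush of the pending batch and only then tears down batchesC and the database, so late writers fail fast rather than waiting on a batch that will never be written. Caller-side sketch, assuming the storage package context:

	func shutdownExample(s *LDBStore, c Chunk) {
		s.Close()
		// a Put issued after Close returns immediately with ErrDBClosed
		if err := s.Put(context.TODO(), c); err != ErrDBClosed {
			log.Error("expected ErrDBClosed after Close", "err", err)
		}
	}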
|
|
||||||
|
@ -22,13 +22,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
ch "github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
|
"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
|
||||||
|
|
||||||
ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
|
ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
|
||||||
@ -86,70 +85,54 @@ func (db *testDbStore) close() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testDbStoreRandom(n int, processors int, chunksize int64, mock bool, t *testing.T) {
|
func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
|
||||||
db, cleanup, err := newTestDbStore(mock, true)
|
db, cleanup, err := newTestDbStore(mock, true)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
t.Fatalf("init dbStore failed: %v", err)
|
||||||
}
|
}
|
||||||
testStoreRandom(db, processors, n, chunksize, t)
|
testStoreRandom(db, n, chunksize, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testDbStoreCorrect(n int, processors int, chunksize int64, mock bool, t *testing.T) {
|
func testDbStoreCorrect(n int, chunksize int64, mock bool, t *testing.T) {
|
||||||
db, cleanup, err := newTestDbStore(mock, false)
|
db, cleanup, err := newTestDbStore(mock, false)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("init dbStore failed: %v", err)
|
t.Fatalf("init dbStore failed: %v", err)
|
||||||
}
|
}
|
||||||
testStoreCorrect(db, processors, n, chunksize, t)
|
testStoreCorrect(db, n, chunksize, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDbStoreRandom_1(t *testing.T) {
|
func TestDbStoreRandom_1(t *testing.T) {
|
||||||
testDbStoreRandom(1, 1, 0, false, t)
|
testDbStoreRandom(1, 0, false, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDbStoreCorrect_1(t *testing.T) {
|
func TestDbStoreCorrect_1(t *testing.T) {
|
||||||
testDbStoreCorrect(1, 1, 4096, false, t)
|
testDbStoreCorrect(1, 4096, false, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDbStoreRandom_1_5k(t *testing.T) {
|
func TestDbStoreRandom_5k(t *testing.T) {
|
||||||
testDbStoreRandom(8, 5000, 0, false, t)
|
testDbStoreRandom(5000, 0, false, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDbStoreRandom_8_5k(t *testing.T) {
|
func TestDbStoreCorrect_5k(t *testing.T) {
|
||||||
testDbStoreRandom(8, 5000, 0, false, t)
|
testDbStoreCorrect(5000, 4096, false, t)
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreCorrect_1_5k(t *testing.T) {
|
|
||||||
testDbStoreCorrect(1, 5000, 4096, false, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDbStoreCorrect_8_5k(t *testing.T) {
|
|
||||||
testDbStoreCorrect(8, 5000, 4096, false, t)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMockDbStoreRandom_1(t *testing.T) {
|
func TestMockDbStoreRandom_1(t *testing.T) {
|
||||||
testDbStoreRandom(1, 1, 0, true, t)
|
testDbStoreRandom(1, 0, true, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMockDbStoreCorrect_1(t *testing.T) {
|
func TestMockDbStoreCorrect_1(t *testing.T) {
|
||||||
testDbStoreCorrect(1, 1, 4096, true, t)
|
testDbStoreCorrect(1, 4096, true, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMockDbStoreRandom_1_5k(t *testing.T) {
|
func TestMockDbStoreRandom_5k(t *testing.T) {
|
||||||
testDbStoreRandom(8, 5000, 0, true, t)
|
testDbStoreRandom(5000, 0, true, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMockDbStoreRandom_8_5k(t *testing.T) {
|
func TestMockDbStoreCorrect_5k(t *testing.T) {
|
||||||
testDbStoreRandom(8, 5000, 0, true, t)
|
testDbStoreCorrect(5000, 4096, true, t)
|
||||||
}
|
|
||||||
|
|
||||||
func TestMockDbStoreCorrect_1_5k(t *testing.T) {
|
|
||||||
testDbStoreCorrect(1, 5000, 4096, true, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMockDbStoreCorrect_8_5k(t *testing.T) {
|
|
||||||
testDbStoreCorrect(8, 5000, 4096, true, t)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func testDbStoreNotFound(t *testing.T, mock bool) {
|
func testDbStoreNotFound(t *testing.T, mock bool) {
|
||||||
@ -185,26 +168,19 @@ func testIterator(t *testing.T, mock bool) {
|
|||||||
t.Fatalf("init dbStore failed: %v", err)
|
t.Fatalf("init dbStore failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
chunks := GenerateRandomChunks(chunk.DefaultSize, chunkcount)
|
chunks := GenerateRandomChunks(ch.DefaultSize, chunkcount)
|
||||||
|
|
||||||
wg := &sync.WaitGroup{}
|
|
||||||
wg.Add(len(chunks))
|
|
||||||
for i = 0; i < len(chunks); i++ {
|
for i = 0; i < len(chunks); i++ {
|
||||||
db.Put(context.TODO(), chunks[i])
|
chunkkeys[i] = chunks[i].Address()
|
||||||
chunkkeys[i] = chunks[i].Addr
|
err := db.Put(context.TODO(), chunks[i])
|
||||||
j := i
|
if err != nil {
|
||||||
go func() {
|
t.Fatalf("dbStore.Put failed: %v", err)
|
||||||
defer wg.Done()
|
}
|
||||||
<-chunks[j].dbStoredC
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//testSplit(m, l, 128, chunkkeys, t)
|
|
||||||
|
|
||||||
for i = 0; i < len(chunkkeys); i++ {
|
for i = 0; i < len(chunkkeys); i++ {
|
||||||
log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
|
log.Trace(fmt.Sprintf("Chunk array pos %d/%d: '%v'", i, chunkcount, chunkkeys[i]))
|
||||||
}
|
}
|
||||||
wg.Wait()
|
|
||||||
i = 0
|
i = 0
|
||||||
for poc = 0; poc <= 255; poc++ {
|
for poc = 0; poc <= 255; poc++ {
|
||||||
err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
|
err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
|
||||||
@ -239,7 +215,7 @@ func benchmarkDbStorePut(n int, processors int, chunksize int64, mock bool, b *t
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("init dbStore failed: %v", err)
|
b.Fatalf("init dbStore failed: %v", err)
|
||||||
}
|
}
|
||||||
benchmarkStorePut(db, processors, n, chunksize, b)
|
benchmarkStorePut(db, n, chunksize, b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
|
func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *testing.B) {
|
||||||
@ -248,7 +224,7 @@ func benchmarkDbStoreGet(n int, processors int, chunksize int64, mock bool, b *t
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("init dbStore failed: %v", err)
|
b.Fatalf("init dbStore failed: %v", err)
|
||||||
}
|
}
|
||||||
benchmarkStoreGet(db, processors, n, chunksize, b)
|
benchmarkStoreGet(db, n, chunksize, b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkDbStorePut_1_500(b *testing.B) {
|
func BenchmarkDbStorePut_1_500(b *testing.B) {
|
||||||
@ -293,35 +269,22 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
|
|||||||
ldb.setCapacity(uint64(capacity))
|
ldb.setCapacity(uint64(capacity))
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
chunks := []*Chunk{}
|
chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
|
||||||
for i := 0; i < n; i++ {
|
if err != nil {
|
||||||
c := GenerateRandomChunk(chunk.DefaultSize)
|
t.Fatal(err.Error())
|
||||||
chunks = append(chunks, c)
|
|
||||||
log.Trace("generate random chunk", "idx", i, "chunk", c)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
go ldb.Put(context.TODO(), chunks[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for all chunks to be stored
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
<-chunks[i].dbStoredC
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for _, ch := range chunks {
|
||||||
ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
|
ret, err := ldb.Get(context.TODO(), ch.Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(ret.SData, chunks[i].SData) {
|
if !bytes.Equal(ret.Data(), ch.Data()) {
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
t.Fatal("expected to get the same data back, but got smth else")
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("got back chunk", "chunk", ret)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ldb.entryCnt != uint64(n) {
|
if ldb.entryCnt != uint64(n) {
|
||||||
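The tests above now rely on mputRandomChunks, whose definition is not included in this hunk. From the call sites it generates n random chunks of the given size, stores them synchronously and returns them along with any Put error; a plausible shape, offered only as a sketch against the refactored ChunkStore interface:

	func mputRandomChunks(store ChunkStore, n int, chunksize int64) ([]Chunk, error) {
		chunks := make([]Chunk, 0, n)
		for i := 0; i < n; i++ {
			c := GenerateRandomChunk(chunksize)
			if err := store.Put(context.TODO(), c); err != nil {
				return nil, err
			}
			chunks = append(chunks, c)
		}
		return chunks, nil
	}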
@ -343,30 +306,18 @@ func TestLDBStoreCollectGarbage(t *testing.T) {
|
|||||||
ldb.setCapacity(uint64(capacity))
|
ldb.setCapacity(uint64(capacity))
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
chunks := []*Chunk{}
|
chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
|
||||||
for i := 0; i < n; i++ {
|
if err != nil {
|
||||||
c := GenerateRandomChunk(chunk.DefaultSize)
|
t.Fatal(err.Error())
|
||||||
chunks = append(chunks, c)
|
|
||||||
log.Trace("generate random chunk", "idx", i, "chunk", c)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
ldb.Put(context.TODO(), chunks[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for all chunks to be stored
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
<-chunks[i].dbStoredC
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
||||||
|
|
||||||
// wait for garbage collection to kick in on the responsible actor
|
// wait for garbage collection to kick in on the responsible actor
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
var missing int
|
var missing int
|
||||||
for i := 0; i < n; i++ {
|
for _, ch := range chunks {
|
||||||
ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
|
ret, err := ldb.Get(context.Background(), ch.Address())
|
||||||
if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
|
if err == ErrChunkNotFound || err == ldberrors.ErrNotFound {
|
||||||
missing++
|
missing++
|
||||||
continue
|
continue
|
||||||
@ -375,7 +326,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(ret.SData, chunks[i].SData) {
|
if !bytes.Equal(ret.Data(), ch.Data()) {
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
t.Fatal("expected to get the same data back, but got smth else")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -396,38 +347,27 @@ func TestLDBStoreAddRemove(t *testing.T) {
|
|||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
n := 100
|
n := 100
|
||||||
|
chunks, err := mputRandomChunks(ldb, n, int64(ch.DefaultSize))
|
||||||
chunks := []*Chunk{}
|
if err != nil {
|
||||||
for i := 0; i < n; i++ {
|
t.Fatal(err)
|
||||||
c := GenerateRandomChunk(chunk.DefaultSize)
|
|
||||||
chunks = append(chunks, c)
|
|
||||||
log.Trace("generate random chunk", "idx", i, "chunk", c)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
go ldb.Put(context.TODO(), chunks[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for all chunks to be stored before continuing
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
<-chunks[i].dbStoredC
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
// delete all even index chunks
|
// delete all even index chunks
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
ldb.Delete(chunks[i].Addr)
|
ldb.Delete(chunks[i].Address())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
ret, err := ldb.Get(context.TODO(), chunks[i].Addr)
|
ret, err := ldb.Get(nil, chunks[i].Address())
|
||||||
|
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
// expect even chunks to be missing
|
// expect even chunks to be missing
|
||||||
if err == nil || ret != nil {
|
if err == nil {
|
||||||
|
// TODO: also assert err == ErrChunkNotFound here
|
||||||
t.Fatal("expected chunk to be missing, but got no error")
|
t.Fatal("expected chunk to be missing, but got no error")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@ -436,7 +376,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
|
|||||||
t.Fatalf("expected no error, but got %s", err)
|
t.Fatalf("expected no error, but got %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(ret.SData, chunks[i].SData) {
|
if !bytes.Equal(ret.Data(), chunks[i].Data()) {
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
t.Fatal("expected to get the same data back, but got smth else")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -446,15 +386,16 @@ func TestLDBStoreAddRemove(t *testing.T) {
|
|||||||
// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
|
// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
|
||||||
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
|
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
|
||||||
capacity := 11
|
capacity := 11
|
||||||
|
surplus := 4
|
||||||
|
|
||||||
ldb, cleanup := newLDBStore(t)
|
ldb, cleanup := newLDBStore(t)
|
||||||
ldb.setCapacity(uint64(capacity))
|
ldb.setCapacity(uint64(capacity))
|
||||||
|
|
||||||
n := 11
|
n := capacity
|
||||||
|
|
||||||
chunks := []*Chunk{}
|
chunks := []Chunk{}
|
||||||
for i := 0; i < capacity; i++ {
|
for i := 0; i < n+surplus; i++ {
|
||||||
c := GenerateRandomChunk(chunk.DefaultSize)
|
c := GenerateRandomChunk(ch.DefaultSize)
|
||||||
chunks = append(chunks, c)
|
chunks = append(chunks, c)
|
||||||
log.Trace("generate random chunk", "idx", i, "chunk", c)
|
log.Trace("generate random chunk", "idx", i, "chunk", c)
|
||||||
}
|
}
|
||||||
@ -463,53 +404,54 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
|
|||||||
ldb.Put(context.TODO(), chunks[i])
|
ldb.Put(context.TODO(), chunks[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for all chunks to be stored before continuing
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
<-chunks[i].dbStoredC
|
|
||||||
}
|
|
||||||
|
|
||||||
// delete all chunks
|
// delete all chunks
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
ldb.Delete(chunks[i].Addr)
|
ldb.Delete(chunks[i].Address())
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
|
||||||
|
|
||||||
|
if ldb.entryCnt != 0 {
|
||||||
|
t.Fatalf("ldb.entrCnt expected 0 got %v", ldb.entryCnt)
|
||||||
|
}
|
||||||
|
|
||||||
|
expAccessCnt := uint64(n * 2)
|
||||||
|
if ldb.accessCnt != expAccessCnt {
|
||||||
|
t.Fatalf("ldb.accessCnt expected %v got %v", expAccessCnt, ldb.entryCnt)
|
||||||
|
}
|
||||||
|
|
||||||
cleanup()
|
cleanup()
|
||||||
|
|
||||||
ldb, cleanup = newLDBStore(t)
|
ldb, cleanup = newLDBStore(t)
|
||||||
capacity = 10
|
capacity = 10
|
||||||
ldb.setCapacity(uint64(capacity))
|
ldb.setCapacity(uint64(capacity))
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
n = 11
|
n = capacity + surplus
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
ldb.Put(context.TODO(), chunks[i])
|
ldb.Put(context.TODO(), chunks[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for all chunks to be stored before continuing
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
<-chunks[i].dbStoredC
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait for garbage collection
|
// wait for garbage collection
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
// expect for first chunk to be missing, because it has the smallest access value
|
// expect first surplus chunks to be missing, because they have the smallest access value
|
||||||
idx := 0
|
for i := 0; i < surplus; i++ {
|
||||||
ret, err := ldb.Get(context.TODO(), chunks[idx].Addr)
|
_, err := ldb.Get(context.TODO(), chunks[i].Address())
|
||||||
if err == nil || ret != nil {
|
if err == nil {
|
||||||
t.Fatal("expected first chunk to be missing, but got no error")
|
t.Fatal("expected surplus chunk to be missing, but got no error")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// expect for last chunk to be present, as it has the largest access value
|
// expect last chunks to be present, as they have the largest access value
|
||||||
idx = 9
|
for i := surplus; i < surplus+capacity; i++ {
|
||||||
ret, err = ldb.Get(context.TODO(), chunks[idx].Addr)
|
ret, err := ldb.Get(context.TODO(), chunks[i].Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("expected no error, but got %s", err)
|
t.Fatalf("chunk %v: expected no error, but got %s", i, err)
|
||||||
}
|
}
|
||||||
|
if !bytes.Equal(ret.Data(), chunks[i].Data()) {
|
||||||
if !bytes.Equal(ret.SData, chunks[idx].SData) {
|
|
||||||
t.Fatal("expected to get the same data back, but got smth else")
|
t.Fatal("expected to get the same data back, but got smth else")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
@ -18,8 +18,6 @@ package storage
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
@ -97,123 +95,89 @@ func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) {
|
|||||||
// when the chunk is stored in memstore.
|
// when the chunk is stored in memstore.
|
||||||
// After the LDBStore.Put, it is ensured that the MemStore
|
// After the LDBStore.Put, it is ensured that the MemStore
|
||||||
// contains the chunk with the same data, but nil ReqC channel.
|
// contains the chunk with the same data, but nil ReqC channel.
|
||||||
func (ls *LocalStore) Put(ctx context.Context, chunk *Chunk) {
|
func (ls *LocalStore) Put(ctx context.Context, chunk Chunk) error {
|
||||||
valid := true
|
valid := true
|
||||||
// ls.Validators contains a list of one validator per chunk type.
|
// ls.Validators contains a list of one validator per chunk type.
|
||||||
// if one validator succeeds, then the chunk is valid
|
// if one validator succeeds, then the chunk is valid
|
||||||
for _, v := range ls.Validators {
|
for _, v := range ls.Validators {
|
||||||
if valid = v.Validate(chunk.Addr, chunk.SData); valid {
|
if valid = v.Validate(chunk.Address(), chunk.Data()); valid {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !valid {
|
if !valid {
|
||||||
log.Trace("invalid chunk", "addr", chunk.Addr, "len", len(chunk.SData))
|
return ErrChunkInvalid
|
||||||
chunk.SetErrored(ErrChunkInvalid)
|
|
||||||
chunk.markAsStored()
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Trace("localstore.put", "addr", chunk.Addr)
|
log.Trace("localstore.put", "key", chunk.Address())
|
||||||
|
|
||||||
ls.mu.Lock()
|
ls.mu.Lock()
|
||||||
defer ls.mu.Unlock()
|
defer ls.mu.Unlock()
|
||||||
|
|
||||||
chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
|
_, err := ls.memStore.Get(ctx, chunk.Address())
|
||||||
|
if err == nil {
|
||||||
memChunk, err := ls.memStore.Get(ctx, chunk.Addr)
|
return nil
|
||||||
switch err {
|
|
||||||
case nil:
|
|
||||||
if memChunk.ReqC == nil {
|
|
||||||
chunk.markAsStored()
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
case ErrChunkNotFound:
|
if err != nil && err != ErrChunkNotFound {
|
||||||
default:
|
return err
|
||||||
chunk.SetErrored(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ls.DbStore.Put(ctx, chunk)
|
|
||||||
|
|
||||||
// chunk is no longer a request, but a chunk with data, so replace it in memStore
|
|
||||||
newc := NewChunk(chunk.Addr, nil)
|
|
||||||
newc.SData = chunk.SData
|
|
||||||
newc.Size = chunk.Size
|
|
||||||
newc.dbStoredC = chunk.dbStoredC
|
|
||||||
|
|
||||||
ls.memStore.Put(ctx, newc)
|
|
||||||
|
|
||||||
if memChunk != nil && memChunk.ReqC != nil {
|
|
||||||
close(memChunk.ReqC)
|
|
||||||
}
|
}
|
||||||
|
ls.memStore.Put(ctx, chunk)
|
||||||
|
err = ls.DbStore.Put(ctx, chunk)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
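Because LocalStore.Put now returns an error, validation failures reach the caller directly instead of being parked on the chunk via SetErrored. Caller-side sketch, assuming the storage package context:

	func storeValidated(ls *LocalStore, c Chunk) error {
		switch err := ls.Put(context.TODO(), c); err {
		case nil:
			return nil
		case ErrChunkInvalid:
			// none of ls.Validators accepted the chunk; nothing was stored
			return err
		default:
			// error propagated from the memstore or the LDBStore (e.g. ErrDBClosed)
			return err
		}
	}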
|
|
||||||
// Get(chunk *Chunk) looks up a chunk in the local stores
|
// Get(chunk *Chunk) looks up a chunk in the local stores
|
||||||
// This method is blocking until the chunk is retrieved
|
// This method is blocking until the chunk is retrieved
|
||||||
// so additional timeout may be needed to wrap this call if
|
// so additional timeout may be needed to wrap this call if
|
||||||
// ChunkStores are remote and can have long latency
|
// ChunkStores are remote and can have long latency
|
||||||
func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) {
|
func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk Chunk, err error) {
|
||||||
ls.mu.Lock()
|
ls.mu.Lock()
|
||||||
defer ls.mu.Unlock()
|
defer ls.mu.Unlock()
|
||||||
|
|
||||||
return ls.get(ctx, addr)
|
return ls.get(ctx, addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk *Chunk, err error) {
|
func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk Chunk, err error) {
|
||||||
chunk, err = ls.memStore.Get(ctx, addr)
|
chunk, err = ls.memStore.Get(ctx, addr)
|
||||||
|
|
||||||
|
if err != nil && err != ErrChunkNotFound {
|
||||||
|
metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if chunk.ReqC != nil {
|
|
||||||
select {
|
|
||||||
case <-chunk.ReqC:
|
|
||||||
default:
|
|
||||||
metrics.GetOrRegisterCounter("localstore.get.errfetching", nil).Inc(1)
|
|
||||||
return chunk, ErrFetching
|
|
||||||
}
|
|
||||||
}
|
|
||||||
metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1)
|
metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1)
|
||||||
return
|
return chunk, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1)
|
metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1)
|
||||||
chunk, err = ls.DbStore.Get(ctx, addr)
|
chunk, err = ls.DbStore.Get(ctx, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
|
metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1)
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
|
|
||||||
ls.memStore.Put(ctx, chunk)
|
ls.memStore.Put(ctx, chunk)
|
||||||
return
|
return chunk, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// retrieve logic common for local and network chunk retrieval requests
|
func (ls *LocalStore) FetchFunc(ctx context.Context, addr Address) func(context.Context) error {
|
||||||
func (ls *LocalStore) GetOrCreateRequest(ctx context.Context, addr Address) (chunk *Chunk, created bool) {
|
|
||||||
metrics.GetOrRegisterCounter("localstore.getorcreaterequest", nil).Inc(1)
|
|
||||||
|
|
||||||
ls.mu.Lock()
|
ls.mu.Lock()
|
||||||
defer ls.mu.Unlock()
|
defer ls.mu.Unlock()
|
||||||
|
|
||||||
var err error
|
_, err := ls.get(ctx, addr)
|
||||||
chunk, err = ls.get(ctx, addr)
|
if err == nil {
|
||||||
if err == nil && chunk.GetErrored() == nil {
|
return nil
|
||||||
metrics.GetOrRegisterCounter("localstore.getorcreaterequest.hit", nil).Inc(1)
|
|
||||||
log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v found locally", addr))
|
|
||||||
return chunk, false
|
|
||||||
}
|
}
|
||||||
if err == ErrFetching && chunk.GetErrored() == nil {
|
return func(context.Context) error {
|
||||||
metrics.GetOrRegisterCounter("localstore.getorcreaterequest.errfetching", nil).Inc(1)
|
return err
|
||||||
log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v hit on an existing request %v", addr, chunk.ReqC))
|
|
||||||
return chunk, false
|
|
||||||
}
|
}
|
||||||
// no data and no request status
|
|
||||||
metrics.GetOrRegisterCounter("localstore.getorcreaterequest.miss", nil).Inc(1)
|
|
||||||
log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v not found locally. open new request", addr))
|
|
||||||
chunk = NewChunk(addr, make(chan bool))
|
|
||||||
ls.memStore.Put(ctx, chunk)
|
|
||||||
return chunk, true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestsCacheLen returns the current number of outgoing requests stored in the cache
|
func (ls *LocalStore) BinIndex(po uint8) uint64 {
|
||||||
func (ls *LocalStore) RequestsCacheLen() int {
|
return ls.DbStore.BinIndex(po)
|
||||||
return ls.memStore.requests.Len()
|
}
|
||||||
|
|
||||||
|
func (ls *LocalStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
|
||||||
|
return ls.DbStore.SyncIterator(from, to, po, f)
|
||||||
}
|
}
|
||||||
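FetchFunc replaces GetOrCreateRequest: a nil return means the chunk is already available locally, a non-nil function reports the miss, and a networked store would presumably wrap the same signature to wait for delivery. Usage sketch, assuming the storage package context:

	func ensureLocal(ctx context.Context, ls *LocalStore, addr Address) error {
		wait := ls.FetchFunc(ctx, addr)
		if wait == nil {
			return nil // already present in the memstore or the LDBStore
		}
		// for the plain LocalStore this just reports the local lookup error
		return wait(ctx)
	}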
|
|
||||||
// Close the local store
|
// Close the local store
|
||||||
|
@ -17,11 +17,12 @@
|
|||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
ch "github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -50,29 +51,29 @@ func TestValidator(t *testing.T) {
|
|||||||
chunks := GenerateRandomChunks(259, 2)
|
chunks := GenerateRandomChunks(259, 2)
|
||||||
goodChunk := chunks[0]
|
goodChunk := chunks[0]
|
||||||
badChunk := chunks[1]
|
badChunk := chunks[1]
|
||||||
copy(badChunk.SData, goodChunk.SData)
|
copy(badChunk.Data(), goodChunk.Data())
|
||||||
|
|
||||||
PutChunks(store, goodChunk, badChunk)
|
errs := putChunks(store, goodChunk, badChunk)
|
||||||
if err := goodChunk.GetErrored(); err != nil {
|
if errs[0] != nil {
|
||||||
t.Fatalf("expected no error on good content address chunk in spite of no validation, but got: %s", err)
|
t.Fatalf("expected no error on good content address chunk in spite of no validation, but got: %s", err)
|
||||||
}
|
}
|
||||||
if err := badChunk.GetErrored(); err != nil {
|
if errs[1] != nil {
|
||||||
t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
|
t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// add content address validator and check puts
|
// add content address validator and check puts
|
||||||
// bad should fail, good should pass
|
// bad should fail, good should pass
|
||||||
store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc))
|
store.Validators = append(store.Validators, NewContentAddressValidator(hashfunc))
|
||||||
chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
|
chunks = GenerateRandomChunks(ch.DefaultSize, 2)
|
||||||
goodChunk = chunks[0]
|
goodChunk = chunks[0]
|
||||||
badChunk = chunks[1]
|
badChunk = chunks[1]
|
||||||
copy(badChunk.SData, goodChunk.SData)
|
copy(badChunk.Data(), goodChunk.Data())
|
||||||
|
|
||||||
PutChunks(store, goodChunk, badChunk)
|
errs = putChunks(store, goodChunk, badChunk)
|
||||||
if err := goodChunk.GetErrored(); err != nil {
|
if errs[0] != nil {
|
||||||
t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
|
t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
|
||||||
}
|
}
|
||||||
if err := badChunk.GetErrored(); err == nil {
|
if errs[1] == nil {
|
||||||
t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
|
t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -81,16 +82,16 @@ func TestValidator(t *testing.T) {
|
|||||||
var negV boolTestValidator
|
var negV boolTestValidator
|
||||||
store.Validators = append(store.Validators, negV)
|
store.Validators = append(store.Validators, negV)
|
||||||
|
|
||||||
chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
|
chunks = GenerateRandomChunks(ch.DefaultSize, 2)
|
||||||
goodChunk = chunks[0]
|
goodChunk = chunks[0]
|
||||||
badChunk = chunks[1]
|
badChunk = chunks[1]
|
||||||
copy(badChunk.SData, goodChunk.SData)
|
copy(badChunk.Data(), goodChunk.Data())
|
||||||
|
|
||||||
PutChunks(store, goodChunk, badChunk)
|
errs = putChunks(store, goodChunk, badChunk)
|
||||||
if err := goodChunk.GetErrored(); err != nil {
|
if errs[0] != nil {
|
||||||
t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
|
t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
|
||||||
}
|
}
|
||||||
if err := badChunk.GetErrored(); err == nil {
|
if errs[1] == nil {
|
||||||
t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
|
t.Fatal("expected error on bad content address chunk with content address validator only, but got nil")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -99,18 +100,19 @@ func TestValidator(t *testing.T) {
|
|||||||
var posV boolTestValidator = true
|
var posV boolTestValidator = true
|
||||||
store.Validators = append(store.Validators, posV)
|
store.Validators = append(store.Validators, posV)
|
||||||
|
|
||||||
chunks = GenerateRandomChunks(chunk.DefaultSize, 2)
|
chunks = GenerateRandomChunks(ch.DefaultSize, 2)
|
||||||
goodChunk = chunks[0]
|
goodChunk = chunks[0]
|
||||||
badChunk = chunks[1]
|
badChunk = chunks[1]
|
||||||
copy(badChunk.SData, goodChunk.SData)
|
copy(badChunk.Data(), goodChunk.Data())
|
||||||
|
|
||||||
PutChunks(store, goodChunk, badChunk)
|
errs = putChunks(store, goodChunk, badChunk)
|
||||||
if err := goodChunk.GetErrored(); err != nil {
|
if errs[0] != nil {
|
||||||
t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
|
t.Fatalf("expected no error on good content address chunk with content address validator only, but got: %s", err)
|
||||||
}
|
}
|
||||||
if err := badChunk.GetErrored(); err != nil {
|
if errs[1] != nil {
|
||||||
t.Fatalf("expected no error on bad content address chunk with content address validator only, but got: %s", err)
|
t.Fatalf("expected no error on bad content address chunk in spite of no validation, but got: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type boolTestValidator bool
|
type boolTestValidator bool
|
||||||
@ -118,3 +120,27 @@ type boolTestValidator bool
|
|||||||
func (self boolTestValidator) Validate(addr Address, data []byte) bool {
|
func (self boolTestValidator) Validate(addr Address, data []byte) bool {
|
||||||
return bool(self)
|
return bool(self)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// putChunks adds chunks to localstore
|
||||||
|
// It returns one error per chunk, in the order the chunks were passed,
|
||||||
|
// so callers can assert on individual failures
|
||||||
|
func putChunks(store *LocalStore, chunks ...Chunk) []error {
|
||||||
|
i := 0
|
||||||
|
f := func(n int64) Chunk {
|
||||||
|
chunk := chunks[i]
|
||||||
|
i++
|
||||||
|
return chunk
|
||||||
|
}
|
||||||
|
_, errs := put(store, len(chunks), f)
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
|
func put(store *LocalStore, n int, f func(i int64) Chunk) (hs []Address, errs []error) {
|
||||||
|
for i := int64(0); i < int64(n); i++ {
|
||||||
|
chunk := f(ch.DefaultSize)
|
||||||
|
err := store.Put(context.TODO(), chunk)
|
||||||
|
errs = append(errs, err)
|
||||||
|
hs = append(hs, chunk.Address())
|
||||||
|
}
|
||||||
|
return hs, errs
|
||||||
|
}
|
||||||
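put separates chunk generation from storing, and putChunks is the thin adapter that feeds pre-built chunks through it, as used in TestValidator above. A test that wants fresh random chunks can plug in a generator directly; sketch, assuming the storage package context:

	func putRandom(store *LocalStore, n int) ([]Address, []error) {
		return put(store, n, func(size int64) Chunk {
			return GenerateRandomChunk(size)
		})
	}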
|
@ -20,24 +20,17 @@ package storage
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"sync"
|
|
||||||
|
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
)
|
)
|
||||||
|
|
||||||
type MemStore struct {
|
type MemStore struct {
|
||||||
cache *lru.Cache
|
cache *lru.Cache
|
||||||
requests *lru.Cache
|
|
||||||
mu sync.RWMutex
|
|
||||||
disabled bool
|
disabled bool
|
||||||
}
|
}
|
||||||
|
|
||||||
//NewMemStore is instantiating a MemStore cache. We are keeping a record of all outgoing requests for chunks, that
|
//NewMemStore instantiates a MemStore cache that keeps all frequently requested
|
||||||
//should later be delivered by peer nodes, in the `requests` LRU cache. We are also keeping all frequently requested
|
|
||||||
//chunks in the `cache` LRU cache.
|
//chunks in the `cache` LRU cache.
|
||||||
//
|
|
||||||
//`requests` LRU cache capacity should ideally never be reached, this is why for the time being it should be initialised
|
|
||||||
//with the same value as the LDBStore capacity.
|
|
||||||
func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) {
|
func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) {
|
||||||
if params.CacheCapacity == 0 {
|
if params.CacheCapacity == 0 {
|
||||||
return &MemStore{
|
return &MemStore{
|
||||||
@ -45,102 +38,48 @@ func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
onEvicted := func(key interface{}, value interface{}) {
|
c, err := lru.New(int(params.CacheCapacity))
|
||||||
v := value.(*Chunk)
|
|
||||||
<-v.dbStoredC
|
|
||||||
}
|
|
||||||
c, err := lru.NewWithEvict(int(params.CacheCapacity), onEvicted)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
requestEvicted := func(key interface{}, value interface{}) {
|
|
||||||
// temporary remove of the error log, until we figure out the problem, as it is too spamy
|
|
||||||
//log.Error("evict called on outgoing request")
|
|
||||||
}
|
|
||||||
r, err := lru.NewWithEvict(int(params.ChunkRequestsCacheCapacity), requestEvicted)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &MemStore{
|
return &MemStore{
|
||||||
cache: c,
|
cache: c,
|
||||||
requests: r,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemStore) Get(ctx context.Context, addr Address) (*Chunk, error) {
|
func (m *MemStore) Get(_ context.Context, addr Address) (Chunk, error) {
|
||||||
if m.disabled {
|
if m.disabled {
|
||||||
return nil, ErrChunkNotFound
|
return nil, ErrChunkNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
|
|
||||||
r, ok := m.requests.Get(string(addr))
|
|
||||||
// it is a request
|
|
||||||
if ok {
|
|
||||||
return r.(*Chunk), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// it is not a request
|
|
||||||
c, ok := m.cache.Get(string(addr))
|
c, ok := m.cache.Get(string(addr))
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, ErrChunkNotFound
|
return nil, ErrChunkNotFound
|
||||||
}
|
}
|
||||||
return c.(*Chunk), nil
|
return c.(*chunk), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MemStore) Put(ctx context.Context, c *Chunk) {
|
func (m *MemStore) Put(_ context.Context, c Chunk) error {
|
||||||
if m.disabled {
|
if m.disabled {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
m.mu.Lock()
|
m.cache.Add(string(c.Address()), c)
|
||||||
defer m.mu.Unlock()
|
return nil
|
||||||
|
|
||||||
// it is a request
|
|
||||||
if c.ReqC != nil {
|
|
||||||
select {
|
|
||||||
case <-c.ReqC:
|
|
||||||
if c.GetErrored() != nil {
|
|
||||||
m.requests.Remove(string(c.Addr))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m.cache.Add(string(c.Addr), c)
|
|
||||||
m.requests.Remove(string(c.Addr))
|
|
||||||
default:
|
|
||||||
m.requests.Add(string(c.Addr), c)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// it is not a request
|
|
||||||
m.cache.Add(string(c.Addr), c)
|
|
||||||
m.requests.Remove(string(c.Addr))
|
|
||||||
}
|
}
|
||||||
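With the requests LRU and the eviction bookkeeping removed, MemStore is a plain LRU of chunks keyed by address. Round-trip sketch, assuming the storage package context and the ch alias for swarm/chunk used in the tests:

	func memStoreRoundTrip() error {
		m := NewMemStore(NewStoreParams(4000, 200, nil, nil), nil)
		c := GenerateRandomChunk(ch.DefaultSize)
		if err := m.Put(context.TODO(), c); err != nil {
			return err
		}
		got, err := m.Get(context.TODO(), c.Address())
		if err != nil {
			return err
		}
		_ = got.Data() // same payload that went in
		return nil
	}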
|
|
||||||
func (m *MemStore) setCapacity(n int) {
|
func (m *MemStore) setCapacity(n int) {
|
||||||
if n <= 0 {
|
if n <= 0 {
|
||||||
m.disabled = true
|
m.disabled = true
|
||||||
} else {
|
} else {
|
||||||
onEvicted := func(key interface{}, value interface{}) {
|
c, err := lru.New(n)
|
||||||
v := value.(*Chunk)
|
|
||||||
<-v.dbStoredC
|
|
||||||
}
|
|
||||||
c, err := lru.NewWithEvict(n, onEvicted)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err := lru.New(defaultChunkRequestsCacheCapacity)
|
*m = MemStore{
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
m = &MemStore{
|
|
||||||
cache: c,
|
cache: c,
|
||||||
requests: r,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -18,11 +18,6 @@ package storage
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/rand"
|
|
||||||
"encoding/binary"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
"github.com/ethereum/go-ethereum/swarm/log"
|
||||||
@ -33,40 +28,32 @@ func newTestMemStore() *MemStore {
|
|||||||
return NewMemStore(storeparams, nil)
|
return NewMemStore(storeparams, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testMemStoreRandom(n int, processors int, chunksize int64, t *testing.T) {
|
func testMemStoreRandom(n int, chunksize int64, t *testing.T) {
|
||||||
m := newTestMemStore()
|
m := newTestMemStore()
|
||||||
defer m.Close()
|
defer m.Close()
|
||||||
testStoreRandom(m, processors, n, chunksize, t)
|
testStoreRandom(m, n, chunksize, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testMemStoreCorrect(n int, processors int, chunksize int64, t *testing.T) {
|
func testMemStoreCorrect(n int, chunksize int64, t *testing.T) {
|
||||||
m := newTestMemStore()
|
m := newTestMemStore()
|
||||||
defer m.Close()
|
defer m.Close()
|
||||||
testStoreCorrect(m, processors, n, chunksize, t)
|
testStoreCorrect(m, n, chunksize, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMemStoreRandom_1(t *testing.T) {
|
func TestMemStoreRandom_1(t *testing.T) {
|
||||||
testMemStoreRandom(1, 1, 0, t)
|
testMemStoreRandom(1, 0, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMemStoreCorrect_1(t *testing.T) {
|
func TestMemStoreCorrect_1(t *testing.T) {
|
||||||
testMemStoreCorrect(1, 1, 4104, t)
|
testMemStoreCorrect(1, 4104, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMemStoreRandom_1_1k(t *testing.T) {
|
func TestMemStoreRandom_1k(t *testing.T) {
|
||||||
testMemStoreRandom(1, 1000, 0, t)
|
testMemStoreRandom(1000, 0, t)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMemStoreCorrect_1_1k(t *testing.T) {
|
func TestMemStoreCorrect_1k(t *testing.T) {
|
||||||
testMemStoreCorrect(1, 100, 4096, t)
|
testMemStoreCorrect(100, 4096, t)
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemStoreRandom_8_1k(t *testing.T) {
|
|
||||||
testMemStoreRandom(8, 1000, 0, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemStoreCorrect_8_1k(t *testing.T) {
|
|
||||||
testMemStoreCorrect(8, 1000, 4096, t)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMemStoreNotFound(t *testing.T) {
|
func TestMemStoreNotFound(t *testing.T) {
|
||||||
@ -82,13 +69,13 @@ func TestMemStoreNotFound(t *testing.T) {
|
|||||||
func benchmarkMemStorePut(n int, processors int, chunksize int64, b *testing.B) {
|
func benchmarkMemStorePut(n int, processors int, chunksize int64, b *testing.B) {
|
||||||
m := newTestMemStore()
|
m := newTestMemStore()
|
||||||
defer m.Close()
|
defer m.Close()
|
||||||
benchmarkStorePut(m, processors, n, chunksize, b)
|
benchmarkStorePut(m, n, chunksize, b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func benchmarkMemStoreGet(n int, processors int, chunksize int64, b *testing.B) {
|
func benchmarkMemStoreGet(n int, processors int, chunksize int64, b *testing.B) {
|
||||||
m := newTestMemStore()
|
m := newTestMemStore()
|
||||||
defer m.Close()
|
defer m.Close()
|
||||||
benchmarkStoreGet(m, processors, n, chunksize, b)
|
benchmarkStoreGet(m, n, chunksize, b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkMemStorePut_1_500(b *testing.B) {
|
func BenchmarkMemStorePut_1_500(b *testing.B) {
|
||||||
@ -107,104 +94,70 @@ func BenchmarkMemStoreGet_8_500(b *testing.B) {
|
|||||||
benchmarkMemStoreGet(500, 8, 4096, b)
|
benchmarkMemStoreGet(500, 8, 4096, b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newLDBStore(t *testing.T) (*LDBStore, func()) {
|
|
||||||
dir, err := ioutil.TempDir("", "bzz-storage-test")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
log.Trace("memstore.tempdir", "dir", dir)
|
|
||||||
|
|
||||||
ldbparams := NewLDBStoreParams(NewDefaultStoreParams(), dir)
|
|
||||||
db, err := NewLDBStore(ldbparams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanup := func() {
|
|
||||||
db.Close()
|
|
||||||
err := os.RemoveAll(dir)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return db, cleanup
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemStoreAndLDBStore(t *testing.T) {
|
func TestMemStoreAndLDBStore(t *testing.T) {
|
||||||
ldb, cleanup := newLDBStore(t)
|
ldb, cleanup := newLDBStore(t)
|
||||||
ldb.setCapacity(4000)
|
ldb.setCapacity(4000)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
cacheCap := 200
|
cacheCap := 200
|
||||||
requestsCap := 200
|
memStore := NewMemStore(NewStoreParams(4000, 200, nil, nil), nil)
|
||||||
memStore := NewMemStore(NewStoreParams(4000, 200, 200, nil, nil), nil)
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
n int // number of chunks to push to memStore
|
n int // number of chunks to push to memStore
|
||||||
chunkSize uint64 // size of chunk (by default in Swarm - 4096)
|
chunkSize int64 // size of chunk (by default in Swarm - 4096)
|
||||||
request bool // whether or not to set the ReqC channel on the random chunks
|
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
n: 1,
|
n: 1,
|
||||||
chunkSize: 4096,
|
chunkSize: 4096,
|
||||||
request: false,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
n: 201,
|
n: 201,
|
||||||
chunkSize: 4096,
|
chunkSize: 4096,
|
||||||
request: false,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
n: 501,
|
n: 501,
|
||||||
chunkSize: 4096,
|
chunkSize: 4096,
|
||||||
request: false,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
n: 3100,
|
n: 3100,
|
||||||
chunkSize: 4096,
|
chunkSize: 4096,
|
||||||
request: false,
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
n: 100,
|
n: 100,
|
||||||
chunkSize: 4096,
|
chunkSize: 4096,
|
||||||
request: true,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
log.Info("running test", "idx", i, "tt", tt)
|
log.Info("running test", "idx", i, "tt", tt)
|
||||||
var chunks []*Chunk
|
var chunks []Chunk
|
||||||
|
|
||||||
for i := 0; i < tt.n; i++ {
|
for i := 0; i < tt.n; i++ {
|
||||||
var c *Chunk
|
c := GenerateRandomChunk(tt.chunkSize)
|
||||||
if tt.request {
|
|
||||||
c = NewRandomRequestChunk(tt.chunkSize)
|
|
||||||
} else {
|
|
||||||
c = NewRandomChunk(tt.chunkSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
chunks = append(chunks, c)
|
chunks = append(chunks, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < tt.n; i++ {
|
for i := 0; i < tt.n; i++ {
|
||||||
go ldb.Put(context.TODO(), chunks[i])
|
err := ldb.Put(context.TODO(), chunks[i])
|
||||||
memStore.Put(context.TODO(), chunks[i])
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
err = memStore.Put(context.TODO(), chunks[i])
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
if got := memStore.cache.Len(); got > cacheCap {
|
if got := memStore.cache.Len(); got > cacheCap {
|
||||||
t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got)
|
t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got)
|
||||||
}
|
}
|
||||||
|
|
||||||
if got := memStore.requests.Len(); got > requestsCap {
|
|
||||||
t.Fatalf("expected to get requests capacity less than %v, but got %v", requestsCap, got)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < tt.n; i++ {
|
for i := 0; i < tt.n; i++ {
|
||||||
_, err := memStore.Get(context.TODO(), chunks[i].Addr)
|
_, err := memStore.Get(context.TODO(), chunks[i].Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == ErrChunkNotFound {
|
if err == ErrChunkNotFound {
|
||||||
_, err := ldb.Get(context.TODO(), chunks[i].Addr)
|
_, err := ldb.Get(context.TODO(), chunks[i].Address())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err)
|
t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err)
|
||||||
}
|
}
|
||||||
@ -213,37 +166,5 @@ func TestMemStoreAndLDBStore(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for all chunks to be stored before ending the test are cleaning up
|
|
||||||
for i := 0; i < tt.n; i++ {
|
|
||||||
<-chunks[i].dbStoredC
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func NewRandomChunk(chunkSize uint64) *Chunk {
|
|
||||||
c := &Chunk{
|
|
||||||
Addr: make([]byte, 32),
|
|
||||||
ReqC: nil,
|
|
||||||
SData: make([]byte, chunkSize+8), // SData should be chunkSize + 8 bytes reserved for length
|
|
||||||
dbStoredC: make(chan bool),
|
|
||||||
dbStoredMu: &sync.Mutex{},
|
|
||||||
}
|
|
||||||
|
|
||||||
rand.Read(c.SData)
|
|
||||||
|
|
||||||
binary.LittleEndian.PutUint64(c.SData[:8], chunkSize)
|
|
||||||
|
|
||||||
hasher := MakeHashFunc(SHA3Hash)()
|
|
||||||
hasher.Write(c.SData)
|
|
||||||
copy(c.Addr, hasher.Sum(nil))
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRandomRequestChunk(chunkSize uint64) *Chunk {
|
|
||||||
c := NewRandomChunk(chunkSize)
|
|
||||||
c.ReqC = make(chan bool)
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
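The deleted NewRandomChunk and NewRandomRequestChunk helpers are superseded by GenerateRandomChunk, since request chunks no longer exist as a separate kind of chunk. For reference, the same construction expressed against the new Chunk API would look roughly like this (hypothetical helper; it assumes NewChunk now takes an address and the raw data, and that crypto/rand and encoding/binary are imported):

	func randomChunk(size uint64) Chunk {
		data := make([]byte, size+8) // 8-byte little-endian length prefix plus payload
		rand.Read(data)
		binary.LittleEndian.PutUint64(data[:8], size)
		hasher := MakeHashFunc(SHA3Hash)()
		hasher.Write(data)
		return NewChunk(hasher.Sum(nil), data)
	}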
|
@ -187,12 +187,12 @@ func (h *Handler) New(ctx context.Context, request *Request) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) ||
|
if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) ||
|
||||||
request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Addr) {
|
request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Address()) {
|
||||||
return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata")
|
return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata")
|
||||||
}
|
}
|
||||||
|
|
||||||
request.metaHash = metaHash
|
request.metaHash = metaHash
|
||||||
request.rootAddr = chunk.Addr
|
request.rootAddr = chunk.Address()
|
||||||
|
|
||||||
h.chunkStore.Put(ctx, chunk)
|
h.chunkStore.Put(ctx, chunk)
|
||||||
log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner)
|
log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner)
|
||||||
@ -202,14 +202,14 @@ func (h *Handler) New(ctx context.Context, request *Request) error {
|
|||||||
resourceUpdate: resourceUpdate{
|
resourceUpdate: resourceUpdate{
|
||||||
updateHeader: updateHeader{
|
updateHeader: updateHeader{
|
||||||
UpdateLookup: UpdateLookup{
|
UpdateLookup: UpdateLookup{
|
||||||
rootAddr: chunk.Addr,
|
rootAddr: chunk.Address(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ResourceMetadata: request.metadata,
|
ResourceMetadata: request.metadata,
|
||||||
updated: time.Now(),
|
updated: time.Now(),
|
||||||
}
|
}
|
||||||
h.set(chunk.Addr, rsrc)
|
h.set(chunk.Address(), rsrc)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -348,7 +348,11 @@ func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error
|
|||||||
return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit)
|
return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit)
|
||||||
}
|
}
|
||||||
updateAddr := lp.UpdateAddr()
|
updateAddr := lp.UpdateAddr()
|
||||||
chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
chunk, err := h.chunkStore.Get(ctx, updateAddr)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if specificversion {
|
if specificversion {
|
||||||
return h.updateIndex(rsrc, chunk)
|
return h.updateIndex(rsrc, chunk)
|
||||||
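The resource handler no longer goes through a GetWithTimeout helper; each lookup derives a bounded context and calls plain Get, keeping cancellation in the caller's hands. The recurring pattern as a small helper sketch (hypothetical; the handler inlines it at every call site, and it assumes the refactored storage.ChunkStore exposes Get(ctx, Address)):

	func getWithDeadline(store storage.ChunkStore, addr storage.Address, d time.Duration) (storage.Chunk, error) {
		ctx, cancel := context.WithTimeout(context.Background(), d)
		defer cancel()
		return store.Get(ctx, addr)
	}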
@ -358,7 +362,11 @@ func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error
|
|||||||
for {
|
for {
|
||||||
newversion := lp.version + 1
|
newversion := lp.version + 1
|
||||||
updateAddr := lp.UpdateAddr()
|
updateAddr := lp.UpdateAddr()
|
||||||
newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
newchunk, err := h.chunkStore.Get(ctx, updateAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return h.updateIndex(rsrc, chunk)
|
return h.updateIndex(rsrc, chunk)
|
||||||
}
|
}
|
||||||
@@ -380,7 +388,10 @@ func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error
 // Load retrieves the Mutable Resource metadata chunk stored at rootAddr
 // Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents
 func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) {
-chunk, err := h.chunkStore.GetWithTimeout(ctx, rootAddr, defaultRetrieveTimeout)
+//TODO: Maybe add timeout to context, defaultRetrieveTimeout?
+ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
+defer cancel()
+chunk, err := h.chunkStore.Get(ctx, rootAddr)
 if err != nil {
 return nil, NewError(ErrNotFound, err.Error())
 }
@@ -388,11 +399,11 @@ func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource
 // create the index entry
 rsrc := &resource{}

-if err := rsrc.ResourceMetadata.binaryGet(chunk.SData); err != nil { // Will fail if this is not really a metadata chunk
+if err := rsrc.ResourceMetadata.binaryGet(chunk.Data()); err != nil { // Will fail if this is not really a metadata chunk
 return nil, err
 }

-rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.SData)
+rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.Data())
 if !bytes.Equal(rsrc.rootAddr, rootAddr) {
 return nil, NewError(ErrCorruptData, "Corrupt metadata chunk")
 }
@@ -402,17 +413,17 @@ func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource
 }

 // update mutable resource index map with specified content
-func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
+func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, error) {

 // retrieve metadata from chunk data and check that it matches this mutable resource
 var r SignedResourceUpdate
-if err := r.fromChunk(chunk.Addr, chunk.SData); err != nil {
+if err := r.fromChunk(chunk.Address(), chunk.Data()); err != nil {
 return nil, err
 }
-log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Addr, "period", r.period, "version", r.version)
+log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Address(), "period", r.period, "version", r.version)

 // update our rsrcs entry map
-rsrc.lastKey = chunk.Addr
+rsrc.lastKey = chunk.Address()
 rsrc.period = r.period
 rsrc.version = r.version
 rsrc.updated = time.Now()
@@ -420,8 +431,8 @@ func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource,
 rsrc.multihash = r.multihash
 copy(rsrc.data, r.data)
 rsrc.Reader = bytes.NewReader(rsrc.data)
-log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Addr, "period", rsrc.period, "version", rsrc.version)
-h.set(chunk.Addr, rsrc)
+log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Address(), "period", rsrc.period, "version", rsrc.version)
+h.set(chunk.Address(), rsrc)
 return rsrc, nil
 }

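Several hunks in this file change call sites from the old struct fields (chunk.Addr, chunk.SData) to accessor methods (chunk.Address(), chunk.Data()). The block below is a minimal stand-in illustrating that accessor style; the field names and constructor are assumptions for the sketch, not the real swarm/storage types.

```go
package sketch

// Address is a stand-in for a chunk address.
type Address []byte

// Chunk is read through accessors rather than exported fields, which is the
// style the refactor moves the mutable-resource code to.
type Chunk interface {
	Address() Address
	Data() []byte
}

type chunk struct {
	addr  Address
	sdata []byte
}

// NewChunk mirrors the constructor shape used throughout the diff: the
// payload is fixed at construction time instead of being assigned to an
// SData field afterwards.
func NewChunk(addr Address, data []byte) Chunk {
	return &chunk{addr: addr, sdata: data}
}

func (c *chunk) Address() Address { return c.addr }
func (c *chunk) Data() []byte     { return c.sdata }
```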
@@ -457,7 +468,7 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAd

 // send the chunk
 h.chunkStore.Put(ctx, chunk)
-log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.SData, "multihash", r.multihash)
+log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.Data(), "multihash", r.multihash)

 // update our resources map entry if the new update is older than the one we have, if we have it.
 if rsrc != nil && (r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version)) {
@@ -475,7 +486,7 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAd

 // Retrieves the resource index value for the given nameHash
 func (h *Handler) get(rootAddr storage.Address) *resource {
-if len(rootAddr) < storage.KeyLength {
+if len(rootAddr) < storage.AddressLength {
 log.Warn("Handler.get with invalid rootAddr")
 return nil
 }
@@ -488,7 +499,7 @@ func (h *Handler) get(rootAddr storage.Address) *resource {

 // Sets the resource index value for the given nameHash
 func (h *Handler) set(rootAddr storage.Address, rsrc *resource) {
-if len(rootAddr) < storage.KeyLength {
+if len(rootAddr) < storage.AddressLength {
 log.Warn("Handler.set with invalid rootAddr")
 return
 }
@@ -72,7 +72,7 @@ type UpdateLookup struct {
 // 4 bytes period
 // 4 bytes version
 // storage.Keylength for rootAddr
-const updateLookupLength = 4 + 4 + storage.KeyLength
+const updateLookupLength = 4 + 4 + storage.AddressLength

 // UpdateAddr calculates the resource update chunk address corresponding to this lookup key
 func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) {
@@ -90,7 +90,7 @@ func (u *UpdateLookup) binaryPut(serializedData []byte) error {
 if len(serializedData) != updateLookupLength {
 return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
 }
-if len(u.rootAddr) != storage.KeyLength {
+if len(u.rootAddr) != storage.AddressLength {
 return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set")
 }
 binary.LittleEndian.PutUint32(serializedData[:4], u.period)
@@ -111,7 +111,7 @@ func (u *UpdateLookup) binaryGet(serializedData []byte) error {
 }
 u.period = binary.LittleEndian.Uint32(serializedData[:4])
 u.version = binary.LittleEndian.Uint32(serializedData[4:8])
-u.rootAddr = storage.Address(make([]byte, storage.KeyLength))
+u.rootAddr = storage.Address(make([]byte, storage.AddressLength))
 copy(u.rootAddr[:], serializedData[8:])
 return nil
 }
@@ -142,7 +142,7 @@ func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkD
 }

 // creates a metadata chunk out of a resourceMetadata structure
-func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []byte, err error) {
+func (metadata *ResourceMetadata) newChunk() (chunk storage.Chunk, metaHash []byte, err error) {
 // the metadata chunk contains a timestamp of when the resource starts to be valid
 // and also how frequently it is expected to be updated
 // from this we know at what time we should look for updates, and how often
@@ -157,9 +157,7 @@ func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []b
 }

 // make the chunk and send it to swarm
-chunk = storage.NewChunk(rootAddr, nil)
-chunk.SData = chunkData
-chunk.Size = int64(len(chunkData))
+chunk = storage.NewChunk(rootAddr, chunkData)

 return chunk, metaHash, nil
 }
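The newChunk hunk above now hands the serialized metadata straight to storage.NewChunk instead of assigning SData and Size afterwards. A small, self-contained sketch of that build-then-wrap flow; sha256 stands in for the hash function the real code obtains via storage.MakeHashFunc, and metaChunk is a throwaway type for this sketch only.

```go
package sketch

import "crypto/sha256"

// metaChunk is a throwaway value type used only in this sketch.
type metaChunk struct {
	addr []byte
	data []byte
}

// buildMetadataChunk sketches the flow in the hunk above: serialize the
// payload, derive its address, then construct the chunk in a single step,
// with no post-construction SData/Size assignments.
func buildMetadataChunk(serialized []byte) metaChunk {
	sum := sha256.Sum256(serialized) // stand-in for the resource hash function
	return metaChunk{addr: sum[:], data: serialized}
}
```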
@@ -182,7 +182,7 @@ func (r *Request) fromJSON(j *updateRequestJSON) error {
 var declaredRootAddr storage.Address
 var declaredMetaHash []byte

-declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.KeyLength, "rootAddr")
+declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.AddressLength, "rootAddr")
 if err != nil {
 return err
 }
@@ -87,8 +87,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
 resourceUpdate: resourceUpdate{
 updateHeader: updateHeader{
 UpdateLookup: UpdateLookup{
-rootAddr: make([]byte, 79), // put the wrong length, should be storage.KeyLength
+rootAddr: make([]byte, 79), // put the wrong length, should be storage.AddressLength
 },
 metaHash: nil,
 multihash: false,
@@ -99,8 +98,8 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
 if err == nil {
 t.Fatal("Expected newUpdateChunk to fail when rootAddr or metaHash have the wrong length")
 }
-r.rootAddr = make([]byte, storage.KeyLength)
-r.metaHash = make([]byte, storage.KeyLength)
+r.rootAddr = make([]byte, storage.AddressLength)
+r.metaHash = make([]byte, storage.AddressLength)
 _, err = r.toChunk()
 if err == nil {
 t.Fatal("Expected newUpdateChunk to fail when there is no data")
@@ -197,7 +196,7 @@ func TestReverse(t *testing.T) {

 // check that we can recover the owner account from the update chunk's signature
 var checkUpdate SignedResourceUpdate
-if err := checkUpdate.fromChunk(chunk.Addr, chunk.SData); err != nil {
+if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil {
 t.Fatal(err)
 }
 checkdigest, err := checkUpdate.GetDigest()
@@ -215,8 +214,8 @@ func TestReverse(t *testing.T) {
 t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress)
 }

-if !bytes.Equal(key[:], chunk.Addr[:]) {
-t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Addr)
+if !bytes.Equal(key[:], chunk.Address()[:]) {
+t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address())
 }
 if period != checkUpdate.period {
 t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period)
@@ -270,16 +269,16 @@ func TestResourceHandler(t *testing.T) {
 t.Fatal(err)
 }

-chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(request.rootAddr))
+chunk, err := rh.chunkStore.Get(ctx, storage.Address(request.rootAddr))
 if err != nil {
 t.Fatal(err)
-} else if len(chunk.SData) < 16 {
-t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData))
+} else if len(chunk.Data()) < 16 {
+t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.Data()))
 }

 var recoveredMetadata ResourceMetadata

-recoveredMetadata.binaryGet(chunk.SData)
+recoveredMetadata.binaryGet(chunk.Data())
 if err != nil {
 t.Fatal(err)
 }
@@ -704,7 +703,7 @@ func TestValidator(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-if !rh.Validate(chunk.Addr, chunk.SData) {
+if !rh.Validate(chunk.Address(), chunk.Data()) {
 t.Fatal("Chunk validator fail on update chunk")
 }

@@ -724,7 +723,7 @@ func TestValidator(t *testing.T) {
 t.Fatal(err)
 }

-if rh.Validate(chunk.Addr, chunk.SData) {
+if rh.Validate(chunk.Address(), chunk.Data()) {
 t.Fatal("Chunk validator did not fail on update chunk with false address")
 }

@@ -742,7 +741,7 @@ func TestValidator(t *testing.T) {
 t.Fatal(err)
 }

-if !rh.Validate(chunk.Addr, chunk.SData) {
+if !rh.Validate(chunk.Address(), chunk.Data()) {
 t.Fatal("Chunk validator fail on metadata chunk")
 }
 }
@@ -783,8 +782,7 @@ func TestValidatorInStore(t *testing.T) {
 // create content addressed chunks, one good, one faulty
 chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
 goodChunk := chunks[0]
-badChunk := chunks[1]
-badChunk.SData = goodChunk.SData
+badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data())

 metadata := &ResourceMetadata{
 StartTime: startTime,
@@ -801,7 +799,7 @@ func TestValidatorInStore(t *testing.T) {
 updateLookup := UpdateLookup{
 period: 42,
 version: 1,
-rootAddr: rootChunk.Addr,
+rootAddr: rootChunk.Address(),
 }

 updateAddr := updateLookup.UpdateAddr()
@@ -826,16 +824,16 @@ func TestValidatorInStore(t *testing.T) {
 }

 // put the chunks in the store and check their error status
-storage.PutChunks(store, goodChunk)
-if goodChunk.GetErrored() == nil {
+err = store.Put(context.Background(), goodChunk)
+if err == nil {
 t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
 }
-storage.PutChunks(store, badChunk)
-if badChunk.GetErrored() == nil {
+err = store.Put(context.Background(), badChunk)
+if err == nil {
 t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
 }
-storage.PutChunks(store, uglyChunk)
-if err := uglyChunk.GetErrored(); err != nil {
+err = store.Put(context.Background(), uglyChunk)
+if err != nil {
 t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
 }
 }
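The TestValidatorInStore hunk above reflects a broader change in error handling: a failed validation now surfaces as the error returned by Put, rather than as a flag read back later with GetErrored(). A hedged sketch of a store that applies validators on Put; the types below are stand-ins for this sketch, not the swarm LocalStore.

```go
package sketch

import (
	"context"
	"errors"
)

// validatorFunc reports whether a chunk with the given address and data is acceptable.
type validatorFunc func(addr, data []byte) bool

var errInvalidChunk = errors.New("chunk failed validation")

// validatingStore accepts a chunk only if at least one validator approves it.
type validatingStore struct {
	validators []validatorFunc
	chunks     map[string][]byte
}

// Put returns the validation outcome directly; callers no longer inspect a
// per-chunk error flag after the fact.
func (s *validatingStore) Put(ctx context.Context, addr, data []byte) error {
	valid := false
	for _, v := range s.validators {
		if v(addr, data) {
			valid = true
			break
		}
	}
	if !valid {
		return errInvalidChunk
	}
	if s.chunks == nil {
		s.chunks = make(map[string][]byte)
	}
	s.chunks[string(addr)] = data
	return nil
}
```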
@@ -897,7 +895,7 @@ func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
 return nil, err
 }
 var r SignedResourceUpdate
-if err := r.fromChunk(addr, chunk.SData); err != nil {
+if err := r.fromChunk(addr, chunk.Data()); err != nil {
 return nil, err
 }
 return r.data, nil
@@ -96,7 +96,7 @@ func (r *SignedResourceUpdate) Sign(signer Signer) error {
 }

 // create an update chunk.
-func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) {
+func (r *SignedResourceUpdate) toChunk() (storage.Chunk, error) {

 // Check that the update is signed and serialized
 // For efficiency, data is serialized during signature and cached in
@@ -105,14 +105,11 @@ func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) {
 return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.")
 }

-chunk := storage.NewChunk(r.updateAddr, nil)
 resourceUpdateLength := r.resourceUpdate.binaryLength()
-chunk.SData = r.binaryData

 // signature is the last item in the chunk data
-copy(chunk.SData[resourceUpdateLength:], r.signature[:])
+copy(r.binaryData[resourceUpdateLength:], r.signature[:])

-chunk.Size = int64(len(chunk.SData))
+chunk := storage.NewChunk(r.updateAddr, r.binaryData)
 return chunk, nil
 }

@@ -17,8 +17,12 @@
 package mru

 import (
+"context"
 "fmt"
 "path/filepath"
+"sync"

+"github.com/ethereum/go-ethereum/p2p/discover"
+
 "github.com/ethereum/go-ethereum/swarm/storage"
 )
@@ -35,6 +39,17 @@ func (t *TestHandler) Close() {
 t.chunkStore.Close()
 }

+type mockNetFetcher struct{}
+
+func (m *mockNetFetcher) Request(ctx context.Context) {
+}
+func (m *mockNetFetcher) Offer(ctx context.Context, source *discover.NodeID) {
+}
+
+func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher {
+return &mockNetFetcher{}
+}
+
 // NewTestHandler creates Handler object to be used for testing purposes.
 func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
 path := filepath.Join(datadir, testDbDirName)
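The testutil hunk above registers a do-nothing fetcher factory so the mutable-resource tests can run without any network retrieval. A sketch of that test-double pattern; NetFetcher and the factory signature mirror what the diff shows, while NodeID is simplified to a plain string here.

```go
package sketch

import (
	"context"
	"sync"
)

// NetFetcher mirrors the interface introduced in the diff; NodeID is
// simplified to *string for this sketch.
type NetFetcher interface {
	Request(ctx context.Context)
	Offer(ctx context.Context, source *string)
}

// NewNetFetcherFunc is the factory shape the store exposes so tests can
// swap in their own fetcher construction.
type NewNetFetcherFunc func(ctx context.Context, addr []byte, peers *sync.Map) NetFetcher

// noopFetcher satisfies the interface but never touches the network, which
// is all the handler tests need.
type noopFetcher struct{}

func (noopFetcher) Request(ctx context.Context)               {}
func (noopFetcher) Offer(ctx context.Context, source *string) {}

// newNoopFetcherFunc can be assigned to a store's fetcher-factory field in
// tests, so retrieval requests resolve to a no-op fetcher.
func newNoopFetcherFunc(context.Context, []byte, *sync.Map) NetFetcher {
	return noopFetcher{}
}
```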
@@ -47,7 +62,11 @@ func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error)
 }
 localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm)))
 localStore.Validators = append(localStore.Validators, rh)
-netStore := storage.NewNetStore(localStore, nil)
+netStore, err := storage.NewNetStore(localStore, nil)
+if err != nil {
+return nil, err
+}
+netStore.NewNetFetcherFunc = newFakeNetFetcher
 rh.SetStore(netStore)
 return &TestHandler{rh}, nil
 }
@@ -27,7 +27,7 @@ type updateHeader struct {
 metaHash []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownerhsip of the resource.
 }

-const metaHashLength = storage.KeyLength
+const metaHashLength = storage.AddressLength

 // updateLookupLength bytes
 // 1 byte flags (multihash bool for now)
@@ -76,7 +76,7 @@ func (h *updateHeader) binaryGet(serializedData []byte) error {
 }
 cursor := updateLookupLength
 h.metaHash = make([]byte, metaHashLength)
-copy(h.metaHash[:storage.KeyLength], serializedData[cursor:cursor+storage.KeyLength])
+copy(h.metaHash[:storage.AddressLength], serializedData[cursor:cursor+storage.AddressLength])
 cursor += metaHashLength

 flags := serializedData[cursor]
@@ -18,181 +18,275 @@ package storage

 import (
 "context"
+"encoding/hex"
+"fmt"
+"sync"
+"sync/atomic"
 "time"

+"github.com/ethereum/go-ethereum/p2p/discover"
 "github.com/ethereum/go-ethereum/swarm/log"
-"github.com/ethereum/go-ethereum/swarm/spancontext"
-opentracing "github.com/opentracing/opentracing-go"
+lru "github.com/hashicorp/golang-lru"
 )

-var (
-// NetStore.Get timeout for get and get retries
-// This is the maximum period that the Get will block.
-// If it is reached, Get will return ErrChunkNotFound.
-netStoreRetryTimeout = 30 * time.Second
-// Minimal period between calling get method on NetStore
-// on retry. It protects calling get very frequently if
-// it returns ErrChunkNotFound very fast.
-netStoreMinRetryDelay = 3 * time.Second
-// Timeout interval before retrieval is timed out.
-// It is used in NetStore.get on waiting for ReqC to be
-// closed on a single retrieve request.
-searchTimeout = 10 * time.Second
+type (
+NewNetFetcherFunc func(ctx context.Context, addr Address, peers *sync.Map) NetFetcher
 )

-// NetStore implements the ChunkStore interface,
-// this chunk access layer assumed 2 chunk stores
-// local storage eg. LocalStore and network storage eg., NetStore
-// access by calling network is blocking with a timeout
+type NetFetcher interface {
+Request(ctx context.Context)
+Offer(ctx context.Context, source *discover.NodeID)
+}

+// NetStore is an extension of local storage
+// it implements the ChunkStore interface
+// on request it initiates remote cloud retrieval using a fetcher
+// fetchers are unique to a chunk and are stored in fetchers LRU memory cache
+// fetchFuncFactory is a factory object to create a fetch function for a specific chunk address
 type NetStore struct {
-localStore *LocalStore
-retrieve func(ctx context.Context, chunk *Chunk) error
+mu sync.Mutex
+store SyncChunkStore
+fetchers *lru.Cache
+NewNetFetcherFunc NewNetFetcherFunc
+closeC chan struct{}
 }

-func NewNetStore(localStore *LocalStore, retrieve func(ctx context.Context, chunk *Chunk) error) *NetStore {
-return &NetStore{localStore, retrieve}
-}
-
-// Get is the entrypoint for local retrieve requests
-// waits for response or times out
-//
-// Get uses get method to retrieve request, but retries if the
-// ErrChunkNotFound is returned by get, until the netStoreRetryTimeout
-// is reached.
-func (ns *NetStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) {
-
-var sp opentracing.Span
-ctx, sp = spancontext.StartSpan(
-ctx,
-"netstore.get.global")
-defer sp.Finish()
-
-timer := time.NewTimer(netStoreRetryTimeout)
-defer timer.Stop()
-
-// result and resultC provide results from the goroutine
-// where NetStore.get is called.
-type result struct {
-chunk *Chunk
-err error
-}
-resultC := make(chan result)
-
-// quitC ensures that retring goroutine is terminated
-// when this function returns.
-quitC := make(chan struct{})
-defer close(quitC)
-
-// do retries in a goroutine so that the timer can
-// force this method to return after the netStoreRetryTimeout.
-go func() {
-// limiter ensures that NetStore.get is not called more frequently
-// then netStoreMinRetryDelay. If NetStore.get takes longer
-// then netStoreMinRetryDelay, the next retry call will be
-// without a delay.
-limiter := time.NewTimer(netStoreMinRetryDelay)
-defer limiter.Stop()
-
-for {
-chunk, err := ns.get(ctx, addr, 0)
-if err != ErrChunkNotFound {
-// break retry only if the error is nil
-// or other error then ErrChunkNotFound
-select {
-case <-quitC:
-// Maybe NetStore.Get function has returned
-// by the timer.C while we were waiting for the
-// results. Terminate this goroutine.
-case resultC <- result{chunk: chunk, err: err}:
-// Send the result to the parrent goroutine.
-}
-return
-
-}
-select {
-case <-quitC:
-// NetStore.Get function has returned, possibly
-// by the timer.C, which makes this goroutine
-// not needed.
-return
-case <-limiter.C:
-}
-// Reset the limiter for the next iteration.
-limiter.Reset(netStoreMinRetryDelay)
-log.Debug("NetStore.Get retry chunk", "key", addr)
-}
-}()
-
-select {
-case r := <-resultC:
-return r.chunk, r.err
-case <-timer.C:
-return nil, ErrChunkNotFound
-}
-}
-
-// GetWithTimeout makes a single retrieval attempt for a chunk with a explicit timeout parameter
-func (ns *NetStore) GetWithTimeout(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) {
-return ns.get(ctx, addr, timeout)
-}
-
-func (ns *NetStore) get(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) {
-if timeout == 0 {
-timeout = searchTimeout
-}
-
-var sp opentracing.Span
-ctx, sp = spancontext.StartSpan(
-ctx,
-"netstore.get")
-defer sp.Finish()
-
-if ns.retrieve == nil {
-chunk, err = ns.localStore.Get(ctx, addr)
-if err == nil {
-return chunk, nil
-}
-if err != ErrFetching {
-return nil, err
-}
-} else {
-var created bool
-chunk, created = ns.localStore.GetOrCreateRequest(ctx, addr)
-
-if chunk.ReqC == nil {
-return chunk, nil
-}
-
-if created {
-err := ns.retrieve(ctx, chunk)
+// NewNetStore creates a new NetStore object using the given local store. newFetchFunc is a
+// constructor function that can create a fetch function for a specific chunk address.
+func NewNetStore(store SyncChunkStore, nnf NewNetFetcherFunc) (*NetStore, error) {
+fetchers, err := lru.New(defaultChunkRequestsCacheCapacity)
 if err != nil {
-// mark chunk request as failed so that we can retry it later
-chunk.SetErrored(ErrChunkUnavailable)
 return nil, err
 }
-}
+return &NetStore{
+store: store,
+fetchers: fetchers,
+NewNetFetcherFunc: nnf,
+closeC: make(chan struct{}),
+}, nil
 }

-t := time.NewTicker(timeout)
-defer t.Stop()
-
-select {
-case <-t.C:
-// mark chunk request as failed so that we can retry
-chunk.SetErrored(ErrChunkNotFound)
-return nil, ErrChunkNotFound
-case <-chunk.ReqC:
+// Put stores a chunk in localstore, and delivers to all requestor peers using the fetcher stored in
+// the fetchers cache
+func (n *NetStore) Put(ctx context.Context, ch Chunk) error {
+n.mu.Lock()
+defer n.mu.Unlock()
+
+// put to the chunk to the store, there should be no error
+err := n.store.Put(ctx, ch)
+if err != nil {
+return err
 }
-chunk.SetErrored(nil)
+
+// if chunk is now put in the store, check if there was an active fetcher and call deliver on it
+// (this delivers the chunk to requestors via the fetcher)
+if f := n.getFetcher(ch.Address()); f != nil {
+f.deliver(ctx, ch)
+}
+return nil
+}
+
+// Get retrieves the chunk from the NetStore DPA synchronously.
+// It calls NetStore.get, and if the chunk is not in local Storage
+// it calls fetch with the request, which blocks until the chunk
+// arrived or context is done
+func (n *NetStore) Get(rctx context.Context, ref Address) (Chunk, error) {
+chunk, fetch, err := n.get(rctx, ref)
+if err != nil {
+return nil, err
+}
+if chunk != nil {
 return chunk, nil
 }
+return fetch(rctx)
+}

-// Put is the entrypoint for local store requests coming from storeLoop
-func (ns *NetStore) Put(ctx context.Context, chunk *Chunk) {
-ns.localStore.Put(ctx, chunk)
+func (n *NetStore) BinIndex(po uint8) uint64 {
+return n.store.BinIndex(po)
+}

+func (n *NetStore) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
+return n.store.Iterator(from, to, po, f)
+}
+
+// FetchFunc returns nil if the store contains the given address. Otherwise it returns a wait function,
+// which returns after the chunk is available or the context is done
+func (n *NetStore) FetchFunc(ctx context.Context, ref Address) func(context.Context) error {
+chunk, fetch, _ := n.get(ctx, ref)
+if chunk != nil {
+return nil
+}
+return func(ctx context.Context) error {
+_, err := fetch(ctx)
+return err
+}
 }

 // Close chunk store
-func (ns *NetStore) Close() {
-ns.localStore.Close()
+func (n *NetStore) Close() {
+close(n.closeC)
+n.store.Close()
+// TODO: loop through fetchers to cancel them
+}
+
+// get attempts at retrieving the chunk from LocalStore
+// If it is not found then using getOrCreateFetcher:
+// 1. Either there is already a fetcher to retrieve it
+// 2. A new fetcher is created and saved in the fetchers cache
+// From here on, all Get will hit on this fetcher until the chunk is delivered
+// or all fetcher contexts are done.
+// It returns a chunk, a fetcher function and an error
+// If chunk is nil, the returned fetch function needs to be called with a context to return the chunk.
+func (n *NetStore) get(ctx context.Context, ref Address) (Chunk, func(context.Context) (Chunk, error), error) {
+n.mu.Lock()
+defer n.mu.Unlock()
+
+chunk, err := n.store.Get(ctx, ref)
+if err != nil {
+if err != ErrChunkNotFound {
+log.Debug("Received error from LocalStore other than ErrNotFound", "err", err)
+}
+// The chunk is not available in the LocalStore, let's get the fetcher for it, or create a new one
+// if it doesn't exist yet
+f := n.getOrCreateFetcher(ref)
+// If the caller needs the chunk, it has to use the returned fetch function to get it
+return nil, f.Fetch, nil
+}
+
+return chunk, nil, nil
+}
+
+// getOrCreateFetcher attempts at retrieving an existing fetchers
+// if none exists, creates one and saves it in the fetchers cache
+// caller must hold the lock
+func (n *NetStore) getOrCreateFetcher(ref Address) *fetcher {
+if f := n.getFetcher(ref); f != nil {
+return f
+}
+
+// no fetcher for the given address, we have to create a new one
+key := hex.EncodeToString(ref)
+// create the context during which fetching is kept alive
+ctx, cancel := context.WithCancel(context.Background())
+// destroy is called when all requests finish
+destroy := func() {
+// remove fetcher from fetchers
+n.fetchers.Remove(key)
+// stop fetcher by cancelling context called when
+// all requests cancelled/timedout or chunk is delivered
+cancel()
+}
+// peers always stores all the peers which have an active request for the chunk. It is shared
+// between fetcher and the NewFetchFunc function. It is needed by the NewFetchFunc because
+// the peers which requested the chunk should not be requested to deliver it.
+peers := &sync.Map{}
+
+fetcher := newFetcher(ref, n.NewNetFetcherFunc(ctx, ref, peers), destroy, peers, n.closeC)
+n.fetchers.Add(key, fetcher)
+
+return fetcher
+}
+
+// getFetcher retrieves the fetcher for the given address from the fetchers cache if it exists,
+// otherwise it returns nil
+func (n *NetStore) getFetcher(ref Address) *fetcher {
+key := hex.EncodeToString(ref)
+f, ok := n.fetchers.Get(key)
+if ok {
+return f.(*fetcher)
+}
+return nil
+}
+
+// RequestsCacheLen returns the current number of outgoing requests stored in the cache
+func (n *NetStore) RequestsCacheLen() int {
+return n.fetchers.Len()
+}
+
+// One fetcher object is responsible to fetch one chunk for one address, and keep track of all the
+// peers who have requested it and did not receive it yet.
+type fetcher struct {
+addr Address // address of chunk
+chunk Chunk // fetcher can set the chunk on the fetcher
+deliveredC chan struct{} // chan signalling chunk delivery to requests
+cancelledC chan struct{} // chan signalling the fetcher has been cancelled (removed from fetchers in NetStore)
+netFetcher NetFetcher // remote fetch function to be called with a request source taken from the context
+cancel func() // cleanup function for the remote fetcher to call when all upstream contexts are called
+peers *sync.Map // the peers which asked for the chunk
+requestCnt int32 // number of requests on this chunk. If all the requests are done (delivered or context is done) the cancel function is called
+deliverOnce *sync.Once // guarantees that we only close deliveredC once
+}
+
+// newFetcher creates a new fetcher object for the fiven addr. fetch is the function which actually
+// does the retrieval (in non-test cases this is coming from the network package). cancel function is
+// called either
+// 1. when the chunk has been fetched all peers have been either notified or their context has been done
+// 2. the chunk has not been fetched but all context from all the requests has been done
+// The peers map stores all the peers which have requested chunk.
+func newFetcher(addr Address, nf NetFetcher, cancel func(), peers *sync.Map, closeC chan struct{}) *fetcher {
+cancelOnce := &sync.Once{} // cancel should only be called once
+return &fetcher{
+addr: addr,
+deliveredC: make(chan struct{}),
+deliverOnce: &sync.Once{},
+cancelledC: closeC,
+netFetcher: nf,
+cancel: func() {
+cancelOnce.Do(func() {
+cancel()
+})
+},
+peers: peers,
+}
+}
+
+// Fetch fetches the chunk synchronously, it is called by NetStore.Get is the chunk is not available
+// locally.
+func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) {
+atomic.AddInt32(&f.requestCnt, 1)
+defer func() {
+// if all the requests are done the fetcher can be cancelled
+if atomic.AddInt32(&f.requestCnt, -1) == 0 {
+f.cancel()
+}
+}()
+
+// The peer asking for the chunk. Store in the shared peers map, but delete after the request
+// has been delivered
+peer := rctx.Value("peer")
+if peer != nil {
+f.peers.Store(peer, time.Now())
+defer f.peers.Delete(peer)
+}
+
+// If there is a source in the context then it is an offer, otherwise a request
+sourceIF := rctx.Value("source")
+if sourceIF != nil {
+var source *discover.NodeID
+id := discover.MustHexID(sourceIF.(string))
+source = &id
+f.netFetcher.Offer(rctx, source)
+} else {
+f.netFetcher.Request(rctx)
+}
+
+// wait until either the chunk is delivered or the context is done
+select {
+case <-rctx.Done():
+return nil, rctx.Err()
+case <-f.deliveredC:
+return f.chunk, nil
+case <-f.cancelledC:
+return nil, fmt.Errorf("fetcher cancelled")
+}
+}
+
+// deliver is called by NetStore.Put to notify all pending requests
+func (f *fetcher) deliver(ctx context.Context, ch Chunk) {
+f.deliverOnce.Do(func() {
+f.chunk = ch
+// closing the deliveredC channel will terminate ongoing requests
+close(f.deliveredC)
+})
 }
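The rewritten netstore.go above couples Get and Put through per-address fetchers: a Get that misses locally registers a fetcher and blocks until a later Put delivers the chunk or the context ends. Below is a drastically simplified, self-contained model of that flow (no LRU cache, peer map, or NetFetcher calls); it is a sketch of the design, not the real NetStore.

```go
package sketch

import (
	"context"
	"sync"
)

// waiter plays the role of a fetcher: one per missing address.
type waiter struct {
	once      sync.Once
	delivered chan struct{}
	data      []byte
}

type miniStore struct {
	mu      sync.Mutex
	local   map[string][]byte
	waiters map[string]*waiter
}

func newMiniStore() *miniStore {
	return &miniStore{local: map[string][]byte{}, waiters: map[string]*waiter{}}
}

// Put stores the chunk locally and, like NetStore.Put calling fetcher.deliver,
// wakes up every Get currently blocked on this address.
func (s *miniStore) Put(addr string, data []byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.local[addr] = data
	if w, ok := s.waiters[addr]; ok {
		w.once.Do(func() {
			w.data = data
			close(w.delivered)
		})
		delete(s.waiters, addr)
	}
}

// Get mirrors the NetStore.Get flow: hit the local store first, otherwise
// register (or reuse) a fetcher-like waiter and block until delivery or
// context cancellation.
func (s *miniStore) Get(ctx context.Context, addr string) ([]byte, error) {
	s.mu.Lock()
	if data, ok := s.local[addr]; ok {
		s.mu.Unlock()
		return data, nil
	}
	w, ok := s.waiters[addr]
	if !ok {
		w = &waiter{delivered: make(chan struct{})}
		s.waiters[addr] = w
	}
	s.mu.Unlock()

	select {
	case <-w.delivered:
		return w.data, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
```

A Get running in one goroutine blocks until a Put runs in another, which is the behaviour the new TestNetStoreGetAndPut below exercises against the real implementation.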
@ -17,107 +17,622 @@
|
|||||||
package storage
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/hex"
|
"crypto/rand"
|
||||||
"errors"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/network"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
|
ch "github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var sourcePeerID = discover.MustHexID("2dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
|
||||||
errUnknown = errors.New("unknown error")
|
|
||||||
)
|
|
||||||
|
|
||||||
type mockRetrieve struct {
|
type mockNetFetcher struct {
|
||||||
requests map[string]int
|
peers *sync.Map
|
||||||
|
sources []*discover.NodeID
|
||||||
|
peersPerRequest [][]Address
|
||||||
|
requestCalled bool
|
||||||
|
offerCalled bool
|
||||||
|
quit <-chan struct{}
|
||||||
|
ctx context.Context
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewMockRetrieve() *mockRetrieve {
|
func (m *mockNetFetcher) Offer(ctx context.Context, source *discover.NodeID) {
|
||||||
return &mockRetrieve{requests: make(map[string]int)}
|
m.offerCalled = true
|
||||||
|
m.sources = append(m.sources, source)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newDummyChunk(addr Address) *Chunk {
|
func (m *mockNetFetcher) Request(ctx context.Context) {
|
||||||
chunk := NewChunk(addr, make(chan bool))
|
m.requestCalled = true
|
||||||
chunk.SData = []byte{3, 4, 5}
|
var peers []Address
|
||||||
chunk.Size = 3
|
m.peers.Range(func(key interface{}, _ interface{}) bool {
|
||||||
|
peers = append(peers, common.FromHex(key.(string)))
|
||||||
return chunk
|
return true
|
||||||
|
})
|
||||||
|
m.peersPerRequest = append(m.peersPerRequest, peers)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mockRetrieve) retrieve(ctx context.Context, chunk *Chunk) error {
|
type mockNetFetchFuncFactory struct {
|
||||||
hkey := hex.EncodeToString(chunk.Addr)
|
fetcher *mockNetFetcher
|
||||||
m.requests[hkey] += 1
|
|
||||||
|
|
||||||
// on second call return error
|
|
||||||
if m.requests[hkey] == 2 {
|
|
||||||
return errUnknown
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// on third call return data
|
func (m *mockNetFetchFuncFactory) newMockNetFetcher(ctx context.Context, _ Address, peers *sync.Map) NetFetcher {
|
||||||
if m.requests[hkey] == 3 {
|
m.fetcher.peers = peers
|
||||||
*chunk = *newDummyChunk(chunk.Addr)
|
m.fetcher.quit = ctx.Done()
|
||||||
go func() {
|
m.fetcher.ctx = ctx
|
||||||
time.Sleep(100 * time.Millisecond)
|
return m.fetcher
|
||||||
close(chunk.ReqC)
|
|
||||||
}()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
func mustNewNetStore(t *testing.T) *NetStore {
|
||||||
|
netStore, _ := mustNewNetStoreWithFetcher(t)
|
||||||
|
return netStore
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNetstoreFailedRequest(t *testing.T) {
|
func mustNewNetStoreWithFetcher(t *testing.T) (*NetStore, *mockNetFetcher) {
|
||||||
searchTimeout = 300 * time.Millisecond
|
t.Helper()
|
||||||
|
|
||||||
// setup
|
|
||||||
addr := network.RandomAddr() // tested peers peer address
|
|
||||||
|
|
||||||
// temp datadir
|
|
||||||
datadir, err := ioutil.TempDir("", "netstore")
|
datadir, err := ioutil.TempDir("", "netstore")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
naddr := make([]byte, 32)
|
||||||
params := NewDefaultLocalStoreParams()
|
params := NewDefaultLocalStoreParams()
|
||||||
params.Init(datadir)
|
params.Init(datadir)
|
||||||
params.BaseKey = addr.Over()
|
params.BaseKey = naddr
|
||||||
localStore, err := NewTestLocalStoreForAddr(params)
|
localStore, err := NewTestLocalStoreForAddr(params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
r := NewMockRetrieve()
|
fetcher := &mockNetFetcher{}
|
||||||
netStore := NewNetStore(localStore, r.retrieve)
|
mockNetFetchFuncFactory := &mockNetFetchFuncFactory{
|
||||||
|
fetcher: fetcher,
|
||||||
key := Address{}
|
|
||||||
|
|
||||||
// first call is done by the retry on ErrChunkNotFound, no need to do it here
|
|
||||||
// _, err = netStore.Get(key)
|
|
||||||
// if err == nil || err != ErrChunkNotFound {
|
|
||||||
// t.Fatalf("expected to get ErrChunkNotFound, but got: %s", err)
|
|
||||||
// }
|
|
||||||
|
|
||||||
// second call
|
|
||||||
_, err = netStore.Get(context.TODO(), key)
|
|
||||||
if got := r.requests[hex.EncodeToString(key)]; got != 2 {
|
|
||||||
t.Fatalf("expected to have called retrieve two times, but got: %v", got)
|
|
||||||
}
|
}
|
||||||
if err != errUnknown {
|
netStore, err := NewNetStore(localStore, mockNetFetchFuncFactory.newMockNetFetcher)
|
||||||
t.Fatalf("expected to get an unknown error, but got: %s", err)
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
return netStore, fetcher
|
||||||
}
|
}
|
||||||
|
|
||||||
// third call
|
// TestNetStoreGetAndPut tests calling NetStore.Get which is blocked until the same chunk is Put.
|
||||||
chunk, err := netStore.Get(context.TODO(), key)
|
// After the Put there should no active fetchers, and the context created for the fetcher should
|
||||||
if got := r.requests[hex.EncodeToString(key)]; got != 3 {
|
// be cancelled.
|
||||||
t.Fatalf("expected to have called retrieve three times, but got: %v", got)
|
func TestNetStoreGetAndPut(t *testing.T) {
|
||||||
|
netStore, fetcher := mustNewNetStoreWithFetcher(t)
|
||||||
|
|
||||||
|
chunk := GenerateRandomChunk(ch.DefaultSize)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
c := make(chan struct{}) // this channel ensures that the gouroutine with the Put does not run earlier than the Get
|
||||||
|
go func() {
|
||||||
|
<-c // wait for the Get to be called
|
||||||
|
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
|
||||||
|
|
||||||
|
// check if netStore created a fetcher in the Get call for the unavailable chunk
|
||||||
|
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
|
||||||
|
t.Fatal("Expected netStore to use a fetcher for the Get call")
|
||||||
}
|
}
|
||||||
if err != nil || chunk == nil {
|
|
||||||
t.Fatalf("expected to get a chunk but got: %v, %s", chunk, err)
|
err := netStore.Put(ctx, chunk)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Expected no err got %v", err)
|
||||||
}
|
}
|
||||||
if len(chunk.SData) != 3 {
|
}()
|
||||||
t.Fatalf("expected to get a chunk with size 3, but got: %v", chunk.SData)
|
|
||||||
|
close(c)
|
||||||
|
recChunk, err := netStore.Get(ctx, chunk.Address()) // this is blocked until the Put above is done
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Expected no err got %v", err)
|
||||||
|
}
|
||||||
|
// the retrieved chunk should be the same as what we Put
|
||||||
|
if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
|
||||||
|
t.Fatalf("Different chunk received than what was put")
|
||||||
|
}
|
||||||
|
// the chunk is already available locally, so there should be no active fetchers waiting for it
|
||||||
|
if netStore.fetchers.Len() != 0 {
|
||||||
|
t.Fatal("Expected netStore to remove the fetcher after delivery")
|
||||||
|
}
|
||||||
|
|
||||||
|
// A fetcher was created when the Get was called (and the chunk was not available). The chunk
|
||||||
|
// was delivered with the Put call, so the fetcher should be cancelled now.
|
||||||
|
select {
|
||||||
|
case <-fetcher.ctx.Done():
|
||||||
|
default:
|
||||||
|
t.Fatal("Expected fetcher context to be cancelled")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNetStoreGetAndPut tests calling NetStore.Put and then NetStore.Get.
|
||||||
|
// After the Put the chunk is available locally, so the Get can just retrieve it from LocalStore,
|
||||||
|
// there is no need to create fetchers.
|
||||||
|
func TestNetStoreGetAfterPut(t *testing.T) {
|
||||||
|
netStore, fetcher := mustNewNetStoreWithFetcher(t)
|
||||||
|
|
||||||
|
chunk := GenerateRandomChunk(ch.DefaultSize)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// First we Put the chunk, so the chunk will be available locally
|
||||||
|
err := netStore.Put(ctx, chunk)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Expected no err got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get should retrieve the chunk from LocalStore, without creating fetcher
|
||||||
|
recChunk, err := netStore.Get(ctx, chunk.Address())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Expected no err got %v", err)
|
||||||
|
}
|
||||||
|
// the retrieved chunk should be the same as what we Put
|
||||||
|
if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
|
||||||
|
t.Fatalf("Different chunk received than what was put")
|
||||||
|
}
|
||||||
|
// no fetcher offer or request should be created for a locally available chunk
|
||||||
|
if fetcher.offerCalled || fetcher.requestCalled {
|
||||||
|
t.Fatal("NetFetcher.offerCalled or requestCalled not expected to be called")
|
||||||
|
}
|
||||||
|
// no fetchers should be created for a locally available chunk
|
||||||
|
if netStore.fetchers.Len() != 0 {
|
||||||
|
t.Fatal("Expected netStore to not have fetcher")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNetStoreGetTimeout tests a Get call for an unavailable chunk and waits for timeout
|
||||||
|
func TestNetStoreGetTimeout(t *testing.T) {
|
||||||
|
netStore, fetcher := mustNewNetStoreWithFetcher(t)
|
||||||
|
|
||||||
|
chunk := GenerateRandomChunk(ch.DefaultSize)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
c := make(chan struct{}) // this channel ensures that the gouroutine does not run earlier than the Get
|
||||||
|
go func() {
|
||||||
|
<-c // wait for the Get to be called
|
||||||
|
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
|
||||||
|
|
||||||
|
// check if netStore created a fetcher in the Get call for the unavailable chunk
|
||||||
|
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
|
||||||
|
t.Fatal("Expected netStore to use a fetcher for the Get call")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
close(c)
|
||||||
|
// We call Get on this chunk, which is not in LocalStore. We don't Put it at all, so there will
|
||||||
|
// be a timeout
|
||||||
|
_, err := netStore.Get(ctx, chunk.Address())
|
||||||
|
|
||||||
|
// Check if the timeout happened
|
||||||
|
if err != context.DeadlineExceeded {
|
||||||
|
t.Fatalf("Expected context.DeadLineExceeded err got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A fetcher was created, check if it has been removed after timeout
|
||||||
|
if netStore.fetchers.Len() != 0 {
|
||||||
|
t.Fatal("Expected netStore to remove the fetcher after timeout")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the fetcher context has been cancelled after the timeout
|
||||||
|
select {
|
||||||
|
case <-fetcher.ctx.Done():
|
||||||
|
default:
|
||||||
|
t.Fatal("Expected fetcher context to be cancelled")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestNetStoreGetCancel tests a Get call for an unavailable chunk, then cancels the context and checks
|
||||||
|
// the errors
|
||||||
|
func TestNetStoreGetCancel(t *testing.T) {
|
||||||
|
netStore, fetcher := mustNewNetStoreWithFetcher(t)
|
||||||
|
|
||||||
|
chunk := GenerateRandomChunk(ch.DefaultSize)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||||
|
|
||||||
|
c := make(chan struct{}) // this channel ensures that the gouroutine with the cancel does not run earlier than the Get
|
||||||
|
go func() {
|
||||||
|
<-c // wait for the Get to be called
|
||||||
|
time.Sleep(200 * time.Millisecond) // and a little more so it is surely called
|
||||||
|
// check if netStore created a fetcher in the Get call for the unavailable chunk
|
||||||
|
if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
|
||||||
|
t.Fatal("Expected netStore to use a fetcher for the Get call")
|
||||||
|
}
|
||||||
|
cancel()
|
||||||
|
}()
|
||||||
|
|
||||||
|
close(c)
|
||||||
|
// We call Get with an unavailable chunk, so it will create a fetcher and wait for delivery
|
||||||
|
_, err := netStore.Get(ctx, chunk.Address())
|
||||||
|
|
||||||
|
// After the context is cancelled above Get should return with an error
|
||||||
|
if err != context.Canceled {
|
||||||
|
t.Fatalf("Expected context.Canceled err got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A fetcher was created, check if it has been removed after cancel
|
||||||
|
if netStore.fetchers.Len() != 0 {
|
||||||
|
t.Fatal("Expected netStore to remove the fetcher after cancel")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the fetcher context has been cancelled after the request context cancel
|
||||||
|
select {
|
||||||
|
case <-fetcher.ctx.Done():
|
||||||
|
default:
|
||||||
|
t.Fatal("Expected fetcher context to be cancelled")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNetStoreMultipleGetAndPut tests four Get calls for the same unavailable chunk. The chunk is
// delivered with a Put, we have to make sure all Get calls return, and they use a single fetcher
// for the chunk retrieval
func TestNetStoreMultipleGetAndPut(t *testing.T) {
	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	go func() {
		// sleep to make sure Put is called after all the Get calls
		time.Sleep(500 * time.Millisecond)
		// check if netStore created exactly one fetcher for all Get calls
		if netStore.fetchers.Len() != 1 {
			t.Fatal("Expected netStore to use one fetcher for all Get calls")
		}
		err := netStore.Put(ctx, chunk)
		if err != nil {
			t.Fatalf("Expected no err got %v", err)
		}
	}()

	// call Get 4 times for the same unavailable chunk. The calls will be blocked until the Put above.
	getWG := sync.WaitGroup{}
	for i := 0; i < 4; i++ {
		getWG.Add(1)
		go func() {
			defer getWG.Done()
			recChunk, err := netStore.Get(ctx, chunk.Address())
			if err != nil {
				t.Fatalf("Expected no err got %v", err)
			}
			if !bytes.Equal(recChunk.Address(), chunk.Address()) || !bytes.Equal(recChunk.Data(), chunk.Data()) {
				t.Fatalf("Different chunk received than what was put")
			}
		}()
	}

	finishedC := make(chan struct{})
	go func() {
		getWG.Wait()
		close(finishedC)
	}()

	// The Get calls should return after Put, so no timeout expected
	select {
	case <-finishedC:
	case <-time.After(1 * time.Second):
		t.Fatalf("Timeout waiting for Get calls to return")
	}

	// A fetcher was created, check if it has been removed after delivery
	if netStore.fetchers.Len() != 0 {
		t.Fatal("Expected netStore to remove the fetcher after delivery")
	}

	// The fetcher context should be cancelled after delivery
	select {
	case <-fetcher.ctx.Done():
	default:
		t.Fatal("Expected fetcher context to be cancelled")
	}

}

// TestNetStoreFetchFuncTimeout tests a FetchFunc call for an unavailable chunk and waits for timeout
func TestNetStoreFetchFuncTimeout(t *testing.T) {
	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()

	// FetchFunc is called for an unavailable chunk, so the returned wait function should not be nil
	wait := netStore.FetchFunc(ctx, chunk.Address())
	if wait == nil {
		t.Fatal("Expected wait function to be not nil")
	}

	// There should be an active fetcher for the chunk after the FetchFunc call
	if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
		t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
	}

	// the wait function should time out because we don't deliver the chunk with a Put
	err := wait(ctx)
	if err != context.DeadlineExceeded {
		t.Fatalf("Expected context.DeadlineExceeded err got %v", err)
	}

	// the fetcher should be removed after timeout
	if netStore.fetchers.Len() != 0 {
		t.Fatal("Expected netStore to remove the fetcher after timeout")
	}

	// the fetcher context should be cancelled after timeout
	select {
	case <-fetcher.ctx.Done():
	default:
		t.Fatal("Expected fetcher context to be cancelled")
	}
}

// TestNetStoreFetchFuncAfterPut tests that FetchFunc should return nil for a locally available chunk
func TestNetStoreFetchFuncAfterPut(t *testing.T) {
	netStore := mustNewNetStore(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	// We deliver the created chunk with a Put
	err := netStore.Put(ctx, chunk)
	if err != nil {
		t.Fatalf("Expected no err got %v", err)
	}

	// FetchFunc should return nil, because the chunk is available locally, no need to fetch it
	wait := netStore.FetchFunc(ctx, chunk.Address())
	if wait != nil {
		t.Fatal("Expected wait to be nil")
	}

	// No fetchers should be created at all
	if netStore.fetchers.Len() != 0 {
		t.Fatal("Expected netStore to not have fetcher")
	}
}

// TestNetStoreGetCallsRequest tests if Get creates a request on the NetFetcher for an unavailable chunk
func TestNetStoreGetCallsRequest(t *testing.T) {
	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()

	// We call Get for an unavailable chunk, it will time out because the chunk is not delivered
	_, err := netStore.Get(ctx, chunk.Address())

	if err != context.DeadlineExceeded {
		t.Fatalf("Expected context.DeadlineExceeded err got %v", err)
	}

	// NetStore should call NetFetcher.Request and wait for the chunk
	if !fetcher.requestCalled {
		t.Fatal("Expected NetFetcher.Request to be called")
	}
}

// TestNetStoreGetCallsOffer tests if Get creates a request on the NetFetcher for an unavailable chunk
// in case of a source peer provided in the context.
func TestNetStoreGetCallsOffer(t *testing.T) {
	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	// If a source peer is added to the context, NetStore will handle it as an offer
	ctx := context.WithValue(context.Background(), "source", sourcePeerID.String())
	ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
	defer cancel()

	// We call Get for an unavailable chunk, it will time out because the chunk is not delivered
	chunk, err := netStore.Get(ctx, chunk.Address())

	if err != context.DeadlineExceeded {
		t.Fatalf("Expect error %v got %v", context.DeadlineExceeded, err)
	}

	// NetStore should call NetFetcher.Offer with the source peer
	if !fetcher.offerCalled {
		t.Fatal("Expected NetFetcher.Offer to be called")
	}

	if len(fetcher.sources) != 1 {
		t.Fatalf("Expected fetcher sources length 1 got %v", len(fetcher.sources))
	}

	if fetcher.sources[0].String() != sourcePeerID.String() {
		t.Fatalf("Expected fetcher source %v got %v", sourcePeerID, fetcher.sources[0])
	}

}

// TestNetStoreFetcherCountPeers tests multiple NetStore.Get calls with peer in the context.
// There is no Put call, so the Get calls timeout
func TestNetStoreFetcherCountPeers(t *testing.T) {

	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	addr := randomAddr()
	peers := []string{randomAddr().Hex(), randomAddr().Hex(), randomAddr().Hex()}

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	errC := make(chan error)
	nrGets := 3

	// Call Get 3 times with a peer in context
	for i := 0; i < nrGets; i++ {
		peer := peers[i]
		go func() {
			ctx := context.WithValue(ctx, "peer", peer)
			_, err := netStore.Get(ctx, addr)
			errC <- err
		}()
	}

	// All 3 Get calls should timeout
	for i := 0; i < nrGets; i++ {
		err := <-errC
		if err != context.DeadlineExceeded {
			t.Fatalf("Expected \"%v\" error got \"%v\"", context.DeadlineExceeded, err)
		}
	}

	// fetcher should be closed after timeout
	select {
	case <-fetcher.quit:
	case <-time.After(3 * time.Second):
		t.Fatalf("mockNetFetcher not closed after timeout")
	}

	// All 3 peers should be given to NetFetcher after the 3 Get calls
	if len(fetcher.peersPerRequest) != nrGets {
		t.Fatalf("Expected 3 got %v", len(fetcher.peersPerRequest))
	}

	for i, peers := range fetcher.peersPerRequest {
		if len(peers) < i+1 {
			t.Fatalf("Expected at least %v got %v", i+1, len(peers))
		}
	}
}

// TestNetStoreFetchFuncCalledMultipleTimes calls the wait function given by FetchFunc three times,
// and checks there is still exactly one fetcher for one chunk. After the chunk is delivered, it checks
// if the fetcher is closed.
func TestNetStoreFetchFuncCalledMultipleTimes(t *testing.T) {
	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	// FetchFunc should return a non-nil wait function, because the chunk is not available
	wait := netStore.FetchFunc(ctx, chunk.Address())
	if wait == nil {
		t.Fatal("Expected wait function to be not nil")
	}

	// There should be exactly one fetcher for the chunk
	if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
		t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
	}

	// Call wait three times in parallel
	wg := sync.WaitGroup{}
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			err := wait(ctx)
			if err != nil {
				t.Fatalf("Expected no err got %v", err)
			}
			wg.Done()
		}()
	}

	// sleep a little so the wait functions above are called
	time.Sleep(100 * time.Millisecond)

	// there should still be only one fetcher, because all wait calls are for the same chunk
	if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
		t.Fatal("Expected netStore to have one fetcher for the requested chunk")
	}

	// Deliver the chunk with a Put
	err := netStore.Put(ctx, chunk)
	if err != nil {
		t.Fatalf("Expected no err got %v", err)
	}

	// wait until all wait calls return (because the chunk is delivered)
	wg.Wait()

	// There should be no more fetchers for the delivered chunk
	if netStore.fetchers.Len() != 0 {
		t.Fatal("Expected netStore to remove the fetcher after delivery")
	}

	// The context for the fetcher should be cancelled after delivery
	select {
	case <-fetcher.ctx.Done():
	default:
		t.Fatal("Expected fetcher context to be cancelled")
	}
}

// TestNetStoreFetcherLifeCycleWithTimeout is similar to TestNetStoreFetchFuncCalledMultipleTimes,
// the only difference is that we don't deliver the chunk, just wait for timeout
func TestNetStoreFetcherLifeCycleWithTimeout(t *testing.T) {
	netStore, fetcher := mustNewNetStoreWithFetcher(t)

	chunk := GenerateRandomChunk(ch.DefaultSize)

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	// FetchFunc should return a non-nil wait function, because the chunk is not available
	wait := netStore.FetchFunc(ctx, chunk.Address())
	if wait == nil {
		t.Fatal("Expected wait function to be not nil")
	}

	// There should be exactly one fetcher for the chunk
	if netStore.fetchers.Len() != 1 || netStore.getFetcher(chunk.Address()) == nil {
		t.Fatalf("Expected netStore to have one fetcher for the requested chunk")
	}

	// Call wait three times in parallel
	wg := sync.WaitGroup{}
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rctx, rcancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
			defer rcancel()
			err := wait(rctx)
			if err != context.DeadlineExceeded {
				t.Fatalf("Expected err %v got %v", context.DeadlineExceeded, err)
			}
		}()
	}

	// wait until all wait calls time out
	wg.Wait()

	// There should be no more fetchers after timeout
	if netStore.fetchers.Len() != 0 {
		t.Fatal("Expected netStore to remove the fetcher after timeout")
	}

	// The context for the fetcher should be cancelled after timeout
	select {
	case <-fetcher.ctx.Done():
	default:
		t.Fatal("Expected fetcher context to be cancelled")
	}
}

func randomAddr() Address {
	addr := make([]byte, 32)
	rand.Read(addr)
	return Address(addr)
}

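Taken together, the tests above pin down the NetStore contract: Get blocks until the chunk is delivered, times out or is cancelled, and FetchFunc returns nil for a locally available chunk and a wait function otherwise. A minimal caller-side sketch of that contract follows; it is illustrative only, not part of this change, and the helper name is made up.

package storage

import "context"

// ensureChunk shows how a caller is expected to use FetchFunc: a nil wait
// function means the chunk is already in the local store, a non-nil one
// blocks until delivery, cancellation or timeout.
func ensureChunk(ctx context.Context, store *NetStore, ref Address) error {
	if wait := store.FetchFunc(ctx, ref); wait != nil {
		return wait(ctx)
	}
	return nil
}
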
@ -25,7 +25,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/ethereum/go-ethereum/swarm/chunk"
+	ch "github.com/ethereum/go-ethereum/swarm/chunk"
 	"github.com/ethereum/go-ethereum/swarm/log"
 )
@ -57,7 +57,7 @@ import (
 	When certain no of data chunks are created (defaultBranches), a signal is sent to create a tree
 	entry. When the level 0 tree entries reaches certain threshold (defaultBranches), another signal
 	is sent to a tree entry one level up.. and so on... until only the data is exhausted AND only one
-	tree entry is present in certain level. The key of tree entry is given out as the rootKey of the file.
+	tree entry is present in certain level. The key of tree entry is given out as the rootAddress of the file.
 
 */
@ -98,15 +98,15 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get
 }
 
 /*
-	When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes.
+	When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Address), the root hash of the entire content will fill this once processing finishes.
 	New chunks to store are store using the putter which the caller provides.
 */
 func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
-	return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, chunk.DefaultSize)).Split(ctx)
+	return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, ch.DefaultSize)).Split(ctx)
 }
 
 func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
-	return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, chunk.DefaultSize)).Append(ctx)
+	return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, ch.DefaultSize)).Append(ctx)
 }
 
 // Entry to create a tree node
@ -153,7 +153,7 @@ type PyramidChunker struct {
 	wg          *sync.WaitGroup
 	errC        chan error
 	quitC       chan bool
-	rootKey     []byte
+	rootAddress []byte
 	chunkLevel  [][]*TreeEntry
 }
 
@ -171,14 +171,14 @@ func NewPyramidSplitter(params *PyramidSplitterParams) (pc *PyramidChunker) {
 	pc.wg = &sync.WaitGroup{}
 	pc.errC = make(chan error)
 	pc.quitC = make(chan bool)
-	pc.rootKey = make([]byte, pc.hashSize)
+	pc.rootAddress = make([]byte, pc.hashSize)
 	pc.chunkLevel = make([][]*TreeEntry, pc.branches)
 	return
 }
 
 func (pc *PyramidChunker) Join(addr Address, getter Getter, depth int) LazySectionReader {
 	return &LazyChunkReader{
-		key:       addr,
+		addr:      addr,
 		depth:     depth,
 		chunkSize: pc.chunkSize,
 		branches:  pc.branches,
@ -209,7 +209,7 @@ func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(conte
 	log.Debug("pyramid.chunker: Split()")
 
 	pc.wg.Add(1)
-	pc.prepareChunks(false)
+	pc.prepareChunks(ctx, false)
 
 	// closes internal error channel if all subprocesses in the workgroup finished
 	go func() {
@ -231,19 +231,21 @@ func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(conte
 		if err != nil {
 			return nil, nil, err
 		}
-	case <-time.NewTimer(splitTimeout).C:
+	case <-ctx.Done():
+		_ = pc.putter.Wait(ctx) //???
+		return nil, nil, ctx.Err()
 	}
-	return pc.rootKey, pc.putter.Wait, nil
+	return pc.rootAddress, pc.putter.Wait, nil
 
 }
 
 func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error) {
 	log.Debug("pyramid.chunker: Append()")
 	// Load the right most unfinished tree chunks in every level
-	pc.loadTree()
+	pc.loadTree(ctx)
 
 	pc.wg.Add(1)
-	pc.prepareChunks(true)
+	pc.prepareChunks(ctx, true)
 
 	// closes internal error channel if all subprocesses in the workgroup finished
 	go func() {
@ -265,11 +267,11 @@ func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(cont
 	case <-time.NewTimer(splitTimeout).C:
 	}
 
-	return pc.rootKey, pc.putter.Wait, nil
+	return pc.rootAddress, pc.putter.Wait, nil
 
 }
 
-func (pc *PyramidChunker) processor(id int64) {
+func (pc *PyramidChunker) processor(ctx context.Context, id int64) {
 	defer pc.decrementWorkerCount()
 	for {
 		select {
@ -278,19 +280,22 @@ func (pc *PyramidChunker) processor(id int64) {
 			if !ok {
 				return
 			}
-			pc.processChunk(id, job)
+			pc.processChunk(ctx, id, job)
 		case <-pc.quitC:
 			return
 		}
 	}
 }
 
-func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) {
+func (pc *PyramidChunker) processChunk(ctx context.Context, id int64, job *chunkJob) {
 	log.Debug("pyramid.chunker: processChunk()", "id", id)
 
-	ref, err := pc.putter.Put(context.TODO(), job.chunk)
+	ref, err := pc.putter.Put(ctx, job.chunk)
 	if err != nil {
-		pc.errC <- err
+		select {
+		case pc.errC <- err:
+		case <-pc.quitC:
+		}
 	}
 
 	// report hash of this chunk one level up (keys corresponds to the proper subslice of the parent chunk)
@ -300,14 +305,14 @@ func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) {
 	job.parentWg.Done()
 }
 
-func (pc *PyramidChunker) loadTree() error {
+func (pc *PyramidChunker) loadTree(ctx context.Context) error {
 	log.Debug("pyramid.chunker: loadTree()")
 	// Get the root chunk to get the total size
-	chunkData, err := pc.getter.Get(context.TODO(), Reference(pc.key))
+	chunkData, err := pc.getter.Get(ctx, Reference(pc.key))
 	if err != nil {
 		return errLoadingTreeRootChunk
 	}
-	chunkSize := chunkData.Size()
+	chunkSize := int64(chunkData.Size())
 	log.Trace("pyramid.chunker: root chunk", "chunk.Size", chunkSize, "pc.chunkSize", pc.chunkSize)
 
 	//if data size is less than a chunk... add a parent with update as pending
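One detail worth noting in the processChunk change above: the bare `pc.errC <- err` send is replaced by a select against quitC, so a worker can never block on an error channel nobody is draining once the chunker shuts down. A self-contained sketch of that send-or-quit idiom (the names here are illustrative, not the package's own):

package main

import (
	"errors"
	"fmt"
)

// reportErr delivers err unless quit has already been closed, in which case
// the error is dropped instead of deadlocking the sender.
func reportErr(errC chan error, quit chan struct{}, err error) {
	select {
	case errC <- err:
	case <-quit:
	}
}

func main() {
	errC := make(chan error, 1) // buffered only to keep the demo single-goroutine
	quit := make(chan struct{})

	reportErr(errC, quit, errors.New("chunk store failed")) // delivered
	close(quit)
	reportErr(errC, quit, errors.New("late error")) // dropped, no deadlock

	fmt.Println(<-errC) // prints "chunk store failed"
}
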
@ -356,7 +361,7 @@ func (pc *PyramidChunker) loadTree() error {
 			branchCount = int64(len(ent.chunk)-8) / pc.hashSize
 			for i := int64(0); i < branchCount; i++ {
 				key := ent.chunk[8+(i*pc.hashSize) : 8+((i+1)*pc.hashSize)]
-				newChunkData, err := pc.getter.Get(context.TODO(), Reference(key))
+				newChunkData, err := pc.getter.Get(ctx, Reference(key))
 				if err != nil {
 					return errLoadingTreeChunk
 				}
@ -365,7 +370,7 @@ func (pc *PyramidChunker) loadTree() error {
 				newEntry := &TreeEntry{
 					level:       lvl - 1,
 					branchCount: bewBranchCount,
-					subtreeSize: uint64(newChunkSize),
+					subtreeSize: newChunkSize,
 					chunk:       newChunkData,
 					key:         key,
 					index:       0,
@ -385,7 +390,7 @@ func (pc *PyramidChunker) loadTree() error {
 	return nil
 }
 
-func (pc *PyramidChunker) prepareChunks(isAppend bool) {
+func (pc *PyramidChunker) prepareChunks(ctx context.Context, isAppend bool) {
 	log.Debug("pyramid.chunker: prepareChunks", "isAppend", isAppend)
 	defer pc.wg.Done()
 
@ -393,11 +398,11 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) {
 
 	pc.incrementWorkerCount()
 
-	go pc.processor(pc.workerCount)
+	go pc.processor(ctx, pc.workerCount)
 
 	parent := NewTreeEntry(pc)
 	var unfinishedChunkData ChunkData
-	var unfinishedChunkSize int64
+	var unfinishedChunkSize uint64
 
 	if isAppend && len(pc.chunkLevel[0]) != 0 {
 		lastIndex := len(pc.chunkLevel[0]) - 1
@ -415,16 +420,16 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) {
 		}
 
 		lastBranch := parent.branchCount - 1
-		lastKey := parent.chunk[8+lastBranch*pc.hashSize : 8+(lastBranch+1)*pc.hashSize]
+		lastAddress := parent.chunk[8+lastBranch*pc.hashSize : 8+(lastBranch+1)*pc.hashSize]
 
 		var err error
-		unfinishedChunkData, err = pc.getter.Get(context.TODO(), lastKey)
+		unfinishedChunkData, err = pc.getter.Get(ctx, lastAddress)
 		if err != nil {
 			pc.errC <- err
 		}
 		unfinishedChunkSize = unfinishedChunkData.Size()
-		if unfinishedChunkSize < pc.chunkSize {
-			parent.subtreeSize = parent.subtreeSize - uint64(unfinishedChunkSize)
+		if unfinishedChunkSize < uint64(pc.chunkSize) {
+			parent.subtreeSize = parent.subtreeSize - unfinishedChunkSize
 			parent.branchCount = parent.branchCount - 1
 		} else {
 			unfinishedChunkData = nil
@ -468,8 +473,8 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) {
 			if parent.branchCount == 1 && (pc.depth() == 0 || isAppend) {
 				// Data is exactly one chunk.. pick the last chunk key as root
 				chunkWG.Wait()
-				lastChunksKey := parent.chunk[8 : 8+pc.hashSize]
-				copy(pc.rootKey, lastChunksKey)
+				lastChunksAddress := parent.chunk[8 : 8+pc.hashSize]
+				copy(pc.rootAddress, lastChunksAddress)
 				break
 			}
 		} else {
@ -502,7 +507,7 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) {
 			// No need to build the tree if the depth is 0
 			// or we are appending.
 			// Just use the last key.
-			copy(pc.rootKey, pkey)
+			copy(pc.rootAddress, pkey)
 		} else {
 			// We need to build the tree and and provide the lonely
 			// chunk key to replace the last tree chunk key.
@ -525,7 +530,7 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) {
 		workers := pc.getWorkerCount()
 		if int64(len(pc.jobC)) > workers && workers < ChunkProcessors {
 			pc.incrementWorkerCount()
-			go pc.processor(pc.workerCount)
+			go pc.processor(ctx, pc.workerCount)
 		}
 
 	}
@ -558,7 +563,7 @@ func (pc *PyramidChunker) buildTree(isAppend bool, ent *TreeEntry, chunkWG *sync
 
 	lvlCount := int64(len(pc.chunkLevel[lvl]))
 	if lvlCount == 1 && last {
-		copy(pc.rootKey, pc.chunkLevel[lvl][0].key)
+		copy(pc.rootAddress, pc.chunkLevel[lvl][0].key)
 		return
 	}
 
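The net effect of the pyramid chunker changes is that the caller's context now flows into every Put and Get the chunker performs, and Split aborts on cancellation instead of only on the fixed splitTimeout. A hedged caller-side sketch follows; the surrounding function name and the timeout value are illustrative, and putter/getter stand for whatever Putter and Getter implementations the caller already has.

package storage

import (
	"context"
	"io"
	"time"
)

// splitWithDeadline drives the context-aware Split/wait pair: the same ctx
// bounds both the chunking work and the final wait for storage to settle.
func splitWithDeadline(data io.Reader, putter Putter, getter Getter) (Address, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	addr, wait, err := PyramidSplit(ctx, data, putter, getter)
	if err != nil {
		return nil, err
	}
	// wait blocks until all chunks handed to the putter are stored,
	// or until the context is cancelled.
	if err := wait(ctx); err != nil {
		return nil, err
	}
	return addr, nil
}
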
@ -25,16 +25,16 @@ import (
 	"fmt"
 	"hash"
 	"io"
-	"sync"
+	"io/ioutil"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
 	"github.com/ethereum/go-ethereum/swarm/bmt"
-	"github.com/ethereum/go-ethereum/swarm/chunk"
+	ch "github.com/ethereum/go-ethereum/swarm/chunk"
 )
 
 const MaxPO = 16
-const KeyLength = 32
+const AddressLength = 32
 
 type Hasher func() hash.Hash
 type SwarmHasher func() SwarmHash
@ -116,7 +116,7 @@ func MakeHashFunc(hash string) SwarmHasher {
 	return func() SwarmHash {
 		hasher := sha3.NewKeccak256
 		hasherSize := hasher().Size()
-		segmentCount := chunk.DefaultSize / hasherSize
+		segmentCount := ch.DefaultSize / hasherSize
 		pool := bmt.NewTreePool(hasher, segmentCount, bmt.PoolSize)
 		return bmt.New(pool)
 	}
@ -169,88 +169,88 @@ func (c AddressCollection) Swap(i, j int) {
 	c[i], c[j] = c[j], c[i]
 }
 
-// Chunk also serves as a request object passed to ChunkStores
-// in case it is a retrieval request, Data is nil and Size is 0
-// Note that Size is not the size of the data chunk, which is Data.Size()
-// but the size of the subtree encoded in the chunk
-// 0 if request, to be supplied by the dpa
-type Chunk struct {
-	Addr       Address // always
-	SData      []byte  // nil if request, to be supplied by dpa
-	Size       int64   // size of the data covered by the subtree encoded in this chunk
-	//Source   Peer    // peer
-	C          chan bool // to signal data delivery by the dpa
-	ReqC       chan bool // to signal the request done
-	dbStoredC  chan bool // never remove a chunk from memStore before it is written to dbStore
-	dbStored   bool
-	dbStoredMu *sync.Mutex
-	errored    error // flag which is set when the chunk request has errored or timeouted
-	erroredMu  sync.Mutex
+// Chunk interface implemented by context.Contexts and data chunks
+type Chunk interface {
+	Address() Address
+	Payload() []byte
+	SpanBytes() []byte
+	Span() int64
+	Data() []byte
 }
 
-func (c *Chunk) SetErrored(err error) {
-	c.erroredMu.Lock()
-	defer c.erroredMu.Unlock()
-	c.errored = err
+type chunk struct {
+	addr  Address
+	sdata []byte
+	span  int64
 }
 
-func (c *Chunk) GetErrored() error {
-	c.erroredMu.Lock()
-	defer c.erroredMu.Unlock()
-	return c.errored
-}
-
-func NewChunk(addr Address, reqC chan bool) *Chunk {
-	return &Chunk{
-		Addr:       addr,
-		ReqC:       reqC,
-		dbStoredC:  make(chan bool),
-		dbStoredMu: &sync.Mutex{},
+func NewChunk(addr Address, data []byte) *chunk {
+	return &chunk{
+		addr:  addr,
+		sdata: data,
+		span:  -1,
 	}
 }
 
-func (c *Chunk) markAsStored() {
-	c.dbStoredMu.Lock()
-	defer c.dbStoredMu.Unlock()
-	if !c.dbStored {
-		close(c.dbStoredC)
-		c.dbStored = true
-	}
+func (c *chunk) Address() Address {
+	return c.addr
 }
 
-func (c *Chunk) WaitToStore() error {
-	<-c.dbStoredC
-	return c.GetErrored()
+func (c *chunk) SpanBytes() []byte {
+	return c.sdata[:8]
 }
 
-func GenerateRandomChunk(dataSize int64) *Chunk {
-	return GenerateRandomChunks(dataSize, 1)[0]
+func (c *chunk) Span() int64 {
+	if c.span == -1 {
+		c.span = int64(binary.LittleEndian.Uint64(c.sdata[:8]))
+	}
+	return c.span
 }
 
-func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) {
-	var i int
+func (c *chunk) Data() []byte {
+	return c.sdata
+}
+
+func (c *chunk) Payload() []byte {
+	return c.sdata[8:]
+}
+
+// String() for pretty printing
+func (self *chunk) String() string {
+	return fmt.Sprintf("Address: %v TreeSize: %v Chunksize: %v", self.addr.Log(), self.span, len(self.sdata))
+}
+
+func GenerateRandomChunk(dataSize int64) Chunk {
 	hasher := MakeHashFunc(DefaultHash)()
-	if dataSize > chunk.DefaultSize {
-		dataSize = chunk.DefaultSize
-	}
-	for i = 0; i < count; i++ {
-		chunks = append(chunks, NewChunk(nil, nil))
-		chunks[i].SData = make([]byte, dataSize+8)
-		rand.Read(chunks[i].SData)
-		binary.LittleEndian.PutUint64(chunks[i].SData[:8], uint64(dataSize))
-		hasher.ResetWithLength(chunks[i].SData[:8])
-		hasher.Write(chunks[i].SData[8:])
-		chunks[i].Addr = make([]byte, 32)
-		copy(chunks[i].Addr, hasher.Sum(nil))
+	sdata := make([]byte, dataSize+8)
+	rand.Read(sdata[8:])
+	binary.LittleEndian.PutUint64(sdata[:8], uint64(dataSize))
+	hasher.ResetWithLength(sdata[:8])
+	hasher.Write(sdata[8:])
+	return NewChunk(hasher.Sum(nil), sdata)
+}
+
+func GenerateRandomChunks(dataSize int64, count int) (chunks []Chunk) {
+	if dataSize > ch.DefaultSize {
+		dataSize = ch.DefaultSize
+	}
+	for i := 0; i < count; i++ {
+		ch := GenerateRandomChunk(ch.DefaultSize)
+		chunks = append(chunks, ch)
 	}
 
 	return chunks
 }
 
+func GenerateRandomData(l int) (r io.Reader, slice []byte) {
+	slice, err := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(l)))
+	if err != nil {
+		panic("rand error")
+	}
+	// log.Warn("generate random data", "len", len(slice), "data", common.Bytes2Hex(slice))
+	r = io.LimitReader(bytes.NewReader(slice), int64(l))
+	return r, slice
+}
+
 // Size, Seek, Read, ReadAt
 type LazySectionReader interface {
 	Context() context.Context
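For reference, the serialised form the new chunk type works over is simply an 8-byte little-endian span prefix followed by the payload, exactly as GenerateRandomChunk above builds it. A small hedged sketch of that layout (the helper name is made up and is not part of the package):

package storage

import "encoding/binary"

// chunkFromPayload builds the prefixed buffer NewChunk expects and shows how
// the accessors relate to it: Data() is the whole buffer, SpanBytes() the
// first 8 bytes, Payload() the rest, and Span() the decoded prefix.
func chunkFromPayload(addr Address, payload []byte) Chunk {
	sdata := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint64(sdata[:8], uint64(len(payload)))
	copy(sdata[8:], payload)
	return NewChunk(addr, sdata)
}
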
@ -276,15 +276,14 @@ type StoreParams struct {
 	Hash          SwarmHasher `toml:"-"`
 	DbCapacity    uint64
 	CacheCapacity uint
-	ChunkRequestsCacheCapacity uint
 	BaseKey       []byte
 }
 
 func NewDefaultStoreParams() *StoreParams {
-	return NewStoreParams(defaultLDBCapacity, defaultCacheCapacity, defaultChunkRequestsCacheCapacity, nil, nil)
+	return NewStoreParams(defaultLDBCapacity, defaultCacheCapacity, nil, nil)
 }
 
-func NewStoreParams(ldbCap uint64, cacheCap uint, requestsCap uint, hash SwarmHasher, basekey []byte) *StoreParams {
+func NewStoreParams(ldbCap uint64, cacheCap uint, hash SwarmHasher, basekey []byte) *StoreParams {
 	if basekey == nil {
 		basekey = make([]byte, 32)
 	}
@ -295,7 +294,6 @@ func NewStoreParams(ldbCap uint64, cacheCap uint, requestsCap uint, hash SwarmHa
 		Hash:          hash,
 		DbCapacity:    ldbCap,
 		CacheCapacity: cacheCap,
-		ChunkRequestsCacheCapacity: requestsCap,
 		BaseKey:       basekey,
 	}
 }
@ -321,8 +319,8 @@ type Getter interface {
 }
 
 // NOTE: this returns invalid data if chunk is encrypted
-func (c ChunkData) Size() int64 {
-	return int64(binary.LittleEndian.Uint64(c[:8]))
+func (c ChunkData) Size() uint64 {
+	return binary.LittleEndian.Uint64(c[:8])
 }
 
 func (c ChunkData) Data() []byte {
@ -348,7 +346,8 @@ func NewContentAddressValidator(hasher SwarmHasher) *ContentAddressValidator {
 
 // Validate that the given key is a valid content address for the given data
 func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
-	if l := len(data); l < 9 || l > chunk.DefaultSize+8 {
+	if l := len(data); l < 9 || l > ch.DefaultSize+8 {
+		// log.Error("invalid chunk size", "chunk", addr.Hex(), "size", l)
 		return false
 	}
 
@ -359,3 +358,37 @@ func (v *ContentAddressValidator) Validate(addr Address, data []byte) bool {
 
 	return bytes.Equal(hash, addr[:])
 }
+
+type ChunkStore interface {
+	Put(ctx context.Context, ch Chunk) (err error)
+	Get(rctx context.Context, ref Address) (ch Chunk, err error)
+	Close()
+}
+
+// SyncChunkStore is a ChunkStore which supports syncing
+type SyncChunkStore interface {
+	ChunkStore
+	BinIndex(po uint8) uint64
+	Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error
+	FetchFunc(ctx context.Context, ref Address) func(context.Context) error
+}
+
+// FakeChunkStore doesn't store anything, just implements the ChunkStore interface
+// It can be used to inject into a hasherStore if you don't want to actually store data, just do the
+// hashing
+type FakeChunkStore struct {
+}
+
+// Put doesn't store anything, it is just here to implement ChunkStore
+func (f *FakeChunkStore) Put(_ context.Context, ch Chunk) error {
+	return nil
+}
+
+// Get is just here to implement ChunkStore; FakeChunkStore cannot return chunks
+func (f *FakeChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
+	panic("FakeChunkStore doesn't support Get")
+}
+
+// Close is just here to implement ChunkStore
+func (f *FakeChunkStore) Close() {
+}
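For contrast with FakeChunkStore, a minimal real implementation of the new ChunkStore interface could look like the sketch below. It is illustrative only; the type name is made up and this is not the package's own map-backed store.

package storage

import (
	"context"
	"errors"
	"sync"
)

// memChunkStore keeps chunks in a map keyed by their address, satisfying
// Put, Get and Close from the ChunkStore interface above.
type memChunkStore struct {
	mu     sync.RWMutex
	chunks map[string]Chunk
}

func newMemChunkStore() *memChunkStore {
	return &memChunkStore{chunks: make(map[string]Chunk)}
}

func (m *memChunkStore) Put(_ context.Context, ch Chunk) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.chunks[ch.Address().Hex()] = ch
	return nil
}

func (m *memChunkStore) Get(_ context.Context, ref Address) (Chunk, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	ch, ok := m.chunks[ref.Hex()]
	if !ok {
		return nil, errors.New("chunk not found")
	}
	return ch, nil
}

func (m *memChunkStore) Close() {}
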
@ -75,7 +75,7 @@ type Swarm struct {
 	privateKey  *ecdsa.PrivateKey
 	corsString  string
 	swapEnabled bool
-	lstore      *storage.LocalStore // local store, needs to store for releasing resources after node stopped
+	netStore    *storage.NetStore
 	sfs         *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
 	ps          *pss.Pss
 
@ -164,37 +164,40 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 		self.dns = resolver
 	}
 
-	self.lstore, err = storage.NewLocalStore(config.LocalStoreParams, mockStore)
+	lstore, err := storage.NewLocalStore(config.LocalStoreParams, mockStore)
 	if err != nil {
-		return
+		return nil, err
+	}
+
+	self.netStore, err = storage.NewNetStore(lstore, nil)
+	if err != nil {
+		return nil, err
 	}
 
-	db := storage.NewDBAPI(self.lstore)
 	to := network.NewKademlia(
 		common.FromHex(config.BzzKey),
 		network.NewKadParams(),
 	)
-	delivery := stream.NewDelivery(to, db)
+	delivery := stream.NewDelivery(to, self.netStore)
+	self.netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, config.DeliverySkipCheck).New
 
-	self.streamer = stream.NewRegistry(addr, delivery, db, stateStore, &stream.RegistryOptions{
-		SkipCheck:       config.DeliverySkipCheck,
+	self.streamer = stream.NewRegistry(addr, delivery, self.netStore, stateStore, &stream.RegistryOptions{
+		SkipCheck:       config.SyncingSkipCheck,
 		DoSync:          config.SyncEnabled,
 		DoRetrieve:      true,
 		SyncUpdateDelay: config.SyncUpdateDelay,
 	})
 
-	// set up NetStore, the cloud storage local access layer
-	netStore := storage.NewNetStore(self.lstore, self.streamer.Retrieve)
 	// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
-	self.fileStore = storage.NewFileStore(netStore, self.config.FileStoreParams)
+	self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
 
 	var resourceHandler *mru.Handler
 	rhparams := &mru.HandlerParams{}
 
 	resourceHandler = mru.NewHandler(rhparams)
-	resourceHandler.SetStore(netStore)
+	resourceHandler.SetStore(self.netStore)
 
-	self.lstore.Validators = []storage.ChunkValidator{
+	lstore.Validators = []storage.ChunkValidator{
 		storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)),
 		resourceHandler,
 	}
@ -399,7 +402,7 @@ func (self *Swarm) periodicallyUpdateGauges() {
 
 func (self *Swarm) updateGauges() {
 	uptimeGauge.Update(time.Since(startTime).Nanoseconds())
-	requestsCacheGauge.Update(int64(self.lstore.RequestsCacheLen()))
+	requestsCacheGauge.Update(int64(self.netStore.RequestsCacheLen()))
 }
 
 // implements the node.Service interface
@ -420,8 +423,8 @@ func (self *Swarm) Stop() error {
 		ch.Save()
 	}
 
-	if self.lstore != nil {
-		self.lstore.DbStore.Close()
+	if self.netStore != nil {
+		self.netStore.Close()
 	}
 	self.sfs.Stop()
 	stopCounter.Inc(1)
@ -478,21 +481,6 @@ func (self *Swarm) APIs() []rpc.API {
 			Service:   self.sfs,
 			Public:    false,
 		},
-		// storage APIs
-		// DEPRECATED: Use the HTTP API instead
-		{
-			Namespace: "bzz",
-			Version:   "0.1",
-			Service:   api.NewStorage(self.api),
-			Public:    true,
-		},
-		{
-			Namespace: "bzz",
-			Version:   "0.1",
-			Service:   api.NewFileSystem(self.api),
-			Public:    false,
-		},
-		// {Namespace, Version, api.NewAdmin(self), false},
 	}
 
 	apis = append(apis, self.bzz.APIs()...)
@ -82,8 +82,8 @@ func TestNewSwarm(t *testing.T) {
 			if s.dns != nil {
 				t.Error("dns initialized, but it should not be")
 			}
-			if s.lstore == nil {
-				t.Error("localstore not initialized")
+			if s.netStore == nil {
+				t.Error("netStore not initialized")
 			}
 			if s.streamer == nil {
 				t.Error("streamer not initialized")
@ -91,9 +91,6 @@ func TestNewSwarm(t *testing.T) {
 			if s.fileStore == nil {
 				t.Error("fileStore not initialized")
 			}
-			if s.lstore.Validators == nil {
-				t.Error("localstore validators not initialized")
-			}
 			if s.bzz == nil {
 				t.Error("bzz not initialized")
 			}