forked from cerc-io/plugeth
commit 133de3d806 (parent 6ca59d98f8)

swarm: fix megacheck warnings
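megacheck is the honnef.co/go/tools metalinter that bundles staticcheck, gosimple and unused (it has since been folded into staticcheck). The hunks below are the usual harvest from a run over the swarm packages: unused declarations deleted, mutating methods moved from value to pointer receivers, deprecated os.SEEK_* constants replaced with io.Seek*, single-case selects collapsed, always-true comparisons dropped, and a self-comparison bug fixed in a test. Short standalone Go sketches (illustrations, not code from this repository) follow the relevant hunks.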
@@ -34,11 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
 
-var (
-	hashMatcher      = regexp.MustCompile("^[0-9A-Fa-f]{64}")
-	slashes          = regexp.MustCompile("/+")
-	domainAndVersion = regexp.MustCompile("[@:;,]+")
-)
+var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
 
 type Resolver interface {
 	Resolve(string) (common.Hash, error)
@@ -335,7 +331,6 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
 }
 
 func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) {
-
 	uri, err := Parse("bzz:/" + mhash)
 	if err != nil {
 		return nil, nil, err
@@ -356,5 +351,8 @@ func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storag
 		manifestEntryMap[suffix] = entry
 	})
 
+	if err != nil {
+		return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err)
+	}
 	return key, manifestEntryMap, nil
 }
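The added check surfaces an error that was previously dropped after the callback-driven traversal completed. A minimal sketch of the same pattern, assuming a hypothetical walk helper that reports failure through its return value:

package main

import (
	"errors"
	"fmt"
)

// walk is a hypothetical callback-driven traversal, standing in for
// the manifest list call above; it reports failure via its return value.
func walk(visit func(string)) error {
	visit("a")
	return errors.New("iteration aborted")
}

func main() {
	seen := map[string]bool{}
	err := walk(func(p string) { seen[p] = true })
	// Check and wrap the error with context, as the hunk above now does.
	if err != nil {
		fmt.Println(fmt.Errorf("list with prefix failed: %v", err))
		return
	}
	fmt.Println(seen)
}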
@@ -89,8 +89,8 @@ func TestClientUploadDownloadFiles(t *testing.T) {
 	if file.Size != int64(len(expected)) {
 		t.Fatalf("expected downloaded file to be %d bytes, got %d", len(expected), file.Size)
 	}
-	if file.ContentType != file.ContentType {
-		t.Fatalf("expected downloaded file to have type %q, got %q", file.ContentType, file.ContentType)
+	if file.ContentType != "text/plain" {
+		t.Fatalf("expected downloaded file to have type %q, got %q", "text/plain", file.ContentType)
 	}
 	data, err := ioutil.ReadAll(file)
 	if err != nil {
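The old condition compared file.ContentType with itself, which is always false for strings, so the assertion could never fire; staticcheck flags such self-comparisons (SA4000, if I recall the check code correctly). A standalone illustration, including the one legitimate use of a self-comparison:

package main

import (
	"fmt"
	"math"
)

func main() {
	s := "text/plain"
	fmt.Println(s != s) // always false: any branch guarded by it is dead

	nan := math.NaN()
	fmt.Println(nan != nan) // true: the classic NaN test on floats
}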
@@ -235,9 +235,7 @@ func TestClientFileList(t *testing.T) {
 		t.Fatal(err)
 	}
 	paths := make([]string, 0, len(list.CommonPrefixes)+len(list.Entries))
-	for _, prefix := range list.CommonPrefixes {
-		paths = append(paths, prefix)
-	}
+	paths = append(paths, list.CommonPrefixes...)
 	for _, entry := range list.Entries {
 		paths = append(paths, entry.Path)
 	}
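A loop that only appends each element of one slice to another is equivalent to a single variadic append, which gosimple suggests. Sketch:

package main

import "fmt"

func main() {
	prefixes := []string{"dir1/", "dir2/"}
	entries := []string{"a.txt", "b.txt"}

	paths := make([]string, 0, len(prefixes)+len(entries))
	paths = append(paths, prefixes...) // replaces the removed for-range loop
	paths = append(paths, entries...)
	fmt.Println(paths)
}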
@@ -224,7 +224,7 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma
 	if err != nil {
 		return fmt.Errorf("error copying multipart content: %s", err)
 	}
-	if _, err := tmp.Seek(0, os.SEEK_SET); err != nil {
+	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
 		return fmt.Errorf("error copying multipart content: %s", err)
 	}
 	reader = tmp
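os.SEEK_SET, os.SEEK_CUR and os.SEEK_END have been deprecated since Go 1.7 in favor of io.SeekStart, io.SeekCurrent and io.SeekEnd; the numeric values (0, 1, 2) are identical, so the change is purely cosmetic. A standalone sketch:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("hello world")
	// Seek relative to the start of the stream (formerly os.SEEK_SET).
	if _, err := r.Seek(6, io.SeekStart); err != nil {
		panic(err)
	}
	rest := make([]byte, 5)
	if _, err := io.ReadFull(r, rest); err != nil {
		panic(err)
	}
	fmt.Println(string(rest)) // world
}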
@@ -97,7 +97,7 @@ func TestBzzrGetPath(t *testing.T) {
 				isexpectedfailrequest = true
 			}
 		}
-		if isexpectedfailrequest == false {
+		if !isexpectedfailrequest {
 			t.Fatalf("Response body does not match, expected: %v, got %v", testmanifest[v], string(respbody))
 		}
 	}
@@ -126,6 +126,9 @@ func TestBzzrGetPath(t *testing.T) {
 	}
 	defer resp.Body.Close()
 	respbody, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("ReadAll failed: %v", err)
+	}
 	if string(respbody) != nonhashresponses[i] {
 		t.Fatalf("Non-Hash response body does not match, expected: %v, got: %v", nonhashresponses[i], string(respbody))
 	}
@@ -59,14 +59,6 @@ type MountInfo struct {
 	lock       *sync.RWMutex
 }
 
-// Inode numbers need to be unique, they are used for caching inside fuse
-func newInode() uint64 {
-	inodeLock.Lock()
-	defer inodeLock.Unlock()
-	inode += 1
-	return inode
-}
-
 func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo {
 	newMountInfo := &MountInfo{
 		MountPoint: mpoint,
@@ -103,7 +95,7 @@ func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
 	}
 
 	log.Info(fmt.Sprintf("Attempting to mount %s ", cleanedMountPoint))
-	key, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true)
+	_, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true)
 	if err != nil {
 		return nil, err
 	}
@@ -116,8 +108,7 @@ func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
 	mi.rootDir = rootDir
 
 	for suffix, entry := range manifestEntryMap {
-		key = common.Hex2Bytes(entry.Hash)
-
+		key := common.Hex2Bytes(entry.Hash)
 		fullpath := "/" + suffix
 		basepath := filepath.Dir(fullpath)
 
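Mount only needs the manifest map, so the key return value is now discarded with the blank identifier rather than bound to a variable the linter would flag; inside the loop, the assignment becomes a short declaration scoped where it is used. A toy illustration of the blank identifier:

package main

import "fmt"

func divmod(a, b int) (quot, rem int) {
	return a / b, a % b
}

func main() {
	// Binding rem to a named variable and never reading it would be
	// flagged as unused; the blank identifier discards it explicitly.
	quot, _ := divmod(17, 5)
	fmt.Println(quot) // 3
}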
@@ -355,7 +355,7 @@ func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
 // sends relevant peer data given by the kademlia hive to the requester
 // TODO: remember peers sent for duration of the session, only new peers sent
 func (self *Hive) peers(req *retrieveRequestMsgData) {
-	if req != nil && req.MaxPeers >= 0 {
+	if req != nil {
 		var addrs []*peerAddr
 		if req.timeout == nil || time.Now().Before(*(req.timeout)) {
 			key := req.Key
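MaxPeers is presumably an unsigned integer (the simplification only makes sense if it is), so the `>= 0` clause is a tautology; staticcheck reports comparisons that can never be false. In miniature:

package main

import "fmt"

func main() {
	var maxPeers uint64
	// An unsigned value can never be negative, so this condition is
	// always true and carries no information.
	if maxPeers >= 0 {
		fmt.Println("always reached")
	}
}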
@@ -133,7 +133,7 @@ type retrieveRequestMsgData struct {
 	from *peer //
 }
 
-func (self retrieveRequestMsgData) String() string {
+func (self *retrieveRequestMsgData) String() string {
 	var from string
 	if self.from == nil {
 		from = "ourselves"
@@ -148,12 +148,12 @@ func (self retrieveRequestMsgData) String() string {
 }
 
 // lookups are encoded by missing request ID
-func (self retrieveRequestMsgData) isLookup() bool {
+func (self *retrieveRequestMsgData) isLookup() bool {
 	return self.Id == 0
 }
 
 // sets timeout fields
-func (self retrieveRequestMsgData) setTimeout(t *time.Time) {
+func (self *retrieveRequestMsgData) setTimeout(t *time.Time) {
 	self.timeout = t
 	if t != nil {
 		self.Timeout = uint64(t.UnixNano())
@@ -162,7 +162,7 @@ func (self retrieveRequestMsgData) setTimeout(t *time.Time) {
 	}
 }
 
-func (self retrieveRequestMsgData) getTimeout() (t *time.Time) {
+func (self *retrieveRequestMsgData) getTimeout() (t *time.Time) {
 	if self.Timeout > 0 && self.timeout == nil {
 		timeout := time.Unix(int64(self.Timeout), 0)
 		t = &timeout
@@ -180,7 +180,7 @@ type peerAddr struct {
 }
 
 // peerAddr pretty prints as enode
-func (self peerAddr) String() string {
+func (self *peerAddr) String() string {
 	var nodeid discover.NodeID
 	copy(nodeid[:], self.ID)
 	return discover.NewNode(nodeid, self.IP, 0, self.Port).String()
@@ -213,7 +213,7 @@ type peersMsgData struct {
 }
 
 // peers msg pretty printer
-func (self peersMsgData) String() string {
+func (self *peersMsgData) String() string {
 	var from string
 	if self.from == nil {
 		from = "ourselves"
@@ -227,7 +227,7 @@ func (self peersMsgData) String() string {
 	return fmt.Sprintf("from: %v, Key: %x; ID: %v, Peers: %v", from, target, self.Id, self.Peers)
 }
 
-func (self peersMsgData) setTimeout(t *time.Time) {
+func (self *peersMsgData) setTimeout(t *time.Time) {
 	self.timeout = t
 	if t != nil {
 		self.Timeout = uint64(t.UnixNano())
@@ -236,15 +236,6 @@ func (self peersMsgData) setTimeout(t *time.Time) {
 	}
 }
 
-func (self peersMsgData) getTimeout() (t *time.Time) {
-	if self.Timeout > 0 && self.timeout == nil {
-		timeout := time.Unix(int64(self.Timeout), 0)
-		t = &timeout
-		self.timeout = t
-	}
-	return
-}
-
 /*
 syncRequest
 
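These receiver changes are more than style: setTimeout on a value receiver mutates a copy that is discarded when the method returns, so the cached self.timeout was never actually persisted; moving the whole method set to pointer receivers fixes that and keeps the method set consistent (the unused peersMsgData.getTimeout is simply deleted). A minimal sketch of the failure mode, on a made-up msg type:

package main

import "fmt"

type msg struct{ timeout int }

// Value receiver: the assignment lands on a copy, so the caller
// never observes the change — the bug the linter points at.
func (m msg) setValue(t int) { m.timeout = t }

// Pointer receiver: the assignment sticks.
func (m *msg) setPointer(t int) { m.timeout = t }

func main() {
	var m msg
	m.setValue(5)
	fmt.Println(m.timeout) // 0 — the update was lost
	m.setPointer(5)
	fmt.Println(m.timeout) // 5
}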
@@ -40,7 +40,6 @@ import (
 	"github.com/ethereum/go-ethereum/contracts/chequebook"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
-	"github.com/ethereum/go-ethereum/p2p/discover"
 	bzzswap "github.com/ethereum/go-ethereum/swarm/services/swap"
 	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
 	"github.com/ethereum/go-ethereum/swarm/storage"
@@ -56,8 +55,6 @@ const (
 // bzz represents the swarm wire protocol
 // an instance is running on each peer
 type bzz struct {
-	selfID   discover.NodeID // peer's node id used in peer advertising in handshake
-	key      storage.Key     // baseaddress as storage.Key
 	storage  StorageHandler  // handler storage/retrieval related requests coming via the bzz wire protocol
 	hive     *Hive           // the logistic manager, peerPool, routing service and peer handler
 	dbAccess *DbAccess       // access to db storage counter and iterator for syncing
@@ -156,8 +156,7 @@ type syncer struct {
 	quit chan bool // signal to quit loops
 
 	// DB related fields
 	dbAccess *DbAccess // access to dbStore
-	db       *storage.LDBDatabase // delivery msg db
 
 	// native fields
 	queues [priorities]*syncDb // in-memory cache / queues for sync reqs
@@ -156,14 +156,12 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
 		close(errC)
 	}()
 
-	select {
-	case err := <-errC:
-		if err != nil {
-			close(quitC)
-			return nil, err
-		}
-	//TODO: add a timeout
-	}
+	//TODO: add a timeout
+	if err := <-errC; err != nil {
+		close(quitC)
+		return nil, err
+	}
 
 	return key, nil
 }
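A select with exactly one case and no default is just a blocking receive, so it collapses into a plain receive in an if-statement, which is what gosimple suggests here. Sketch:

package main

import (
	"errors"
	"fmt"
)

func main() {
	errC := make(chan error, 1)
	errC <- errors.New("split failed")

	// Equivalent to select { case err := <-errC: ... } with a single case:
	if err := <-errC; err != nil {
		fmt.Println("abort:", err)
	}
}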
@@ -43,13 +43,6 @@ type chunkerTester struct {
 	t test
 }
 
-func (self *chunkerTester) checkChunks(t *testing.T, want int) {
-	l := len(self.chunks)
-	if l != want {
-		t.Errorf("expected %v chunks, got %v", want, l)
-	}
-}
-
 func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key) {
 	// reset
 	self.chunks = make(map[string]*Chunk)
@@ -209,20 +202,6 @@ func TestRandomBrokenData(t *testing.T) {
 	}
 }
 
-func readAll(reader LazySectionReader, result []byte) {
-	size := int64(len(result))
-
-	var end int64
-	for pos := int64(0); pos < size; pos += 1000 {
-		if pos+1000 > size {
-			end = size
-		} else {
-			end = pos + 1000
-		}
-		reader.ReadAt(result[pos:end], pos)
-	}
-}
-
 func benchReadAll(reader LazySectionReader) {
 	size, _ := reader.Size(nil)
 	output := make([]byte, 1000)
@@ -514,8 +514,7 @@ func (s *DbStore) setCapacity(c uint64) {
 	s.capacity = c
 
 	if s.entryCnt > c {
-		var ratio float32
-		ratio = float32(1.01) - float32(c)/float32(s.entryCnt)
+		ratio := float32(1.01) - float32(c)/float32(s.entryCnt)
 		if ratio < gcArrayFreeRatio {
 			ratio = gcArrayFreeRatio
 		}
@@ -528,10 +527,6 @@ func (s *DbStore) setCapacity(c uint64) {
 	}
 }
 
-func (s *DbStore) getEntryCnt() uint64 {
-	return s.entryCnt
-}
-
 func (s *DbStore) Close() {
 	s.db.Close()
 }
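Declaring a variable and assigning it on the next line collapses into one short variable declaration, another gosimple suggestion. In miniature, with made-up numbers:

package main

import "fmt"

func main() {
	entryCnt, capacity := uint64(200), uint64(100)

	// Before: var ratio float32; ratio = ...
	// After: one short variable declaration.
	ratio := float32(1.01) - float32(capacity)/float32(entryCnt)
	fmt.Println(ratio) // ≈0.51
}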
@@ -59,7 +59,6 @@ type DPA struct {
 
 	lock    sync.Mutex
 	running bool
-	wg      *sync.WaitGroup
 	quitC   chan bool
 }
 
@@ -239,6 +238,4 @@ func (self *dpaChunkStore) Put(entry *Chunk) {
 }
 
 // Close chunk store
-func (self *dpaChunkStore) Close() {
-	return
-}
+func (self *dpaChunkStore) Close() {}
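A trailing bare return in a function without result parameters is redundant (gosimple flags it), and once it is gone the empty body collapses onto one line; the same pattern repeats for LocalStore, MemStore and NetStore below. Sketch on a made-up type:

package main

import "fmt"

type noopStore struct{}

// Before:
//	func (s *noopStore) Close() {
//		return
//	}
// After — the bare return added nothing:
func (s *noopStore) Close() {}

func main() {
	s := &noopStore{}
	s.Close()
	fmt.Println("closed")
}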
@@ -74,6 +74,4 @@ func (self *LocalStore) Get(key Key) (chunk *Chunk, err error) {
 }
 
 // Close local store
-func (self *LocalStore) Close() {
-	return
-}
+func (self *LocalStore) Close() {}
@@ -130,10 +130,6 @@ func (s *MemStore) setCapacity(c uint) {
 	s.capacity = c
 }
 
-func (s *MemStore) getEntryCnt() uint {
-	return s.entryCnt
-}
-
 // entry (not its copy) is going to be in MemStore
 func (s *MemStore) Put(entry *Chunk) {
 	if s.capacity == 0 {
@@ -206,8 +202,6 @@ func (s *MemStore) Put(entry *Chunk) {
 	node.lastDBaccess = s.dbAccessCnt
 	node.updateAccess(s.accessCnt)
 	s.entryCnt++
-
-	return
 }
 
 func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) {
@@ -323,6 +317,4 @@ func (s *MemStore) removeOldest() {
 }
 
 // Close memstore
-func (s *MemStore) Close() {
-	return
-}
+func (s *MemStore) Close() {}
@@ -19,7 +19,6 @@ package storage
 import (
 	"fmt"
 	"path/filepath"
-	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/log"
@@ -40,7 +39,6 @@ type NetStore struct {
 	hashfunc   Hasher
 	localStore *LocalStore
 	cloud      CloudStore
-	lock       sync.Mutex
 }
 
 // backend engine for cloud store
@@ -134,6 +132,4 @@ func (self *NetStore) Get(key Key) (*Chunk, error) {
 }
 
 // Close netstore
-func (self *NetStore) Close() {
-	return
-}
+func (self *NetStore) Close() {}
@@ -178,10 +178,9 @@ func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Tas
 			if swg != nil {
 				swg.Add(1)
 			}
-			select {
-			case chunkC <- &Chunk{Key: hash, SData: data, wg: swg}:
-				// case <- self.quitC
-			}
+			chunkC <- &Chunk{Key: hash, SData: data, wg: swg}
+			// TODO: consider selecting on self.quitC to avoid blocking forever on shutdown
 		}
 		if depth+1 < len(results.Levels) {
 			delete(results.Levels[depth+1], task.Index/(pow/self.branches))
@@ -115,11 +115,11 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *e
 	log.Debug(fmt.Sprintf("Set up swarm network with Kademlia hive"))
 
 	// setup cloud storage backend
-	cloud := network.NewForwarder(self.hive)
+	self.cloud = network.NewForwarder(self.hive)
 	log.Debug(fmt.Sprintf("-> set swarm forwarder as cloud storage backend"))
-	// setup cloud storage internal access layer
 
-	self.storage = storage.NewNetStore(hash, self.lstore, cloud, config.StoreParams)
+	// setup cloud storage internal access layer
+	self.storage = storage.NewNetStore(hash, self.lstore, self.cloud, config.StoreParams)
 	log.Debug(fmt.Sprintf("-> swarm net store shared access layer to Swarm Chunk Store"))
 
 	// set up Depo (storage handler = cloud storage access layer for incoming remote requests)