swarm: fix megacheck warnings

Egon Elbre 2017-08-08 20:34:35 +03:00
parent 6ca59d98f8
commit 133de3d806
18 changed files with 39 additions and 108 deletions

View File

@ -34,11 +34,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/storage"
)
var (
hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
slashes = regexp.MustCompile("/+")
domainAndVersion = regexp.MustCompile("[@:;,]+")
)
var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
type Resolver interface {
Resolve(string) (common.Hash, error)
@ -335,7 +331,6 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte
}
func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) {
uri, err := Parse("bzz:/" + mhash)
if err != nil {
return nil, nil, err
@ -356,5 +351,8 @@ func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storag
manifestEntryMap[suffix] = entry
})
if err != nil {
return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err)
}
return key, manifestEntryMap, nil
}
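Note: the var block at the top of this file shrinks to just hashMatcher because the slashes and domainAndVersion regexps were no longer referenced anywhere in the package; megacheck's "unused" checker reports exactly this, while the compiler stays silent for package-level declarations. A minimal sketch of the pattern (regexps and names here are illustrative, not the Swarm code):

package main

import (
	"fmt"
	"regexp"
)

var (
	hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
	// slashes is never referenced below; megacheck reports it as unused,
	// but the compiler accepts it because package-level variables, unlike
	// locals, may legally go unused.
	slashes = regexp.MustCompile("/+")
)

func main() {
	fmt.Println(hashMatcher.MatchString("00ff")) // false: fewer than 64 hex chars
}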

View File

@ -89,8 +89,8 @@ func TestClientUploadDownloadFiles(t *testing.T) {
if file.Size != int64(len(expected)) {
t.Fatalf("expected downloaded file to be %d bytes, got %d", len(expected), file.Size)
}
if file.ContentType != file.ContentType {
t.Fatalf("expected downloaded file to have type %q, got %q", file.ContentType, file.ContentType)
if file.ContentType != "text/plain" {
t.Fatalf("expected downloaded file to have type %q, got %q", "text/plain", file.ContentType)
}
data, err := ioutil.ReadAll(file)
if err != nil {
@ -235,9 +235,7 @@ func TestClientFileList(t *testing.T) {
t.Fatal(err)
}
paths := make([]string, 0, len(list.CommonPrefixes)+len(list.Entries))
for _, prefix := range list.CommonPrefixes {
paths = append(paths, prefix)
}
paths = append(paths, list.CommonPrefixes...)
for _, entry := range list.Entries {
paths = append(paths, entry.Path)
}
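Two warnings are fixed in this test file: the old assertion compared file.ContentType against itself, which can never fail (staticcheck SA4000, identical operands in a comparison), and the prefix-copy loop is replaced by a single variadic append, the rewrite gosimple S1011 suggests for loops that only append one slice to another. A rough sketch of the append rewrite, with made-up slice names:

package main

import "fmt"

func main() {
	commonPrefixes := []string{"dir1/", "dir2/"}
	entries := []string{"a.txt", "b.txt"}

	// Loop form that gosimple S1011 reports: it only copies one slice into another.
	paths := make([]string, 0, len(commonPrefixes)+len(entries))
	for _, prefix := range commonPrefixes {
		paths = append(paths, prefix)
	}

	// Equivalent single-call form used in the change above.
	paths2 := make([]string, 0, len(commonPrefixes)+len(entries))
	paths2 = append(paths2, commonPrefixes...)

	fmt.Println(paths, paths2)
}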

View File

@ -224,7 +224,7 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma
if err != nil {
return fmt.Errorf("error copying multipart content: %s", err)
}
if _, err := tmp.Seek(0, os.SEEK_SET); err != nil {
if _, err := tmp.Seek(0, io.SeekStart); err != nil {
return fmt.Errorf("error copying multipart content: %s", err)
}
reader = tmp
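os.SEEK_SET still works but has been deprecated since Go 1.7 in favour of the io.Seek* constants, so the handler now rewinds the temporary file with io.SeekStart (which presumably also adds the "io" import in that file). A small, self-contained illustration of the same rewind, not the server code itself:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	tmp, err := ioutil.TempFile("", "upload")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	if _, err := tmp.WriteString("multipart content"); err != nil {
		panic(err)
	}
	// Rewind with io.SeekStart (value 0) instead of the deprecated os.SEEK_SET.
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(tmp)
	fmt.Println(string(data))
}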

View File

@ -97,7 +97,7 @@ func TestBzzrGetPath(t *testing.T) {
isexpectedfailrequest = true
}
}
if isexpectedfailrequest == false {
if !isexpectedfailrequest {
t.Fatalf("Response body does not match, expected: %v, got %v", testmanifest[v], string(respbody))
}
}
@ -126,6 +126,9 @@ func TestBzzrGetPath(t *testing.T) {
}
defer resp.Body.Close()
respbody, err = ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("ReadAll failed: %v", err)
}
if string(respbody) != nonhashresponses[i] {
t.Fatalf("Non-Hash response body does not match, expected: %v, got: %v", nonhashresponses[i], string(respbody))
}

View File

@ -59,14 +59,6 @@ type MountInfo struct {
lock *sync.RWMutex
}
// Inode numbers need to be unique, they are used for caching inside fuse
func newInode() uint64 {
inodeLock.Lock()
defer inodeLock.Unlock()
inode += 1
return inode
}
func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo {
newMountInfo := &MountInfo{
MountPoint: mpoint,
@ -103,7 +95,7 @@ func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
}
log.Info(fmt.Sprintf("Attempting to mount %s ", cleanedMountPoint))
key, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true)
_, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true)
if err != nil {
return nil, err
}
@ -116,8 +108,7 @@ func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
mi.rootDir = rootDir
for suffix, entry := range manifestEntryMap {
key = common.Hex2Bytes(entry.Hash)
key := common.Hex2Bytes(entry.Hash)
fullpath := "/" + suffix
basepath := filepath.Dir(fullpath)
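In Mount, the key returned by BuildDirectoryTree was never read before being overwritten inside the loop, which megacheck reports as a value that is assigned but never used; the fix discards the first return with the blank identifier and declares key with := inside the loop body. A compact sketch of the pattern (helper names are made up):

package main

import "fmt"

func buildTree() (string, map[string]string, error) {
	return "roothash", map[string]string{"a/b": "00ff"}, nil
}

func main() {
	// The root key is not needed here, so it is discarded rather than
	// assigned and then immediately overwritten in the loop below.
	_, entries, err := buildTree()
	if err != nil {
		panic(err)
	}
	for suffix, hash := range entries {
		key := hash // short declaration scoped to the loop body
		fmt.Println(suffix, key)
	}
}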

View File

@ -355,7 +355,7 @@ func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
// sends relevant peer data given by the kademlia hive to the requester
// TODO: remember peers sent for duration of the session, only new peers sent
func (self *Hive) peers(req *retrieveRequestMsgData) {
if req != nil && req.MaxPeers >= 0 {
if req != nil {
var addrs []*peerAddr
if req.timeout == nil || time.Now().Before(*(req.timeout)) {
key := req.Key
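MaxPeers is presumably an unsigned field on retrieveRequestMsgData (it is RLP-encoded as an integer), so req.MaxPeers >= 0 is always true and megacheck flags the comparison as meaningless; dropping it does not change behaviour. The same warning in miniature, with a hypothetical struct:

package main

import "fmt"

type request struct {
	MaxPeers uint64 // unsigned: can never be negative
}

func main() {
	req := &request{MaxPeers: 0}
	// megacheck flags this: an unsigned value is always >= 0, so the
	// second operand of && is dead weight.
	if req != nil && req.MaxPeers >= 0 {
		fmt.Println("always reached for a non-nil req")
	}
}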

View File

@ -133,7 +133,7 @@ type retrieveRequestMsgData struct {
from *peer //
}
func (self retrieveRequestMsgData) String() string {
func (self *retrieveRequestMsgData) String() string {
var from string
if self.from == nil {
from = "ourselves"
@ -148,12 +148,12 @@ func (self retrieveRequestMsgData) String() string {
}
// lookups are encoded by missing request ID
func (self retrieveRequestMsgData) isLookup() bool {
func (self *retrieveRequestMsgData) isLookup() bool {
return self.Id == 0
}
// sets timeout fields
func (self retrieveRequestMsgData) setTimeout(t *time.Time) {
func (self *retrieveRequestMsgData) setTimeout(t *time.Time) {
self.timeout = t
if t != nil {
self.Timeout = uint64(t.UnixNano())
@ -162,7 +162,7 @@ func (self retrieveRequestMsgData) setTimeout(t *time.Time) {
}
}
func (self retrieveRequestMsgData) getTimeout() (t *time.Time) {
func (self *retrieveRequestMsgData) getTimeout() (t *time.Time) {
if self.Timeout > 0 && self.timeout == nil {
timeout := time.Unix(int64(self.Timeout), 0)
t = &timeout
@ -180,7 +180,7 @@ type peerAddr struct {
}
// peerAddr pretty prints as enode
func (self peerAddr) String() string {
func (self *peerAddr) String() string {
var nodeid discover.NodeID
copy(nodeid[:], self.ID)
return discover.NewNode(nodeid, self.IP, 0, self.Port).String()
@ -213,7 +213,7 @@ type peersMsgData struct {
}
// peers msg pretty printer
func (self peersMsgData) String() string {
func (self *peersMsgData) String() string {
var from string
if self.from == nil {
from = "ourselves"
@ -227,7 +227,7 @@ func (self peersMsgData) String() string {
return fmt.Sprintf("from: %v, Key: %x; ID: %v, Peers: %v", from, target, self.Id, self.Peers)
}
func (self peersMsgData) setTimeout(t *time.Time) {
func (self *peersMsgData) setTimeout(t *time.Time) {
self.timeout = t
if t != nil {
self.Timeout = uint64(t.UnixNano())
@ -236,15 +236,6 @@ func (self peersMsgData) setTimeout(t *time.Time) {
}
}
func (self peersMsgData) getTimeout() (t *time.Time) {
if self.Timeout > 0 && self.timeout == nil {
timeout := time.Unix(int64(self.Timeout), 0)
t = &timeout
self.timeout = t
}
return
}
/*
syncRequest
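The receivers on these message helpers change from value to pointer. With a value receiver, setTimeout assigned to fields of a copy and the write was never observed by the caller, which is presumably the warning that triggered the change; String, isLookup and getTimeout are switched as well so the method set stays consistent, and the unused getTimeout on peersMsgData is dropped entirely. A minimal illustration of why the pointer receiver matters, using a stripped-down struct rather than the real message types:

package main

import (
	"fmt"
	"time"
)

type msg struct {
	Timeout uint64
	timeout *time.Time
}

// Value receiver: the assignments modify a copy, so callers never see them.
// staticcheck reports such field writes as never observed.
func (m msg) setTimeoutBroken(t *time.Time) {
	m.timeout = t
	m.Timeout = uint64(t.UnixNano())
}

// Pointer receiver: the caller's struct is actually updated.
func (m *msg) setTimeout(t *time.Time) {
	m.timeout = t
	m.Timeout = uint64(t.UnixNano())
}

func main() {
	now := time.Now()
	a := msg{}
	a.setTimeoutBroken(&now)
	fmt.Println(a.Timeout) // 0: the write was lost

	b := msg{}
	b.setTimeout(&now)
	fmt.Println(b.Timeout) // non-zero
}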

View File

@ -40,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/contracts/chequebook"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
bzzswap "github.com/ethereum/go-ethereum/swarm/services/swap"
"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
"github.com/ethereum/go-ethereum/swarm/storage"
@ -56,8 +55,6 @@ const (
// bzz represents the swarm wire protocol
// an instance is running on each peer
type bzz struct {
selfID discover.NodeID // peer's node id used in peer advertising in handshake
key storage.Key // baseaddress as storage.Key
storage StorageHandler // handler storage/retrieval related requests coming via the bzz wire protocol
hive *Hive // the logistic manager, peerPool, routing service and peer handler
dbAccess *DbAccess // access to db storage counter and iterator for syncing

View File

@ -157,7 +157,6 @@ type syncer struct {
// DB related fields
dbAccess *DbAccess // access to dbStore
db *storage.LDBDatabase // delivery msg db
// native fields
queues [priorities]*syncDb // in-memory cache / queues for sync reqs

View File

@ -156,14 +156,12 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
close(errC)
}()
select {
case err := <-errC:
if err != nil {
//TODO: add a timeout
if err := <-errC; err != nil {
close(quitC)
return nil, err
}
//TODO: add a timeout
}
return key, nil
}
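A select with a single, non-default case blocks on that one communication exactly like the plain channel operation, so gosimple S1000 suggests the direct receive used above; the existing TODO about a timeout is kept, since an actual timeout would reintroduce a second case. Roughly:

package main

import (
	"errors"
	"fmt"
)

func main() {
	errC := make(chan error, 1)
	errC <- errors.New("split failed")
	close(errC)

	// Before: a one-case select, which gosimple S1000 reports as
	// equivalent to a plain receive.
	//
	//   select {
	//   case err := <-errC:
	//       ...
	//   }
	//
	// After: receive directly inside the if statement.
	if err := <-errC; err != nil {
		fmt.Println("aborting:", err)
		return
	}
	fmt.Println("ok")
}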

View File

@ -43,13 +43,6 @@ type chunkerTester struct {
t test
}
func (self *chunkerTester) checkChunks(t *testing.T, want int) {
l := len(self.chunks)
if l != want {
t.Errorf("expected %v chunks, got %v", want, l)
}
}
func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key) {
// reset
self.chunks = make(map[string]*Chunk)
@ -209,20 +202,6 @@ func TestRandomBrokenData(t *testing.T) {
}
}
func readAll(reader LazySectionReader, result []byte) {
size := int64(len(result))
var end int64
for pos := int64(0); pos < size; pos += 1000 {
if pos+1000 > size {
end = size
} else {
end = pos + 1000
}
reader.ReadAt(result[pos:end], pos)
}
}
func benchReadAll(reader LazySectionReader) {
size, _ := reader.Size(nil)
output := make([]byte, 1000)

View File

@ -514,8 +514,7 @@ func (s *DbStore) setCapacity(c uint64) {
s.capacity = c
if s.entryCnt > c {
var ratio float32
ratio = float32(1.01) - float32(c)/float32(s.entryCnt)
ratio := float32(1.01) - float32(c)/float32(s.entryCnt)
if ratio < gcArrayFreeRatio {
ratio = gcArrayFreeRatio
}
@ -528,10 +527,6 @@ func (s *DbStore) setCapacity(c uint64) {
}
}
func (s *DbStore) getEntryCnt() uint64 {
return s.entryCnt
}
func (s *DbStore) Close() {
s.db.Close()
}
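Two more mechanical fixes in this file: declaring ratio with var and assigning on the next line collapses into a single short variable declaration (gosimple S1021), and getEntryCnt is deleted because nothing called it. The declaration rewrite in isolation, with an illustrative constant value:

package main

import "fmt"

const gcArrayFreeRatio = float32(0.05) // illustrative value, not the real constant

func freeRatio(capacity, entryCnt uint64) float32 {
	// Before (flagged by gosimple S1021):
	//   var ratio float32
	//   ratio = float32(1.01) - float32(capacity)/float32(entryCnt)
	ratio := float32(1.01) - float32(capacity)/float32(entryCnt)
	if ratio < gcArrayFreeRatio {
		ratio = gcArrayFreeRatio
	}
	return ratio
}

func main() {
	fmt.Println(freeRatio(500, 1000))
}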

View File

@ -59,7 +59,6 @@ type DPA struct {
lock sync.Mutex
running bool
wg *sync.WaitGroup
quitC chan bool
}
@ -239,6 +238,4 @@ func (self *dpaChunkStore) Put(entry *Chunk) {
}
// Close chunk store
func (self *dpaChunkStore) Close() {
return
}
func (self *dpaChunkStore) Close() {}
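Several stores (the dpaChunkStore here, plus LocalStore, MemStore and NetStore below) had Close methods whose only statement was a bare return; gosimple S1023 flags a redundant return at the end of a function without result values, so the bodies collapse to {}. For example:

package main

// Close whose only statement is a bare return: gosimple S1023 suggests
// removing the return, leaving an empty body.
type verboseStore struct{}

func (s *verboseStore) Close() {
	return
}

// Equivalent, warning-free form used throughout this commit.
type quietStore struct{}

func (s *quietStore) Close() {}

func main() {
	(&verboseStore{}).Close()
	(&quietStore{}).Close()
}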

View File

@ -74,6 +74,4 @@ func (self *LocalStore) Get(key Key) (chunk *Chunk, err error) {
}
// Close local store
func (self *LocalStore) Close() {
return
}
func (self *LocalStore) Close() {}

View File

@ -130,10 +130,6 @@ func (s *MemStore) setCapacity(c uint) {
s.capacity = c
}
func (s *MemStore) getEntryCnt() uint {
return s.entryCnt
}
// entry (not its copy) is going to be in MemStore
func (s *MemStore) Put(entry *Chunk) {
if s.capacity == 0 {
@ -206,8 +202,6 @@ func (s *MemStore) Put(entry *Chunk) {
node.lastDBaccess = s.dbAccessCnt
node.updateAccess(s.accessCnt)
s.entryCnt++
return
}
func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) {
@ -323,6 +317,4 @@ func (s *MemStore) removeOldest() {
}
// Close memstore
func (s *MemStore) Close() {
return
}
func (s *MemStore) Close() {}

View File

@ -19,7 +19,6 @@ package storage
import (
"fmt"
"path/filepath"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
@ -40,7 +39,6 @@ type NetStore struct {
hashfunc Hasher
localStore *LocalStore
cloud CloudStore
lock sync.Mutex
}
// backend engine for cloud store
@ -134,6 +132,4 @@ func (self *NetStore) Get(key Key) (*Chunk, error) {
}
// Close netstore
func (self *NetStore) Close() {
return
}
func (self *NetStore) Close() {}

View File

@ -178,10 +178,9 @@ func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Tas
if swg != nil {
swg.Add(1)
}
select {
case chunkC <- &Chunk{Key: hash, SData: data, wg: swg}:
// case <- self.quitC
}
chunkC <- &Chunk{Key: hash, SData: data, wg: swg}
// TODO: consider selecting on self.quitC to avoid blocking forever on shutdown
}
if depth+1 < len(results.Levels) {
delete(results.Levels[depth+1], task.Index/(pow/self.branches))
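Same S1000 simplification as in the tree chunker, this time for a send: the select had only one live case (the quitC case was commented out), so it becomes a direct channel send. The TODO hints at the shape a shutdown-aware send would take; a hedged sketch with made-up types, not the code this commit adds:

package main

import "fmt"

type chunk struct{ data []byte }

// sendOrQuit sends c on chunkC unless quitC is closed first, which avoids
// blocking forever during shutdown; this is the pattern the TODO alludes to,
// whereas the commit itself uses a plain send.
func sendOrQuit(chunkC chan *chunk, quitC chan struct{}, c *chunk) bool {
	select {
	case chunkC <- c:
		return true
	case <-quitC:
		return false
	}
}

func main() {
	chunkC := make(chan *chunk, 1)
	quitC := make(chan struct{})
	fmt.Println(sendOrQuit(chunkC, quitC, &chunk{data: []byte("payload")}))
}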

View File

@ -115,11 +115,11 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *e
log.Debug(fmt.Sprintf("Set up swarm network with Kademlia hive"))
// setup cloud storage backend
cloud := network.NewForwarder(self.hive)
self.cloud = network.NewForwarder(self.hive)
log.Debug(fmt.Sprintf("-> set swarm forwarder as cloud storage backend"))
// setup cloud storage internal access layer
self.storage = storage.NewNetStore(hash, self.lstore, cloud, config.StoreParams)
// setup cloud storage internal access layer
self.storage = storage.NewNetStore(hash, self.lstore, self.cloud, config.StoreParams)
log.Debug(fmt.Sprintf("-> swarm net store shared access layer to Swarm Chunk Store"))
// set up Depo (storage handler = cloud storage access layer for incoming remote requests)