swarm: remove unused/dead code (#18351)

Anton Evangelatov 2018-12-23 17:31:32 +01:00 committed by GitHub
parent 335760bf06
commit 9e9fc87e70
25 changed files with 19 additions and 400 deletions

View File

@@ -164,10 +164,6 @@ var (
 		Name:  "topic",
 		Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
 	}
-	SwarmFeedDataOnCreateFlag = cli.StringFlag{
-		Name:  "data",
-		Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
-	}
 	SwarmFeedManifestFlag = cli.StringFlag{
 		Name:  "manifest",
 		Usage: "Refers to the feed through a manifest",

View File

@@ -50,10 +50,6 @@ import (
 	opentracing "github.com/opentracing/opentracing-go"
 )
 
-var (
-	ErrNotFound = errors.New("not found")
-)
-
 var (
 	apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
 	apiResolveFail  = metrics.NewRegisteredCounter("api.resolve.fail", nil)
@@ -136,13 +132,6 @@ func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolv
 	}
 }
 
-// MultiResolverOptionWithNameHash is unused at the time of this writing
-func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
-	return func(m *MultiResolver) {
-		m.nameHash = nameHash
-	}
-}
-
 // NewMultiResolver creates a new instance of MultiResolver.
 func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
 	m = &MultiResolver{
@@ -173,40 +162,6 @@ func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
 	return
 }
 
-// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
-func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
-	rs, err := m.getResolveValidator(name)
-	if err != nil {
-		return false, err
-	}
-	var addr common.Address
-	for _, r := range rs {
-		addr, err = r.Owner(m.nameHash(name))
-		// we hide the error if it is not for the last resolver we check
-		if err == nil {
-			return addr == address, nil
-		}
-	}
-	return false, err
-}
-
-// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number
-func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
-	rs, err := m.getResolveValidator(name)
-	if err != nil {
-		return nil, err
-	}
-	for _, r := range rs {
-		var header *types.Header
-		header, err = r.HeaderByNumber(ctx, blockNr)
-		// we hide the error if it is not for the last resolver we check
-		if err == nil {
-			return header, nil
-		}
-	}
-	return nil, err
-}
-
 // getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
 func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
 	rs := m.resolvers[""]
@@ -224,11 +179,6 @@ func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, er
 	return rs, nil
 }
 
-// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
-func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
-	m.nameHash = nameHash
-}
-
 /*
 API implements webserver/file system related content storage and retrieval
 on top of the FileStore
@@ -265,9 +215,6 @@ func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt b
 	return a.fileStore.Store(ctx, data, size, toEncrypt)
 }
 
-// ErrResolve is returned when an URI cannot be resolved from ENS.
-type ErrResolve error
-
 // Resolve a name into a content-addressed hash
 // where address could be an ENS name, or a content addressed hash
 func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
@@ -980,11 +927,6 @@ func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.A
 	return a.feed.Update(ctx, request)
 }
 
-// FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function
-func (a *API) FeedsHashSize() int {
-	return a.feed.HashSize
-}
-
 // ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails
 var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")
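
For context on what remains: getResolveValidator keys resolvers by top-level domain, with the empty string as the catch-all default, and the lookup loop tries each candidate in turn, hiding errors unless they come from the last resolver. A minimal standalone sketch of that routing pattern (Resolver is a hypothetical stand-in for ResolveValidator; this is not the actual swarm/api code):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Resolver stands in for the package's ResolveValidator (hypothetical
// simplification): it maps a name to a content hash.
type Resolver func(name string) (string, error)

// MultiResolver routes lookups by top-level domain; the "" entry is the
// catch-all default, mirroring getResolveValidator's m.resolvers[""].
type MultiResolver struct {
	resolvers map[string][]Resolver
}

func (m *MultiResolver) Resolve(name string) (string, error) {
	rs := m.resolvers[""]
	if i := strings.LastIndex(name, "."); i >= 0 {
		if tldRs, ok := m.resolvers[name[i+1:]]; ok {
			rs = tldRs
		}
	}
	if len(rs) == 0 {
		return "", errors.New("no resolver for " + name)
	}
	var err error
	for _, r := range rs {
		var h string
		h, err = r(name)
		// hide the error unless it is from the last resolver we check,
		// the same policy the removed ValidateOwner loop applied
		if err == nil {
			return h, nil
		}
	}
	return "", err
}

func main() {
	m := &MultiResolver{resolvers: map[string][]Resolver{
		"":    {func(name string) (string, error) { return "", errors.New("unknown: " + name) }},
		"eth": {func(name string) (string, error) { return "c0ffee", nil }},
	}}
	fmt.Println(m.Resolve("theswarm.eth")) // c0ffee <nil>
}
```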

View File

@@ -45,11 +45,6 @@ import (
 	"github.com/pborman/uuid"
 )
 
-var (
-	DefaultGateway = "http://localhost:8500"
-	DefaultClient  = NewClient(DefaultGateway)
-)
-
 var (
 	ErrUnauthorized = errors.New("unauthorized")
 )

View File

@@ -83,23 +83,3 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) {
 	}
 	return &Response{mimeType, status, expsize, string(body[:size])}, err
 }
-
-// Modify(rootHash, basePath, contentHash, contentType) takes the manifest trie rooted in rootHash,
-// and merge on to it. creating an entry w conentType (mime)
-//
-// DEPRECATED: Use the HTTP API instead
-func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
-	uri, err := Parse("bzz:/" + rootHash)
-	if err != nil {
-		return "", err
-	}
-	addr, err := s.api.Resolve(ctx, uri.Addr)
-	if err != nil {
-		return "", err
-	}
-	addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType)
-	if err != nil {
-		return "", err
-	}
-	return addr.Hex(), nil
-}

View File

@@ -29,18 +29,6 @@ func NewControl(api *API, hive *network.Hive) *Control {
 	return &Control{api, hive}
 }
 
-//func (self *Control) BlockNetworkRead(on bool) {
-//	self.hive.BlockNetworkRead(on)
-//}
-//
-//func (self *Control) SyncEnabled(on bool) {
-//	self.hive.SyncEnabled(on)
-//}
-//
-//func (self *Control) SwapEnabled(on bool) {
-//	self.hive.SwapEnabled(on)
-//}
-//
 func (c *Control) Hive() string {
 	return c.hive.String()
 }

View File

@ -26,17 +26,15 @@ import (
func TestParseURI(t *testing.T) { func TestParseURI(t *testing.T) {
type test struct { type test struct {
uri string uri string
expectURI *URI expectURI *URI
expectErr bool expectErr bool
expectRaw bool expectRaw bool
expectImmutable bool expectImmutable bool
expectList bool expectList bool
expectHash bool expectHash bool
expectDeprecatedRaw bool expectValidKey bool
expectDeprecatedImmutable bool expectAddr storage.Address
expectValidKey bool
expectAddr storage.Address
} }
tests := []test{ tests := []test{
{ {

View File

@@ -60,7 +60,3 @@ func (bv *BitVector) Set(i int, v bool) {
 func (bv *BitVector) Bytes() []byte {
 	return bv.b
 }
-
-func (bv *BitVector) Length() int {
-	return bv.len
-}

View File

@@ -35,8 +35,6 @@ import (
 
 const (
 	DefaultNetworkID = 3
-	// ProtocolMaxMsgSize maximum allowed message size
-	ProtocolMaxMsgSize = 10 * 1024 * 1024
 	// timeout for waiting
 	bzzHandshakeTimeout = 3000 * time.Millisecond
 )
@@ -250,11 +248,6 @@ func NewBzzPeer(p *protocols.Peer) *BzzPeer {
 	return &BzzPeer{Peer: p, BzzAddr: NewAddr(p.Node())}
 }
 
-// LastActive returns the time the peer was last active
-func (p *BzzPeer) LastActive() time.Time {
-	return p.lastActive
-}
-
 // ID returns the peer's underlay node identifier.
 func (p *BzzPeer) ID() enode.ID {
 	// This is here to resolve a method tie: both protocols.Peer and BzzAddr are embedded
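
The method-tie comment above refers to a general Go rule: methods promoted from two embedded types at equal depth are ambiguous, and a bare call fails to compile until the outer type defines its own method. A toy illustration, unrelated to the swarm types:

```go
package main

import "fmt"

type A struct{}

func (A) Name() string { return "A" }

type B struct{}

func (B) Name() string { return "B" }

// Both embedded types provide Name at the same depth, so c.Name()
// alone would be ambiguous and fail to compile without an override.
type C struct {
	A
	B
}

// An explicit method on C resolves the tie, just as BzzPeer.ID picks
// the protocols.Peer implementation over BzzAddr's.
func (c C) Name() string { return c.A.Name() }

func main() {
	fmt.Println(C{}.Name()) // A
}
```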

View File

@@ -20,7 +20,6 @@ import (
 	"flag"
 	"fmt"
 	"os"
-	"sync"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/log"
@@ -44,31 +43,7 @@ func init() {
 	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
 }
 
-type testStore struct {
-	sync.Mutex
-
-	values map[string][]byte
-}
-
-func (t *testStore) Load(key string) ([]byte, error) {
-	t.Lock()
-	defer t.Unlock()
-	v, ok := t.values[key]
-	if !ok {
-		return nil, fmt.Errorf("key not found: %s", key)
-	}
-	return v, nil
-}
-
-func (t *testStore) Save(key string, v []byte) error {
-	t.Lock()
-	defer t.Unlock()
-	t.values[key] = v
-	return nil
-}
-
 func HandshakeMsgExchange(lhs, rhs *HandshakeMsg, id enode.ID) []p2ptest.Exchange {
 	return []p2ptest.Exchange{
 		{
 			Expects: []p2ptest.Expect{

View File

@@ -17,14 +17,11 @@
 package intervals
 
 import (
-	"errors"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/swarm/state"
 )
 
-var ErrNotFound = errors.New("not found")
-
 // TestInmemoryStore tests basic functionality of InmemoryStore.
 func TestInmemoryStore(t *testing.T) {
 	testStore(t, state.NewInmemoryStore())

View File

@@ -388,14 +388,6 @@ func (r *Registry) Quit(peerId enode.ID, s Stream) error {
 	return peer.Send(context.TODO(), msg)
 }
 
-func (r *Registry) NodeInfo() interface{} {
-	return nil
-}
-
-func (r *Registry) PeerInfo(id enode.ID) interface{} {
-	return nil
-}
-
 func (r *Registry) Close() error {
 	return r.intervalsStore.Close()
 }

View File

@@ -127,19 +127,9 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 
 // SwarmSyncerClient
 type SwarmSyncerClient struct {
-	sessionAt     uint64
-	nextC         chan struct{}
-	sessionRoot   storage.Address
-	sessionReader storage.LazySectionReader
-	retrieveC     chan *storage.Chunk
-	storeC        chan *storage.Chunk
 	store         storage.SyncChunkStore
-	// chunker     storage.Chunker
-	currentRoot   storage.Address
-	requestFunc   func(chunk *storage.Chunk)
-	end, start    uint64
 	peer          *Peer
 	stream        Stream
 }
 
 // NewSwarmSyncerClient is a contructor for provable data exchange syncer
@@ -209,46 +199,6 @@ func (s *SwarmSyncerClient) BatchDone(stream Stream, from uint64, hashes []byte,
 	return nil
 }
 
-func (s *SwarmSyncerClient) TakeoverProof(stream Stream, from uint64, hashes []byte, root storage.Address) (*TakeoverProof, error) {
-	// for provable syncer currentRoot is non-zero length
-	// TODO: reenable this with putter/getter
-	// if s.chunker != nil {
-	// 	if from > s.sessionAt { // for live syncing currentRoot is always updated
-	// 		//expRoot, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC, s.storeC)
-	// 		expRoot, _, err := s.chunker.Append(s.currentRoot, bytes.NewReader(hashes), s.retrieveC)
-	// 		if err != nil {
-	// 			return nil, err
-	// 		}
-	// 		if !bytes.Equal(root, expRoot) {
-	// 			return nil, fmt.Errorf("HandoverProof mismatch")
-	// 		}
-	// 		s.currentRoot = root
-	// 	} else {
-	// 		expHashes := make([]byte, len(hashes))
-	// 		_, err := s.sessionReader.ReadAt(expHashes, int64(s.end*HashSize))
-	// 		if err != nil && err != io.EOF {
-	// 			return nil, err
-	// 		}
-	// 		if !bytes.Equal(expHashes, hashes) {
-	// 			return nil, errors.New("invalid proof")
-	// 		}
-	// 	}
-	// 	return nil, nil
-	// }
-	s.end += uint64(len(hashes)) / HashSize
-	takeover := &Takeover{
-		Stream: stream,
-		Start:  s.start,
-		End:    s.end,
-		Root:   root,
-	}
-	// serialise and sign
-	return &TakeoverProof{
-		Takeover: takeover,
-		Sig:      nil,
-	}, nil
-}
-
 func (s *SwarmSyncerClient) Close() {}
 
 // base for parsing and formating sync bin key

View File

@@ -41,10 +41,6 @@ func NewAddressFromBytes(b []byte) Address {
 	return Address(h)
 }
 
-func (a Address) IsZero() bool {
-	return a.Bin() == zerosBin
-}
-
 func (a Address) String() string {
 	return fmt.Sprintf("%x", a[:])
 }

View File

@@ -477,7 +477,7 @@ func (t *Pot) each(f func(Val, int) bool) bool {
 	return f(t.pin, t.po)
 }
 
-// EachFrom called with (f, start) is a synchronous iterator over the elements of a Pot
+// eachFrom called with (f, start) is a synchronous iterator over the elements of a Pot
 // within the inclusive range starting from proximity order start
 // the function argument is passed the value and the proximity order wrt the root pin
 // it does NOT include the pinned item of the root
@@ -485,10 +485,6 @@ func (t *Pot) each(f func(Val, int) bool) bool {
 // proximity > pinnedness
 // the iteration ends if the function return false or there are no more elements
 // end of a po range can be implemented since po is passed to the function
-func (t *Pot) EachFrom(f func(Val, int) bool, po int) bool {
-	return t.eachFrom(f, po)
-}
-
 func (t *Pot) eachFrom(f func(Val, int) bool, po int) bool {
	var next bool
	_, lim := t.getPos(po)
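
The closing line of the comment above, that the end of a po range "can be implemented since po is passed to the function", describes the callback contract: returning false halts the walk. A hypothetical in-package helper showing that use (collectUpTo is illustrative, not part of pot's API):

```go
// collectUpTo gathers values whose proximity order does not exceed max,
// walking from proximity order start. Returning false from the callback
// halts eachFrom, which is how an end of a po range is implemented.
// Assumes in-package access to Pot, Val and the unexported eachFrom.
func collectUpTo(t *Pot, start, max int) []Val {
	var out []Val
	t.eachFrom(func(v Val, po int) bool {
		if po > max {
			return false // end of the po range: stop iterating
		}
		out = append(out, v)
		return true // keep walking
	}, start)
	return out
}
```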

View File

@@ -1,28 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package swarm
-
-type Voidstore struct {
-}
-
-func (self Voidstore) Load(string) ([]byte, error) {
-	return nil, nil
-}
-
-func (self Voidstore) Save(string, []byte) error {
-	return nil
-}

View File

@@ -28,9 +28,6 @@ import (
 // ErrNotFound is returned when no results are returned from the database
 var ErrNotFound = errors.New("ErrorNotFound")
 
-// ErrInvalidArgument is returned when the argument type does not match the expected type
-var ErrInvalidArgument = errors.New("ErrorInvalidArgument")
-
 // Store defines methods required to get, set, delete values for different keys
 // and close the underlying resources.
 type Store interface {
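
Because a miss surfaces as the ErrNotFound sentinel rather than a second return value, callers branch on it explicitly. A hedged sketch of that pattern (the Get(key string, i interface{}) error shape is an assumption based on this interface's comment, not a verified API):

```go
// loadCount reads a persisted counter, treating an absent key as zero.
// The Get(key string, i interface{}) error signature is assumed here.
func loadCount(s Store) (uint64, error) {
	var count uint64
	err := s.Get("count", &count) // "count" is a hypothetical key
	if err == ErrNotFound {
		return 0, nil // no value stored yet: start from zero
	}
	if err != nil {
		return 0, err
	}
	return count, nil
}
```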

View File

@@ -65,10 +65,6 @@ If all is well it is possible to implement this by simply composing readers so t
 The hashing itself does use extra copies and allocation though, since it does need it.
 */
 
-var (
-	errAppendOppNotSuported = errors.New("Append operation not supported")
-)
-
 type ChunkerParams struct {
 	chunkSize int64
 	hashSize  int64
@@ -99,7 +95,6 @@ type TreeChunker struct {
 	ctx context.Context
 
 	branches int64
-	hashFunc SwarmHasher
 	dataSize int64
 	data     io.Reader
 	// calculated
@@ -365,10 +360,6 @@ func (tc *TreeChunker) runWorker(ctx context.Context) {
 	}()
 }
 
-func (tc *TreeChunker) Append() (Address, func(), error) {
-	return nil, nil, errAppendOppNotSuported
-}
-
 // LazyChunkReader implements LazySectionReader
 type LazyChunkReader struct {
 	ctx context.Context
@@ -411,7 +402,6 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
 	log.Debug("lazychunkreader.size", "addr", r.addr)
 	if r.chunkData == nil {
 		startTime := time.Now()
-
 		chunkData, err := r.getter.Get(cctx, Reference(r.addr))
 		if err != nil {
@@ -420,13 +410,8 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e
 		}
 		metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime)
 		r.chunkData = chunkData
-		s := r.chunkData.Size()
-		log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
-		if s < 0 {
-			return 0, errors.New("corrupt size")
-		}
-		return int64(s), nil
 	}
 	s := r.chunkData.Size()
 	log.Debug("lazychunkreader.size", "key", r.addr, "size", s)
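
The deduplicated path above now computes the size exactly once, after the chunk data is cached. It works because swarm chunk data starts with an 8-byte little-endian length prefix, which is exactly what ChunkData.Size decodes in this commit's types.go hunk (for a leaf chunk the prefix is the payload length; for intermediate chunks it is the subtree span, an assumption based on how Size is used here). A self-contained sketch of that layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello")

	// chunk data = 8-byte little-endian length prefix + payload,
	// the layout ChunkData.Size decodes via binary.LittleEndian.Uint64(c[:8])
	chunk := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint64(chunk[:8], uint64(len(payload)))
	copy(chunk[8:], payload)

	fmt.Println(binary.LittleEndian.Uint64(chunk[:8])) // 5
}
```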

View File

@@ -64,16 +64,6 @@ func (db *LDBDatabase) Delete(key []byte) error {
 	return db.db.Delete(key, nil)
 }
 
-func (db *LDBDatabase) LastKnownTD() []byte {
-	data, _ := db.Get([]byte("LTD"))
-
-	if len(data) == 0 {
-		data = []byte{0x0}
-	}
-
-	return data
-}
-
 func (db *LDBDatabase) NewIterator() iterator.Iterator {
 	metrics.GetOrRegisterCounter("ldbdatabase.newiterator", nil).Inc(1)

View File

@@ -23,23 +23,15 @@ import (
 const (
 	ErrInit = iota
 	ErrNotFound
-	ErrIO
 	ErrUnauthorized
 	ErrInvalidValue
 	ErrDataOverflow
 	ErrNothingToReturn
-	ErrCorruptData
 	ErrInvalidSignature
 	ErrNotSynced
-	ErrPeriodDepth
-	ErrCnt
 )
 
 var (
 	ErrChunkNotFound = errors.New("chunk not found")
-	ErrFetching      = errors.New("chunk still fetching")
 	ErrChunkInvalid  = errors.New("invalid chunk")
-	ErrChunkForward  = errors.New("cannot forward")
-	ErrChunkUnavailable = errors.New("chunk unavailable")
-	ErrChunkTimeout  = errors.New("timeout")
 )
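
Note that ErrIO and ErrCorruptData sat in the middle of the iota block, so every constant after each of them is renumbered by this change; that is only safe if the codes are never persisted or exchanged between differently-versioned peers, which this cleanup presumably relies on. The resulting values:

```go
const (
	ErrInit = iota      // 0
	ErrNotFound         // 1 (unchanged)
	ErrUnauthorized     // 2 (was 3 while ErrIO still occupied 2)
	ErrInvalidValue     // 3
	ErrDataOverflow     // 4
	ErrNothingToReturn  // 5
	ErrInvalidSignature // 6 (was 8 before ErrIO and ErrCorruptData were dropped)
	ErrNotSynced        // 7
)
```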

View File

@@ -248,10 +248,6 @@ func U64ToBytes(val uint64) []byte {
 	return data
 }
 
-func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
-	index.Access = s.accessCnt
-}
-
 func getIndexKey(hash Address) []byte {
 	hashSize := len(hash)
 	key := make([]byte, hashSize+1)
@@ -777,18 +773,6 @@ func (s *LDBStore) BinIndex(po uint8) uint64 {
 	return s.bucketCnt[po]
 }
 
-func (s *LDBStore) Size() uint64 {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-	return s.entryCnt
-}
-
-func (s *LDBStore) CurrentStorageIndex() uint64 {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-	return s.dataIdx
-}
-
 // Put adds a chunk to the database, adding indices and incrementing global counters.
 // If it already exists, it merely increments the access count of the existing entry.
 // Is thread safe
@@ -810,11 +794,11 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
 	batch := s.batch
 
 	log.Trace("ldbstore.put: s.db.Get", "key", chunk.Address(), "ikey", fmt.Sprintf("%x", ikey))
-	idata, err := s.db.Get(ikey)
+	_, err := s.db.Get(ikey)
 	if err != nil {
 		s.doPut(chunk, &index, po)
 	}
-	idata = encodeIndex(&index)
+	idata := encodeIndex(&index)
 	s.batch.Put(ikey, idata)
 
 	// add the access-chunkindex index for garbage collection
View File

@@ -79,14 +79,6 @@ func testPoFunc(k Address) (ret uint8) {
 	return uint8(Proximity(basekey, k[:]))
 }
 
-func (db *testDbStore) close() {
-	db.Close()
-	err := os.RemoveAll(db.dir)
-	if err != nil {
-		panic(err)
-	}
-}
-
 func testDbStoreRandom(n int, chunksize int64, mock bool, t *testing.T) {
 	db, cleanup, err := newTestDbStore(mock, true)
 	defer cleanup()
@@ -453,7 +445,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
 	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
 
 	for i := 0; i < n; i++ {
-		ret, err := ldb.Get(nil, chunks[i].Address())
+		ret, err := ldb.Get(context.TODO(), chunks[i].Address())
 		if i%2 == 0 {
 			// expect even chunks to be missing

View File

@@ -103,13 +103,6 @@ type Exporter interface {
 	Export(w io.Writer) (n int, err error)
 }
 
-// ImportExporter is an interface for importing and exporting
-// mock store data to and from a tar archive.
-type ImportExporter interface {
-	Importer
-	Exporter
-}
-
 // ExportedChunk is the structure that is saved in tar archive for
 // each chunk as JSON-encoded bytes.
 type ExportedChunk struct {

View File

@@ -71,11 +71,6 @@ const (
 	splitTimeout = time.Minute * 5
 )
 
-const (
-	DataChunk = 0
-	TreeChunk = 1
-)
-
 type PyramidSplitterParams struct {
 	SplitterParams
 	getter Getter

View File

@@ -23,7 +23,6 @@ import (
 	"crypto/rand"
 	"encoding/binary"
 	"fmt"
-	"hash"
 	"io"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -35,50 +34,10 @@ import (
 const MaxPO = 16
 const AddressLength = 32
 
-type Hasher func() hash.Hash
 type SwarmHasher func() SwarmHash
 
-// Peer is the recorded as Source on the chunk
-// should probably not be here? but network should wrap chunk object
-type Peer interface{}
-
 type Address []byte
 
-func (a Address) Size() uint {
-	return uint(len(a))
-}
-
-func (a Address) isEqual(y Address) bool {
-	return bytes.Equal(a, y)
-}
-
-func (a Address) bits(i, j uint) uint {
-	ii := i >> 3
-	jj := i & 7
-	if ii >= a.Size() {
-		return 0
-	}
-
-	if jj+j <= 8 {
-		return uint((a[ii] >> jj) & ((1 << j) - 1))
-	}
-
-	res := uint(a[ii] >> jj)
-	jj = 8 - jj
-	j -= jj
-	for j != 0 {
-		ii++
-		if j < 8 {
-			res += uint(a[ii]&((1<<j)-1)) << jj
-			return res
-		}
-		res += uint(a[ii]) << jj
-		jj += 8
-		j -= 8
-	}
-	return res
-}
-
 // Proximity(x, y) returns the proximity order of the MSB distance between x and y
 //
 // The distance metric MSB(x, y) of two equal length byte sequences x an y is the
@@ -112,10 +71,6 @@ func Proximity(one, other []byte) (ret int) {
 	return MaxPO
 }
 
-func IsZeroAddr(addr Address) bool {
-	return len(addr) == 0 || bytes.Equal(addr, ZeroAddr)
-}
-
 var ZeroAddr = Address(common.Hash{}.Bytes())
 
 func MakeHashFunc(hash string) SwarmHasher {
@@ -304,10 +259,6 @@ func (c ChunkData) Size() uint64 {
 	return binary.LittleEndian.Uint64(c[:8])
 }
 
-func (c ChunkData) Data() []byte {
-	return c[8:]
-}
-
 type ChunkValidator interface {
 	Validate(chunk Chunk) bool
 }
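
For context on the surviving Proximity function: it counts the leading bits two byte strings share (equivalently, the leading zero bits of their XOR), capping the result at MaxPO, which is why the fallthrough above returns MaxPO. A standalone sketch with a worked value (the loop shape differs from the real bit-by-bit implementation but computes the same thing):

```go
package main

import (
	"fmt"
	"math/bits"
)

const MaxPO = 16

// proximity mirrors the behavior of storage.Proximity: the number of
// leading zero bits of one XOR other, capped at MaxPO.
func proximity(one, other []byte) int {
	for i := 0; i < len(one) && i < len(other); i++ {
		if oxo := one[i] ^ other[i]; oxo != 0 {
			po := i*8 + bits.LeadingZeros8(oxo)
			if po > MaxPO {
				return MaxPO
			}
			return po
		}
	}
	return MaxPO
}

func main() {
	// 0xA0 = 1010 0000, 0xA4 = 1010 0100: the first differing bit is
	// bit 5 counting from the MSB, so the proximity order is 5.
	fmt.Println(proximity([]byte{0xA0}, []byte{0xA4})) // 5
}
```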

View File

@@ -74,8 +74,6 @@ type Swarm struct {
 	bzz         *network.Bzz       // the logistic manager
 	backend     chequebook.Backend // simple blockchain Backend
 	privateKey  *ecdsa.PrivateKey
-	corsString  string
-	swapEnabled bool
 	netStore    *storage.NetStore
 	sfs         *fuse.SwarmFS // need this to cleanup all the active mounts on node exit
 	ps          *pss.Pss
@@ -86,18 +84,6 @@ type Swarm struct {
 	tracerClose io.Closer
 }
 
-type SwarmAPI struct {
-	Api     *api.API
-	Backend chequebook.Backend
-}
-
-func (self *Swarm) API() *SwarmAPI {
-	return &SwarmAPI{
-		Api:     self.api,
-		Backend: self.backend,
-	}
-}
-
 // creates a new swarm service instance
 // implements node.Service
 // If mockStore is not nil, it will be used as the storage for chunk data.
@@ -479,14 +465,6 @@ func (self *Swarm) Protocols() (protos []p2p.Protocol) {
 	return
 }
 
-func (self *Swarm) RegisterPssProtocol(spec *protocols.Spec, targetprotocol *p2p.Protocol, options *pss.ProtocolParams) (*pss.Protocol, error) {
-	if !pss.IsActiveProtocol {
-		return nil, fmt.Errorf("Pss protocols not available (built with !nopssprotocol tag)")
-	}
-	topic := pss.ProtocolTopic(spec)
-	return pss.RegisterProtocol(self.ps, &topic, spec, targetprotocol, options)
-}
-
 // implements node.Service
 // APIs returns the RPC API descriptors the Swarm implementation offers
 func (self *Swarm) APIs() []rpc.API {
@@ -535,10 +513,6 @@ func (self *Swarm) APIs() []rpc.API {
 	return apis
 }
 
-func (self *Swarm) Api() *api.API {
-	return self.api
-}
-
 // SetChequebook ensures that the local checquebook is set up on chain.
 func (self *Swarm) SetChequebook(ctx context.Context) error {
 	err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)
err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path) err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)