Merge pull request #2965 from fjl/swarm-merge
swarm: plan bee for content storage and distribution on web3
commit 5cb3fa2f89
@@ -42,6 +42,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
 	"golang.org/x/net/context"
 )
 
@@ -408,8 +409,7 @@ func NewOutbox(chbook *Chequebook, beneficiary common.Address) *Outbox {
 }
 
 // Issue creates cheque.
-func (self *Outbox) Issue(amount *big.Int) (interface{}, error) {
-	// TODO(fjl): the return type should be more descriptive.
+func (self *Outbox) Issue(amount *big.Int) (swap.Promise, error) {
 	return self.chequeBook.Issue(self.beneficiary, amount)
 }
 
@@ -546,8 +546,7 @@ func (self *Inbox) autoCash(cashInterval time.Duration) {
 
 // Receive is called to deposit the latest cheque to the incoming Inbox.
 // The given promise must be a *Cheque.
-func (self *Inbox) Receive(promise interface{}) (*big.Int, error) {
-	// TODO(fjl): the type of promise should be safer
+func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
 	ch := promise.(*Cheque)
 
 	defer self.lock.Unlock()
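The hunks above swap bare interface{} values for the named swap.Promise type, so payment-protocol code documents its intent while the concrete *Cheque is still recovered by type assertion. A minimal sketch of that pattern follows; the Promise definition here is an assumption (the real one lives in swarm/services/swap/swap), and the names are illustrative only.

package main

import (
	"fmt"
	"math/big"
)

// Promise marks any redeemable payment promise (assumed shape).
type Promise interface{}

// Cheque is a toy stand-in for chequebook.Cheque.
type Cheque struct{ Amount *big.Int }

// Receive mirrors Inbox.Receive above: it accepts a Promise and asserts
// the concrete *Cheque type before processing it.
func Receive(promise Promise) (*big.Int, error) {
	ch, ok := promise.(*Cheque)
	if !ok {
		return nil, fmt.Errorf("promise must be a *Cheque, got %T", promise)
	}
	return ch.Amount, nil
}

func main() {
	amount, err := Receive(&Cheque{Amount: big.NewInt(42)})
	fmt.Println(amount, err)
}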
@@ -18,17 +18,159 @@
 package web3ext
 
 var Modules = map[string]string{
 	"admin":      Admin_JS,
+	"bzz":        Bzz_JS,
+	"chequebook": Chequebook_JS,
 	"debug":      Debug_JS,
+	"ens":        ENS_JS,
 	"eth":        Eth_JS,
 	"miner":      Miner_JS,
 	"net":        Net_JS,
 	"personal":   Personal_JS,
 	"rpc":        RPC_JS,
 	"shh":        Shh_JS,
 	"txpool":     TxPool_JS,
 }
 
+const Bzz_JS = `
+web3._extend({
+	property: 'bzz',
+	methods:
+	[
+		new web3._extend.Method({
+			name: 'blockNetworkRead',
+			call: 'bzz_blockNetworkRead',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'syncEnabled',
+			call: 'bzz_syncEnabled',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'swapEnabled',
+			call: 'bzz_swapEnabled',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'download',
+			call: 'bzz_download',
+			params: 2,
+			inputFormatter: [null, null]
+		}),
+		new web3._extend.Method({
+			name: 'upload',
+			call: 'bzz_upload',
+			params: 2,
+			inputFormatter: [null, null]
+		}),
+		new web3._extend.Method({
+			name: 'retrieve',
+			call: 'bzz_retrieve',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'store',
+			call: 'bzz_store',
+			params: 2,
+			inputFormatter: [null, null]
+		}),
+		new web3._extend.Method({
+			name: 'get',
+			call: 'bzz_get',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'put',
+			call: 'bzz_put',
+			params: 2,
+			inputFormatter: [null, null]
+		}),
+		new web3._extend.Method({
+			name: 'modify',
+			call: 'bzz_modify',
+			params: 4,
+			inputFormatter: [null, null, null, null]
+		})
+	],
+	properties:
+	[
+		new web3._extend.Property({
+			name: 'hive',
+			getter: 'bzz_hive'
+		}),
+		new web3._extend.Property({
+			name: 'info',
+			getter: 'bzz_info',
+		}),
+	]
+});
+`
+
+const ENS_JS = `
+web3._extend({
+	property: 'ens',
+	methods:
+	[
+		new web3._extend.Method({
+			name: 'register',
+			call: 'ens_register',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'setContentHash',
+			call: 'ens_setContentHash',
+			params: 2,
+			inputFormatter: [null, null]
+		}),
+		new web3._extend.Method({
+			name: 'resolve',
+			call: 'ens_resolve',
+			params: 1,
+			inputFormatter: [null]
+		}),
+	]
+})
+`
+
+const Chequebook_JS = `
+web3._extend({
+	property: 'chequebook',
+	methods:
+	[
+		new web3._extend.Method({
+			name: 'deposit',
+			call: 'chequebook_deposit',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Property({
+			name: 'balance',
+			getter: 'chequebook_balance',
+			outputFormatter: web3._extend.utils.toDecimal
+		}),
+		new web3._extend.Method({
+			name: 'cash',
+			call: 'chequebook_cash',
+			params: 1,
+			inputFormatter: [null]
+		}),
+		new web3._extend.Method({
+			name: 'issue',
+			call: 'chequebook_issue',
+			params: 2,
+			inputFormatter: [null, null]
+		}),
+	]
+});
+`
+
 const Admin_JS = `
 web3._extend({
 	property: 'admin',
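Each value in the Modules map above is a JavaScript snippet the console evaluates to expose the new bzz, chequebook and ens RPC namespaces. A minimal sketch of consuming that map from Go follows; the import path for package web3ext is an assumption, and the iteration is illustrative, not the console's actual loading code.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/internal/web3ext" // assumed location of package web3ext
)

func main() {
	// The swarm-related console extensions registered by this change.
	for _, name := range []string{"bzz", "chequebook", "ens"} {
		js, ok := web3ext.Modules[name]
		if !ok {
			fmt.Printf("module %s missing\n", name)
			continue
		}
		// Each entry is extension JS to be evaluated in the console runtime.
		fmt.Printf("module %s: %d bytes of extension JS\n", name, len(js))
	}
}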
swarm/api/api.go (new file, 191 lines)
@@ -0,0 +1,191 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"fmt"
	"io"
	"regexp"
	"strings"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var (
	hashMatcher      = regexp.MustCompile("^[0-9A-Fa-f]{64}")
	slashes          = regexp.MustCompile("/+")
	domainAndVersion = regexp.MustCompile("[@:;,]+")
)

type Resolver interface {
	Resolve(string) (common.Hash, error)
}

/*
Api implements webserver/file system related content storage and retrieval
on top of the DPA. It is the public interface of the DPA which is included
in the ethereum stack.
*/
type Api struct {
	dpa *storage.DPA
	dns Resolver
}

// the api constructor initialises a new Api instance
func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) {
	self = &Api{
		dpa: dpa,
		dns: dns,
	}
	return
}

// DPA reader API
func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader {
	return self.dpa.Retrieve(key)
}

func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) {
	return self.dpa.Store(data, size, wg, nil)
}

type ErrResolve error

// DNS Resolver
func (self *Api) Resolve(hostPort string, nameresolver bool) (storage.Key, error) {
	if hashMatcher.MatchString(hostPort) || self.dns == nil {
		glog.V(logger.Detail).Infof("host is a contentHash: '%v'", hostPort)
		return storage.Key(common.Hex2Bytes(hostPort)), nil
	}
	if !nameresolver {
		return nil, fmt.Errorf("'%s' is not a content hash value.", hostPort)
	}
	contentHash, err := self.dns.Resolve(hostPort)
	if err != nil {
		err = ErrResolve(err)
		glog.V(logger.Warn).Infof("DNS error: %v", err)
	}
	glog.V(logger.Detail).Infof("host lookup: %v -> %v", hostPort, contentHash)
	return contentHash[:], err
}

func parse(uri string) (hostPort, path string) {
	parts := slashes.Split(uri, 3)
	var i int
	if len(parts) == 0 {
		return
	}
	// a leading slash is now optional
	for len(parts[i]) == 0 {
		i++
	}
	hostPort = parts[i]
	for i < len(parts)-1 {
		i++
		if len(path) > 0 {
			path = path + "/" + parts[i]
		} else {
			path = parts[i]
		}
	}
	glog.V(logger.Debug).Infof("host: '%s', path '%s' requested.", hostPort, path)
	return
}

func (self *Api) parseAndResolve(uri string, nameresolver bool) (key storage.Key, hostPort, path string, err error) {
	hostPort, path = parse(uri)
	// resolve the host portion to a content hash
	contentHash, err := self.Resolve(hostPort, nameresolver)
	glog.V(logger.Debug).Infof("Resolved '%s' to contentHash: '%s', path: '%s'", uri, contentHash, path)
	return contentHash[:], hostPort, path, err
}

// Put provides singleton manifest creation on top of dpa store
func (self *Api) Put(content, contentType string) (string, error) {
	r := strings.NewReader(content)
	wg := &sync.WaitGroup{}
	key, err := self.dpa.Store(r, int64(len(content)), wg, nil)
	if err != nil {
		return "", err
	}
	manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
	r = strings.NewReader(manifest)
	key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil)
	if err != nil {
		return "", err
	}
	wg.Wait()
	return key.String(), nil
}

// Get uses iterative manifest retrieval and prefix matching
// to resolve path to content using dpa retrieve.
// It returns a section reader, mimeType, status and an error.
func (self *Api) Get(uri string, nameresolver bool) (reader storage.LazySectionReader, mimeType string, status int, err error) {
	key, _, path, err := self.parseAndResolve(uri, nameresolver)
	quitC := make(chan bool)
	trie, err := loadManifest(self.dpa, key, quitC)
	if err != nil {
		glog.V(logger.Warn).Infof("loadManifestTrie error: %v", err)
		return
	}

	glog.V(logger.Detail).Infof("getEntry(%s)", path)
	entry, _ := trie.getEntry(path)
	if entry != nil {
		key = common.Hex2Bytes(entry.Hash)
		status = entry.Status
		mimeType = entry.ContentType
		glog.V(logger.Detail).Infof("content lookup key: '%v' (%v)", key, mimeType)
		reader = self.dpa.Retrieve(key)
	} else {
		err = fmt.Errorf("manifest entry for '%s' not found", path)
		glog.V(logger.Warn).Infof("%v", err)
	}
	return
}

func (self *Api) Modify(uri, contentHash, contentType string, nameresolver bool) (newRootHash string, err error) {
	root, _, path, err := self.parseAndResolve(uri, nameresolver)
	quitC := make(chan bool)
	trie, err := loadManifest(self.dpa, root, quitC)
	if err != nil {
		return
	}

	if contentHash != "" {
		entry := &manifestTrieEntry{
			Path:        path,
			Hash:        contentHash,
			ContentType: contentType,
		}
		trie.addEntry(entry, quitC)
	} else {
		trie.deleteEntry(path, quitC)
	}

	err = trie.recalcAndStore()
	if err != nil {
		return
	}
	return trie.hash.String(), nil
}
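A minimal sketch (not part of the commit) of driving the Api type above: store a string behind a single-entry manifest with Put, then read it back with Get. NewLocalDPA, Start/Stop and the datadir setup are taken from the test helpers in this same change; error handling is trimmed.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	datadir, _ := ioutil.TempDir("", "bzz-example")
	defer os.RemoveAll(datadir)

	dpa, err := storage.NewLocalDPA(datadir)
	if err != nil {
		panic(err)
	}
	dpa.Start()
	defer dpa.Stop()

	a := api.NewApi(dpa, nil) // nil Resolver: URIs must be raw content hashes

	bzzhash, err := a.Put("hello swarm", "text/plain")
	if err != nil {
		panic(err)
	}

	reader, mimeType, _, err := a.Get(bzzhash, false)
	if err != nil {
		panic(err)
	}
	size, _ := reader.Size(make(chan bool))
	buf := make([]byte, size)
	reader.Read(buf) // returns io.EOF once the full content is read
	fmt.Printf("retrieved %q as %s\n", buf, mimeType)
}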
swarm/api/api_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"io"
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func testApi(t *testing.T, f func(*Api)) {
	datadir, err := ioutil.TempDir("", "bzz-test")
	if err != nil {
		t.Fatalf("unable to create temp dir: %v", err)
	}
	os.RemoveAll(datadir)
	defer os.RemoveAll(datadir)
	dpa, err := storage.NewLocalDPA(datadir)
	if err != nil {
		return
	}
	api := NewApi(dpa, nil)
	dpa.Start()
	f(api)
	dpa.Stop()
}

type testResponse struct {
	reader storage.LazySectionReader
	*Response
}

func checkResponse(t *testing.T, resp *testResponse, exp *Response) {
	if resp.MimeType != exp.MimeType {
		t.Errorf("incorrect mimeType. expected '%s', got '%s'", exp.MimeType, resp.MimeType)
	}
	if resp.Status != exp.Status {
		t.Errorf("incorrect status. expected '%d', got '%d'", exp.Status, resp.Status)
	}
	if resp.Size != exp.Size {
		t.Errorf("incorrect size. expected '%d', got '%d'", exp.Size, resp.Size)
	}
	if resp.reader != nil {
		content := make([]byte, resp.Size)
		read, _ := resp.reader.Read(content)
		if int64(read) != exp.Size {
			t.Errorf("incorrect content length. expected '%d', got '%d'", exp.Size, read)
		}
		resp.Content = string(content)
	}
	if resp.Content != exp.Content {
		t.Errorf("incorrect content. expected '%s', got '%s'", exp.Content, resp.Content)
	}
}

func expResponse(content string, mimeType string, status int) *Response {
	glog.V(logger.Detail).Infof("expected content (%v): %v", len(content), content)
	return &Response{mimeType, status, int64(len(content)), content}
}

func testGet(t *testing.T, api *Api, bzzhash string) *testResponse {
	reader, mimeType, status, err := api.Get(bzzhash, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	quitC := make(chan bool)
	size, err := reader.Size(quitC)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	glog.V(logger.Detail).Infof("reader size: %v", size)
	s := make([]byte, size)
	_, err = reader.Read(s)
	if err != io.EOF {
		t.Fatalf("unexpected error: %v", err)
	}
	reader.Seek(0, 0)
	return &testResponse{reader, &Response{mimeType, status, size, string(s)}}
}

func TestApiPut(t *testing.T) {
	testApi(t, func(api *Api) {
		content := "hello"
		exp := expResponse(content, "text/plain", 0)
		bzzhash, err := api.Put(content, exp.MimeType)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		resp := testGet(t, api, bzzhash)
		checkResponse(t, resp, exp)
	})
}
swarm/api/config.go (new file, 132 lines)
@@ -0,0 +1,132 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"crypto/ecdsa"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/services/swap"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	port = "8500"
)

// by default the ENS root points to the toy network ENS registrar
var (
	toyNetEnsRoot = common.HexToAddress("0xd344889e0be3e9ef6c26b0f60ef66a32e83c1b69")
)

// separate bzz directories
// allow several bzz nodes running in parallel
type Config struct {
	// serialised/persisted fields
	*storage.StoreParams
	*storage.ChunkerParams
	*network.HiveParams
	Swap *swap.SwapParams
	*network.SyncParams
	Path      string
	Port      string
	PublicKey string
	BzzKey    string
	EnsRoot   common.Address
}

// config is agnostic to where the private key comes from,
// so managing accounts is outside swarm and left to wrappers
func NewConfig(path string, contract common.Address, prvKey *ecdsa.PrivateKey) (self *Config, err error) {
	address := crypto.PubkeyToAddress(prvKey.PublicKey) // default beneficiary address
	dirpath := filepath.Join(path, common.Bytes2Hex(address.Bytes()))
	err = os.MkdirAll(dirpath, os.ModePerm)
	if err != nil {
		return
	}
	confpath := filepath.Join(dirpath, "config.json")
	var data []byte
	pubkey := crypto.FromECDSAPub(&prvKey.PublicKey)
	pubkeyhex := common.ToHex(pubkey)
	keyhex := crypto.Sha3Hash(pubkey).Hex()

	self = &Config{
		SyncParams:    network.NewSyncParams(dirpath),
		HiveParams:    network.NewHiveParams(dirpath),
		ChunkerParams: storage.NewChunkerParams(),
		StoreParams:   storage.NewStoreParams(dirpath),
		Port:          port,
		Path:          dirpath,
		Swap:          swap.DefaultSwapParams(contract, prvKey),
		PublicKey:     pubkeyhex,
		BzzKey:        keyhex,
		EnsRoot:       toyNetEnsRoot,
	}
	data, err = ioutil.ReadFile(confpath)
	if err != nil {
		if !os.IsNotExist(err) {
			return
		}
		// file does not exist: write out the default config file
		err = self.Save()
		if err != nil {
			err = fmt.Errorf("error writing config: %v", err)
		}
		return
	}
	// file exists, deserialise
	err = json.Unmarshal(data, self)
	if err != nil {
		return nil, fmt.Errorf("unable to parse config: %v", err)
	}
	// check public key
	if pubkeyhex != self.PublicKey {
		return nil, fmt.Errorf("public key does not match the one in the config file %v != %v", pubkeyhex, self.PublicKey)
	}
	if keyhex != self.BzzKey {
		return nil, fmt.Errorf("bzz key does not match the one in the config file %v != %v", keyhex, self.BzzKey)
	}
	self.Swap.SetKey(prvKey)

	if (self.EnsRoot == common.Address{}) {
		self.EnsRoot = toyNetEnsRoot
	}

	return
}

func (self *Config) Save() error {
	data, err := json.MarshalIndent(self, "", "    ")
	if err != nil {
		return err
	}
	err = os.MkdirAll(self.Path, os.ModePerm)
	if err != nil {
		return err
	}
	confpath := filepath.Join(self.Path, "config.json")
	return ioutil.WriteFile(confpath, data, os.ModePerm)
}
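A minimal sketch of how a wrapper obtains a per-node Config from NewConfig above. Assumptions: the key is generated on the fly and no chequebook contract is deployed yet, hence the zero Address (the same pattern the config test below uses).

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	dir, _ := ioutil.TempDir("", "bzz-config")
	prvKey, err := crypto.GenerateKey() // the wrapper supplies the account key
	if err != nil {
		panic(err)
	}
	// Zero contract address: swap runs without a deployed chequebook.
	cfg, err := api.NewConfig(dir, common.Address{}, prvKey)
	if err != nil {
		panic(err)
	}
	fmt.Println("node dir:", cfg.Path, "proxy port:", cfg.Port)
	fmt.Println("bzz key:", cfg.BzzKey)
}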
swarm/api/config_test.go (new file, 124 lines)
@@ -0,0 +1,124 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

var (
	hexprvkey     = "65138b2aa745041b372153550584587da326ab440576b2a1191dd95cee30039c"
	defaultConfig = `{
    "ChunkDbPath": "` + filepath.Join("TMPDIR", "0d2f62485607cf38d9d795d93682a517661e513e", "chunks") + `",
    "DbCapacity": 5000000,
    "CacheCapacity": 5000,
    "Radius": 0,
    "Branches": 128,
    "Hash": "SHA3",
    "CallInterval": 3000000000,
    "KadDbPath": "` + filepath.Join("TMPDIR", "0d2f62485607cf38d9d795d93682a517661e513e", "bzz-peers.json") + `",
    "MaxProx": 8,
    "ProxBinSize": 2,
    "BucketSize": 4,
    "PurgeInterval": 151200000000000,
    "InitialRetryInterval": 42000000,
    "MaxIdleInterval": 42000000000,
    "ConnRetryExp": 2,
    "Swap": {
        "BuyAt": 20000000000,
        "SellAt": 20000000000,
        "PayAt": 100,
        "DropAt": 10000,
        "AutoCashInterval": 300000000000,
        "AutoCashThreshold": 50000000000000,
        "AutoDepositInterval": 300000000000,
        "AutoDepositThreshold": 50000000000000,
        "AutoDepositBuffer": 100000000000000,
        "PublicKey": "0x045f5cfd26692e48d0017d380349bcf50982488bc11b5145f3ddf88b24924299048450542d43527fbe29a5cb32f38d62755393ac002e6bfdd71b8d7ba725ecd7a3",
        "Contract": "0x0000000000000000000000000000000000000000",
        "Beneficiary": "0x0d2f62485607cf38d9d795d93682a517661e513e"
    },
    "RequestDbPath": "` + filepath.Join("TMPDIR", "0d2f62485607cf38d9d795d93682a517661e513e", "requests") + `",
    "RequestDbBatchSize": 512,
    "KeyBufferSize": 1024,
    "SyncBatchSize": 128,
    "SyncBufferSize": 128,
    "SyncCacheSize": 1024,
    "SyncPriorities": [
        2,
        1,
        1,
        0,
        0
    ],
    "SyncModes": [
        true,
        true,
        true,
        true,
        false
    ],
    "Path": "` + filepath.Join("TMPDIR", "0d2f62485607cf38d9d795d93682a517661e513e") + `",
    "Port": "8500",
    "PublicKey": "0x045f5cfd26692e48d0017d380349bcf50982488bc11b5145f3ddf88b24924299048450542d43527fbe29a5cb32f38d62755393ac002e6bfdd71b8d7ba725ecd7a3",
    "BzzKey": "0xe861964402c0b78e2d44098329b8545726f215afa737d803714a4338552fcb81",
    "EnsRoot": "0xd344889e0be3e9ef6c26b0f60ef66a32e83c1b69"
}`
)

func TestConfigWriteRead(t *testing.T) {
	tmp, err := ioutil.TempDir(os.TempDir(), "bzz-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	prvkey := crypto.ToECDSA(common.Hex2Bytes(hexprvkey))
	orig, err := NewConfig(tmp, common.Address{}, prvkey)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	account := crypto.PubkeyToAddress(prvkey.PublicKey)
	dirpath := filepath.Join(tmp, common.Bytes2Hex(account.Bytes()))
	confpath := filepath.Join(dirpath, "config.json")
	data, err := ioutil.ReadFile(confpath)
	if err != nil {
		t.Fatalf("default config file cannot be read: %v", err)
	}
	exp := strings.Replace(defaultConfig, "TMPDIR", tmp, -1)
	exp = strings.Replace(exp, "\\", "\\\\", -1)

	if string(data) != exp {
		t.Fatalf("default config mismatch:\nexpected: %v\ngot: %v", exp, string(data))
	}

	conf, err := NewConfig(tmp, common.Address{}, prvkey)
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	if conf.Swap.Beneficiary.Hex() != orig.Swap.Beneficiary.Hex() {
		t.Fatalf("expected beneficiary from loaded config %v to match original %v", conf.Swap.Beneficiary.Hex(), orig.Swap.Beneficiary.Hex())
	}
}
swarm/api/filesystem.go (new file, 283 lines)
@@ -0,0 +1,283 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bufio"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const maxParallelFiles = 5

type FileSystem struct {
	api *Api
}

func NewFileSystem(api *Api) *FileSystem {
	return &FileSystem{api}
}

// Upload replicates a local directory as a manifest file and uploads it
// using dpa store
// TODO: localpath should point to a manifest
func (self *FileSystem) Upload(lpath, index string) (string, error) {
	var list []*manifestTrieEntry
	localpath, err := filepath.Abs(filepath.Clean(lpath))
	if err != nil {
		return "", err
	}

	f, err := os.Open(localpath)
	if err != nil {
		return "", err
	}
	stat, err := f.Stat()
	f.Close()
	if err != nil {
		return "", err
	}

	var start int
	if stat.IsDir() {
		start = len(localpath)
		glog.V(logger.Debug).Infof("uploading '%s'", localpath)
		err = filepath.Walk(localpath, func(path string, info os.FileInfo, err error) error {
			if (err == nil) && !info.IsDir() {
				if len(path) <= start {
					return fmt.Errorf("Path is too short")
				}
				if path[:start] != localpath {
					return fmt.Errorf("Path prefix of '%s' does not match localpath '%s'", path, localpath)
				}
				entry := &manifestTrieEntry{
					Path: filepath.ToSlash(path),
				}
				list = append(list, entry)
			}
			return err
		})
		if err != nil {
			return "", err
		}
	} else {
		dir := filepath.Dir(localpath)
		start = len(dir)
		if len(localpath) <= start {
			return "", fmt.Errorf("Path is too short")
		}
		if localpath[:start] != dir {
			return "", fmt.Errorf("Path prefix of '%s' does not match dir '%s'", localpath, dir)
		}
		entry := &manifestTrieEntry{
			Path: filepath.ToSlash(localpath),
		}
		list = append(list, entry)
	}

	cnt := len(list)
	errors := make([]error, cnt)
	done := make(chan bool, maxParallelFiles)
	dcnt := 0
	awg := &sync.WaitGroup{}

	for i, entry := range list {
		if i >= dcnt+maxParallelFiles {
			<-done
			dcnt++
		}
		awg.Add(1)
		go func(i int, entry *manifestTrieEntry, done chan bool) {
			defer awg.Done()
			f, err := os.Open(entry.Path)
			if err == nil {
				stat, _ := f.Stat()
				var hash storage.Key
				wg := &sync.WaitGroup{}
				hash, err = self.api.dpa.Store(f, stat.Size(), wg, nil)
				if hash != nil {
					list[i].Hash = hash.String()
				}
				wg.Wait()
				if err == nil {
					first512 := make([]byte, 512)
					fread, _ := f.ReadAt(first512, 0)
					if fread > 0 {
						mimeType := http.DetectContentType(first512[:fread])
						if filepath.Ext(entry.Path) == ".css" {
							mimeType = "text/css"
						}
						list[i].ContentType = mimeType
					}
				}
				f.Close()
			}
			errors[i] = err
			done <- true
		}(i, entry, done)
	}
	for dcnt < cnt {
		<-done
		dcnt++
	}

	trie := &manifestTrie{
		dpa: self.api.dpa,
	}
	quitC := make(chan bool)
	for i, entry := range list {
		if errors[i] != nil {
			return "", errors[i]
		}
		entry.Path = RegularSlashes(entry.Path[start:])
		if entry.Path == index {
			ientry := &manifestTrieEntry{
				Path:        "",
				Hash:        entry.Hash,
				ContentType: entry.ContentType,
			}
			trie.addEntry(ientry, quitC)
		}
		trie.addEntry(entry, quitC)
	}

	err2 := trie.recalcAndStore()
	var hs string
	if err2 == nil {
		hs = trie.hash.String()
	}
	awg.Wait()
	return hs, err2
}

// Download replicates the manifest path structure on the local filesystem
// under localpath
func (self *FileSystem) Download(bzzpath, localpath string) error {
	lpath, err := filepath.Abs(filepath.Clean(localpath))
	if err != nil {
		return err
	}
	err = os.MkdirAll(lpath, os.ModePerm)
	if err != nil {
		return err
	}

	// resolve host and port
	key, _, path, err := self.api.parseAndResolve(bzzpath, true)
	if err != nil {
		return err
	}

	if len(path) > 0 {
		path += "/"
	}

	quitC := make(chan bool)
	trie, err := loadManifest(self.api.dpa, key, quitC)
	if err != nil {
		glog.V(logger.Warn).Infof("fs.Download: loadManifestTrie error: %v", err)
		return err
	}

	type downloadListEntry struct {
		key  storage.Key
		path string
	}

	var list []*downloadListEntry
	var mde error

	prevPath := lpath
	err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
		glog.V(logger.Detail).Infof("fs.Download: %#v", entry)

		key = common.Hex2Bytes(entry.Hash)
		path := lpath + "/" + suffix
		dir := filepath.Dir(path)
		if dir != prevPath {
			mde = os.MkdirAll(dir, os.ModePerm)
			prevPath = dir
		}
		if (mde == nil) && (path != dir+"/") {
			list = append(list, &downloadListEntry{key: key, path: path})
		}
	})
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	errC := make(chan error)
	done := make(chan bool, maxParallelFiles)
	for i, entry := range list {
		select {
		case done <- true:
			wg.Add(1)
		case <-quitC:
			return fmt.Errorf("aborted")
		}
		go func(i int, entry *downloadListEntry) {
			defer wg.Done()
			f, err := os.Create(entry.path) // TODO: path separators
			if err == nil {
				reader := self.api.dpa.Retrieve(entry.key)
				writer := bufio.NewWriter(f)
				var size int64
				size, err = reader.Size(quitC)
				if err == nil {
					_, err = io.CopyN(writer, reader, size) // TODO: handle errors
					err2 := writer.Flush()
					if err == nil {
						err = err2
					}
					err2 = f.Close()
					if err == nil {
						err = err2
					}
				}
			}
			if err != nil {
				select {
				case errC <- err:
				case <-quitC:
				}
				return
			}
			<-done
		}(i, entry)
	}
	go func() {
		wg.Wait()
		close(errC)
	}()
	select {
	case err = <-errC:
		return err
	case <-quitC:
		return fmt.Errorf("aborted")
	}
}
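A minimal sketch (local paths are hypothetical) of the round trip the tests below exercise: upload a local directory, then mirror a bzz path back onto disk. fs is assumed to wrap a started Api, as in the testFileSystem helper.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func roundTrip(fs *api.FileSystem) error {
	// Upload ./site and make index.html the root document of the manifest.
	bzzhash, err := fs.Upload("./site", "index.html")
	if err != nil {
		return err
	}
	fmt.Println("manifest root:", bzzhash)

	// Re-create the uploaded tree under ./mirror from the manifest.
	return fs.Download(bzzhash, "./mirror")
}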
swarm/api/filesystem_test.go (new file, 187 lines)
@@ -0,0 +1,187 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bytes"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"testing"
)

var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")

func testFileSystem(t *testing.T, f func(*FileSystem)) {
	testApi(t, func(api *Api) {
		f(NewFileSystem(api))
	})
}

func readPath(t *testing.T, parts ...string) string {
	file := filepath.Join(parts...)
	content, err := ioutil.ReadFile(file)

	if err != nil {
		t.Fatalf("unexpected error reading '%v': %v", file, err)
	}
	return string(content)
}

func TestApiDirUpload0(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash+"/index.html")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		content = readPath(t, "testdata", "test0", "index.css")
		resp = testGet(t, api, bzzhash+"/index.css")
		exp = expResponse(content, "text/css", 0)
		checkResponse(t, resp, exp)

		_, _, _, err = api.Get(bzzhash, true)
		if err == nil {
			t.Fatalf("expected error, got none")
		}

		downloadDir := filepath.Join(testDownloadDir, "test0")
		defer os.RemoveAll(downloadDir)
		err = fs.Download(bzzhash, downloadDir)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		newbzzhash, err := fs.Upload(downloadDir, "")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if bzzhash != newbzzhash {
			t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash)
		}
	})
}

func TestApiDirUploadModify(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		bzzhash, err = api.Modify(bzzhash+"/index.html", "", "", true)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		index, err := ioutil.ReadFile(filepath.Join("testdata", "test0", "index.html"))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		wg := &sync.WaitGroup{}
		hash, err := api.Store(bytes.NewReader(index), int64(len(index)), wg)
		wg.Wait()
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		bzzhash, err = api.Modify(bzzhash+"/index2.html", hash.Hex(), "text/html; charset=utf-8", true)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		bzzhash, err = api.Modify(bzzhash+"/img/logo.png", hash.Hex(), "text/html; charset=utf-8", true)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash+"/index2.html")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		resp = testGet(t, api, bzzhash+"/img/logo.png")
		exp = expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		content = readPath(t, "testdata", "test0", "index.css")
		resp = testGet(t, api, bzzhash+"/index.css")
		exp = expResponse(content, "text/css", 0)
		checkResponse(t, resp, exp)

		_, _, _, err = api.Get(bzzhash, true)
		if err == nil {
			t.Errorf("expected error, got none")
		}
	})
}

func TestApiDirUploadWithRootFile(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash)
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)
	})
}

func TestApiFileUpload(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash+"/index.html")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)
	})
}

func TestApiFileUploadWithRootFile(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash)
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)
	})
}
swarm/api/http/roundtripper.go (new file, 69 lines)
@@ -0,0 +1,69 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package http

import (
	"fmt"
	"net/http"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

/*
http roundtripper to register for the bzz url scheme,
see https://github.com/ethereum/go-ethereum/issues/2040
Usage:

import (
	"github.com/ethereum/go-ethereum/common/httpclient"
	"github.com/ethereum/go-ethereum/swarm/api/http"
)
client := httpclient.New()
// for (private) swarm proxy running locally
client.RegisterScheme("bzz", &http.RoundTripper{Port: port})
client.RegisterScheme("bzzi", &http.RoundTripper{Port: port})
client.RegisterScheme("bzzr", &http.RoundTripper{Port: port})

The port you give the RoundTripper is the port the swarm proxy is listening on.
If Host is left empty, localhost is assumed.

Using a public gateway, the above few lines give you the leanest bzz-scheme
aware read-only http client. You really only ever need this if you need
go-native swarm access to bzz addresses, e.g.
github.com/ethereum/go-ethereum/common/natspec
*/

type RoundTripper struct {
	Host string
	Port string
}

func (self *RoundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	host := self.Host
	if len(host) == 0 {
		host = "localhost"
	}
	url := fmt.Sprintf("http://%s:%s/%s:/%s/%s", host, self.Port, req.Proto, req.URL.Host, req.URL.Path)
	glog.V(logger.Info).Infof("roundtripper: proxying request '%s' to '%s'", req.RequestURI, url)
	reqProxy, err := http.NewRequest(req.Method, url, req.Body)
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(reqProxy)
}
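A minimal runnable sketch, following the test below rather than the doc comment (the test registers via RegisterProtocol): wire the RoundTripper into an httpclient so bzz:// URLs are proxied to a local swarm HTTP proxy, assumed here to listen on port 8500. The hostname is hypothetical.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/common/httpclient"
	bzzhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

func main() {
	client := httpclient.New("/")
	// Route the bzz scheme through the local swarm proxy on port 8500.
	client.RegisterProtocol("bzz", &bzzhttp.RoundTripper{Port: "8500"})

	resp, err := client.Client().Get("bzz://theswarm.test/index.html")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%d bytes fetched via the bzz scheme\n", len(body))
}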
swarm/api/http/roundtripper_test.go (new file, 68 lines)
@@ -0,0 +1,68 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package http

import (
	"io/ioutil"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common/httpclient"
)

const port = "3222"

func TestRoundTripper(t *testing.T) {
	serveMux := http.NewServeMux()
	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			w.Header().Set("Content-Type", "text/plain")
			http.ServeContent(w, r, "", time.Unix(0, 0), strings.NewReader(r.RequestURI))
		} else {
			http.Error(w, "Method "+r.Method+" is not supported.", http.StatusMethodNotAllowed)
		}
	})
	go http.ListenAndServe(":"+port, serveMux)

	rt := &RoundTripper{Port: port}
	client := httpclient.New("/")
	client.RegisterProtocol("bzz", rt)

	resp, err := client.Client().Get("bzz://test.com/path")
	if err != nil {
		t.Errorf("expected no error, got %v", err)
		return
	}

	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Errorf("expected no error, got %v", err)
		return
	}
	if string(content) != "/HTTP/1.1:/test.com/path" {
		t.Errorf("incorrect response from http server: expected '%v', got '%v'", "/HTTP/1.1:/test.com/path", string(content))
	}
}
286
swarm/api/http/server.go
Normal file
286
swarm/api/http/server.go
Normal file
@ -0,0 +1,286 @@
|
|||||||
|
// Copyright 2016 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
/*
|
||||||
|
A simple http server interface to Swarm
|
||||||
|
*/
|
||||||
|
package http
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rawType = "application/octet-stream"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// accepted protocols: bzz (traditional), bzzi (immutable) and bzzr (raw)
|
||||||
|
bzzPrefix = regexp.MustCompile("^/+bzz[ir]?:/+")
|
||||||
|
trailingSlashes = regexp.MustCompile("/+$")
|
||||||
|
rootDocumentUri = regexp.MustCompile("^/+bzz[i]?:/+[^/]+$")
|
||||||
|
// forever = func() time.Time { return time.Unix(0, 0) }
|
||||||
|
forever = time.Now
|
||||||
|
)

type sequentialReader struct {
	reader io.Reader
	pos    int64
	ahead  map[int64](chan bool)
	lock   sync.Mutex
}

// browser API for registering bzz url scheme handlers:
// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
// electron (chromium) api for registering bzz url scheme handlers:
// https://github.com/atom/electron/blob/master/docs/api/protocol.md

// starts up the http server
func StartHttpServer(api *api.Api, port string) {
	serveMux := http.NewServeMux()
	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		handler(w, r, api)
	})
	go http.ListenAndServe(":"+port, serveMux)
	glog.V(logger.Info).Infof("Swarm HTTP proxy started on localhost:%s", port)
}

func handler(w http.ResponseWriter, r *http.Request, a *api.Api) {
	requestURL := r.URL
	// This is wrong
	// if requestURL.Host == "" {
	// 	var err error
	// 	requestURL, err = url.Parse(r.Referer() + requestURL.String())
	// 	if err != nil {
	// 		http.Error(w, err.Error(), http.StatusBadRequest)
	// 		return
	// 	}
	// }
	glog.V(logger.Debug).Infof("HTTP %s request URL: '%s', Host: '%s', Path: '%s', Referer: '%s', Accept: '%s'", r.Method, r.RequestURI, requestURL.Host, requestURL.Path, r.Referer(), r.Header.Get("Accept"))
	uri := requestURL.Path
	var raw, nameresolver bool
	var proto string

	// HTTP-based URL protocol handler
	glog.V(logger.Debug).Infof("BZZ request URI: '%s'", uri)

	path := bzzPrefix.ReplaceAllStringFunc(uri, func(p string) string {
		proto = p
		return ""
	})

	// protocol identification (ugly)
	if proto == "" {
		if glog.V(logger.Error) {
			glog.Errorf(
				"[BZZ] Swarm: Protocol error in request `%s`.",
				uri,
			)
			http.Error(w, "BZZ protocol error", http.StatusBadRequest)
			return
		}
	}
	if len(proto) > 4 {
		raw = proto[1:5] == "bzzr"
		nameresolver = proto[1:5] != "bzzi"
	}

	glog.V(logger.Debug).Infof(
		"[BZZ] Swarm: %s request over protocol %s '%s' received.",
		r.Method, proto, path,
	)

	switch {
	case r.Method == "POST" || r.Method == "PUT":
		key, err := a.Store(r.Body, r.ContentLength, nil)
		if err == nil {
			glog.V(logger.Debug).Infof("Content for %v stored", key.Log())
		} else {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		if r.Method == "POST" {
			if raw {
				w.Header().Set("Content-Type", "text/plain")
				http.ServeContent(w, r, "", time.Now(), bytes.NewReader([]byte(common.Bytes2Hex(key))))
			} else {
				http.Error(w, "No POST to "+uri+" allowed.", http.StatusBadRequest)
				return
			}
		} else {
			// PUT
			if raw {
				http.Error(w, "No PUT to /raw allowed.", http.StatusBadRequest)
				return
			} else {
				path = api.RegularSlashes(path)
				mime := r.Header.Get("Content-Type")
				// TODO proper root hash separation
				glog.V(logger.Debug).Infof("Modify '%s' to store %v as '%s'.", path, key.Log(), mime)
				newKey, err := a.Modify(path, common.Bytes2Hex(key), mime, nameresolver)
				if err == nil {
					glog.V(logger.Debug).Infof("Swarm replaced manifest by '%s'", newKey)
					w.Header().Set("Content-Type", "text/plain")
					http.ServeContent(w, r, "", time.Now(), bytes.NewReader([]byte(newKey)))
				} else {
					http.Error(w, "PUT to "+path+" failed.", http.StatusBadRequest)
					return
				}
			}
		}
	case r.Method == "DELETE":
		if raw {
			http.Error(w, "No DELETE to /raw allowed.", http.StatusBadRequest)
			return
		} else {
			path = api.RegularSlashes(path)
			glog.V(logger.Debug).Infof("Delete '%s'.", path)
			newKey, err := a.Modify(path, "", "", nameresolver)
			if err == nil {
				glog.V(logger.Debug).Infof("Swarm replaced manifest by '%s'", newKey)
				w.Header().Set("Content-Type", "text/plain")
				http.ServeContent(w, r, "", time.Now(), bytes.NewReader([]byte(newKey)))
			} else {
				http.Error(w, "DELETE to "+path+" failed.", http.StatusBadRequest)
				return
			}
		}
	case r.Method == "GET" || r.Method == "HEAD":
		path = trailingSlashes.ReplaceAllString(path, "")
		if raw {
			// resolving host
			key, err := a.Resolve(path, nameresolver)
			if err != nil {
				glog.V(logger.Error).Infof("%v", err)
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}

			// retrieving content
			reader := a.Retrieve(key)
			quitC := make(chan bool)
			size, err := reader.Size(quitC)
			glog.V(logger.Debug).Infof("Reading %d bytes.", size)

			// setting mime type
			qv := requestURL.Query()
			mimeType := qv.Get("content_type")
			if mimeType == "" {
				mimeType = rawType
			}

			w.Header().Set("Content-Type", mimeType)
			http.ServeContent(w, r, uri, forever(), reader)
			glog.V(logger.Debug).Infof("Serve raw content '%s' (%d bytes) as '%s'", uri, size, mimeType)

			// retrieve path via manifest
		} else {
			glog.V(logger.Debug).Infof("Structured GET request '%s' received.", uri)
			// add trailing slash, if missing
			if rootDocumentUri.MatchString(uri) {
				http.Redirect(w, r, path+"/", http.StatusFound)
				return
			}
			reader, mimeType, status, err := a.Get(path, nameresolver)
			if err != nil {
				if _, ok := err.(api.ErrResolve); ok {
					glog.V(logger.Debug).Infof("%v", err)
					status = http.StatusBadRequest
				} else {
					glog.V(logger.Debug).Infof("error retrieving '%s': %v", uri, err)
					status = http.StatusNotFound
				}
				http.Error(w, err.Error(), status)
				return
			}
			// set mime type and status headers
			w.Header().Set("Content-Type", mimeType)
			if status > 0 {
				w.WriteHeader(status)
			} else {
				status = 200
			}
			quitC := make(chan bool)
			size, err := reader.Size(quitC)
			glog.V(logger.Debug).Infof("Served '%s' (%d bytes) as '%s' (status code: %v)", uri, size, mimeType, status)
			http.ServeContent(w, r, path, forever(), reader)
		}
	default:
		http.Error(w, "Method "+r.Method+" is not supported.", http.StatusMethodNotAllowed)
	}
}
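
A minimal client-side sketch of the POST/GET round trip this handler implements for the raw bzzr scheme (the port and payload are assumptions; use whatever port was passed to StartHttpServer):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// POST raw content under the bzzr scheme; the handler replies with the
	// hex-encoded swarm key of the stored content.
	res, err := http.Post("http://localhost:8500/bzzr:/", "application/octet-stream",
		strings.NewReader("hello swarm"))
	if err != nil {
		panic(err)
	}
	key, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Printf("stored under key %s\n", key)

	// GET it back, overriding the default application/octet-stream mime
	// type via the content_type query parameter.
	res, err = http.Get("http://localhost:8500/bzzr:/" + string(key) + "?content_type=text/plain")
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Println(string(body))
}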

func (self *sequentialReader) ReadAt(target []byte, off int64) (n int, err error) {
	self.lock.Lock()
	// assert self.pos <= off
	if self.pos > off {
		glog.V(logger.Error).Infof("non-sequential read attempted from sequentialReader; %d > %d",
			self.pos, off)
		panic("Non-sequential read attempt")
	}
	if self.pos != off {
		glog.V(logger.Debug).Infof("deferred read in POST at position %d, offset %d.",
			self.pos, off)
		wait := make(chan bool)
		self.ahead[off] = wait
		self.lock.Unlock()
		if <-wait {
			// failed read behind
			n = 0
			err = io.ErrUnexpectedEOF
			return
		}
		self.lock.Lock()
	}
	localPos := 0
	for localPos < len(target) {
		n, err = self.reader.Read(target[localPos:])
		localPos += n
		glog.V(logger.Debug).Infof("Read %d bytes into buffer size %d from POST, error %v.",
			n, len(target), err)
		if err != nil {
			glog.V(logger.Debug).Infof("POST stream's reading terminated with %v.", err)
			for i := range self.ahead {
				self.ahead[i] <- true
				delete(self.ahead, i)
			}
			self.lock.Unlock()
			return localPos, err
		}
		self.pos += int64(n)
	}
	wait := self.ahead[self.pos]
	if wait != nil {
		glog.V(logger.Debug).Infof("deferred read in POST at position %d triggered.",
			self.pos)
		delete(self.ahead, self.pos)
		close(wait)
	}
	self.lock.Unlock()
	return localPos, err
}

336	swarm/api/manifest.go	Normal file
@@ -0,0 +1,336 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	manifestType = "application/bzz-manifest+json"
)

type manifestTrie struct {
	dpa     *storage.DPA
	entries [257]*manifestTrieEntry // indexed by first character of path, entries[256] is the empty path entry
	hash    storage.Key             // if hash != nil, it is stored
}

type manifestJSON struct {
	Entries []*manifestTrieEntry `json:"entries"`
}

type manifestTrieEntry struct {
	Path        string `json:"path"`
	Hash        string `json:"hash"` // for manifest content type, empty until subtrie is evaluated
	ContentType string `json:"contentType"`
	Status      int    `json:"status"`
	subtrie     *manifestTrie
}

func loadManifest(dpa *storage.DPA, hash storage.Key, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand

	glog.V(logger.Detail).Infof("manifest lookup key: '%v'.", hash.Log())
	// retrieve manifest via DPA
	manifestReader := dpa.Retrieve(hash)
	return readManifest(manifestReader, hash, dpa, quitC)
}

func readManifest(manifestReader storage.LazySectionReader, hash storage.Key, dpa *storage.DPA, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand

	// TODO check size for oversized manifests
	size, err := manifestReader.Size(quitC)
	manifestData := make([]byte, size)
	read, err := manifestReader.Read(manifestData)
	if int64(read) < size {
		glog.V(logger.Detail).Infof("Manifest %v not found.", hash.Log())
		if err == nil {
			err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
		}
		return
	}

	glog.V(logger.Detail).Infof("Manifest %v retrieved", hash.Log())
	man := manifestJSON{}
	err = json.Unmarshal(manifestData, &man)
	if err != nil {
		err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err)
		glog.V(logger.Detail).Infof("%v", err)
		return
	}

	glog.V(logger.Detail).Infof("Manifest %v has %d entries.", hash.Log(), len(man.Entries))

	trie = &manifestTrie{
		dpa: dpa,
	}
	for _, entry := range man.Entries {
		trie.addEntry(entry, quitC)
	}
	return
}

func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) {
	self.hash = nil // trie modified, hash needs to be re-calculated on demand

	if len(entry.Path) == 0 {
		self.entries[256] = entry
		return
	}

	b := byte(entry.Path[0])
	if (self.entries[b] == nil) || (self.entries[b].Path == entry.Path) {
		self.entries[b] = entry
		return
	}

	oldentry := self.entries[b]
	cpl := 0
	for (len(entry.Path) > cpl) && (len(oldentry.Path) > cpl) && (entry.Path[cpl] == oldentry.Path[cpl]) {
		cpl++
	}

	if (oldentry.ContentType == manifestType) && (cpl == len(oldentry.Path)) {
		if self.loadSubTrie(oldentry, quitC) != nil {
			return
		}
		entry.Path = entry.Path[cpl:]
		oldentry.subtrie.addEntry(entry, quitC)
		oldentry.Hash = ""
		return
	}

	commonPrefix := entry.Path[:cpl]

	subtrie := &manifestTrie{
		dpa: self.dpa,
	}
	entry.Path = entry.Path[cpl:]
	oldentry.Path = oldentry.Path[cpl:]
	subtrie.addEntry(entry, quitC)
	subtrie.addEntry(oldentry, quitC)

	self.entries[b] = &manifestTrieEntry{
		Path:        commonPrefix,
		Hash:        "",
		ContentType: manifestType,
		subtrie:     subtrie,
	}
}

func (self *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) {
	for _, e := range self.entries {
		if e != nil {
			cnt++
			entry = e
		}
	}
	return
}

func (self *manifestTrie) deleteEntry(path string, quitC chan bool) {
	self.hash = nil // trie modified, hash needs to be re-calculated on demand

	if len(path) == 0 {
		self.entries[256] = nil
		return
	}

	b := byte(path[0])
	entry := self.entries[b]
	if entry == nil {
		return
	}
	if entry.Path == path {
		self.entries[b] = nil
		return
	}

	epl := len(entry.Path)
	if (entry.ContentType == manifestType) && (len(path) >= epl) && (path[:epl] == entry.Path) {
		if self.loadSubTrie(entry, quitC) != nil {
			return
		}
		entry.subtrie.deleteEntry(path[epl:], quitC)
		entry.Hash = ""
		// remove subtree if it has less than 2 elements
		cnt, lastentry := entry.subtrie.getCountLast()
		if cnt < 2 {
			if lastentry != nil {
				lastentry.Path = entry.Path + lastentry.Path
			}
			self.entries[b] = lastentry
		}
	}
}

func (self *manifestTrie) recalcAndStore() error {
	if self.hash != nil {
		return nil
	}

	var buffer bytes.Buffer
	buffer.WriteString(`{"entries":[`)

	list := &manifestJSON{}
	for _, entry := range self.entries {
		if entry != nil {
			if entry.Hash == "" { // TODO: parallelize
				err := entry.subtrie.recalcAndStore()
				if err != nil {
					return err
				}
				entry.Hash = entry.subtrie.hash.String()
			}
			list.Entries = append(list.Entries, entry)
		}
	}

	manifest, err := json.Marshal(list)
	if err != nil {
		return err
	}

	sr := bytes.NewReader(manifest)
	wg := &sync.WaitGroup{}
	key, err2 := self.dpa.Store(sr, int64(len(manifest)), wg, nil)
	wg.Wait()
	self.hash = key
	return err2
}

func (self *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) {
	if entry.subtrie == nil {
		hash := common.Hex2Bytes(entry.Hash)
		entry.subtrie, err = loadManifest(self.dpa, hash, quitC)
		entry.Hash = "" // might not match, should be recalculated
	}
	return
}

func (self *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) error {
	plen := len(prefix)
	var start, stop int
	if plen == 0 {
		start = 0
		stop = 256
	} else {
		start = int(prefix[0])
		stop = start
	}

	for i := start; i <= stop; i++ {
		select {
		case <-quitC:
			return fmt.Errorf("aborted")
		default:
		}
		entry := self.entries[i]
		if entry != nil {
			epl := len(entry.Path)
			if entry.ContentType == manifestType {
				l := plen
				if epl < l {
					l = epl
				}
				if prefix[:l] == entry.Path[:l] {
					err := self.loadSubTrie(entry, quitC)
					if err != nil {
						return err
					}
					err = entry.subtrie.listWithPrefixInt(prefix[l:], rp+entry.Path[l:], quitC, cb)
					if err != nil {
						return err
					}
				}
			} else {
				if (epl >= plen) && (prefix == entry.Path[:plen]) {
					cb(entry, rp+entry.Path[plen:])
				}
			}
		}
	}
	return nil
}

func (self *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) (err error) {
	return self.listWithPrefixInt(prefix, "", quitC, cb)
}

func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) {

	glog.V(logger.Detail).Infof("findPrefixOf(%s)", path)

	if len(path) == 0 {
		return self.entries[256], 0
	}

	b := byte(path[0])
	entry = self.entries[b]
	if entry == nil {
		return self.entries[256], 0
	}
	epl := len(entry.Path)
	glog.V(logger.Detail).Infof("path = %v entry.Path = %v epl = %v", path, entry.Path, epl)
	if (len(path) >= epl) && (path[:epl] == entry.Path) {
		glog.V(logger.Detail).Infof("entry.ContentType = %v", entry.ContentType)
		if entry.ContentType == manifestType {
			if self.loadSubTrie(entry, quitC) != nil {
				return nil, 0
			}
			entry, pos = entry.subtrie.findPrefixOf(path[epl:], quitC)
			if entry != nil {
				pos += epl
			}
		} else {
			pos = epl
		}
	} else {
		entry = nil
	}
	return
}

// file system manifest always contains regularized paths
// no leading or trailing slashes, only single slashes inside
func RegularSlashes(path string) (res string) {
	for i := 0; i < len(path); i++ {
		if (path[i] != '/') || ((i > 0) && (path[i-1] != '/')) {
			res = res + path[i:i+1]
		}
	}
	if (len(res) > 0) && (res[len(res)-1] == '/') {
		res = res[:len(res)-1]
	}
	return
}
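
A short usage sketch of the regularization (hypothetical inputs, not part of the diff): leading and repeated slashes collapse and the trailing slash is dropped:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	fmt.Println(api.RegularSlashes("//a///b/c//")) // a/b/c
	fmt.Println(api.RegularSlashes("a/b"))         // a/b (already regular)
}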

func (self *manifestTrie) getEntry(spath string) (entry *manifestTrieEntry, fullpath string) {
	path := RegularSlashes(spath)
	var pos int
	quitC := make(chan bool)
	entry, pos = self.findPrefixOf(path, quitC)
	return entry, path[:pos]
}

80	swarm/api/manifest_test.go	Normal file
@@ -0,0 +1,80 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	// "encoding/json"
	"fmt"
	"io"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func manifest(paths ...string) (manifestReader storage.LazySectionReader) {
	var entries []string
	for _, path := range paths {
		entry := fmt.Sprintf(`{"path":"%s"}`, path)
		entries = append(entries, entry)
	}
	manifest := fmt.Sprintf(`{"entries":[%s]}`, strings.Join(entries, ","))
	return &storage.LazyTestSectionReader{
		SectionReader: io.NewSectionReader(strings.NewReader(manifest), 0, int64(len(manifest))),
	}
}

func testGetEntry(t *testing.T, path, match string, paths ...string) *manifestTrie {
	quitC := make(chan bool)
	trie, err := readManifest(manifest(paths...), nil, nil, quitC)
	if err != nil {
		t.Errorf("unexpected error making manifest: %v", err)
	}
	checkEntry(t, path, match, trie)
	return trie
}

func checkEntry(t *testing.T, path, match string, trie *manifestTrie) {
	entry, fullpath := trie.getEntry(path)
	if match == "-" && entry != nil {
		t.Errorf("expected no match for '%s', got '%s'", path, fullpath)
	} else if entry == nil {
		if match != "-" {
			t.Errorf("expected entry '%s' to match '%s', got no match", match, path)
		}
	} else if fullpath != match {
		t.Errorf("incorrect entry retrieved for '%s'. expected path '%v', got '%s'", path, match, fullpath)
	}
}

func TestGetEntry(t *testing.T) {
	// file system manifest always contains regularized paths
	testGetEntry(t, "a", "a", "a")
	testGetEntry(t, "b", "-", "a")
	testGetEntry(t, "/a//", "a", "a")
	// fallback
	testGetEntry(t, "/a", "", "")
	testGetEntry(t, "/a/b", "a/b", "a/b")
	// longest/deepest match
	testGetEntry(t, "a/b", "-", "a", "a/ba", "a/b/c")
	testGetEntry(t, "a/b", "a/b", "a", "a/b", "a/bb", "a/b/c")
	testGetEntry(t, "//a//b//", "a/b", "a", "a/b", "a/bb", "a/b/c")
}

func TestDeleteEntry(t *testing.T) {

}

70	swarm/api/storage.go	Normal file
@@ -0,0 +1,70 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

type Response struct {
	MimeType string
	Status   int
	Size     int64
	// Content []byte
	Content string
}

// implements a service
type Storage struct {
	api *Api
}

func NewStorage(api *Api) *Storage {
	return &Storage{api}
}

// Put uploads the content to the swarm with a simple manifest specifying
// its content type
func (self *Storage) Put(content, contentType string) (string, error) {
	return self.api.Put(content, contentType)
}

// Get retrieves the content from bzzpath and reads the response in full.
// It returns the Response object, which serialises the
// response body as the value of the Content field.
// NOTE: if error is non-nil, the Response may still have partial content,
// the actual size of which is given in len(resp.Content), while the expected
// size is resp.Size
func (self *Storage) Get(bzzpath string) (*Response, error) {
	reader, mimeType, status, err := self.api.Get(bzzpath, true)
	if err != nil {
		return nil, err
	}
	quitC := make(chan bool)
	expsize, err := reader.Size(quitC)
	if err != nil {
		return nil, err
	}
	body := make([]byte, expsize)
	size, err := reader.Read(body)
	if int64(size) == expsize {
		err = nil
	}
	return &Response{mimeType, status, expsize, string(body[:size])}, err
}

// Modify takes the manifest trie rooted in rootHash and merges onto it,
// creating an entry with contentType (mime)
func (self *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
	return self.api.Modify(rootHash+"/"+path, contentHash, contentType, true)
}
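
A hedged sketch of how a caller might consume Storage.Get, including the partial-content case the NOTE above describes (the fetch helper and the bzz path are made up for illustration):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/api"
)

// fetch shows both outcomes of Storage.Get: a clean read, and a read that
// was cut short but still returned partial content.
func fetch(store *api.Storage, bzzpath string) {
	resp, err := store.Get(bzzpath)
	if err != nil {
		if resp != nil {
			// the read itself was cut short; Content holds what arrived
			fmt.Printf("partial: got %d of %d bytes\n", len(resp.Content), resp.Size)
		}
		return
	}
	fmt.Printf("%s (%d bytes, status %d)\n", resp.MimeType, resp.Size, resp.Status)
}

func main() {} // wiring up a real *api.Storage needs a running DPA; omitted here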

49	swarm/api/storage_test.go	Normal file
@@ -0,0 +1,49 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"testing"
)

func testStorage(t *testing.T, f func(*Storage)) {
	testApi(t, func(api *Api) {
		f(NewStorage(api))
	})
}

func TestStoragePutGet(t *testing.T) {
	testStorage(t, func(api *Storage) {
		content := "hello"
		exp := expResponse(content, "text/plain", 0)
		// exp := expResponse([]byte(content), "text/plain", 0)
		bzzhash, err := api.Put(content, exp.MimeType)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// to check put against the Api#Get
		resp0 := testGet(t, api.api, bzzhash)
		checkResponse(t, resp0, exp)

		// check storage#Get
		resp, err := api.Get(bzzhash)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		checkResponse(t, &testResponse{nil, resp}, exp)
	})
}

46	swarm/api/testapi.go	Normal file
@@ -0,0 +1,46 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"github.com/ethereum/go-ethereum/swarm/network"
)

type Control struct {
	api  *Api
	hive *network.Hive
}

func NewControl(api *Api, hive *network.Hive) *Control {
	return &Control{api, hive}
}

func (self *Control) BlockNetworkRead(on bool) {
	self.hive.BlockNetworkRead(on)
}

func (self *Control) SyncEnabled(on bool) {
	self.hive.SyncEnabled(on)
}

func (self *Control) SwapEnabled(on bool) {
	self.hive.SwapEnabled(on)
}

func (self *Control) Hive() string {
	return self.hive.String()
}

BIN	swarm/api/testdata/test0/img/logo.png	vendored	Normal file
Binary file not shown. Size: 18 KiB

9	swarm/api/testdata/test0/index.css	vendored	Normal file
@@ -0,0 +1,9 @@
h1 {
	color: black;
	font-size: 12px;
	background-color: orange;
	border: 4px solid black;
}
body {
	background-color: orange
}

10	swarm/api/testdata/test0/index.html	vendored	Normal file
@@ -0,0 +1,10 @@
<!DOCTYPE html>
<html>
<head>
	<link rel="stylesheet" href="index.css">
</head>
<body>
	<h1>Swarm Test</h1>
	<img src="img/logo.gif" align="center", alt="Ethereum logo">
</body>
</html>

211	swarm/network/depo.go	Normal file
@@ -0,0 +1,211 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"bytes"
	"encoding/binary"
	"time"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// Handler for storage/retrieval related protocol requests
// implements the StorageHandler interface used by the bzz protocol
type Depo struct {
	hashfunc   storage.Hasher
	localStore storage.ChunkStore
	netStore   storage.ChunkStore
}

func NewDepo(hash storage.Hasher, localStore, remoteStore storage.ChunkStore) *Depo {
	return &Depo{
		hashfunc:   hash,
		localStore: localStore,
		netStore:   remoteStore, // entrypoint internal
	}
}

// Handles UnsyncedKeysMsg after msg decoding - unsynced hashes up to sync state
// * the remote sync state is just stored and handled in protocol
// * filters through the new syncRequests and sends the ones missing
//   back immediately as a deliveryRequest message
// * empty message just pings back for more (is this needed?)
// * strict signed sync states may be needed.
func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error {
	unsynced := req.Unsynced
	var missing []*syncRequest
	var chunk *storage.Chunk
	var err error
	for _, req := range unsynced {
		// skip keys that are found,
		chunk, err = self.localStore.Get(storage.Key(req.Key[:]))
		if err != nil || chunk.SData == nil {
			missing = append(missing, req)
		}
	}
	glog.V(logger.Debug).Infof("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State)
	glog.V(logger.Detail).Infof("Depo.HandleUnsyncedKeysMsg: received %v", unsynced)
	// send delivery request with missing keys
	err = p.deliveryRequest(missing)
	if err != nil {
		return err
	}
	// set peers state to persist
	p.syncState = req.State
	return nil
}

// Handles deliveryRequestMsg
// * serves actual chunks asked by the remote peer
//   by pushing to the delivery queue (sync db) of the correct priority
//   (remote peer is free to reprioritize)
// * the message implies the remote peer wants more, so a new outgoing
//   unsynced keys message is fired
func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error {
	deliver := req.Deliver
	// queue the actual delivery of a chunk
	glog.V(logger.Detail).Infof("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver)
	for _, sreq := range deliver {
		// TODO: look up in cache here or in deliveries
		// priorities are taken from the message so the remote party can
		// reprioritise at their leisure
		// r = self.pullCached(sreq.Key) // pulls and deletes from cache
		Push(p, sreq.Key, sreq.Priority)
	}

	// sends it out as unsyncedKeysMsg
	p.syncer.sendUnsyncedKeys()
	return nil
}

// the entrypoint for store requests coming from the bzz wire protocol
// if key found locally, return. otherwise
// remote is untrusted, so hash is verified and chunk passed on to NetStore
func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
	req.from = p
	chunk, err := self.localStore.Get(req.Key)
	switch {
	case err != nil:
		glog.V(logger.Detail).Infof("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key)
		// not found in memory cache, ie., a genuine store request
		// create chunk
		chunk = storage.NewChunk(req.Key, nil)

	case chunk.SData == nil:
		// found chunk in memory store, needs the data, validate now
		hasher := self.hashfunc()
		hasher.Write(req.SData)
		if !bytes.Equal(hasher.Sum(nil), req.Key) {
			// data does not validate, ignore
			// TODO: peer should be penalised/dropped?
			glog.V(logger.Warn).Infof("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req)
			return
		}
		glog.V(logger.Detail).Infof("Depo.HandleStoreRequest: %v. request entry found", req)

	default:
		// data is found, store request ignored
		// this should update access count?
		glog.V(logger.Detail).Infof("Depo.HandleStoreRequest: %v found locally. ignore.", req)
		return
	}

	// update chunk with size and data
	chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size + at least one byte of data)
	chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8]))
	glog.V(logger.Detail).Infof("delivery of %p from %v", chunk, p)
	chunk.Source = p
	self.netStore.Put(chunk)
}
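
For reference, a sketch of the SData layout this handler relies on: an 8-byte little-endian size prefix followed by the payload, with the chunk hash computed over the whole of SData. The values below are made up; the assumption here is a simple leaf chunk whose prefix is just the payload length:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := []byte("hello")
	// SData = 8-byte little-endian size followed by at least one byte of data,
	// hence the minimum length of 9 the protocol enforces.
	sdata := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint64(sdata[:8], uint64(len(payload)))
	copy(sdata[8:], payload)

	// what HandleStoreRequestMsg extracts as the chunk size
	size := int64(binary.LittleEndian.Uint64(sdata[0:8]))
	fmt.Println(size, len(sdata)) // 5 13
}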

// entrypoint for retrieve requests coming from the bzz wire protocol
// checks swap balance - return if peer has no credit
func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) {
	req.from = p
	// swap - record credit for 1 request
	// note that we only charge for actual request searches
	var err error
	if p.swap != nil {
		err = p.swap.Add(1)
	}
	if err != nil {
		glog.V(logger.Warn).Infof("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err)
		return
	}

	// call storage.NetStore#Get which
	// blocks until local retrieval finished
	// launches cloud retrieval
	chunk, _ := self.netStore.Get(req.Key)
	req = self.strategyUpdateRequest(chunk.Req, req)
	// check if we can immediately deliver
	if chunk.SData != nil {
		glog.V(logger.Detail).Infof("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log())

		if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size {
			sreq := &storeRequestMsgData{
				Id:             req.Id,
				Key:            chunk.Key,
				SData:          chunk.SData,
				requestTimeout: req.timeout,
			}
			p.syncer.addRequest(sreq, DeliverReq)
		} else {
			glog.V(logger.Detail).Infof("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log())
		}
	} else {
		glog.V(logger.Detail).Infof("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log())
	}
}

// adds the peer request for the chunk and decides the timeout for the response if still searching
func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) {
	glog.V(logger.Detail).Infof("Depo.strategyUpdateRequest: key %v", origReq.Key.Log())
	// we do not create an alternative one
	req = origReq
	if rs != nil {
		self.addRequester(rs, req)
		req.setTimeout(self.searchTimeout(rs, req))
	}
	return
}

// decides the timeout promise sent with the immediate peers response to a retrieve request;
// if a timeout is explicitly set and expires before the default search timeout, the explicit one is used
func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) {
	reqt := req.getTimeout()
	t := time.Now().Add(searchTimeout)
	if reqt != nil && reqt.Before(t) {
		return reqt
	} else {
		return &t
	}
}

/*
adds a new peer to an existing open request
only add if less than requesterCount peers forwarded the same request id so far
note this is done irrespective of status (searching or found)
*/
func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) {
	glog.V(logger.Detail).Infof("Depo.addRequester: key %v - add peer %v to req.Id %v", req.Key.Log(), req.from, req.Id)
	list := rs.Requesters[req.Id]
	rs.Requesters[req.Id] = append(list, req)
}

150	swarm/network/forwarding.go	Normal file
@@ -0,0 +1,150 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"math/rand"
	"time"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const requesterCount = 3

/*
forwarder implements the CloudStore interface (used by storage.NetStore)
and serves as the cloud store backend orchestrating storage/retrieval/delivery
via the native bzz protocol
which uses an MSB logarithmic distance-based semi-permanent Kademlia table for
* recursive forwarding style routing for retrieval
* smart synchronisation
*/

type forwarder struct {
	hive *Hive
}

func NewForwarder(hive *Hive) *forwarder {
	return &forwarder{hive: hive}
}

// generate a unique id uint64
func generateId() uint64 {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return uint64(r.Int63())
}

var searchTimeout = 3 * time.Second

// forwarding logic
// logic propagating retrieve requests to peers given by the kademlia hive
func (self *forwarder) Retrieve(chunk *storage.Chunk) {
	peers := self.hive.getPeers(chunk.Key, 0)
	glog.V(logger.Detail).Infof("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers))
OUT:
	for _, p := range peers {
		glog.V(logger.Detail).Infof("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p)
		for _, recipients := range chunk.Req.Requesters {
			for _, recipient := range recipients {
				req := recipient.(*retrieveRequestMsgData)
				if req.from.Addr() == p.Addr() {
					continue OUT
				}
			}
		}
		req := &retrieveRequestMsgData{
			Key: chunk.Key,
			Id:  generateId(),
		}
		var err error
		if p.swap != nil {
			err = p.swap.Add(-1)
		}
		if err == nil {
			p.retrieve(req)
			break OUT
		}
		glog.V(logger.Warn).Infof("forwarder.Retrieve: unable to send retrieveRequest to peer [%v]: %v", chunk.Key.Log(), err)
	}
}

// requests to specific peers given by the kademlia hive
// except for peers that the store request came from (if any)
// delivery queueing taken care of by syncer
func (self *forwarder) Store(chunk *storage.Chunk) {
	var n int
	msg := &storeRequestMsgData{
		Key:   chunk.Key,
		SData: chunk.SData,
	}
	var source *peer
	if chunk.Source != nil {
		source = chunk.Source.(*peer)
	}
	for _, p := range self.hive.getPeers(chunk.Key, 0) {
		glog.V(logger.Detail).Infof("forwarder.Store: %v %v", p, chunk)

		if p.syncer != nil && (source == nil || p.Addr() != source.Addr()) {
			n++
			Deliver(p, msg, PropagateReq)
		}
	}
	glog.V(logger.Detail).Infof("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk)
}

// once a chunk is found deliver it to its requesters unless timed out
func (self *forwarder) Deliver(chunk *storage.Chunk) {
	// iterate over request entries
	for id, requesters := range chunk.Req.Requesters {
		counter := requesterCount
		msg := &storeRequestMsgData{
			Key:   chunk.Key,
			SData: chunk.SData,
		}
		var n int
		var req *retrieveRequestMsgData
		// iterate over requesters with the same id
		for id, r := range requesters {
			req = r.(*retrieveRequestMsgData)
			if req.timeout == nil || req.timeout.After(time.Now()) {
				glog.V(logger.Detail).Infof("forwarder.Deliver: %v -> %v", req.Id, req.from)
				msg.Id = uint64(id)
				Deliver(req.from, msg, DeliverReq)
				n++
				counter--
				if counter <= 0 {
					break
				}
			}
		}
		glog.V(logger.Detail).Infof("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n)
	}
}

// initiate delivery of a chunk to a particular peer via syncer#addRequest
// depending on syncer mode and priority settings and sync request type
// this either goes via confirmation roundtrip or queued or pushed directly
func Deliver(p *peer, req interface{}, ty int) {
	p.syncer.addRequest(req, ty)
}

// push chunk over to peer
func Push(p *peer, key storage.Key, priority uint) {
	p.syncer.doDelivery(key, priority, p.syncer.quit)
}

383	swarm/network/hive.go	Normal file
@@ -0,0 +1,383 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"math/rand"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/swarm/network/kademlia"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// Hive is the logistic manager of the swarm
// it uses a generic kademlia nodetable to find the best peer list
// for any target
// this is used by the netstore to search for content in the swarm
// the bzz protocol peersMsgData exchange is relayed to Kademlia
// for db storage and filtering
// connections and disconnections are reported and relayed
// to keep the nodetable up to date

type Hive struct {
	listenAddr   func() string
	callInterval uint64
	id           discover.NodeID
	addr         kademlia.Address
	kad          *kademlia.Kademlia
	path         string
	quit         chan bool
	toggle       chan bool
	more         chan bool

	// for testing only
	swapEnabled bool
	syncEnabled bool
	blockRead   bool
	blockWrite  bool
}

const (
	callInterval = 3000000000
	// bucketSize = 3
	// maxProx = 8
	// proxBinSize = 4
)

type HiveParams struct {
	CallInterval uint64
	KadDbPath    string
	*kademlia.KadParams
}

func NewHiveParams(path string) *HiveParams {
	kad := kademlia.NewKadParams()
	// kad.BucketSize = bucketSize
	// kad.MaxProx = maxProx
	// kad.ProxBinSize = proxBinSize

	return &HiveParams{
		CallInterval: callInterval,
		KadDbPath:    filepath.Join(path, "bzz-peers.json"),
		KadParams:    kad,
	}
}

func NewHive(addr common.Hash, params *HiveParams, swapEnabled, syncEnabled bool) *Hive {
	kad := kademlia.New(kademlia.Address(addr), params.KadParams)
	return &Hive{
		callInterval: params.CallInterval,
		kad:          kad,
		addr:         kad.Addr(),
		path:         params.KadDbPath,
		swapEnabled:  swapEnabled,
		syncEnabled:  syncEnabled,
	}
}
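
A hedged sketch of wiring these constructors together (the path, flags, and zero base address are placeholders; a real node derives the address from its key):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/network"
)

func main() {
	// default params store the kademlia peer db under <path>/bzz-peers.json
	params := network.NewHiveParams("/tmp/bzz")
	// placeholder base address; normally derived from the node's key
	hive := network.NewHive(common.Hash{}, params, true /* swap */, true /* sync */)
	_ = hive
	// later: hive.Start(nodeID, listenAddrFn, connectPeerFn) and hive.Stop()
}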
|
||||||
|
|
||||||
|
func (self *Hive) SyncEnabled(on bool) {
|
||||||
|
self.syncEnabled = on
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *Hive) SwapEnabled(on bool) {
|
||||||
|
self.swapEnabled = on
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *Hive) BlockNetworkRead(on bool) {
|
||||||
|
self.blockRead = on
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *Hive) BlockNetworkWrite(on bool) {
|
||||||
|
self.blockWrite = on
|
||||||
|
}
|
||||||
|
|
||||||
|
// public accessor to the hive base address
|
||||||
|
func (self *Hive) Addr() kademlia.Address {
|
||||||
|
return self.addr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start receives network info only at startup
|
||||||
|
// listedAddr is a function to retrieve listening address to advertise to peers
|
||||||
|
// connectPeer is a function to connect to a peer based on its NodeID or enode URL
|
||||||
|
// there are called on the p2p.Server which runs on the node
|
||||||
|
func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPeer func(string) error) (err error) {
|
||||||
|
self.toggle = make(chan bool)
|
||||||
|
self.more = make(chan bool)
|
||||||
|
self.quit = make(chan bool)
|
||||||
|
self.id = id
|
||||||
|
self.listenAddr = listenAddr
|
||||||
|
err = self.kad.Load(self.path, nil)
|
||||||
|
if err != nil {
|
||||||
|
glog.V(logger.Warn).Infof("Warning: error reading kaddb '%s' (skipping): %v", self.path, err)
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
// this loop is doing bootstrapping and maintains a healthy table
|
||||||
|
go self.keepAlive()
|
||||||
|
go func() {
|
||||||
|
// whenever toggled ask kademlia about most preferred peer
|
||||||
|
for alive := range self.more {
|
||||||
|
if !alive {
|
||||||
|
// receiving false closes the loop while allowing parallel routines
|
||||||
|
// to attempt to write to more (remove Peer when shutting down)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
node, need, proxLimit := self.kad.Suggest()
|
||||||
|
|
||||||
|
if node != nil && len(node.Url) > 0 {
|
||||||
|
glog.V(logger.Detail).Infof("call known bee %v", node.Url)
|
||||||
|
// enode or any lower level connection address is unnecessary in future
|
||||||
|
// discovery table is used to look it up.
|
||||||
|
connectPeer(node.Url)
|
||||||
|
}
|
||||||
|
if need {
|
||||||
|
// a random peer is taken from the table
|
||||||
|
peers := self.kad.FindClosest(kademlia.RandomAddressAt(self.addr, rand.Intn(self.kad.MaxProx)), 1)
|
||||||
|
if len(peers) > 0 {
|
||||||
|
// a random address at prox bin 0 is sent for lookup
|
||||||
|
randAddr := kademlia.RandomAddressAt(self.addr, proxLimit)
|
||||||
|
req := &retrieveRequestMsgData{
|
||||||
|
Key: storage.Key(randAddr[:]),
|
||||||
|
}
|
||||||
|
glog.V(logger.Detail).Infof("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0])
|
||||||
|
peers[0].(*peer).retrieve(req)
|
||||||
|
} else {
|
||||||
|
glog.V(logger.Warn).Infof("no peer")
|
||||||
|
}
|
||||||
|
glog.V(logger.Detail).Infof("buzz kept alive")
|
||||||
|
} else {
|
||||||
|
glog.V(logger.Info).Infof("no need for more bees")
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case self.toggle <- need:
|
||||||
|
case <-self.quit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
glog.V(logger.Debug).Infof("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// keepAlive is a forever loop
|
||||||
|
// in its awake state it periodically triggers connection attempts
|
||||||
|
// by writing to self.more until Kademlia Table is saturated
|
||||||
|
// wake state is toggled by writing to self.toggle
|
||||||
|
// it restarts if the table becomes non-full again due to disconnections
|
||||||
|
func (self *Hive) keepAlive() {
|
||||||
|
alarm := time.NewTicker(time.Duration(self.callInterval)).C
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-alarm:
|
||||||
|
if self.kad.DBCount() > 0 {
|
||||||
|
select {
|
||||||
|
case self.more <- true:
|
||||||
|
glog.V(logger.Debug).Infof("buzz wakeup")
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case need := <-self.toggle:
|
||||||
|
if alarm == nil && need {
|
||||||
|
alarm = time.NewTicker(time.Duration(self.callInterval)).C
|
||||||
|
}
|
||||||
|
if alarm != nil && !need {
|
||||||
|
alarm = nil
|
||||||
|
|
||||||
|
}
|
||||||
|
case <-self.quit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *Hive) Stop() error {
|
||||||
|
// closing toggle channel quits the updateloop
|
||||||
|
close(self.quit)
|
||||||
|
return self.kad.Save(self.path, saveSync)
|
||||||
|
}
|
||||||
|
|
||||||
|
// called at the end of a successful protocol handshake
|
||||||
|
func (self *Hive) addPeer(p *peer) error {
|
||||||
|
defer func() {
|
||||||
|
select {
|
||||||
|
case self.more <- true:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
glog.V(logger.Detail).Infof("hi new bee %v", p)
|
||||||
|
err := self.kad.On(p, loadSync)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// self lookup (can be encoded as nil/zero key since peers addr known) + no id ()
|
||||||
|
// the most common way of saying hi in bzz is initiation of gossip
|
||||||
|
// let me know about anyone new from my hood , here is the storageradius
|
||||||
|
// to send the 6 byte self lookup
|
||||||
|
// we do not record as request or forward it, just reply with peers
|
||||||
|
p.retrieve(&retrieveRequestMsgData{})
|
||||||
|
glog.V(logger.Detail).Infof("'whatsup wheresdaparty' sent to %v", p)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}

// removePeer is called after a peer disconnected
func (self *Hive) removePeer(p *peer) {
	glog.V(logger.Debug).Infof("bee %v removed", p)
	self.kad.Off(p, saveSync)
	select {
	case self.more <- true:
	default:
	}
	if self.kad.Count() == 0 {
		glog.V(logger.Debug).Infof("empty, all bees gone")
	}
}

// getPeers retrieves a list of live peers that are closer to target than us
func (self *Hive) getPeers(target storage.Key, max int) (peers []*peer) {
	var addr kademlia.Address
	copy(addr[:], target[:])
	for _, node := range self.kad.FindClosest(addr, max) {
		peers = append(peers, node.(*peer))
	}
	return
}

// DropAll disconnects all the peers
func (self *Hive) DropAll() {
	glog.V(logger.Info).Infof("dropping all bees")
	for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) {
		node.Drop()
	}
}

// newNodeRecord is a constructor for kademlia.NodeRecord based on the peer address alone
// TODO: should go away and only addr passed to kademlia
func newNodeRecord(addr *peerAddr) *kademlia.NodeRecord {
	now := time.Now()
	return &kademlia.NodeRecord{
		Addr:  addr.Addr,
		Url:   addr.String(),
		Seen:  now,
		After: now,
	}
}

// HandlePeersMsg is called by the protocol when receiving a peerset (for a target address).
// peersMsgData is converted to a slice of NodeRecords for Kademlia,
// which stores everything that's needed.
func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) {
	var nrs []*kademlia.NodeRecord
	for _, p := range req.Peers {
		nrs = append(nrs, newNodeRecord(p))
	}
	self.kad.Add(nrs)
}

// peer wraps the protocol instance to represent a connected peer;
// it implements the kademlia.Node interface
type peer struct {
	*bzz // protocol instance running on peer connection
}

// the protocol instance implements the kademlia.Node interface (embedded peer)
func (self *peer) Addr() kademlia.Address {
	return self.remoteAddr.Addr
}

func (self *peer) Url() string {
	return self.remoteAddr.String()
}

// TODO: take into account traffic
func (self *peer) LastActive() time.Time {
	return self.lastActive
}

// loadSync reads the serialised form of the sync state persisted as the 'Meta' attribute
// and sets the decoded syncState on the online node
func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error {
	p, ok := node.(*peer)
	if !ok {
		return fmt.Errorf("invalid type")
	}
	if record.Meta == nil {
		glog.V(logger.Debug).Infof("no sync state for node record %v, setting default", record)
		p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}}
		return nil
	}
	state, err := decodeSync(record.Meta)
	if err != nil {
		return fmt.Errorf("error decoding kddb record meta info into a sync state: %v", err)
	}
	glog.V(logger.Detail).Infof("sync state for node record %v read from Meta: %s", record, string(*(record.Meta)))
	p.syncState = state
	return err
}

// saveSync is the callback invoked when saving a sync state
func saveSync(record *kademlia.NodeRecord, node kademlia.Node) {
	if p, ok := node.(*peer); ok {
		meta, err := encodeSync(p.syncState)
		if err != nil {
			glog.V(logger.Warn).Infof("error saving sync state for %v: %v", node, err)
			return
		}
		glog.V(logger.Detail).Infof("saved sync state for %v: %s", node, string(*meta))
		record.Meta = meta
	}
}

// peers is the immediate response to a retrieve request:
// it sends relevant peer data given by the kademlia hive to the requester.
// TODO: remember peers sent for the duration of the session; only send new peers.
func (self *Hive) peers(req *retrieveRequestMsgData) {
	if req != nil && req.MaxPeers >= 0 {
		var addrs []*peerAddr
		if req.timeout == nil || time.Now().Before(*(req.timeout)) {
			key := req.Key
			// self lookup from remote peer
			if storage.IsZeroKey(key) {
				addr := req.from.Addr()
				key = storage.Key(addr[:])
				req.Key = nil
			}
			// get peer addresses from hive
			for _, peer := range self.getPeers(key, int(req.MaxPeers)) {
				addrs = append(addrs, peer.remoteAddr)
			}
			glog.V(logger.Debug).Infof("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log())

			peersData := &peersMsgData{
				Peers: addrs,
				Key:   req.Key,
				Id:    req.Id,
			}
			peersData.setTimeout(req.timeout)
			req.from.peers(peersData)
		}
	}
}

func (self *Hive) String() string {
	return self.kad.String()
}
173	swarm/network/kademlia/address.go	Normal file
@ -0,0 +1,173 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package kademlia

import (
	"fmt"
	"math/rand"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

type Address common.Hash

func (a Address) String() string {
	return fmt.Sprintf("%x", a[:])
}

func (a *Address) MarshalJSON() (out []byte, err error) {
	return []byte(`"` + a.String() + `"`), nil
}

func (a *Address) UnmarshalJSON(value []byte) error {
	*a = Address(common.HexToHash(string(value[1 : len(value)-1])))
	return nil
}

// Bin returns the string form of the binary representation of an address (MSB first, 8 bits per byte)
func (a Address) Bin() string {
	var bs []string
	for _, b := range a[:] {
		bs = append(bs, fmt.Sprintf("%08b", b))
	}
	return strings.Join(bs, "")
}

/*
Proximity(x, y) returns the proximity order of the MSB distance between x and y.

The distance metric MSB(x, y) of two equal length byte sequences x and y is the
value of the binary integer cast of x^y, i.e., x and y bitwise xor-ed.
The binary cast is big endian: most significant bit first (=MSB).

Proximity(x, y) is a discrete logarithmic scaling of the MSB distance.
It is defined as the reverse rank of the integer part of the base 2
logarithm of the distance.
It is calculated by counting the number of common leading zeros in the (MSB)
binary representation of x^y.

(0 farthest, 255 closest, 256 self)
*/
func proximity(one, other Address) (ret int) {
	for i := 0; i < len(one); i++ {
		oxo := one[i] ^ other[i]
		for j := 0; j < 8; j++ {
			if (uint8(oxo)>>uint8(7-j))&0x01 != 0 {
				return i*8 + j
			}
		}
	}
	return len(one) * 8
}
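
// Illustrative sketch (an assumption, not part of this commit): on a Go 1.9+
// toolchain the inner bit loop above can be expressed with math/bits, since
// the proximity order is simply the number of common leading zero bits of
// one XOR other. proximityViaBits is a hypothetical helper shown in a comment
// only for clarity; it would additionally require importing "math/bits".
//
//	func proximityViaBits(one, other Address) int {
//		for i := 0; i < len(one); i++ {
//			if oxo := one[i] ^ other[i]; oxo != 0 {
//				return i*8 + bits.LeadingZeros8(oxo)
//			}
//		}
//		return len(one) * 8
//	}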

// Address.ProxCmp compares the distances a->target and b->target.
// Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal.
func (target Address) ProxCmp(a, b Address) int {
	for i := range target {
		da := a[i] ^ target[i]
		db := b[i] ^ target[i]
		if da > db {
			return 1
		} else if da < db {
			return -1
		}
	}
	return 0
}

// RandomAddressAt(address, prox) generates a random address
// at proximity order prox relative to address;
// if prox is negative, a fully random address is generated
func RandomAddressAt(self Address, prox int) (addr Address) {
	addr = self
	var pos int
	if prox >= 0 {
		pos = prox / 8
		trans := prox % 8
		transbytea := byte(0)
		for j := 0; j <= trans; j++ {
			transbytea |= 1 << uint8(7-j)
		}
		flipbyte := byte(1 << uint8(7-trans))
		transbyteb := transbytea ^ byte(255)
		randbyte := byte(rand.Intn(255))
		addr[pos] = ((addr[pos] & transbytea) ^ flipbyte) | randbyte&transbyteb
	}
	for i := pos + 1; i < len(addr); i++ {
		addr[i] = byte(rand.Intn(255))
	}

	return
}
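
// Illustrative sketch (an assumption, not part of this commit):
// RandomAddressAt round-trips with proximity by construction, since the
// first prox bits are preserved, the bit at position prox is flipped, and
// the rest are randomised. This is the property TestRandomAddressAt
// exercises in address_test.go below.
func exampleRandomAddressAt() bool {
	a := RandomAddress()
	b := RandomAddressAt(a, 42)
	return proximity(a, b) == 42 // holds by construction
}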

// KeyRange(one, other, proxLimit) returns the inclusive address range
// that contains addresses closer to one than to other
func KeyRange(one, other Address, proxLimit int) (start, stop Address) {
	prox := proximity(one, other)
	if prox >= proxLimit {
		prox = proxLimit
	}
	start = CommonBitsAddrByte(one, other, byte(0x00), prox)
	stop = CommonBitsAddrByte(one, other, byte(0xff), prox)
	return
}

func CommonBitsAddrF(self, other Address, f func() byte, p int) (addr Address) {
	prox := proximity(self, other)
	var pos int
	if p <= prox {
		prox = p
	}
	pos = prox / 8
	addr = self
	trans := byte(prox % 8)
	var transbytea byte
	if p > prox {
		transbytea = byte(0x7f)
	} else {
		transbytea = byte(0xff)
	}
	transbytea >>= trans
	transbyteb := transbytea ^ byte(0xff)
	addrpos := addr[pos]
	addrpos &= transbyteb
	if p > prox {
		addrpos ^= byte(0x80 >> trans)
	}
	addrpos |= transbytea & f()
	addr[pos] = addrpos
	for i := pos + 1; i < len(addr); i++ {
		addr[i] = f()
	}

	return
}

func CommonBitsAddr(self, other Address, prox int) (addr Address) {
	return CommonBitsAddrF(self, other, func() byte { return byte(rand.Intn(255)) }, prox)
}

func CommonBitsAddrByte(self, other Address, b byte, prox int) (addr Address) {
	return CommonBitsAddrF(self, other, func() byte { return b }, prox)
}

// RandomAddress() generates a fully random address
func RandomAddress() Address {
	return RandomAddressAt(Address{}, -1)
}
96	swarm/network/kademlia/address_test.go	Normal file
@ -0,0 +1,96 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package kademlia

import (
	"math/rand"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func (Address) Generate(rand *rand.Rand, size int) reflect.Value {
	var id Address
	for i := 0; i < len(id); i++ {
		id[i] = byte(uint8(rand.Intn(255)))
	}
	return reflect.ValueOf(id)
}

func TestCommonBitsAddrF(t *testing.T) {
	a := Address(common.HexToHash("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
	b := Address(common.HexToHash("0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
	c := Address(common.HexToHash("0x4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
	d := Address(common.HexToHash("0x0023456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
	e := Address(common.HexToHash("0x01A3456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
	ab := CommonBitsAddrF(a, b, func() byte { return byte(0x00) }, 10)
	expab := Address(common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000"))

	if ab != expab {
		t.Fatalf("%v != %v", ab, expab)
	}
	ac := CommonBitsAddrF(a, c, func() byte { return byte(0x00) }, 10)
	expac := Address(common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000"))

	if ac != expac {
		t.Fatalf("%v != %v", ac, expac)
	}
	ad := CommonBitsAddrF(a, d, func() byte { return byte(0x00) }, 10)
	expad := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))

	if ad != expad {
		t.Fatalf("%v != %v", ad, expad)
	}
	ae := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 10)
	expae := Address(common.HexToHash("0x0180000000000000000000000000000000000000000000000000000000000000"))

	if ae != expae {
		t.Fatalf("%v != %v", ae, expae)
	}
	acf := CommonBitsAddrF(a, c, func() byte { return byte(0xff) }, 10)
	expacf := Address(common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))

	if acf != expacf {
		t.Fatalf("%v != %v", acf, expacf)
	}
	aeo := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 2)
	expaeo := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))

	if aeo != expaeo {
		t.Fatalf("%v != %v", aeo, expaeo)
	}
	aep := CommonBitsAddrF(a, e, func() byte { return byte(0xff) }, 2)
	expaep := Address(common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))

	if aep != expaep {
		t.Fatalf("%v != %v", aep, expaep)
	}

}

func TestRandomAddressAt(t *testing.T) {
	var a Address
	for i := 0; i < 100; i++ {
		a = RandomAddress()
		prox := rand.Intn(255)
		b := RandomAddressAt(a, prox)
		if proximity(a, b) != prox {
			t.Fatalf("incorrect address prox(%v, %v) == %v (expected %v)", a, b, proximity(a, b), prox)
		}
	}
}
351	swarm/network/kademlia/kaddb.go	Normal file
@ -0,0 +1,351 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package kademlia

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

type NodeData interface {
	json.Marshaler
	json.Unmarshaler
}

// NodeRecord is a persisted record of a known (possibly inactive) peer
type NodeRecord struct {
	Addr  Address          // address of node
	Url   string           // Url, used to connect to node
	After time.Time        // next call after time
	Seen  time.Time        // last connected at time
	Meta  *json.RawMessage // arbitrary metadata saved for a peer

	node Node
}

func (self *NodeRecord) setSeen() {
	t := time.Now()
	self.Seen = t
	self.After = t
}

func (self *NodeRecord) String() string {
	return fmt.Sprintf("<%v>", self.Addr)
}

// KadDb is the persisted node record database
type KadDb struct {
	Address              Address
	Nodes                [][]*NodeRecord
	index                map[Address]*NodeRecord
	cursors              []int
	lock                 sync.RWMutex
	purgeInterval        time.Duration
	initialRetryInterval time.Duration
	connRetryExp         int
}

func newKadDb(addr Address, params *KadParams) *KadDb {
	return &KadDb{
		Address:              addr,
		Nodes:                make([][]*NodeRecord, params.MaxProx+1), // overwritten by load
		cursors:              make([]int, params.MaxProx+1),
		index:                make(map[Address]*NodeRecord),
		purgeInterval:        params.PurgeInterval,
		initialRetryInterval: params.InitialRetryInterval,
		connRetryExp:         params.ConnRetryExp,
	}
}

func (self *KadDb) findOrCreate(index int, a Address, url string) *NodeRecord {
	defer self.lock.Unlock()
	self.lock.Lock()

	record, found := self.index[a]
	if !found {
		record = &NodeRecord{
			Addr: a,
			Url:  url,
		}
		glog.V(logger.Info).Infof("add new record %v to kaddb", record)
		// insert in kaddb
		self.index[a] = record
		self.Nodes[index] = append(self.Nodes[index], record)
	} else {
		glog.V(logger.Info).Infof("found record %v in kaddb", record)
	}
	// update last seen time
	record.setSeen()
	// update with url in case IP/port changes
	record.Url = url
	return record
}

// add adds node records to kaddb (persisted node record db)
func (self *KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) {
	defer self.lock.Unlock()
	self.lock.Lock()
	var n int
	var nodes []*NodeRecord
	for _, node := range nrs {
		_, found := self.index[node.Addr]
		if !found && node.Addr != self.Address {
			node.setSeen()
			self.index[node.Addr] = node
			index := proximityBin(node.Addr)
			dbcursor := self.cursors[index]
			nodes = self.Nodes[index]
			// this is inefficient for allocation, need to just append then shift
			newnodes := make([]*NodeRecord, len(nodes)+1)
			copy(newnodes[:], nodes[:dbcursor])
			newnodes[dbcursor] = node
			copy(newnodes[dbcursor+1:], nodes[dbcursor:])
			glog.V(logger.Detail).Infof("new nodes: %v\nnodes: %v", newnodes, nodes)
			self.Nodes[index] = newnodes
			n++
		}
	}
	if n > 0 {
		glog.V(logger.Debug).Infof("%d/%d node records (new/known)", n, len(nrs))
	}
}

/*
findBest returns one node record with the highest priority for a desired
connection.
This is used to pick candidates for live nodes that are most wanted for
a highly connected, low-centrality network structure for Swarm, which best
suits Kademlia-style routing.

* Starting as a naive node with an empty db, this implements Kademlia bootstrapping.
* As a mature node, it fills short lines. All on demand.

The candidate is chosen using the following strategy:
We check for missing online nodes in the buckets for 1 up to max BucketSize rounds.
On each round we proceed from the low to high proximity order buckets.
If the number of active nodes (=connected peers) is < rounds, then start looking
for a known candidate. To determine if there is a candidate to recommend, the
kaddb node record database row corresponding to the bucket is checked.

If the row cursor is on position i, the ith element in the row is chosen.
If the record is scheduled not to be retried before NOW, the next element is taken.
If the record is scheduled to be retried, it is set as checked, scheduled for
checking and is returned. The time of the next check is in X (duration) such that
X = ConnRetryExp * delta where delta is the time passed since the last check and
ConnRetryExp is a constant obsoletion factor. (Note that when node records are added
from peer messages, they are marked as checked and placed at the cursor, i.e.
given priority over older entries.) Entries which were checked more than
purgeInterval ago are deleted from the kaddb row. If no candidate is found after
a full round of checking, the next bucket up is considered. If no candidate is
found when we reach the maximum-proximity bucket, the next round starts.

node record a is more favoured than b (a > b) iff a is a passive node (record of
an offline past peer) and
|proxBin(a)| < |proxBin(b)|
|| (proxBin(a) < proxBin(b) && |proxBin(a)| == |proxBin(b)|)
|| (proxBin(a) == proxBin(b) && lastChecked(a) < lastChecked(b))

The second return value indicates whether a missing slot was found;
the third names the proximity order of the first missing slot.
*/
func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRecord, need bool, proxLimit int) {
	// a nil node with proxLimit indicates that all buckets are filled
	defer self.lock.Unlock()
	self.lock.Lock()

	var interval time.Duration
	var found bool
	var purge []bool
	var delta time.Duration
	var cursor int
	var count int
	var after time.Time

	// iterate over columns maximum bucketsize times
	for rounds := 1; rounds <= maxBinSize; rounds++ {
	ROUND:
		// iterate over rows from PO 0 up to MaxProx
		for po, dbrow := range self.Nodes {
			// if the row has rounds connected peers, then take the next
			if binSize(po) >= rounds {
				continue ROUND
			}
			if !need {
				// set proxLimit to the PO where the first missing slot is found
				proxLimit = po
				need = true
			}
			purge = make([]bool, len(dbrow))

			// there is a missing slot - find a node to connect to;
			// select a node record from the relevant kaddb row (of identical prox order)
		ROW:
			for cursor = self.cursors[po]; !found && count < len(dbrow); cursor = (cursor + 1) % len(dbrow) {
				count++
				node = dbrow[cursor]

				// skip already connected nodes
				if node.node != nil {
					glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow))
					continue ROW
				}

				// skip the node if it is scheduled to connect only later
				if time.Time(node.After).After(time.Now()) {
					glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)
					continue ROW
				}

				delta = time.Since(time.Time(node.Seen))
				if delta < self.initialRetryInterval {
					delta = self.initialRetryInterval
				}
				if delta > self.purgeInterval {
					// remove node
					purge[cursor] = true
					glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen)
					continue ROW
				}

				glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)

				// scheduling next check
				interval = time.Duration(delta * time.Duration(self.connRetryExp))
				after = time.Now().Add(interval)

				glog.V(logger.Debug).Infof("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval)
				node.After = after
				found = true
			} // ROW
			self.cursors[po] = cursor
			self.delete(po, purge)
			if found {
				return node, need, proxLimit
			}
		} // ROUND
	} // ROUNDS

	return nil, need, proxLimit
}
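
// Illustrative sketch (an assumption, not part of this commit): the retry
// schedule findBest applies above. The next check is pushed out by
// ConnRetryExp times the time since the node was last seen, clamped below by
// InitialRetryInterval, which yields exponential backoff for persistently
// unreachable nodes.
func nextRetry(seen time.Time, connRetryExp int, initialRetryInterval time.Duration) time.Time {
	delta := time.Since(seen)
	if delta < initialRetryInterval {
		delta = initialRetryInterval
	}
	return time.Now().Add(delta * time.Duration(connRetryExp))
}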

// delete removes the node records of a kaddb row corresponding to the indexes.
// The caller must hold the db lock.
// The call is unsafe: no index checks.
func (self *KadDb) delete(row int, purge []bool) {
	var nodes []*NodeRecord
	dbrow := self.Nodes[row]
	for i, del := range purge {
		if i == self.cursors[row] {
			// reset cursor
			self.cursors[row] = len(nodes)
		}
		// delete the entry to be purged
		if del {
			delete(self.index, dbrow[i].Addr)
			continue
		}
		// otherwise append to new list
		nodes = append(nodes, dbrow[i])
	}
	self.Nodes[row] = nodes
}

// save persists kaddb on disk (written to the file at path in JSON format).
func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error {
	defer self.lock.Unlock()
	self.lock.Lock()

	var n int

	for _, b := range self.Nodes {
		for _, node := range b {
			n++
			node.After = time.Now()
			node.Seen = time.Now()
			if cb != nil {
				cb(node, node.node)
			}
		}
	}

	data, err := json.MarshalIndent(self, "", " ")
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(path, data, os.ModePerm)
	if err != nil {
		glog.V(logger.Warn).Infof("unable to save kaddb with %v nodes to %v: %v", n, path, err)
	} else {
		glog.V(logger.Info).Infof("saved kaddb with %v nodes to %v", n, path)
	}
	return err
}

// load loads the node record database (kaddb) from the file at path.
func (self *KadDb) load(path string, cb func(*NodeRecord, Node) error) (err error) {
	defer self.lock.Unlock()
	self.lock.Lock()

	var data []byte
	data, err = ioutil.ReadFile(path)
	if err != nil {
		return
	}

	err = json.Unmarshal(data, self)
	if err != nil {
		return
	}
	var n int
	var purge []bool
	for po, b := range self.Nodes {
		purge = make([]bool, len(b))
	ROW:
		for i, node := range b {
			if cb != nil {
				err = cb(node, node.node)
				if err != nil {
					purge[i] = true
					continue ROW
				}
			}
			n++
			if (node.After == time.Time{}) {
				node.After = time.Now()
			}
			self.index[node.Addr] = node
		}
		self.delete(po, purge)
	}
	glog.V(logger.Info).Infof("loaded kaddb with %v nodes from %v", n, path)

	return
}

// count is an accessor for the kaddb (offline) node record count
func (self *KadDb) count() int {
	defer self.lock.Unlock()
	self.lock.Lock()
	return len(self.index)
}
429	swarm/network/kademlia/kademlia.go	Normal file
@ -0,0 +1,429 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package kademlia

import (
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

const (
	bucketSize   = 4
	proxBinSize  = 2
	maxProx      = 8
	connRetryExp = 2
	maxPeers     = 100
)

var (
	purgeInterval        = 42 * time.Hour
	initialRetryInterval = 42 * time.Millisecond
	maxIdleInterval      = 42 * 1000 * time.Millisecond
	// maxIdleInterval = 42 * 100 * time.Millisecond
)

type KadParams struct {
	// adjustable parameters
	MaxProx              int
	ProxBinSize          int
	BucketSize           int
	PurgeInterval        time.Duration
	InitialRetryInterval time.Duration
	MaxIdleInterval      time.Duration
	ConnRetryExp         int
}

func NewKadParams() *KadParams {
	return &KadParams{
		MaxProx:              maxProx,
		ProxBinSize:          proxBinSize,
		BucketSize:           bucketSize,
		PurgeInterval:        purgeInterval,
		InitialRetryInterval: initialRetryInterval,
		MaxIdleInterval:      maxIdleInterval,
		ConnRetryExp:         connRetryExp,
	}
}

// Kademlia is a table of active nodes
type Kademlia struct {
	addr       Address      // immutable base address of the table
	*KadParams              // Kademlia configuration parameters
	proxLimit  int          // state, the PO of the first row of the most proximate bin
	proxSize   int          // state, the number of peers in the most proximate bin
	count      int          // number of active peers (with a live connection)
	buckets    [][]Node     // the actual bins
	db         *KadDb       // kaddb, node record database
	lock       sync.RWMutex // mutex to access buckets
}

type Node interface {
	Addr() Address
	Url() string
	LastActive() time.Time
	Drop()
}

// New is the public constructor.
// addr is the base address of the table;
// params is the KadParams configuration.
func New(addr Address, params *KadParams) *Kademlia {
	buckets := make([][]Node, params.MaxProx+1)
	return &Kademlia{
		addr:      addr,
		KadParams: params,
		buckets:   buckets,
		db:        newKadDb(addr, params),
	}
}

// Addr is an accessor for the KAD base address
func (self *Kademlia) Addr() Address {
	return self.addr
}

// Count is an accessor for the KAD active node count
func (self *Kademlia) Count() int {
	defer self.lock.Unlock()
	self.lock.Lock()
	return self.count
}

// DBCount is an accessor for the KAD node record count
func (self *Kademlia) DBCount() int {
	return self.db.count()
}

// On is the entry point called when a new node is added.
// It is unsafe in that the node is not checked to be an already active node
// (it is to be called once per connection).
func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error) {
	glog.V(logger.Warn).Infof("%v", self)
	defer self.lock.Unlock()
	self.lock.Lock()

	index := self.proximityBin(node.Addr())
	record := self.db.findOrCreate(index, node.Addr(), node.Url())

	if cb != nil {
		err = cb(record, node)
		glog.V(logger.Detail).Infof("cb(%v, %v) ->%v", record, node, err)
		if err != nil {
			return fmt.Errorf("unable to add node %v, callback error: %v", node.Addr(), err)
		}
		glog.V(logger.Debug).Infof("add node record %v with node %v", record, node)
	}

	// insert in kademlia table of active nodes;
	// if the bucket is full, insertion replaces the most idle node
	// TODO: give priority to peers with active traffic
	bucket := self.buckets[index]
	if len(bucket) < self.BucketSize { // >= allows us to add peers beyond the bucketsize limitation
		self.buckets[index] = append(bucket, node)
		glog.V(logger.Debug).Infof("add node %v to table", node)
		self.setProxLimit(index, true)
		record.node = node
		self.count++
		return nil
	}

	// always rotate peers
	idle := self.MaxIdleInterval
	var pos int
	var replaced Node
	for i, p := range bucket {
		idleInt := time.Since(p.LastActive())
		if idleInt > idle {
			idle = idleInt
			pos = i
			replaced = p
		}
	}
	if replaced == nil {
		glog.V(logger.Debug).Infof("all peers wanted, PO%03d bucket full", index)
		return fmt.Errorf("bucket full")
	}
	glog.V(logger.Debug).Infof("node %v replaced by %v (idle for %v > %v)", replaced, node, idle, self.MaxIdleInterval)
	replaced.Drop()
	// actually replace in the row. When Off(node) is called, the peer is no longer in the row
	bucket[pos] = node
	// there is no change in bucket cardinalities so no prox limit adjustment is needed
	record.node = node
	self.count++
	return nil

}

// Off is called when a node is taken offline (from the protocol main loop exit)
func (self *Kademlia) Off(node Node, cb func(*NodeRecord, Node)) (err error) {
	self.lock.Lock()
	defer self.lock.Unlock()

	index := self.proximityBin(node.Addr())
	bucket := self.buckets[index]
	for i := 0; i < len(bucket); i++ {
		if node.Addr() == bucket[i].Addr() {
			self.buckets[index] = append(bucket[:i], bucket[(i+1):]...)
			self.setProxLimit(index, false)
			break
		}
	}

	record := self.db.index[node.Addr()]
	// callback on remove
	if cb != nil {
		cb(record, record.node)
	}
	record.node = nil
	self.count--
	glog.V(logger.Debug).Infof("remove node %v from table, population now is %v", node, self.count)

	return
}

// setProxLimit dynamically adjusts proxLimit so that
// 1) there are no empty buckets in bins < proxLimit, and
// 2) the total number of peers at or above proxLimit is the minimum possible
// that is still at least ProxBinSize.
// It adjusts proxLimit and proxSize after an insertion/removal of nodes;
// the caller holds the lock.
func (self *Kademlia) setProxLimit(r int, on bool) {
	// if the change is outside the core (lower PO)
	// and the change does not leave a bucket empty, then
	// no adjustment is needed
	if r < self.proxLimit && len(self.buckets[r]) > 0 {
		return
	}
	// if on (a node was added), then r must be within the prox limit, so increment cardinality
	if on {
		self.proxSize++
		curr := len(self.buckets[self.proxLimit])
		// if the core is now big enough without the furthest bucket, then contract;
		// this can result in more than one bucket change
		for self.proxSize >= self.ProxBinSize+curr && curr > 0 {
			self.proxSize -= curr
			self.proxLimit++
			curr = len(self.buckets[self.proxLimit])

			glog.V(logger.Detail).Infof("proxbin contraction (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)
		}
		return
	}
	// otherwise (a node was removed)
	if r >= self.proxLimit {
		self.proxSize--
	}
	// expand the core by lowering proxLimit until we hit zero, cover the empty bucket or reach the target cardinality
	for (self.proxSize < self.ProxBinSize || r < self.proxLimit) &&
		self.proxLimit > 0 {
		self.proxLimit--
		self.proxSize += len(self.buckets[self.proxLimit])
		glog.V(logger.Detail).Infof("proxbin expansion (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)
	}
}
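
// Illustrative sketch (an assumption, not part of this commit): the two
// invariants that setProxLimit maintains, written out as a check. It mirrors
// the proxCheck helper used by kademlia_test.go further below; callers would
// need to hold the lock.
func (self *Kademlia) proxInvariantsHold() bool {
	size := 0
	for i, b := range self.buckets {
		if i >= self.proxLimit {
			size += len(b) // the most proximate bin is the union of rows >= proxLimit
		} else if len(b) == 0 {
			return false // invariant 1: no empty buckets below proxLimit
		}
	}
	// invariant 2: proxSize tracks the merged most proximate bin
	return size == self.proxSize
}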

/*
FindClosest returns the list of nodes belonging to the same proximity bin
as the target. The most proximate bin will be the union of the bins between
proxLimit and MaxProx.
*/
func (self *Kademlia) FindClosest(target Address, max int) []Node {
	self.lock.Lock()
	defer self.lock.Unlock()

	r := nodesByDistance{
		target: target,
	}

	po := self.proximityBin(target)
	index := po
	step := 1
	glog.V(logger.Detail).Infof("serving %v nodes at %v (PO%02d)", max, index, po)

	// if max is set to 0, we just want a full bucket, a dynamic number
	min := max
	// set limit to max
	limit := max
	if max == 0 {
		min = 1
		limit = maxPeers
	}

	var n int
	for index >= 0 {
		// add entire bucket
		for _, p := range self.buckets[index] {
			r.push(p, limit)
			n++
		}
		// terminate if index reached the bottom or enough peers > min
		glog.V(logger.Detail).Infof("add %v -> %v (PO%02d, PO%03d)", len(self.buckets[index]), n, index, po)
		if n >= min && (step < 0 || max == 0) {
			break
		}
		// if we reached the topmost non-empty PO bucket, turn around
		if index == self.MaxProx {
			index = po
			step = -1
		}
		index += step
	}
	glog.V(logger.Detail).Infof("serve %d (<=%d) nodes for target lookup %v (PO%03d)", n, max, target, po)
	return r.nodes
}

func (self *Kademlia) Suggest() (*NodeRecord, bool, int) {
	defer self.lock.RUnlock()
	self.lock.RLock()
	return self.db.findBest(self.BucketSize, func(i int) int { return len(self.buckets[i]) })
}

// Add adds node records to kaddb (persisted node record db)
func (self *Kademlia) Add(nrs []*NodeRecord) {
	self.db.add(nrs, self.proximityBin)
}

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
	nodes  []Node
	target Address
}

func sortedByDistanceTo(target Address, slice []Node) bool {
	var last Address
	for i, node := range slice {
		if i > 0 {
			if target.ProxCmp(node.Addr(), last) < 0 {
				return false
			}
		}
		last = node.Addr()
	}
	return true
}

// push(node, max) adds the given node to the list, keeping the total size
// below max elements.
func (h *nodesByDistance) push(node Node, max int) {
	// sort.Search returns the first index ix such that the func returns true
	ix := sort.Search(len(h.nodes), func(i int) bool {
		return h.target.ProxCmp(h.nodes[i].Addr(), node.Addr()) >= 0
	})

	if len(h.nodes) < max {
		h.nodes = append(h.nodes, node)
	}
	if ix < len(h.nodes) {
		copy(h.nodes[ix+1:], h.nodes[ix:])
		h.nodes[ix] = node
	}
}
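
// Illustrative sketch (an assumption, not part of this commit): the same
// bounded sorted-insert idiom on plain ints. sort.Search returns the first
// index whose element is >= x; the slice grows only while below max, after
// which inserting shifts the tail right and drops the farthest element.
func pushSorted(s []int, x, max int) []int {
	ix := sort.Search(len(s), func(i int) bool { return s[i] >= x })
	if len(s) < max {
		s = append(s, 0) // placeholder, overwritten by the shift below
	}
	if ix < len(s) {
		copy(s[ix+1:], s[ix:])
		s[ix] = x
	}
	return s
}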

/*
Taking the proximity order relative to a fixed point x classifies the points in
the space (n byte long byte sequences) into bins. Items in each bin are at
most half as distant from x as items in the previous bin. Given a sample of
uniformly distributed items (a hash function over arbitrary sequences), the
proximity scale maps onto a series of subsets with cardinalities on a negative
exponential scale.

It also has the property that any two items belonging to the same bin are at
most half as distant from each other as they are from x.

If we think of a random sample of items in the bins as connections in a network
of interconnected nodes, then relative proximity can serve as the basis for
local decisions for graph traversal where the task is to find a route between
two points. Since with every hop the finite distance halves, there is
a guaranteed constant maximum limit on the number of hops needed to reach one
node from the other.
*/

func (self *Kademlia) proximityBin(other Address) (ret int) {
	ret = proximity(self.addr, other)
	if ret > self.MaxProx {
		ret = self.MaxProx
	}
	return
}
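
// Illustrative sketch (an assumption, not part of this commit): for uniformly
// random addresses roughly n/2^(p+1) of n samples land in proximity bin p,
// with everything at or beyond MaxProx collapsed into the last bin. This is
// the negative exponential cardinality scale described above.
func binHistogram(self Address, samples, maxProx int) []int {
	hist := make([]int, maxProx+1)
	for i := 0; i < samples; i++ {
		p := proximity(self, RandomAddress())
		if p > maxProx {
			p = maxProx
		}
		hist[p]++
	}
	return hist
}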

// KeyRange provides the key range for chunk db iteration
func (self *Kademlia) KeyRange(other Address) (start, stop Address) {
	defer self.lock.RUnlock()
	self.lock.RLock()
	return KeyRange(self.addr, other, self.proxLimit)
}

// Save persists kaddb on disk (written to the file at path in JSON format).
func (self *Kademlia) Save(path string, cb func(*NodeRecord, Node)) error {
	return self.db.save(path, cb)
}

// Load loads the node record database (kaddb) from the file at path.
func (self *Kademlia) Load(path string, cb func(*NodeRecord, Node) error) (err error) {
	return self.db.load(path, cb)
}

// String renders the kademlia table + kaddb table in ascii
func (self *Kademlia) String() string {
	defer self.lock.RUnlock()
	self.lock.RLock()
	defer self.db.lock.RUnlock()
	self.db.lock.RLock()

	var rows []string
	rows = append(rows, "=========================================================================")
	rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %v", time.Now().UTC().Format(time.UnixDate), self.addr.String()[:6]))
	rows = append(rows, fmt.Sprintf("population: %d (%d), proxLimit: %d, proxSize: %d", self.count, len(self.db.index), self.proxLimit, self.proxSize))
	rows = append(rows, fmt.Sprintf("MaxProx: %d, ProxBinSize: %d, BucketSize: %d", self.MaxProx, self.ProxBinSize, self.BucketSize))

	for i, bucket := range self.buckets {

		if i == self.proxLimit {
			rows = append(rows, fmt.Sprintf("============ PROX LIMIT: %d ==========================================", i))
		}
		row := []string{fmt.Sprintf("%03d", i), fmt.Sprintf("%2d", len(bucket))}
		var k int
		c := self.db.cursors[i]
		for ; k < len(bucket); k++ {
			p := bucket[(c+k)%len(bucket)]
			row = append(row, p.Addr().String()[:6])
			if k == 4 {
				break
			}
		}
		for ; k < 4; k++ {
			row = append(row, "      ")
		}
		row = append(row, fmt.Sprintf("| %2d %2d", len(self.db.Nodes[i]), self.db.cursors[i]))

		for j, p := range self.db.Nodes[i] {
			row = append(row, p.Addr.String()[:6])
			if j == 3 {
				break
			}
		}
		rows = append(rows, strings.Join(row, " "))
	}
	rows = append(rows, "=========================================================================")
	return strings.Join(rows, "\n")
}
392	swarm/network/kademlia/kademlia_test.go	Normal file
@ -0,0 +1,392 @@
|
|||||||
|
// Copyright 2016 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package kademlia
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
"testing/quick"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
quickrand = rand.New(rand.NewSource(time.Now().Unix()))
|
||||||
|
quickcfgFindClosest = &quick.Config{MaxCount: 50, Rand: quickrand}
|
||||||
|
quickcfgBootStrap = &quick.Config{MaxCount: 100, Rand: quickrand}
|
||||||
|
)
|
||||||
|
|
||||||
|
type testNode struct {
|
||||||
|
addr Address
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *testNode) String() string {
|
||||||
|
return fmt.Sprintf("%x", n.addr[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *testNode) Addr() Address {
|
||||||
|
return n.addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *testNode) Drop() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *testNode) Url() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *testNode) LastActive() time.Time {
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOn(t *testing.T) {
|
||||||
|
addr, ok := gen(Address{}, quickrand).(Address)
|
||||||
|
other, ok := gen(Address{}, quickrand).(Address)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("oops")
|
||||||
|
}
|
||||||
|
kad := New(addr, NewKadParams())
|
||||||
|
err := kad.On(&testNode{addr: other}, nil)
|
||||||
|
_ = err
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBootstrap(t *testing.T) {
|
||||||
|
|
||||||
|
test := func(test *bootstrapTest) bool {
|
||||||
|
// for any node kad.le, Target and N
|
||||||
|
params := NewKadParams()
|
||||||
|
params.MaxProx = test.MaxProx
|
||||||
|
params.BucketSize = test.BucketSize
|
||||||
|
params.ProxBinSize = test.BucketSize
|
||||||
|
kad := New(test.Self, params)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for p := 0; p < 9; p++ {
|
||||||
|
var nrs []*NodeRecord
|
||||||
|
n := math.Pow(float64(2), float64(7-p))
|
||||||
|
for i := 0; i < int(n); i++ {
|
||||||
|
addr := RandomAddressAt(test.Self, p)
|
||||||
|
nrs = append(nrs, &NodeRecord{
|
||||||
|
Addr: addr,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
kad.Add(nrs)
|
||||||
|
}
|
||||||
|
|
||||||
|
node := &testNode{test.Self}
|
||||||
|
|
||||||
|
n := 0
|
||||||
|
for n < 100 {
|
||||||
|
err = kad.On(node, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("backend not accepting node: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
record, need, _ := kad.Suggest()
|
||||||
|
if !need {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
n++
|
||||||
|
if record == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
node = &testNode{record.Addr}
|
||||||
|
}
|
||||||
|
exp := test.BucketSize * (test.MaxProx + 1)
|
||||||
|
if kad.Count() != exp {
|
||||||
|
t.Errorf("incorrect number of peers, expected %d, got %d\n%v", exp, kad.Count(), kad)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if err := quick.Check(test, quickcfgBootStrap); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindClosest(t *testing.T) {
|
||||||
|
|
||||||
|
test := func(test *FindClosestTest) bool {
|
||||||
|
// for any node kad.le, Target and N
|
||||||
|
params := NewKadParams()
|
||||||
|
params.MaxProx = 7
|
||||||
|
kad := New(test.Self, params)
|
||||||
|
var err error
|
||||||
|
for _, node := range test.All {
|
||||||
|
err = kad.On(node, nil)
|
||||||
|
if err != nil && err.Error() != "bucket full" {
|
||||||
|
t.Fatalf("backend not accepting node: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(test.All) == 0 || test.N == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
nodes := kad.FindClosest(test.Target, test.N)
|
||||||
|
|
||||||
|
// check that the number of results is min(N, kad.len)
|
||||||
|
wantN := test.N
|
||||||
|
if tlen := kad.Count(); tlen < test.N {
|
||||||
|
wantN = tlen
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(nodes) != wantN {
|
||||||
|
t.Errorf("wrong number of nodes: got %d, want %d", len(nodes), wantN)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasDuplicates(nodes) {
|
||||||
|
t.Errorf("result contains duplicates")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !sortedByDistanceTo(test.Target, nodes) {
|
||||||
|
t.Errorf("result is not sorted by distance to target")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// check that the result nodes have minimum distance to target.
|
||||||
|
farthestResult := nodes[len(nodes)-1].Addr()
|
||||||
|
for i, b := range kad.buckets {
|
||||||
|
for j, n := range b {
|
||||||
|
if contains(nodes, n.Addr()) {
|
||||||
|
continue // don't run the check below for nodes in result
|
||||||
|
}
|
||||||
|
if test.Target.ProxCmp(n.Addr(), farthestResult) < 0 {
|
||||||
|
_ = i * j
|
||||||
|
t.Errorf("kad.le contains node that is closer to target but it's not in result")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if err := quick.Check(test, quickcfgFindClosest); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type proxTest struct {
|
||||||
|
add bool
|
||||||
|
index int
|
||||||
|
addr Address
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
addresses []Address
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProxAdjust(t *testing.T) {
|
||||||
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
self := gen(Address{}, r).(Address)
|
||||||
|
params := NewKadParams()
|
||||||
|
params.MaxProx = 7
|
||||||
|
kad := New(self, params)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
a := gen(Address{}, r).(Address)
|
||||||
|
addresses = append(addresses, a)
|
||||||
|
err = kad.On(&testNode{addr: a}, nil)
|
||||||
|
if err != nil && err.Error() != "bucket full" {
|
||||||
|
t.Fatalf("backend not accepting node: %v", err)
|
||||||
|
}
|
||||||
|
if !kad.proxCheck(t) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
test := func(test *proxTest) bool {
|
||||||
|
node := &testNode{test.addr}
|
||||||
|
if test.add {
|
||||||
|
kad.On(node, nil)
|
||||||
|
} else {
|
||||||
|
kad.Off(node, nil)
|
||||||
|
}
|
||||||
|
return kad.proxCheck(t)
|
||||||
|
}
|
||||||
|
if err := quick.Check(test, quickcfgFindClosest); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSaveLoad(t *testing.T) {
|
||||||
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
addresses := gen([]Address{}, r).([]Address)
|
||||||
|
self := RandomAddress()
|
||||||
|
params := NewKadParams()
|
||||||
|
params.MaxProx = 7
|
||||||
|
kad := New(self, params)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for _, a := range addresses {
|
||||||
|
err = kad.On(&testNode{addr: a}, nil)
|
||||||
|
if err != nil && err.Error() != "bucket full" {
|
||||||
|
t.Fatalf("backend not accepting node: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nodes := kad.FindClosest(self, 100)
|
||||||
|
|
||||||
|
path := filepath.Join(os.TempDir(), "bzz-kad-test-save-load.peers")
|
||||||
|
err = kad.Save(path, nil)
|
||||||
|
if err != nil && err.Error() != "bucket full" {
|
||||||
|
t.Fatalf("unepected error saving kaddb: %v", err)
|
||||||
|
}
|
||||||
|
kad = New(self, params)
|
||||||
|
err = kad.Load(path, nil)
|
||||||
|
if err != nil && err.Error() != "bucket full" {
|
||||||
|
t.Fatalf("unepected error loading kaddb: %v", err)
|
||||||
|
}
|
||||||
|
for _, b := range kad.db.Nodes {
|
||||||
|
for _, node := range b {
|
||||||
|
err = kad.On(&testNode{node.Addr}, nil)
|
||||||
|
if err != nil && err.Error() != "bucket full" {
|
||||||
|
t.Fatalf("backend not accepting node: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
loadednodes := kad.FindClosest(self, 100)
|
||||||
|
for i, node := range loadednodes {
|
||||||
|
if nodes[i].Addr() != node.Addr() {
|
||||||
|
t.Errorf("node mismatch at %d/%d: %v != %v", i, len(nodes), nodes[i].Addr(), node.Addr())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *Kademlia) proxCheck(t *testing.T) bool {
|
||||||
|
var sum int
|
||||||
|
for i, b := range self.buckets {
|
||||||
|
l := len(b)
|
||||||
|
// if we are in the high prox multibucket
|
||||||
|
if i >= self.proxLimit {
|
||||||
|
sum += l
|
||||||
|
} else if l == 0 {
|
||||||
|
t.Errorf("bucket %d empty, yet proxLimit is %d\n%v", len(b), self.proxLimit, self)
|
||||||
|
			return false
		}
	}
	// check if merged high prox bucket does not exceed size
	if sum > 0 {
		if sum != self.proxSize {
			t.Errorf("proxSize incorrect, expected %v, got %v", sum, self.proxSize)
			return false
		}
		last := len(self.buckets[self.proxLimit])
		if last > 0 && sum >= self.ProxBinSize+last {
			t.Errorf("proxLimit %v incorrect, redundant non-empty bucket %d added to proxBin with %v (target %v)\n%v", self.proxLimit, last, sum-last, self.ProxBinSize, self)
			return false
		}
		if self.proxLimit > 0 && sum < self.ProxBinSize {
			t.Errorf("proxLimit %v incorrect. proxSize %v is less than target %v, yet there are more peers", self.proxLimit, sum, self.ProxBinSize)
			return false
		}
	}
	return true
}

type bootstrapTest struct {
	MaxProx    int
	BucketSize int
	Self       Address
}

func (*bootstrapTest) Generate(rand *rand.Rand, size int) reflect.Value {
	t := &bootstrapTest{
		Self:       gen(Address{}, rand).(Address),
		MaxProx:    5 + rand.Intn(2),
		BucketSize: rand.Intn(3) + 1,
	}
	return reflect.ValueOf(t)
}

type FindClosestTest struct {
	Self   Address
	Target Address
	All    []Node
	N      int
}

func (c FindClosestTest) String() string {
	return fmt.Sprintf("A: %064x\nT: %064x\n(%d)\n", c.Self[:], c.Target[:], c.N)
}

func (*FindClosestTest) Generate(rand *rand.Rand, size int) reflect.Value {
	t := &FindClosestTest{
		Self:   gen(Address{}, rand).(Address),
		Target: gen(Address{}, rand).(Address),
		N:      rand.Intn(bucketSize),
	}
	for _, a := range gen([]Address{}, rand).([]Address) {
		t.All = append(t.All, &testNode{addr: a})
	}
	return reflect.ValueOf(t)
}

func (*proxTest) Generate(rand *rand.Rand, size int) reflect.Value {
	var add bool
	if rand.Intn(1) == 0 {
		add = true
	}
	var t *proxTest
	if add {
		t = &proxTest{
			addr: gen(Address{}, rand).(Address),
			add:  add,
		}
	} else {
		t = &proxTest{
			index: rand.Intn(len(addresses)),
			add:   add,
		}
	}
	return reflect.ValueOf(t)
}

func hasDuplicates(slice []Node) bool {
	seen := make(map[Address]bool)
	for _, node := range slice {
		if seen[node.Addr()] {
			return true
		}
		seen[node.Addr()] = true
	}
	return false
}

func contains(nodes []Node, addr Address) bool {
	for _, n := range nodes {
		if n.Addr() == addr {
			return true
		}
	}
	return false
}

// gen wraps quick.Value so it's easier to use.
// it generates a random value of the given value's type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
	v, ok := quick.Value(reflect.TypeOf(typ), rand)
	if !ok {
		panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
	}
	return v.Interface()
}
317
swarm/network/messages.go
Normal file
@ -0,0 +1,317 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/contracts/chequebook"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/swarm/network/kademlia"
	"github.com/ethereum/go-ethereum/swarm/services/swap"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

/*
BZZ protocol Message Types and Message Data Types
*/

// bzz protocol message codes
const (
	statusMsg          = iota // 0x00
	storeRequestMsg           // 0x01
	retrieveRequestMsg        // 0x02
	peersMsg                  // 0x03
	syncRequestMsg            // 0x04
	deliveryRequestMsg        // 0x05
	unsyncedKeysMsg           // 0x06
	paymentMsg                // 0x07
)
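
// Illustrative note, not part of this change: with iota starting at zero the
// codes run from 0x00 (statusMsg) to 0x07 (paymentMsg), eight codes in total,
// which is what ProtocolLength = uint64(8) in protocol.go accounts for.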

/*
Handshake

* Version: 8 byte integer version of the protocol
* ID: arbitrary byte sequence, a human readable client identifier
* Addr: the address advertised by the node, format similar to DEVp2p wire protocol
* Swap: info for the swarm accounting protocol
* NetworkID: 8 byte integer network identifier
* Caps: swarm-specific capabilities, format identical to devp2p
* SyncState: synchronisation state (db iterator key and address space etc.) persisted about the peer

*/
type statusMsgData struct {
	Version   uint64
	ID        string
	Addr      *peerAddr
	Swap      *swap.SwapProfile
	NetworkId uint64
}

func (self *statusMsgData) String() string {
	return fmt.Sprintf("Status: Version: %v, ID: %v, Addr: %v, Swap: %v, NetworkId: %v", self.Version, self.ID, self.Addr, self.Swap, self.NetworkId)
}

/*
store requests are forwarded to the peers in their kademlia proximity bin
if they are distant;
if they are within our storage radius or we have any incentive to store them,
then we attach our nodeID to the metadata;
this applies if the storage request is sufficiently close (within our proxLimit,
i.e. the last row of the routing table)
*/
type storeRequestMsgData struct {
	Key   storage.Key // hash of datasize | data
	SData []byte      // the actual chunk Data
	// optional
	Id             uint64     // request ID. if delivery, the ID is retrieve request ID
	requestTimeout *time.Time // expiry for forwarding - [not serialised][not currently used]
	storageTimeout *time.Time // expiry of content - [not serialised][not currently used]
	from           *peer      // [not serialised] protocol registers the requester
}

func (self storeRequestMsgData) String() string {
	var from string
	if self.from == nil {
		from = "self"
	} else {
		from = self.from.Addr().String()
	}
	end := len(self.SData)
	if len(self.SData) > 10 {
		end = 10
	}
	return fmt.Sprintf("from: %v, Key: %v; ID: %v, requestTimeout: %v, storageTimeout: %v, SData %x", from, self.Key, self.Id, self.requestTimeout, self.storageTimeout, self.SData[:end])
}

/*
Retrieve request

Timeout in milliseconds. Note that zero timeout retrieval requests do not
request forwarding, but prompt for a peers message response. Therefore they
also serve as messages to retrieve peers.

MaxSize specifies the maximum size that the peer will accept. This is useful in
particular if we allow storage and delivery of multichunk payload representing
the entire or partial subtree unfolding from the requested root key.
So when only interested in a limited part of a stream (infinite trees) or only
testing chunk availability etc., we can indicate it by limiting the size here.

Request ID can be newly generated or kept from the request originator.
If the request ID is missing or zero, the request is handled as a lookup only,
prompting a peers response but not launching a search. Lookup requests are meant
to be used to bootstrap kademlia tables.

In the special case that the key is the zero value as well, the remote peer's
address is assumed (the message is to be handled as a self lookup request).
The response is a PeersMsg with the peers in the kademlia proximity bin
corresponding to the address.
(A minimal sketch of such a lookup request follows after isLookup below.)
*/

type retrieveRequestMsgData struct {
	Key      storage.Key // target Key address of chunk to be retrieved
	Id       uint64      // request id, request is a lookup if missing or zero
	MaxSize  uint64      // maximum size of delivery accepted
	MaxPeers uint64      // maximum number of peers returned
	Timeout  uint64      // the longest time we are expecting a response
	timeout  *time.Time  // [not serialised]
	from     *peer
}

func (self retrieveRequestMsgData) String() string {
	var from string
	if self.from == nil {
		from = "ourselves"
	} else {
		from = self.from.Addr().String()
	}
	var target []byte
	if len(self.Key) > 3 {
		target = self.Key[:4]
	}
	return fmt.Sprintf("from: %v, Key: %x; ID: %v, MaxSize: %v, MaxPeers: %d", from, target, self.Id, self.MaxSize, self.MaxPeers)
}

// lookups are encoded by missing request ID
func (self retrieveRequestMsgData) isLookup() bool {
	return self.Id == 0
}
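
// Illustrative sketch, not part of this change: how a bootstrap lookup could
// be built under the encoding documented above. A zero Id marks the request
// as a lookup (no search is launched) and a zero-value Key makes the remote
// peer assume its own address, so it answers with a peersMsg for its own
// proximity bin. The helper name is hypothetical.
func exampleSelfLookup() *retrieveRequestMsgData {
	return &retrieveRequestMsgData{
		Id:       0,  // zero Id: lookup only
		MaxPeers: 10, // cap the size of the peers response
	}
}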

// sets timeout fields
func (self *retrieveRequestMsgData) setTimeout(t *time.Time) {
	self.timeout = t
	if t != nil {
		self.Timeout = uint64(t.UnixNano())
	} else {
		self.Timeout = 0
	}
}

func (self *retrieveRequestMsgData) getTimeout() (t *time.Time) {
	if self.Timeout > 0 && self.timeout == nil {
		timeout := time.Unix(0, int64(self.Timeout))
		t = &timeout
		self.timeout = t
	}
	return
}

// peerAddr is sent in StatusMsg as part of the handshake
type peerAddr struct {
	IP   net.IP
	Port uint16
	ID   []byte // the 64 byte NodeID (ECDSA Public Key)
	Addr kademlia.Address
}

// peerAddr pretty prints as enode
func (self peerAddr) String() string {
	var nodeid discover.NodeID
	copy(nodeid[:], self.ID)
	return discover.NewNode(nodeid, self.IP, 0, self.Port).String()
}

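// Illustrative sketch, not part of this change: a peerAddr populated with a
// 64-byte node ID renders as an enode URL of the form
// enode://<128 hex chars>@10.0.0.1:30399. All values here are hypothetical.
func examplePeerAddrString() string {
	pa := peerAddr{
		IP:   net.ParseIP("10.0.0.1"),
		Port: 30399,
		ID:   make([]byte, 64), // would be the peer's ECDSA public key
	}
	return pa.String()
}
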
/*
peers Msg is one response to retrieval; it is always encouraged after a retrieval
request to respond with a list of peers in the same kademlia proximity bin.
The encoding of a peer is identical to that in the devp2p base protocol peers
messages: [IP, Port, NodeID].
Note that a node's DPA address is not the NodeID but the hash of the NodeID.

Timeout serves to indicate whether the responder is forwarding the query within
the timeout or not.

NodeID serves as the owner of payment contracts and signer of proofs of transfer.

The Key is the target (if this is a response to a retrieval request) or missing
(zero value) if the retrieval request was a self lookup, in which case the
peer's address (hash of NodeID) is meant.

The peers message is requested by retrieval requests with a missing or zero value request ID.
*/
type peersMsgData struct {
	Peers   []*peerAddr
	Timeout uint64
	timeout *time.Time  // indicate whether responder is expected to deliver content
	Key     storage.Key // present if a response to a retrieval request
	Id      uint64      // present if a response to a retrieval request
	from    *peer
}

// peers msg pretty printer
func (self peersMsgData) String() string {
	var from string
	if self.from == nil {
		from = "ourselves"
	} else {
		from = self.from.Addr().String()
	}
	var target []byte
	if len(self.Key) > 3 {
		target = self.Key[:4]
	}
	return fmt.Sprintf("from: %v, Key: %x; ID: %v, Peers: %v", from, target, self.Id, self.Peers)
}

func (self *peersMsgData) setTimeout(t *time.Time) {
	self.timeout = t
	if t != nil {
		self.Timeout = uint64(t.UnixNano())
	} else {
		self.Timeout = 0
	}
}

func (self *peersMsgData) getTimeout() (t *time.Time) {
	if self.Timeout > 0 && self.timeout == nil {
		timeout := time.Unix(0, int64(self.Timeout))
		t = &timeout
		self.timeout = t
	}
	return
}

/*
syncRequest

is sent after the handshake to initiate syncing.
The syncState of the remote node is persisted in kaddb and set on the
peer/protocol instance when the node is registered by the hive as online.
*/

type syncRequestMsgData struct {
	SyncState *syncState `rlp:"nil"`
}

func (self *syncRequestMsgData) String() string {
	return fmt.Sprintf("%v", self.SyncState)
}

/*
deliveryRequest

is sent once a batch of sync keys is filtered. The ones not found are
sent as a list of syncRequest (hash, priority) in the Deliver field.
When the source receives the sync request it continues to iterate
and fetch at most N items as yet unsynced.
At the same time it responds with deliveries of the items.
*/
type deliveryRequestMsgData struct {
	Deliver []*syncRequest
}

func (self *deliveryRequestMsgData) String() string {
	return fmt.Sprintf("sync request for new chunks\ndelivery request for %v chunks", len(self.Deliver))
}

/*
unsyncedKeys

is sent first after the handshake (the SyncState iterator may bring up
hundreds or thousands of keys) and subsequently sent as a response to
deliveryRequestMsgData.

Syncing is the iterative process of exchanging unsyncedKeys and deliveryRequestMsgs
both ways.

State contains the sync state sent by the source. When the source receives the
sync state it continues to iterate and fetch at most N items as yet unsynced.
At the same time it responds with deliveries of the items.
*/
type unsyncedKeysMsgData struct {
	Unsynced []*syncRequest
	State    *syncState
}

func (self *unsyncedKeysMsgData) String() string {
	return fmt.Sprintf("sync: keys of %d new chunks (state %v) => synced: %v", len(self.Unsynced), self.State, self.State.Synced)
}

/*
payment

is sent when the swap balance is tilted in favour of the remote peer
and in absolute units exceeds the PayAt parameter in the remote peer's profile
*/

type paymentMsgData struct {
	Units   uint               // units actually paid for (checked against amount by swap)
	Promise *chequebook.Cheque // payment with cheque
}

func (self *paymentMsgData) String() string {
	return fmt.Sprintf("payment for %d units: %v", self.Units, self.Promise)
}
554
swarm/network/protocol.go
Normal file
@ -0,0 +1,554 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

/*
bzz implements the swarm wire protocol [bzz] (sister of eth and shh).
The protocol instance is launched on each peer by the network layer if the
bzz protocol handler is registered on the p2p server.

The bzz protocol component speaks the bzz protocol:
* handles the protocol handshake
* registers peers in the KΛÐΞMLIΛ table via the hive logistic manager
* dispatches to the hive for handling the DHT logic
* encodes and decodes requests for storage and retrieval
* handles sync protocol messages via the syncer
* talks the SWAP payment protocol (swap accounting is done within NetStore)
*/

import (
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/contracts/chequebook"
	"github.com/ethereum/go-ethereum/errs"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	bzzswap "github.com/ethereum/go-ethereum/swarm/services/swap"
	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	Version            = 0
	ProtocolLength     = uint64(8)
	ProtocolMaxMsgSize = 10 * 1024 * 1024
	NetworkId          = 322
)

const (
	ErrMsgTooLarge = iota
	ErrDecode
	ErrInvalidMsgCode
	ErrVersionMismatch
	ErrNetworkIdMismatch
	ErrNoStatusMsg
	ErrExtraStatusMsg
	ErrSwap
	ErrSync
	ErrUnwanted
)

var errorToString = map[int]string{
	ErrMsgTooLarge:       "Message too long",
	ErrDecode:            "Invalid message",
	ErrInvalidMsgCode:    "Invalid message code",
	ErrVersionMismatch:   "Protocol version mismatch",
	ErrNetworkIdMismatch: "NetworkId mismatch",
	ErrNoStatusMsg:       "No status message",
	ErrExtraStatusMsg:    "Extra status message",
	ErrSwap:              "SWAP error",
	ErrSync:              "Sync error",
	ErrUnwanted:          "Unwanted peer",
}

// bzz represents the swarm wire protocol
// an instance is running on each peer
type bzz struct {
	selfID     discover.NodeID      // peer's node id used in peer advertising in handshake
	key        storage.Key          // baseaddress as storage.Key
	storage    StorageHandler       // handler of storage/retrieval related requests coming via the bzz wire protocol
	hive       *Hive                // the logistic manager, peerPool, routing service and peer handler
	dbAccess   *DbAccess            // access to db storage counter and iterator for syncing
	requestDb  *storage.LDBDatabase // db to persist backlog of deliveries to aid syncing
	remoteAddr *peerAddr            // remote peer's address
	peer       *p2p.Peer            // the p2p peer object
	rw         p2p.MsgReadWriter    // messageReadWriter to send messages to
	errors     *errs.Errors         // errors table
	backend    chequebook.Backend
	lastActive time.Time

	swap        *swap.Swap          // swap instance for the peer connection
	swapParams  *bzzswap.SwapParams // swap settings both local and remote
	swapEnabled bool                // flag to enable SWAP (will be set via Caps in handshake)
	syncEnabled bool                // flag to enable SYNC (will be set via Caps in handshake)
	syncer      *syncer             // syncer instance for the peer connection
	syncParams  *SyncParams         // syncer params
	syncState   *syncState          // outgoing synchronisation state (contains reference to remote peers db counter)
}

// interface type for handler of storage/retrieval related requests coming
// via the bzz wire protocol
// messages: UnsyncedKeys, DeliveryRequest, StoreRequest, RetrieveRequest
type StorageHandler interface {
	HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
	HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error
	HandleStoreRequestMsg(req *storeRequestMsgData, p *peer)
	HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
}

/*
main entrypoint: a wrapper starting a server that will run the bzz protocol.
Use this constructor to attach the protocol ("class") to server caps.
This is done by node.Node#Register(func(node.ServiceContext) (Service, error)).
Service implements Protocols() which is an array of protocol constructors;
at node startup the protocols are initialised and
the devp2p layer then calls Run(p *p2p.Peer, rw p2p.MsgReadWriter) error
on each peer connection.
The Run function of the Bzz protocol class creates a bzz instance
which will represent the peer for the swarm hive and all peer-aware components.
*/
func Bzz(cloud StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams) (p2p.Protocol, error) {

	// a single global request db is created for all peer connections
	// this is to persist delivery backlog and aid synchronisation
	requestDb, err := storage.NewLDBDatabase(sy.RequestDbPath)
	if err != nil {
		return p2p.Protocol{}, fmt.Errorf("error setting up request db: %v", err)
	}

	return p2p.Protocol{
		Name:    "bzz",
		Version: Version,
		Length:  ProtocolLength,
		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
			return run(requestDb, cloud, backend, hive, dbaccess, sp, sy, p, rw)
		},
	}, nil
}
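
// Illustrative sketch, not part of this change: wiring the constructor above
// into a devp2p server. The field values are hypothetical and the rest of the
// server configuration (keys, listener) is assumed to be set up elsewhere.
func exampleRegisterBzz(cloud StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams) (*p2p.Server, error) {
	proto, err := Bzz(cloud, backend, hive, dbaccess, sp, sy)
	if err != nil {
		return nil, err
	}
	// the server calls proto.Run on every peer that negotiates the bzz capability
	srv := &p2p.Server{
		MaxPeers:  10,
		Protocols: []p2p.Protocol{proto},
	}
	return srv, nil
}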

/*
the main protocol loop that
* does the handshake by exchanging statusMsg
* if the peer is valid and accepted, registers with the hive
* then enters into a forever loop handling incoming messages
* storage and retrieval related queries coming via bzz are dispatched to StorageHandler
* peer-related messages are dispatched to the hive
* payment related messages are relayed to the SWAP service
* on disconnect, unregisters the peer in the hive (note RemovePeer in the post-disconnect hook)
* whenever the loop terminates, the peer will disconnect with a Subprotocol error
* whenever handlers return an error the loop terminates
*/
func run(requestDb *storage.LDBDatabase, depo StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, p *p2p.Peer, rw p2p.MsgReadWriter) (err error) {

	self := &bzz{
		storage:   depo,
		backend:   backend,
		hive:      hive,
		dbAccess:  dbaccess,
		requestDb: requestDb,
		peer:      p,
		rw:        rw,
		errors: &errs.Errors{
			Package: "BZZ",
			Errors:  errorToString,
		},
		swapParams:  sp,
		syncParams:  sy,
		swapEnabled: hive.swapEnabled,
		syncEnabled: true,
	}

	// handle handshake
	err = self.handleStatus()
	if err != nil {
		return err
	}
	defer func() {
		// if the handler loop exits, the peer is disconnecting
		// deregister the peer in the hive
		self.hive.removePeer(&peer{bzz: self})
		if self.syncer != nil {
			self.syncer.stop() // quits request db and delivery loops, save requests
		}
		if self.swap != nil {
			self.swap.Stop() // quits chequebook autocash etc.
		}
	}()

	// the main forever loop that handles incoming requests
	for {
		if self.hive.blockRead {
			glog.V(logger.Warn).Infof("Cannot read network")
			time.Sleep(100 * time.Millisecond)
			continue
		}
		err = self.handle()
		if err != nil {
			return
		}
	}
}

// TODO: may need to implement protocol drop only? don't want to kick off the peer
// if they are useful for other protocols
func (self *bzz) Drop() {
	self.peer.Disconnect(p2p.DiscSubprotocolError)
}

// one cycle of the main forever loop that handles and dispatches incoming messages
func (self *bzz) handle() error {
	msg, err := self.rw.ReadMsg()
	glog.V(logger.Debug).Infof("<- %v", msg)
	if err != nil {
		return err
	}
	if msg.Size > ProtocolMaxMsgSize {
		return self.protoError(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	// make sure that the payload has been fully consumed
	defer msg.Discard()

	switch msg.Code {

	case statusMsg:
		// no extra status message allowed. The one needed was already handled
		// by handleStatus
		glog.V(logger.Debug).Infof("Status message: %v", msg)
		return self.protoError(ErrExtraStatusMsg, "")

	case storeRequestMsg:
		// store requests are dispatched to netStore
		var req storeRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}
		if len(req.SData) < 9 {
			return self.protoError(ErrDecode, "<- %v: Data too short (%v)", msg, len(req.SData))
		}
		// last Active time is set only when receiving chunks
		self.lastActive = time.Now()
		glog.V(logger.Detail).Infof("incoming store request: %s", req.String())
		// swap accounting is done within forwarding
		self.storage.HandleStoreRequestMsg(&req, &peer{bzz: self})

	case retrieveRequestMsg:
		// retrieve requests are dispatched to netStore
		var req retrieveRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}
		req.from = &peer{bzz: self}
		// if request is lookup and not to be delivered
		if req.isLookup() {
			glog.V(logger.Detail).Infof("self lookup for %v: responding with peers only...", req.from)
		} else if req.Key == nil {
			return self.protoError(ErrDecode, "protocol handler: req.Key == nil || req.Timeout == nil")
		} else {
			// swap accounting is done within netStore
			self.storage.HandleRetrieveRequestMsg(&req, &peer{bzz: self})
		}
		// direct response with peers, TODO: sort this out
		self.hive.peers(&req)

	case peersMsg:
		// response to lookups and immediate response to retrieve requests
		// dispatches new peer data to the hive that adds them to KADDB
		var req peersMsgData
		if err := msg.Decode(&req); err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}
		req.from = &peer{bzz: self}
		glog.V(logger.Detail).Infof("<- peer addresses: %v", req)
		self.hive.HandlePeersMsg(&req, &peer{bzz: self})

	case syncRequestMsg:
		var req syncRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}
		glog.V(logger.Debug).Infof("<- sync request: %v", req)
		self.lastActive = time.Now()
		self.sync(req.SyncState)

	case unsyncedKeysMsg:
		// coming from parent node offering
		var req unsyncedKeysMsgData
		if err := msg.Decode(&req); err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}
		glog.V(logger.Debug).Infof("<- unsynced keys : %s", req.String())
		err := self.storage.HandleUnsyncedKeysMsg(&req, &peer{bzz: self})
		self.lastActive = time.Now()
		if err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}

	case deliveryRequestMsg:
		// response to syncKeysMsg: hashes filtered as not existing in the db
		// also relays the last synced state to the source
		var req deliveryRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return self.protoError(ErrDecode, "<-msg %v: %v", msg, err)
		}
		glog.V(logger.Debug).Infof("<- delivery request: %s", req.String())
		err := self.storage.HandleDeliveryRequestMsg(&req, &peer{bzz: self})
		self.lastActive = time.Now()
		if err != nil {
			return self.protoError(ErrDecode, "<- %v: %v", msg, err)
		}

	case paymentMsg:
		// swap protocol message for payment, Units paid for, Cheque paid with
		if self.swapEnabled {
			var req paymentMsgData
			if err := msg.Decode(&req); err != nil {
				return self.protoError(ErrDecode, "<- %v: %v", msg, err)
			}
			glog.V(logger.Debug).Infof("<- payment: %s", req.String())
			self.swap.Receive(int(req.Units), req.Promise)
		}

	default:
		// no other message is allowed
		return self.protoError(ErrInvalidMsgCode, "%v", msg.Code)
	}
	return nil
}

func (self *bzz) handleStatus() (err error) {

	handshake := &statusMsgData{
		Version:   uint64(Version),
		ID:        "honey",
		Addr:      self.selfAddr(),
		NetworkId: uint64(NetworkId),
		Swap: &bzzswap.SwapProfile{
			Profile:    self.swapParams.Profile,
			PayProfile: self.swapParams.PayProfile,
		},
	}

	err = p2p.Send(self.rw, statusMsg, handshake)
	if err != nil {
		return self.protoError(ErrNoStatusMsg, err.Error())
	}

	// read and handle remote status
	var msg p2p.Msg
	msg, err = self.rw.ReadMsg()
	if err != nil {
		return err
	}

	if msg.Code != statusMsg {
		return self.protoError(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, statusMsg)
	}

	if msg.Size > ProtocolMaxMsgSize {
		return self.protoError(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}

	var status statusMsgData
	if err := msg.Decode(&status); err != nil {
		return self.protoError(ErrDecode, " %v: %v", msg, err)
	}

	if status.NetworkId != NetworkId {
		return self.protoError(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, NetworkId)
	}

	if Version != status.Version {
		return self.protoError(ErrVersionMismatch, "%d (!= %d)", status.Version, Version)
	}

	self.remoteAddr = self.peerAddr(status.Addr)
	glog.V(logger.Detail).Infof("self: advertised IP: %v, peer advertised: %v, local address: %v\npeer: advertised IP: %v, remote address: %v\n", self.selfAddr(), self.remoteAddr, self.peer.LocalAddr(), status.Addr.IP, self.peer.RemoteAddr())

	if self.swapEnabled {
		// set remote profile for accounting
		self.swap, err = bzzswap.NewSwap(self.swapParams, status.Swap, self.backend, self)
		if err != nil {
			return self.protoError(ErrSwap, "%v", err)
		}
	}

	glog.V(logger.Info).Infof("Peer %08x is capable (%d/%d)", self.remoteAddr.Addr[:4], status.Version, status.NetworkId)
	err = self.hive.addPeer(&peer{bzz: self})
	if err != nil {
		return self.protoError(ErrUnwanted, "%v", err)
	}

	// hive sets syncstate so sync should start after node added
	glog.V(logger.Info).Infof("synchronisation request sent with %v", self.syncState)
	self.syncRequest()

	return nil
}

func (self *bzz) sync(state *syncState) error {
	// syncer setup
	if self.syncer != nil {
		return self.protoError(ErrSync, "sync request can only be sent once")
	}

	cnt := self.dbAccess.counter()
	remoteaddr := self.remoteAddr.Addr
	start, stop := self.hive.kad.KeyRange(remoteaddr)

	// an explicitly received nil syncstate disables synchronisation
	if state == nil {
		self.syncEnabled = false
		glog.V(logger.Warn).Infof("synchronisation disabled for peer %v", self)
		state = &syncState{DbSyncState: &storage.DbSyncState{}, Synced: true}
	} else {
		state.synced = make(chan bool)
		state.SessionAt = cnt
		if storage.IsZeroKey(state.Stop) && state.Synced {
			state.Start = storage.Key(start[:])
			state.Stop = storage.Key(stop[:])
		}
		glog.V(logger.Debug).Infof("synchronisation requested by peer %v at state %v", self, state)
	}
	var err error
	self.syncer, err = newSyncer(
		self.requestDb,
		storage.Key(remoteaddr[:]),
		self.dbAccess,
		self.unsyncedKeys, self.store,
		self.syncParams, state, func() bool { return self.syncEnabled },
	)
	if err != nil {
		return self.protoError(ErrSync, "%v", err)
	}
	glog.V(logger.Detail).Infof("syncer set for peer %v", self)
	return nil
}

func (self *bzz) String() string {
	return self.remoteAddr.String()
}

// repair reported address if IP missing
func (self *bzz) peerAddr(base *peerAddr) *peerAddr {
	if base.IP.IsUnspecified() {
		host, _, _ := net.SplitHostPort(self.peer.RemoteAddr().String())
		base.IP = net.ParseIP(host)
	}
	return base
}

// returns self advertised node connection info (listening address w enodes)
// IP will get repaired on the other end if missing
// or resolved via ID by discovery at dialout
func (self *bzz) selfAddr() *peerAddr {
	id := self.hive.id
	host, port, _ := net.SplitHostPort(self.hive.listenAddr())
	intport, _ := strconv.Atoi(port)
	addr := &peerAddr{
		Addr: self.hive.addr,
		ID:   id[:],
		IP:   net.ParseIP(host),
		Port: uint16(intport),
	}
	return addr
}

// outgoing messages
// send retrieveRequestMsg
func (self *bzz) retrieve(req *retrieveRequestMsgData) error {
	return self.send(retrieveRequestMsg, req)
}

// send storeRequestMsg
func (self *bzz) store(req *storeRequestMsgData) error {
	return self.send(storeRequestMsg, req)
}

func (self *bzz) syncRequest() error {
	req := &syncRequestMsgData{}
	if self.hive.syncEnabled {
		glog.V(logger.Debug).Infof("synchronisation request to peer %v at state %v", self, self.syncState)
		req.SyncState = self.syncState
	}
	if self.syncState == nil {
		glog.V(logger.Warn).Infof("synchronisation disabled for peer %v at state %v", self, self.syncState)
	}
	return self.send(syncRequestMsg, req)
}

// queue storeRequestMsg in request db
func (self *bzz) deliveryRequest(reqs []*syncRequest) error {
	req := &deliveryRequestMsgData{
		Deliver: reqs,
	}
	return self.send(deliveryRequestMsg, req)
}

// batch of syncRequests to send off
func (self *bzz) unsyncedKeys(reqs []*syncRequest, state *syncState) error {
	req := &unsyncedKeysMsgData{
		Unsynced: reqs,
		State:    state,
	}
	return self.send(unsyncedKeysMsg, req)
}

// send paymentMsg
func (self *bzz) Pay(units int, promise swap.Promise) {
	req := &paymentMsgData{uint(units), promise.(*chequebook.Cheque)}
	self.payment(req)
}

// send paymentMsg
func (self *bzz) payment(req *paymentMsgData) error {
	return self.send(paymentMsg, req)
}

// sends peersMsg
func (self *bzz) peers(req *peersMsgData) error {
	return self.send(peersMsg, req)
}

func (self *bzz) protoError(code int, format string, params ...interface{}) (err *errs.Error) {
	err = self.errors.New(code, format, params...)
	err.Log(glog.V(logger.Info))
	return
}

func (self *bzz) protoErrorDisconnect(err *errs.Error) {
	err.Log(glog.V(logger.Info))
	if err.Fatal() {
		self.peer.Disconnect(p2p.DiscSubprotocolError)
	}
}

func (self *bzz) send(msg uint64, data interface{}) error {
	if self.hive.blockWrite {
		return fmt.Errorf("network write blocked")
	}
	glog.V(logger.Detail).Infof("-> %v: %v (%T) to %v", msg, data, data, self)
	err := p2p.Send(self.rw, msg, data)
	if err != nil {
		self.Drop()
	}
	return err
}
17
swarm/network/protocol_test.go
Normal file
@ -0,0 +1,17 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network
390
swarm/network/syncdb.go
Normal file
@ -0,0 +1,390 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

const counterKeyPrefix = 0x01

/*
syncDb is a queueing service for outgoing deliveries.
One instance per priority queue for each peer.

a syncDb instance maintains an in-memory buffer (of capacity bufferSize);
once its in-memory buffer is full it switches to persisting in db
and the dbRead iterator iterates through the items keeping their order;
once the db read catches up (there are no more items in the db) then
it switches back to the in-memory buffer.

when syncdb is stopped all items in the buffer are saved to the db
*/
type syncDb struct {
	start          []byte               // this syncdb starting index in requestdb
	key            storage.Key          // remote peers address key
	counterKey     []byte               // db key to persist counter
	priority       uint                 // priority High|Medium|Low
	buffer         chan interface{}     // incoming request channel
	db             *storage.LDBDatabase // underlying db (TODO should be interface)
	done           chan bool            // chan to signal goroutines finished quitting
	quit           chan bool            // chan to signal quitting to goroutines
	total, dbTotal int                  // counts for one session
	batch          chan chan int        // channel for batch requests
	dbBatchSize    uint                 // number of items before batch is saved
}


// constructor needs a shared request db (leveldb)
// priority is used in the index key
// uses a buffer and a leveldb for persistent storage
// bufferSize, dbBatchSize are config parameters
func newSyncDb(db *storage.LDBDatabase, key storage.Key, priority uint, bufferSize, dbBatchSize uint, deliver func(interface{}, chan bool) bool) *syncDb {
	start := make([]byte, 42)
	start[1] = byte(priorities - priority)
	copy(start[2:34], key)

	counterKey := make([]byte, 34)
	counterKey[0] = counterKeyPrefix
	copy(counterKey[1:], start[1:34])

	syncdb := &syncDb{
		start:       start,
		key:         key,
		counterKey:  counterKey,
		priority:    priority,
		buffer:      make(chan interface{}, bufferSize),
		db:          db,
		done:        make(chan bool),
		quit:        make(chan bool),
		batch:       make(chan chan int),
		dbBatchSize: dbBatchSize,
	}
	glog.V(logger.Detail).Infof("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority)

	// starts the main forever loop reading from buffer
	go syncdb.bufferRead(deliver)
	return syncdb
}
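
// Illustrative sketch, not part of this change: constructing a syncDb the way
// the syncer might, with a deliver callback that blocks on a (hypothetical)
// network write and honours the quit channel. The priority constant and the
// capacities are assumptions.
func exampleNewSyncDb(requestDb *storage.LDBDatabase, peerKey storage.Key, send func(interface{}) error) *syncDb {
	deliver := func(req interface{}, quit chan bool) bool {
		select {
		case <-quit:
			return false // syncdb is stopping; the item will be saved to db
		default:
			_ = send(req) // would block on the network write
			return true
		}
	}
	// High priority assumed; 1000-item buffer, 100-item db batches
	return newSyncDb(requestDb, peerKey, High, 1000, 100, deliver)
}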

/*
bufferRead is a forever iterator loop that takes care of delivering
outgoing store requests read from the incoming buffer.

its argument is the deliver function taking the item as first argument
and a quit channel as second.
Closing of this channel is supposed to abort all waiting for delivery
(typically network write)

The iteration switches between 2 modes:
* buffer mode reads the in-memory buffer and delivers the items directly
* db mode reads from the buffer and writes to the db; in parallel another
  routine is started that reads from the db and delivers items

If there is buffer contention in buffer mode (slow network, high upload volume)
syncdb switches to db mode and starts dbRead.
Once the db backlog is delivered, it reverts back to the in-memory buffer.

It is automatically started when syncdb is initialised.

It saves the buffer to db upon receiving a quit signal. syncDb#stop()
*/
func (self *syncDb) bufferRead(deliver func(interface{}, chan bool) bool) {
	var buffer, db chan interface{} // channels representing the two read modes
	var more bool
	var req interface{}
	var entry *syncDbEntry
	var inBatch, inDb int
	batch := new(leveldb.Batch)
	var dbSize chan int
	quit := self.quit
	counterValue := make([]byte, 8)

	// counter is used for keeping the items in order, persisted to db
	// start counter where db was at, 0 if not found
	data, err := self.db.Get(self.counterKey)
	var counter uint64
	if err == nil {
		counter = binary.BigEndian.Uint64(data)
		glog.V(logger.Detail).Infof("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter)
	} else {
		glog.V(logger.Detail).Infof("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter)
	}

LOOP:
	for {
		// waiting for item next in the buffer, or quit signal or batch request
		select {
		// buffer only closes when writing to db
		case req = <-buffer:
			// deliver request: this is blocking on network write so
			// it is passed the quit channel as argument, so that it returns
			// if syncdb is stopped. In this case we need to save the item to the db
			more = deliver(req, self.quit)
			if !more {
				glog.V(logger.Debug).Infof("syncDb[%v/%v] quit: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)
				// received quit signal, save request currently waiting delivery
				// by switching to db mode and closing the buffer
				buffer = nil
				db = self.buffer
				close(db)
				quit = nil // needs to block the quit case in select
				break      // break from select, this item will be written to the db
			}
			self.total++
			glog.V(logger.Detail).Infof("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)
			// by the time deliver returns, there were new writes to the buffer
			// if buffer contention is detected, switch to db mode which drains
			// the buffer so no process will block on pushing store requests
			if len(buffer) == cap(buffer) {
				glog.V(logger.Debug).Infof("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total)
				buffer = nil
				db = self.buffer
			}
			continue LOOP

		// incoming entry to put into db
		case req, more = <-db:
			if !more {
				// only if quit is called, saved all the buffer
				binary.BigEndian.PutUint64(counterValue, counter)
				batch.Put(self.counterKey, counterValue) // persist counter in batch
				self.writeSyncBatch(batch)               // save batch
				glog.V(logger.Detail).Infof("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority)
				break LOOP
			}
			self.dbTotal++
			self.total++
			// otherwise break after select
		case dbSize = <-self.batch:
			// explicit request for batch
			if inBatch == 0 && quit != nil {
				// there were no writes since the last batch, so the db is depleted;
				// switch to buffer mode
				glog.V(logger.Debug).Infof("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority)
				db = nil
				buffer = self.buffer
				dbSize <- 0 // indicates to 'caller' that batch has been written
				inDb = 0
				continue LOOP
			}
			binary.BigEndian.PutUint64(counterValue, counter)
			batch.Put(self.counterKey, counterValue)
			glog.V(logger.Debug).Infof("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, inBatch, counter, self.counterKey, counterValue)
			batch = self.writeSyncBatch(batch)
			dbSize <- inBatch // indicates to 'caller' that batch has been written
			inBatch = 0
			continue LOOP

		// closing syncDb#quit channel is used to signal to all goroutines to quit
		case <-quit:
			// need to save backlog, so switch to db mode
			db = self.buffer
			buffer = nil
			quit = nil
			glog.V(logger.Detail).Infof("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority)
			close(db)
			continue LOOP
		}

		// only get here if we put req into db
		entry, err = self.newSyncDbEntry(req, counter)
		if err != nil {
			glog.V(logger.Warn).Infof("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err)
			continue LOOP
		}
		batch.Put(entry.key, entry.val)
		glog.V(logger.Detail).Infof("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter)
		// if just switched to db mode and not quitting, then launch dbRead
		// in a parallel go routine to send deliveries from db
		if inDb == 0 && quit != nil {
			glog.V(logger.Detail).Infof("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority)
			go self.dbRead(true, counter, deliver)
		}
		inDb++
		inBatch++
		counter++
		// need to save the batch if it gets too large (== dbBatchSize)
		if inBatch%int(self.dbBatchSize) == 0 {
			batch = self.writeSyncBatch(batch)
		}
	}
	glog.V(logger.Info).Infof("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter)
	close(self.done)
}

// writes the batch to the db and returns a new batch object
func (self *syncDb) writeSyncBatch(batch *leveldb.Batch) *leveldb.Batch {
	err := self.db.Write(batch)
	if err != nil {
		glog.V(logger.Warn).Infof("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err)
		return batch
	}
	return new(leveldb.Batch)
}

// abstract type for db entries (TODO could be a feature of Receipts)
type syncDbEntry struct {
	key, val []byte
}

func (self syncDbEntry) String() string {
	return fmt.Sprintf("key: %x, value: %x", self.key, self.val)
}

/*
dbRead iterates over store requests to be sent over to the peer
this is mainly to prevent crashes due to network output buffer contention (???)
as well as to make synchronisation resilient to disconnects.
the messages are supposed to be sent in the p2p priority queue.

the request DB is shared between peers, but domains for each syncdb
are disjoint. dbkeys (42 bytes) are structured:
* 0: 0x00 (0x01 reserved for counter key)
* 1: priorities - priority (so that high priority can be replayed first)
* 2-33: peers address
* 34-41: syncdb counter to preserve order (this field is missing for the counter key)

values (40 bytes) are:
* 0-31: key
* 32-39: request id

dbRead needs a boolean to indicate if on the first round all the historical
records are synced. The second argument indicates the current db counter.
The third is the function to apply.
(A sketch decoding this key layout follows after newSyncDbEntry below.)
*/
func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}, chan bool) bool) {
	key := make([]byte, 42)
	copy(key, self.start)
	binary.BigEndian.PutUint64(key[34:], counter)
	var batches, n, cnt, total int
	var more bool
	var entry *syncDbEntry
	var it iterator.Iterator
	var del *leveldb.Batch
	batchSizes := make(chan int)

	for {
		// if useBatches is false, cnt is not set
		if useBatches {
			// this could be called before all cnt items sent out
			// so that loop is not blocking while delivering
			// only relevant if cnt is large
			select {
			case self.batch <- batchSizes:
			case <-self.quit:
				return
			}
			// wait for the write to finish and get the item count in the next batch
			cnt = <-batchSizes
			batches++
			if cnt == 0 {
				// empty
				return
			}
		}
		it = self.db.NewIterator()
		it.Seek(key)
		if !it.Valid() {
			copy(key, self.start)
			useBatches = true
			continue
		}
		del = new(leveldb.Batch)
		glog.V(logger.Detail).Infof("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt)

		for n = 0; !useBatches || n < cnt; it.Next() {
			copy(key, it.Key())
			if len(key) == 0 || key[0] != 0 {
				copy(key, self.start)
				useBatches = true
				break
			}
			val := make([]byte, 40)
			copy(val, it.Value())
			entry = &syncDbEntry{key, val}
			// glog.V(logger.Detail).Infof("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total)
			more = fun(entry, self.quit)
			if !more {
				// quit received when waiting to deliver entry, the entry will not be deleted
				glog.V(logger.Detail).Infof("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt)
				break
			}
			// since subsequent batches of the same db session are indexed incrementally
			// deleting earlier batches can be delayed and parallelised
			// this could be batch delete when db is idle (but added complexity esp when quitting)
			del.Delete(key)
			n++
			total++
		}
		glog.V(logger.Debug).Infof("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total)
		self.db.Write(del) // this could be async called only when db is idle
		it.Release()
	}
}
|
||||||
|
//
|
||||||
|
func (self *syncDb) stop() {
|
||||||
|
close(self.quit)
|
||||||
|
<-self.done
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculate a dbkey for the request, for the db to work
|
||||||
|
// see syncdb for db key structure
|
||||||
|
// polimorphic: accepted types, see syncer#addRequest
|
||||||
|
func (self *syncDb) newSyncDbEntry(req interface{}, counter uint64) (entry *syncDbEntry, err error) {
|
||||||
|
var key storage.Key
|
||||||
|
var chunk *storage.Chunk
|
||||||
|
var id uint64
|
||||||
|
var ok bool
|
||||||
|
var sreq *storeRequestMsgData
|
||||||
|
|
||||||
|
if key, ok = req.(storage.Key); ok {
|
||||||
|
id = generateId()
|
||||||
|
} else if chunk, ok = req.(*storage.Chunk); ok {
|
||||||
|
key = chunk.Key
|
||||||
|
id = generateId()
|
||||||
|
} else if sreq, ok = req.(*storeRequestMsgData); ok {
|
||||||
|
key = sreq.Key
|
||||||
|
id = sreq.Id
|
||||||
|
} else if entry, ok = req.(*syncDbEntry); !ok {
|
||||||
|
return nil, fmt.Errorf("type not allowed: %v (%T)", req, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// order by peer > priority > seqid
|
||||||
|
// value is request id if exists
|
||||||
|
if entry == nil {
|
||||||
|
dbkey := make([]byte, 42)
|
||||||
|
dbval := make([]byte, 40)
|
||||||
|
|
||||||
|
// encode key
|
||||||
|
copy(dbkey[:], self.start[:34]) // db peer
|
||||||
|
binary.BigEndian.PutUint64(dbkey[34:], counter)
|
||||||
|
// encode value
|
||||||
|
copy(dbval, key[:])
|
||||||
|
binary.BigEndian.PutUint64(dbval[32:], id)
|
||||||
|
|
||||||
|
entry = &syncDbEntry{dbkey, dbval}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
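The 42-byte key layout above is what gives leveldb's lexicographic iteration the replay order the syncer wants: the priority byte sorts high-priority entries first, and the trailing counter preserves insertion order within a peer/priority domain. A minimal sketch of the encoding, assuming a hypothetical helper buildSyncDbKey (newSyncDbEntry composes the same bytes via self.start):

// buildSyncDbKey is a hypothetical illustration of the dbkey layout; it is
// not part of the package (newSyncDbEntry derives the same prefix from self.start).
func buildSyncDbKey(priority uint, peer []byte, counter uint64) []byte {
	dbkey := make([]byte, 42)
	dbkey[0] = 0x00                        // entry marker (0x01 is reserved for the counter key)
	dbkey[1] = byte(priorities - priority) // higher priority yields a smaller byte, so it iterates first
	copy(dbkey[2:34], peer[:32])           // remote peer's 32-byte address
	binary.BigEndian.PutUint64(dbkey[34:], counter) // per-syncdb counter preserves insertion order
	return dbkey
}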
221 swarm/network/syncdb_test.go Normal file
@ -0,0 +1,221 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"bytes"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func init() {
	glog.SetV(0)
	glog.SetToStderr(true)
}

type testSyncDb struct {
	*syncDb
	c         int
	t         *testing.T
	fromDb    chan bool
	delivered [][]byte
	sent      []int
	dbdir     string
	at        int
}

func newTestSyncDb(priority, bufferSize, batchSize int, dbdir string, t *testing.T) *testSyncDb {
	if len(dbdir) == 0 {
		tmp, err := ioutil.TempDir(os.TempDir(), "syncdb-test")
		if err != nil {
			t.Fatalf("unable to create temporary directory %v: %v", tmp, err)
		}
		dbdir = tmp
	}
	db, err := storage.NewLDBDatabase(filepath.Join(dbdir, "requestdb"))
	if err != nil {
		t.Fatalf("unable to create db: %v", err)
	}
	self := &testSyncDb{
		fromDb: make(chan bool),
		dbdir:  dbdir,
		t:      t,
	}
	h := crypto.Sha3Hash([]byte{0})
	key := storage.Key(h[:])
	self.syncDb = newSyncDb(db, key, uint(priority), uint(bufferSize), uint(batchSize), self.deliver)
	// kick off db iterator right away, if no items on db this will allow
	// reading from the buffer
	return self
}

func (self *testSyncDb) close() {
	self.db.Close()
	os.RemoveAll(self.dbdir)
}

func (self *testSyncDb) push(n int) {
	for i := 0; i < n; i++ {
		self.buffer <- storage.Key(crypto.Sha3([]byte{byte(self.c)}))
		self.sent = append(self.sent, self.c)
		self.c++
	}
	glog.V(logger.Debug).Infof("pushed %v requests", n)
}

func (self *testSyncDb) draindb() {
	it := self.db.NewIterator()
	defer it.Release()
	for {
		it.Seek(self.start)
		if !it.Valid() {
			return
		}
		k := it.Key()
		if len(k) == 0 || k[0] == 1 {
			return
		}
		it.Release()
		it = self.db.NewIterator()
	}
}

func (self *testSyncDb) deliver(req interface{}, quit chan bool) bool {
	_, db := req.(*syncDbEntry)
	key, _, _, _, err := parseRequest(req)
	if err != nil {
		self.t.Fatalf("unexpected error of key %v: %v", key, err)
	}
	self.delivered = append(self.delivered, key)
	select {
	case self.fromDb <- db:
		return true
	case <-quit:
		return false
	}
}

func (self *testSyncDb) expect(n int, db bool) {
	var ok bool
	// for n items
	for i := 0; i < n; i++ {
		ok = <-self.fromDb
		if self.at+1 > len(self.delivered) {
			self.t.Fatalf("expected %v, got %v", self.at+1, len(self.delivered))
		}
		if len(self.sent) > self.at && !bytes.Equal(crypto.Sha3([]byte{byte(self.sent[self.at])}), self.delivered[self.at]) {
			self.t.Fatalf("expected delivery %v/%v/%v to be hash of %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db)
			glog.V(logger.Debug).Infof("%v/%v/%v to be hash of %v, from db: %v = %v", i, n, self.at, self.sent[self.at], ok, db)
		}
		if !ok && db {
			self.t.Fatalf("expected delivery %v/%v/%v from db", i, n, self.at)
		}
		if ok && !db {
			self.t.Fatalf("expected delivery %v/%v/%v from cache", i, n, self.at)
		}
		self.at++
	}
}

func TestSyncDb(t *testing.T) {
	priority := High
	bufferSize := 5
	batchSize := 2 * bufferSize
	s := newTestSyncDb(priority, bufferSize, batchSize, "", t)
	defer s.close()
	defer s.stop()
	s.dbRead(false, 0, s.deliver)
	s.draindb()

	s.push(4)
	s.expect(1, false)
	// 3 in buffer
	time.Sleep(100 * time.Millisecond)
	s.push(3)
	// push over limit
	s.expect(1, false)
	// one popped from the buffer, then contention detected
	s.expect(4, true)
	s.push(4)
	s.expect(5, true)
	// depleted db, switch back to buffer
	s.draindb()
	s.push(5)
	s.expect(4, false)
	s.push(3)
	s.expect(4, false)
	// buffer depleted
	time.Sleep(100 * time.Millisecond)
	s.push(6)
	s.expect(1, false)
	// push into full buffer, switch to db
	s.expect(5, true)
	s.draindb()
	s.push(1)
	s.expect(1, false)
}

func TestSaveSyncDb(t *testing.T) {
	amount := 30
	priority := High
	bufferSize := amount
	batchSize := 10
	s := newTestSyncDb(priority, bufferSize, batchSize, "", t)
	go s.dbRead(false, 0, s.deliver)
	s.push(amount)
	s.stop()
	s.db.Close()

	s = newTestSyncDb(priority, bufferSize, batchSize, s.dbdir, t)
	go s.dbRead(false, 0, s.deliver)
	s.expect(amount, true)
	for i, key := range s.delivered {
		expKey := crypto.Sha3([]byte{byte(i)})
		if !bytes.Equal(key, expKey) {
			t.Fatalf("delivery %v expected to be key %x, got %x", i, expKey, key)
		}
	}
	s.push(amount)
	s.expect(amount, false)
	for i := amount; i < 2*amount; i++ {
		key := s.delivered[i]
		expKey := crypto.Sha3([]byte{byte(i - amount)})
		if !bytes.Equal(key, expKey) {
			t.Fatalf("delivery %v expected to be key %x, got %x", i, expKey, key)
		}
	}
	s.stop()
	s.db.Close()

	s = newTestSyncDb(priority, bufferSize, batchSize, s.dbdir, t)
	defer s.close()
	defer s.stop()

	go s.dbRead(false, 0, s.deliver)
	s.push(1)
	s.expect(1, false)
}
778 swarm/network/syncer.go Normal file
@ -0,0 +1,778 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// syncer parameters (global, not peer specific) default values
const (
	requestDbBatchSize = 512  // size of batch before written to request db
	keyBufferSize      = 1024 // size of buffer for unsynced keys
	syncBatchSize      = 128  // maximum batchsize for outgoing requests
	syncBufferSize     = 128  // size of buffer for delivery requests
	syncCacheSize      = 1024 // cache capacity to store request queue in memory
)

// priorities
const (
	Low        = iota // 0
	Medium            // 1
	High              // 2
	priorities        // 3 number of priority levels
)

// request types
const (
	DeliverReq   = iota // 0
	PushReq             // 1
	PropagateReq        // 2
	HistoryReq          // 3
	BacklogReq          // 4
)

// json serialisable struct to record the synchronisation state between 2 peers
type syncState struct {
	*storage.DbSyncState // embeds the following 4 fields:
	// Start Key // lower limit of address space
	// Stop Key // upper limit of address space
	// First uint64 // counter taken from last sync state
	// Last uint64 // counter of remote peer dbStore at the time of last connection
	SessionAt  uint64      // set at the time of connection
	LastSeenAt uint64      // set at the time of connection
	Latest     storage.Key // cursor of dbstore when last synced (continuously set by syncer)
	Synced     bool        // true iff Sync is done up to the last disconnect
	synced     chan bool   // signal that sync stage finished
}

// wrapper of db-s to provide mockable custom local chunk store access to syncer
type DbAccess struct {
	db  *storage.DbStore
	loc *storage.LocalStore
}

func NewDbAccess(loc *storage.LocalStore) *DbAccess {
	return &DbAccess{loc.DbStore.(*storage.DbStore), loc}
}

// to obtain the chunks from key or request db entry only
func (self *DbAccess) get(key storage.Key) (*storage.Chunk, error) {
	return self.loc.Get(key)
}

// current storage counter of chunk db
func (self *DbAccess) counter() uint64 {
	return self.db.Counter()
}

// implemented by dbStoreSyncIterator
type keyIterator interface {
	Next() storage.Key
}

// generator function for iteration by address range and storage counter
func (self *DbAccess) iterator(s *syncState) keyIterator {
	it, err := self.db.NewSyncIterator(*(s.DbSyncState))
	if err != nil {
		return nil
	}
	return keyIterator(it)
}

func (self syncState) String() string {
	if self.Synced {
		return fmt.Sprintf(
			"session started at: %v, last seen at: %v, latest key: %v",
			self.SessionAt, self.LastSeenAt,
			self.Latest.Log(),
		)
	} else {
		return fmt.Sprintf(
			"address: %v-%v, index: %v-%v, session started at: %v, last seen at: %v, latest key: %v",
			self.Start.Log(), self.Stop.Log(),
			self.First, self.Last,
			self.SessionAt, self.LastSeenAt,
			self.Latest.Log(),
		)
	}
}

// syncer parameters (global, not peer specific)
type SyncParams struct {
	RequestDbPath      string // path for request db (leveldb)
	RequestDbBatchSize uint   // number of items before batch is saved to requestdb
	KeyBufferSize      uint   // size of key buffer
	SyncBatchSize      uint   // maximum batchsize for outgoing requests
	SyncBufferSize     uint   // size of buffer for delivery requests
	SyncCacheSize      uint   // cache capacity to store request queue in memory
	SyncPriorities     []uint // list of priority levels for req types 0-4
	SyncModes          []bool // list of sync modes for req types 0-4
}

// constructor with default values
func NewSyncParams(bzzdir string) *SyncParams {
	return &SyncParams{
		RequestDbPath:      filepath.Join(bzzdir, "requests"),
		RequestDbBatchSize: requestDbBatchSize,
		KeyBufferSize:      keyBufferSize,
		SyncBufferSize:     syncBufferSize,
		SyncBatchSize:      syncBatchSize,
		SyncCacheSize:      syncCacheSize,
		SyncPriorities:     []uint{High, Medium, Medium, Low, Low},
		SyncModes:          []bool{true, true, true, true, false},
	}
}

// syncer is the agent that manages content distribution/storage replication/chunk storeRequest forwarding
type syncer struct {
	*SyncParams                     // sync parameters
	syncF           func() bool     // if syncing is needed
	key             storage.Key     // remote peer's address key
	state           *syncState      // sync state for our dbStore
	syncStates      chan *syncState // different stages of sync
	deliveryRequest chan bool       // one of two triggers needed to send unsyncedKeys
	newUnsyncedKeys chan bool       // one of two triggers needed to send unsynced keys
	quit            chan bool       // signal to quit loops

	// DB related fields
	dbAccess *DbAccess            // access to dbStore
	db       *storage.LDBDatabase // delivery msg db

	// native fields
	queues     [priorities]*syncDb                   // in-memory cache / queues for sync reqs
	keys       [priorities]chan interface{}          // buffer for unsynced keys
	deliveries [priorities]chan *storeRequestMsgData // delivery

	// bzz protocol instance outgoing message callbacks (mockable for testing)
	unsyncedKeys func([]*syncRequest, *syncState) error // send unsyncedKeysMsg
	store        func(*storeRequestMsgData) error       // send storeRequestMsg
}

// a syncer instance is linked to each peer connection
// the constructor is called from the protocol after a successful handshake
// the returned instance is attached to the peer and can be called
// by the forwarder
func newSyncer(
	db *storage.LDBDatabase, remotekey storage.Key,
	dbAccess *DbAccess,
	unsyncedKeys func([]*syncRequest, *syncState) error,
	store func(*storeRequestMsgData) error,
	params *SyncParams,
	state *syncState,
	syncF func() bool,
) (*syncer, error) {

	syncBufferSize := params.SyncBufferSize
	keyBufferSize := params.KeyBufferSize
	dbBatchSize := params.RequestDbBatchSize

	self := &syncer{
		syncF:           syncF,
		key:             remotekey,
		dbAccess:        dbAccess,
		syncStates:      make(chan *syncState, 20),
		deliveryRequest: make(chan bool, 1),
		newUnsyncedKeys: make(chan bool, 1),
		SyncParams:      params,
		state:           state,
		quit:            make(chan bool),
		unsyncedKeys:    unsyncedKeys,
		store:           store,
	}

	// initialising
	for i := 0; i < priorities; i++ {
		self.keys[i] = make(chan interface{}, keyBufferSize)
		self.deliveries[i] = make(chan *storeRequestMsgData)
		// initialise a syncdb instance for each priority queue
		self.queues[i] = newSyncDb(db, remotekey, uint(i), syncBufferSize, dbBatchSize, self.deliver(uint(i)))
	}
	glog.V(logger.Info).Infof("syncer started: %v", state)
	// launch chunk delivery service
	go self.syncDeliveries()
	// launch sync task manager
	if self.syncF() {
		go self.sync()
	}
	// process unsynced keys to broadcast
	go self.syncUnsyncedKeys()

	return self, nil
}
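Since the outgoing message callbacks are explicitly mockable, a syncer can be wired up without a live bzz protocol instance. A minimal sketch, assuming a db handle, a remote key and a DbAccess are already at hand; newLoggingSyncer and its no-op callbacks are illustrative, not part of the package:

// newLoggingSyncer is a hypothetical helper: it wires newSyncer with callbacks
// that only log, which is enough to exercise queueing and replay in isolation.
func newLoggingSyncer(db *storage.LDBDatabase, remotekey storage.Key, dbAccess *DbAccess, bzzdir string) (*syncer, error) {
	unsyncedKeys := func(reqs []*syncRequest, state *syncState) error {
		glog.V(logger.Debug).Infof("would send %v unsynced keys (state: %v)", len(reqs), state)
		return nil
	}
	store := func(msg *storeRequestMsgData) error {
		glog.V(logger.Debug).Infof("would deliver chunk %v", msg.Key.Log())
		return nil
	}
	state := &syncState{DbSyncState: &storage.DbSyncState{}, synced: make(chan bool)}
	return newSyncer(db, remotekey, dbAccess, unsyncedKeys, store, NewSyncParams(bzzdir), state, func() bool { return true })
}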
// metadata serialisation
func encodeSync(state *syncState) (*json.RawMessage, error) {
	data, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return nil, err
	}
	meta := json.RawMessage(data)
	return &meta, nil
}

func decodeSync(meta *json.RawMessage) (*syncState, error) {
	if meta == nil {
		return nil, fmt.Errorf("unable to deserialise sync state from <nil>")
	}
	data := []byte(*(meta))
	if len(data) == 0 {
		return nil, fmt.Errorf("unable to deserialise sync state from empty message")
	}
	state := &syncState{DbSyncState: &storage.DbSyncState{}}
	err := json.Unmarshal(data, state)
	return state, err
}
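The sync state travels as a raw JSON blob in the handshake, so a round trip through encodeSync/decodeSync should be lossless for the exported fields (the unexported synced channel is deliberately left out of serialisation). A minimal sketch; the counter values are illustrative:

// exampleSyncStateRoundTrip is illustrative only: it shows that the exported
// counters survive the encode/decode round trip used during the handshake.
func exampleSyncStateRoundTrip() error {
	in := &syncState{
		DbSyncState: &storage.DbSyncState{First: 0, Last: 42},
		SessionAt:   43,
	}
	meta, err := encodeSync(in)
	if err != nil {
		return err
	}
	out, err := decodeSync(meta)
	if err != nil {
		return err
	}
	glog.V(logger.Debug).Infof("decoded counters: %v-%v, session at: %v", out.First, out.Last, out.SessionAt)
	return nil
}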
/*
 sync implements the syncing script
 * first all items left in the request Db are replayed
   * type = StaleSync
   * Mode: by default once again via confirmation roundtrip
   * Priority: the items are replayed with the priority specified for StaleSync,
     but within that, the order respects the earlier priority level of the request
 * after all items are consumed for a priority level, the respective
   queue for delivery requests is opened (this way new reqs are not written to db)
   (TODO: this should be checked)
 * the sync state provided by the remote peer is used to sync history
   * all the backlog from earlier (aborted) syncing is completed starting from latest
   * if Last < LastSeenAt then all items in between are processed: all
     backlog up to the last disconnect
   * if Last > 0 &&

 sync is called from the syncer constructor and is not supposed to be used externally
*/
func (self *syncer) sync() {
	state := self.state
	// sync finished
	defer close(self.syncStates)

	// 0. first replay stale requests from request db
	if state.SessionAt == 0 {
		glog.V(logger.Debug).Infof("syncer[%v]: nothing to sync", self.key.Log())
		return
	}
	glog.V(logger.Debug).Infof("syncer[%v]: start replaying stale requests from request db", self.key.Log())
	for p := priorities - 1; p >= 0; p-- {
		self.queues[p].dbRead(false, 0, self.replay())
	}
	glog.V(logger.Debug).Infof("syncer[%v]: done replaying stale requests from request db", self.key.Log())

	// unless peer is synced, sync unfinished history
	if !state.Synced {
		start := state.Start

		if !storage.IsZeroKey(state.Latest) {
			// 1. there is an unfinished earlier sync
			state.Start = state.Latest
			glog.V(logger.Debug).Infof("syncer[%v]: start synchronising backlog (unfinished sync: %v)", self.key.Log(), state)
			// blocks while the entire history up to state is synced
			self.syncState(state)
			if state.Last < state.SessionAt {
				state.First = state.Last + 1
			}
		}
		state.Latest = storage.ZeroKey
		state.Start = start
		// 2. sync up to last disconnect
		if state.First < state.LastSeenAt {
			state.Last = state.LastSeenAt
			glog.V(logger.Debug).Infof("syncer[%v]: start synchronising history up to last disconnect at %v: %v", self.key.Log(), state.LastSeenAt, state)
			self.syncState(state)
			state.First = state.LastSeenAt
		}
		state.Latest = storage.ZeroKey

	} else {
		// synchronisation starts at end of last session
		state.First = state.LastSeenAt
	}

	// 3. sync up to current session start
	// if there have been new chunks since last session
	if state.LastSeenAt < state.SessionAt {
		state.Last = state.SessionAt
		glog.V(logger.Debug).Infof("syncer[%v]: start synchronising history since last disconnect at %v up until session start at %v: %v", self.key.Log(), state.LastSeenAt, state.SessionAt, state)
		// blocks until state syncing is finished
		self.syncState(state)
	}
	glog.V(logger.Info).Infof("syncer[%v]: syncing all history complete", self.key.Log())

}

// blocks until the history up to the given state is synced
func (self *syncer) syncState(state *syncState) {
	self.syncStates <- state
	select {
	case <-state.synced:
	case <-self.quit:
	}
}

// stop quits both request processors and saves the request cache to disk
func (self *syncer) stop() {
	close(self.quit)
	glog.V(logger.Detail).Infof("syncer[%v]: stop and save sync request db backlog", self.key.Log())
	for _, db := range self.queues {
		db.stop()
	}
}

// rlp serialisable sync request
type syncRequest struct {
	Key      storage.Key
	Priority uint
}

func (self *syncRequest) String() string {
	return fmt.Sprintf("<Key: %v, Priority: %v>", self.Key.Log(), self.Priority)
}

func (self *syncer) newSyncRequest(req interface{}, p int) (*syncRequest, error) {
	key, _, _, _, err := parseRequest(req)
	// TODO: if req has chunk, it should be put in a cache
	if err != nil {
		return nil, err
	}
	return &syncRequest{key, uint(p)}, nil
}

// serves historical items from the DB
// * read is on demand, blocking unless the history channel is read from
// * accepts sync requests (syncStates) to create a new db iterator
// * closes the channel once iteration finishes
func (self *syncer) syncHistory(state *syncState) chan interface{} {
	var n uint
	history := make(chan interface{})
	glog.V(logger.Debug).Infof("syncer[%v]: syncing history between %v - %v for chunk addresses %v - %v", self.key.Log(), state.First, state.Last, state.Start, state.Stop)
	it := self.dbAccess.iterator(state)
	if it != nil {
		go func() {
			// signal that the iteration has ended
			defer close(history)
		IT:
			for {
				key := it.Next()
				if key == nil {
					break IT
				}
				select {
				// blocking until history channel is read from
				case history <- storage.Key(key):
					n++
					glog.V(logger.Detail).Infof("syncer[%v]: history: %v (%v keys)", self.key.Log(), key.Log(), n)
					state.Latest = key
				case <-self.quit:
					return
				}
			}
			glog.V(logger.Debug).Infof("syncer[%v]: finished syncing history between %v - %v for chunk addresses %v - %v (at %v) (chunks = %v)", self.key.Log(), state.First, state.Last, state.Start, state.Stop, state.Latest, n)
		}()
	}
	return history
}

// triggers key synchronisation
func (self *syncer) sendUnsyncedKeys() {
	select {
	case self.deliveryRequest <- true:
	default:
	}
}

// assembles a new batch of unsynced keys
// * keys are drawn from the key buffers in order of priority queue
// * if the queues of priority for History (HistoryReq) or higher are depleted,
//   historical data is used, so historical items are lower priority within
//   their priority group.
// * Order of historical data is unspecified
func (self *syncer) syncUnsyncedKeys() {
	// send out new
	var unsynced []*syncRequest
	var more, justSynced bool
	var keyCount, historyCnt int
	var history chan interface{}

	priority := High
	keys := self.keys[priority]
	var newUnsyncedKeys, deliveryRequest chan bool
	keyCounts := make([]int, priorities)
	histPrior := self.SyncPriorities[HistoryReq]
	syncStates := self.syncStates
	state := self.state

LOOP:
	for {

		var req interface{}
		// select the highest priority channel to read from
		// keys channels are buffered so the highest priority ones
		// are checked first - integrity can only be guaranteed if writing
		// is locked while selecting
		if priority != High || len(keys) == 0 {
			// selection is not needed if the High priority queue has items
			keys = nil
		PRIORITIES:
			for priority = High; priority >= 0; priority-- {
				// the first priority channel that is non-empty will be assigned to keys
				if len(self.keys[priority]) > 0 {
					glog.V(logger.Detail).Infof("syncer[%v]: reading request with priority %v", self.key.Log(), priority)
					keys = self.keys[priority]
					break PRIORITIES
				}
				glog.V(logger.Detail).Infof("syncer[%v/%v]: queue: [%v, %v, %v]", self.key.Log(), priority, len(self.keys[High]), len(self.keys[Medium]), len(self.keys[Low]))
				// if the input queue is empty on this level, resort to history if there is any
				if uint(priority) == histPrior && history != nil {
					glog.V(logger.Detail).Infof("syncer[%v]: reading history for %v", self.key.Log(), self.key)
					keys = history
					break PRIORITIES
				}
			}
		}

		// if peer ready to receive but nothing to send
		if keys == nil && deliveryRequest == nil {
			// no items left, switch to waiting mode
			glog.V(logger.Detail).Infof("syncer[%v]: buffers consumed. Waiting", self.key.Log())
			newUnsyncedKeys = self.newUnsyncedKeys
		}

		// send msg iff
		// * peer is ready to receive keys AND (
		// * all queues and history are depleted OR
		// * batch full OR
		// * all history has been consumed (synced))
		if deliveryRequest == nil &&
			(justSynced ||
				len(unsynced) > 0 && keys == nil ||
				len(unsynced) == int(self.SyncBatchSize)) {
			justSynced = false
			// listen to requests
			deliveryRequest = self.deliveryRequest
			newUnsyncedKeys = nil // ignore data until the next req comes in
			// set sync to current counter
			// (all nonhistorical outgoing traffic scheduled and persisted)
			state.LastSeenAt = self.dbAccess.counter()
			state.Latest = storage.ZeroKey
			glog.V(logger.Detail).Infof("syncer[%v]: sending %v", self.key.Log(), unsynced)
			// send the unsynced keys
			stateCopy := *state
			err := self.unsyncedKeys(unsynced, &stateCopy)
			if err != nil {
				glog.V(logger.Warn).Infof("syncer[%v]: unable to send unsynced keys: %v", self.key.Log(), err)
			}
			self.state = state
			glog.V(logger.Debug).Infof("syncer[%v]: --> %v keys sent: (total: %v (%v), history: %v), sent sync state: %v", self.key.Log(), len(unsynced), keyCounts, keyCount, historyCnt, stateCopy)
			unsynced = nil
			keys = nil
		}

		// process item and add it to the batch
		select {
		case <-self.quit:
			break LOOP
		case req, more = <-keys:
			if keys == history && !more {
				glog.V(logger.Detail).Infof("syncer[%v]: syncing history segment complete", self.key.Log())
				// history channel is closed, waiting for new state (called from sync())
				syncStates = self.syncStates
				state.Synced = true // this signals that the current segment is complete
				select {
				case state.synced <- false:
				case <-self.quit:
					break LOOP
				}
				justSynced = true
				history = nil
			}
		case <-deliveryRequest:
			glog.V(logger.Detail).Infof("syncer[%v]: peer ready to receive", self.key.Log())

			// this 1-capacity channel can wake up the loop,
			// signalling that the peer is ready to receive unsynced keys
			// the channel is set to nil, so any further writes will be ignored
			deliveryRequest = nil

		case <-newUnsyncedKeys:
			glog.V(logger.Detail).Infof("syncer[%v]: new unsynced keys available", self.key.Log())
			// this 1-capacity channel can wake up the loop,
			// signalling that data is available to send if the peer is ready to receive
			newUnsyncedKeys = nil
			keys = self.keys[High]

		case state, more = <-syncStates:
			// this resets the state
			if !more {
				state = self.state
				glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) syncing complete up to %v", self.key.Log(), priority, state)
				state.Synced = true
				syncStates = nil
			} else {
				glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) syncing history up to %v (priority %v)", self.key.Log(), priority, state, histPrior)
				state.Synced = false
				history = self.syncHistory(state)
				// only one history at a time, only allow another one once the
				// history channel is closed
				syncStates = nil
			}
		}
		if req == nil {
			continue LOOP
		}

		glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) added to unsynced keys: %v", self.key.Log(), priority, req)
		keyCounts[priority]++
		keyCount++
		if keys == history {
			glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) history item %v (synced = %v)", self.key.Log(), priority, req, state.Synced)
			historyCnt++
		}
		if sreq, err := self.newSyncRequest(req, priority); err == nil {
			// extract key from req
			glog.V(logger.Detail).Infof("syncer[%v]: (priority %v) request %v (synced = %v)", self.key.Log(), priority, req, state.Synced)
			unsynced = append(unsynced, sreq)
		} else {
			glog.V(logger.Warn).Infof("syncer[%v]: (priority %v) error creating request for %v (synced = %v): %v", self.key.Log(), priority, req, state.Synced, err)
		}

	}
}

// delivery loop
// takes into account priority, sends store requests with chunk (delivery)
// idle blocking if no new deliveries in any of the queues
func (self *syncer) syncDeliveries() {
	var req *storeRequestMsgData
	p := High
	var deliveries chan *storeRequestMsgData
	var msg *storeRequestMsgData
	var err error
	var c = [priorities]int{}
	var n = [priorities]int{}
	var total, success uint

	for {
		deliveries = self.deliveries[p]
		select {
		case req = <-deliveries:
			n[p]++
			c[p]++
		default:
			if p == Low {
				// blocking, depletion on all channels, no preference for priority
				select {
				case req = <-self.deliveries[High]:
					n[High]++
				case req = <-self.deliveries[Medium]:
					n[Medium]++
				case req = <-self.deliveries[Low]:
					n[Low]++
				case <-self.quit:
					return
				}
				p = High
			} else {
				p--
				continue
			}
		}
		total++
		msg, err = self.newStoreRequestMsgData(req)
		if err != nil {
			glog.V(logger.Warn).Infof("syncer[%v]: failed to create store request for %v: %v", self.key.Log(), req, err)
		} else {
			err = self.store(msg)
			if err != nil {
				glog.V(logger.Warn).Infof("syncer[%v]: failed to deliver %v: %v", self.key.Log(), req, err)
			} else {
				success++
				glog.V(logger.Detail).Infof("syncer[%v]: %v successfully delivered", self.key.Log(), req)
			}
		}
		if total%self.SyncBatchSize == 0 {
			glog.V(logger.Debug).Infof("syncer[%v]: deliver Total: %v, Success: %v, High: %v/%v, Medium: %v/%v, Low %v/%v", self.key.Log(), total, success, c[High], n[High], c[Medium], n[Medium], c[Low], n[Low])
		}
	}
}
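The drain order in syncDeliveries is worth isolating: each pass tries the current priority channel without blocking and steps down one level; only when even Low is empty does the loop block on all three channels at once, with no priority preference. A stand-alone sketch of the same pattern (drainPrioritised is illustrative, not part of the package):

// drainPrioritised is a hypothetical distillation of the loop above: it
// returns the next item, preferring higher priorities, and blocks only
// when every queue is empty.
func drainPrioritised(chans [priorities]chan int, quit chan bool) (int, bool) {
	for p := High; ; p-- {
		select {
		case v := <-chans[p]:
			return v, true
		default:
			if p == Low {
				// all channels empty: block without priority preference
				select {
				case v := <-chans[High]:
					return v, true
				case v := <-chans[Medium]:
					return v, true
				case v := <-chans[Low]:
					return v, true
				case <-quit:
					return 0, false
				}
			}
		}
	}
}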
/*
 addRequest handles requests for delivery
 it accepts 4 types:

 * storeRequestMsgData: coming from netstore propagate response
 * chunk: coming from forwarding (questionable: id?)
 * key: from incoming syncRequest
 * syncDbEntry: key,id encoded in db

 If sync mode is on for the type of request, then
 it sends the request to the keys queue of the correct priority
 channel buffered with capacity (SyncBufferSize)

 If sync mode is off, then requests are sent directly to deliveries
*/
func (self *syncer) addRequest(req interface{}, ty int) {
	// retrieve priority for request type
	priority := self.SyncPriorities[ty]
	// sync mode for this type ON
	if self.syncF() || ty == DeliverReq {
		if self.SyncModes[ty] {
			self.addKey(req, priority, self.quit)
		} else {
			self.addDelivery(req, priority, self.quit)
		}
	}
}

// addKey queues a sync request for sync confirmation with the given priority,
// i.e. the key will go out in an unsyncedKeys message
func (self *syncer) addKey(req interface{}, priority uint, quit chan bool) bool {
	select {
	case self.keys[priority] <- req:
		// this wakes up the unsynced keys loop if idle
		select {
		case self.newUnsyncedKeys <- true:
		default:
		}
		return true
	case <-quit:
		return false
	}
}

// addDelivery queues a delivery request with the given priority,
// i.e. the chunk will be delivered ASAP, modulo the priority queueing handled by syncdb;
// requests are persisted across sessions for correct sync
func (self *syncer) addDelivery(req interface{}, priority uint, quit chan bool) bool {
	select {
	case self.queues[priority].buffer <- req:
		return true
	case <-quit:
		return false
	}
}

// doDelivery delivers the chunk for the request with the given priority
// without queuing
func (self *syncer) doDelivery(req interface{}, priority uint, quit chan bool) bool {
	msgdata, err := self.newStoreRequestMsgData(req)
	if err != nil {
		glog.V(logger.Warn).Infof("unable to deliver request %v: %v", msgdata, err)
		return false
	}
	select {
	case self.deliveries[priority] <- msgdata:
		return true
	case <-quit:
		return false
	}
}

// returns the delivery function for the given priority
// passed on to syncDb
func (self *syncer) deliver(priority uint) func(req interface{}, quit chan bool) bool {
	return func(req interface{}, quit chan bool) bool {
		return self.doDelivery(req, priority, quit)
	}
}

// returns the replay function passed on to syncDb
// depending on the sync mode settings for BacklogReq,
// replay of the request db backlog sends items via confirmation
// or delivers them directly
func (self *syncer) replay() func(req interface{}, quit chan bool) bool {
	sync := self.SyncModes[BacklogReq]
	priority := self.SyncPriorities[BacklogReq]
	// sync mode for this type ON
	if sync {
		return func(req interface{}, quit chan bool) bool {
			return self.addKey(req, priority, quit)
		}
	} else {
		return func(req interface{}, quit chan bool) bool {
			return self.doDelivery(req, priority, quit)
		}
	}
}

// given a request, extends it to a full storeRequestMsgData
// polymorphic: see addRequest for the types accepted
func (self *syncer) newStoreRequestMsgData(req interface{}) (*storeRequestMsgData, error) {

	key, id, chunk, sreq, err := parseRequest(req)
	if err != nil {
		return nil, err
	}

	if sreq == nil {
		if chunk == nil {
			var err error
			chunk, err = self.dbAccess.get(key)
			if err != nil {
				return nil, err
			}
		}

		sreq = &storeRequestMsgData{
			Id:    id,
			Key:   chunk.Key,
			SData: chunk.SData,
		}
	}

	return sreq, nil
}

// parses request types and extracts key, id, chunk, request if available
// does not do chunk lookup!
func parseRequest(req interface{}) (storage.Key, uint64, *storage.Chunk, *storeRequestMsgData, error) {
	var key storage.Key
	var entry *syncDbEntry
	var chunk *storage.Chunk
	var id uint64
	var ok bool
	var sreq *storeRequestMsgData
	var err error

	if key, ok = req.(storage.Key); ok {
		id = generateId()

	} else if entry, ok = req.(*syncDbEntry); ok {
		id = binary.BigEndian.Uint64(entry.val[32:])
		key = storage.Key(entry.val[:32])

	} else if chunk, ok = req.(*storage.Chunk); ok {
		key = chunk.Key
		id = generateId()

	} else if sreq, ok = req.(*storeRequestMsgData); ok {
		key = sreq.Key
	} else {
		err = fmt.Errorf("type not allowed: %v (%T)", req, req)
	}

	return key, id, chunk, sreq, err
}
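Illustrative only: the same parseRequest call accepts any of the four request forms; here a bare key and a full chunk take the same path (exampleParseRequest is a hypothetical helper):

// exampleParseRequest is a hypothetical demonstration of the polymorphic
// request handling: a bare storage.Key and a *storage.Chunk both parse.
func exampleParseRequest(chunk *storage.Chunk) {
	for _, req := range []interface{}{chunk.Key, chunk} {
		key, id, _, _, err := parseRequest(req)
		if err != nil {
			glog.V(logger.Warn).Infof("unexpected request type: %v", err)
			continue
		}
		glog.V(logger.Debug).Infof("request for %v (id %v)", key.Log(), id)
	}
}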
283 swarm/services/swap/swap.go Normal file
@ -0,0 +1,283 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package swap

import (
	"crypto/ecdsa"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/chequebook"
	"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
	"golang.org/x/net/context"
)

// SwAP Swarm Accounting Protocol with
// SWAP^2 Strategies of Withholding Automatic Payments
// SWAP^3 Accreditation: payment via credit SWAP
// using chequebook pkg for delayed payments
// default parameters

var (
	autoCashInterval     = 300 * time.Second           // default interval for autocash
	autoCashThreshold    = big.NewInt(50000000000000)  // threshold that triggers autocash (wei)
	autoDepositInterval  = 300 * time.Second           // default interval for autodeposit
	autoDepositThreshold = big.NewInt(50000000000000)  // threshold that triggers autodeposit (wei)
	autoDepositBuffer    = big.NewInt(100000000000000) // buffer that is surplus for fork protection etc (wei)
	buyAt                = big.NewInt(20000000000)     // maximum chunk price host is willing to pay (wei)
	sellAt               = big.NewInt(20000000000)     // minimum chunk price host requires (wei)
	payAt                = 100                         // threshold that triggers payment {request} (units)
	dropAt               = 10000                       // threshold that triggers disconnect (units)
)

const (
	chequebookDeployRetries = 5
	chequebookDeployDelay   = 1 * time.Second // delay between retries
)

type SwapParams struct {
	*swap.Params
	*PayProfile
}

type SwapProfile struct {
	*swap.Profile
	*PayProfile
}

type PayProfile struct {
	PublicKey   string         // check against signature of promise
	Contract    common.Address // address of chequebook contract
	Beneficiary common.Address // recipient address for swarm sales revenue
	privateKey  *ecdsa.PrivateKey
	publicKey   *ecdsa.PublicKey
	owner       common.Address
	chbook      *chequebook.Chequebook
	lock        sync.RWMutex
}

func DefaultSwapParams(contract common.Address, prvkey *ecdsa.PrivateKey) *SwapParams {
	pubkey := &prvkey.PublicKey
	return &SwapParams{
		PayProfile: &PayProfile{
			PublicKey:   common.ToHex(crypto.FromECDSAPub(pubkey)),
			Contract:    contract,
			Beneficiary: crypto.PubkeyToAddress(*pubkey),
			privateKey:  prvkey,
			publicKey:   pubkey,
			owner:       crypto.PubkeyToAddress(*pubkey),
		},
		Params: &swap.Params{
			Profile: &swap.Profile{
				BuyAt:  buyAt,
				SellAt: sellAt,
				PayAt:  uint(payAt),
				DropAt: uint(dropAt),
			},
			Strategy: &swap.Strategy{
				AutoCashInterval:     autoCashInterval,
				AutoCashThreshold:    autoCashThreshold,
				AutoDepositInterval:  autoDepositInterval,
				AutoDepositThreshold: autoDepositThreshold,
				AutoDepositBuffer:    autoDepositBuffer,
			},
		},
	}
}

// swap constructor, parameters
// * global chequebook, assumes a deployed service and
// * that the balance is at the buffer level.
// swap.Add(n) is called in netstore:
// n > 0 when sending chunks = receiving retrieve requests
//       OR sending cheques.
// n < 0 when receiving chunks = receiving delivery responses
//       OR receiving cheques.

func NewSwap(local *SwapParams, remote *SwapProfile, backend chequebook.Backend, proto swap.Protocol) (self *swap.Swap, err error) {
	var (
		ctx = context.TODO()
		ok  bool
		in  *chequebook.Inbox
		out *chequebook.Outbox
	)

	// check if remote chequebook is valid
	// insolvent chequebooks suicide so will signal as invalid
	// TODO: monitor a chequebook's events
	ok, err = chequebook.ValidateCode(ctx, backend, remote.Contract)
	if !ok {
		glog.V(logger.Info).Infof("invalid contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err)
	} else {
		// remote contract valid, create inbox
		in, err = chequebook.NewInbox(local.privateKey, remote.Contract, local.Beneficiary, crypto.ToECDSAPub(common.FromHex(remote.PublicKey)), backend)
		if err != nil {
			glog.V(logger.Warn).Infof("unable to set up inbox for chequebook contract %v for peer %v: %v)", remote.Contract.Hex()[:8], proto, err)
		}
	}

	// check if local chequebook contract is valid
	ok, err = chequebook.ValidateCode(ctx, backend, local.Contract)
	if !ok {
		glog.V(logger.Warn).Infof("unable to set up outbox for peer %v: chequebook contract (owner: %v): %v)", proto, local.owner.Hex(), err)
	} else {
		out = chequebook.NewOutbox(local.Chequebook(), remote.Beneficiary)
	}

	pm := swap.Payment{
		In:    in,
		Out:   out,
		Buys:  out != nil,
		Sells: in != nil,
	}
	self, err = swap.New(local.Params, pm, proto)
	if err != nil {
		return
	}
	// remote profile given (first) in handshake
	self.SetRemote(remote.Profile)
	var buy, sell string
	if self.Buys {
		buy = "purchase from peer enabled at " + remote.SellAt.String() + " wei/chunk"
	} else {
		buy = "purchase from peer disabled"
	}
	if self.Sells {
		sell = "selling to peer enabled at " + local.SellAt.String() + " wei/chunk"
	} else {
		sell = "selling to peer disabled"
	}
	glog.V(logger.Warn).Infof("SWAP arrangement with <%v>: %v; %v)", proto, buy, sell)

	return
}

func (self *SwapParams) Chequebook() *chequebook.Chequebook {
	defer self.lock.Unlock()
	self.lock.Lock()
	return self.chbook
}

func (self *SwapParams) PrivateKey() *ecdsa.PrivateKey {
	return self.privateKey
}

// func (self *SwapParams) PublicKey() *ecdsa.PublicKey {
// 	return self.publicKey
// }

func (self *SwapParams) SetKey(prvkey *ecdsa.PrivateKey) {
	self.privateKey = prvkey
	self.publicKey = &prvkey.PublicKey
}

// SetChequebook(ctx, backend, path) wraps the
// chequebook initialiser and sets up autoDeposit to cover spending.
func (self *SwapParams) SetChequebook(ctx context.Context, backend chequebook.Backend, path string) error {
	self.lock.Lock()
	contract := self.Contract
	self.lock.Unlock()

	valid, err := chequebook.ValidateCode(ctx, backend, contract)
	if err != nil {
		return err
	} else if valid {
		return self.newChequebookFromContract(path, backend)
	}
	return self.deployChequebook(ctx, backend, path)
}

func (self *SwapParams) deployChequebook(ctx context.Context, backend chequebook.Backend, path string) error {
	opts := bind.NewKeyedTransactor(self.privateKey)
	opts.Value = self.AutoDepositBuffer
	opts.Context = ctx

	glog.V(logger.Info).Infof("Deploying new chequebook (owner: %v)", opts.From.Hex())
	contract, err := deployChequebookLoop(opts, backend)
	if err != nil {
		glog.V(logger.Error).Infof("unable to deploy new chequebook: %v", err)
		return err
	}
	glog.V(logger.Info).Infof("new chequebook deployed at %v (owner: %v)", contract.Hex(), opts.From.Hex())

	// need to save config at this point
	self.lock.Lock()
	self.Contract = contract
	err = self.newChequebookFromContract(path, backend)
	self.lock.Unlock()
	if err != nil {
		glog.V(logger.Warn).Infof("error initialising cheque book (owner: %v): %v", opts.From.Hex(), err)
	}
	return err
}

// deployChequebookLoop repeatedly tries to deploy a chequebook.
func deployChequebookLoop(opts *bind.TransactOpts, backend chequebook.Backend) (addr common.Address, err error) {
	var tx *types.Transaction
	for try := 0; try < chequebookDeployRetries; try++ {
		if try > 0 {
			time.Sleep(chequebookDeployDelay)
		}
		if _, tx, _, err = contract.DeployChequebook(opts, backend); err != nil {
			glog.V(logger.Warn).Infof("can't send chequebook deploy tx (try %d): %v", try, err)
			continue
		}
		if addr, err = bind.WaitDeployed(opts.Context, backend, tx); err != nil {
			glog.V(logger.Warn).Infof("chequebook deploy error (try %d): %v", try, err)
			continue
		}
		return addr, nil
	}
	return addr, err
}

// initialise the chequebook from a persisted json file or create a new one;
// the caller holds the lock
func (self *SwapParams) newChequebookFromContract(path string, backend chequebook.Backend) error {
	hexkey := common.Bytes2Hex(self.Contract.Bytes())
	err := os.MkdirAll(filepath.Join(path, "chequebooks"), os.ModePerm)
	if err != nil {
		return fmt.Errorf("unable to create directory for chequebooks: %v", err)
	}

	chbookpath := filepath.Join(path, "chequebooks", hexkey+".json")
	self.chbook, err = chequebook.LoadChequebook(chbookpath, self.privateKey, backend, true)

	if err != nil {
		self.chbook, err = chequebook.NewChequebook(chbookpath, self.Contract, self.privateKey, backend)
		if err != nil {
			glog.V(logger.Warn).Infof("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err)
			return fmt.Errorf("unable to initialise chequebook (owner: %v): %v", self.owner.Hex(), err)
		}
	}

	self.chbook.AutoDeposit(self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer)
	glog.V(logger.Info).Infof("auto deposit ON for %v -> %v: interval = %v, threshold = %v, buffer = %v)", crypto.PubkeyToAddress(*(self.publicKey)).Hex()[:8], self.Contract.Hex()[:8], self.AutoDepositInterval, self.AutoDepositThreshold, self.AutoDepositBuffer)

	return nil
}
254 swarm/services/swap/swap/swap.go Normal file
@ -0,0 +1,254 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package swap

import (
	"fmt"
	"math/big"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

// SwAP: Swarm Accounting Protocol with
// Swift Automatic Payments,
// a peer-to-peer micropayment system

// public swap profile
// public parameters for SWAP, serializable config struct passed in handshake
type Profile struct {
	BuyAt  *big.Int // accepted max price for chunk
	SellAt *big.Int // offered sale price for chunk
	PayAt  uint     // threshold that triggers payment request
	DropAt uint     // threshold that triggers disconnect
}

// Strategy encapsulates parameters relating to
// automatic deposit and automatic cashing
type Strategy struct {
	AutoCashInterval     time.Duration // default interval for autocash
	AutoCashThreshold    *big.Int      // threshold that triggers autocash (wei)
	AutoDepositInterval  time.Duration // default interval for autodeposit
	AutoDepositThreshold *big.Int      // threshold that triggers autodeposit (wei)
	AutoDepositBuffer    *big.Int      // surplus buffer for fork protection etc. (wei)
}

// Params extends the public profile with private parameters relating to
// automatic deposit and automatic cashing
type Params struct {
	*Profile
	*Strategy
}
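// A minimal sketch of composing Params (the concrete values below are
// illustrative assumptions, not defaults from this package): the Profile half
// is exchanged in the handshake, the Strategy half stays private to the node.
//
//	params := &Params{
//		Profile: &Profile{
//			BuyAt:  big.NewInt(2), // pay at most 2 wei per chunk bought
//			SellAt: big.NewInt(3), // charge 3 wei per chunk served
//			PayAt:  100,           // request payment at 100 units of debt
//			DropAt: 1000,          // disconnect at 1000 units of debt
//		},
//		Strategy: &Strategy{
//			AutoCashInterval:     5 * time.Minute,
//			AutoCashThreshold:    big.NewInt(50000),
//			AutoDepositInterval:  5 * time.Minute,
//			AutoDepositThreshold: big.NewInt(50000),
//			AutoDepositBuffer:    big.NewInt(100000),
//		},
//	}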

// Promise
// 3rd party Provable Promise of Payment
// issued by outPayment
// serialisable to send with Protocol
type Promise interface{}

// interface for the peer protocol for testing or external alternative payment
type Protocol interface {
	Pay(int, Promise) // units, payment proof
	Drop()
	String() string
}

// interface for the (delayed) outgoing payment system with autodeposit
type OutPayment interface {
	Issue(amount *big.Int) (promise Promise, err error)
	AutoDeposit(interval time.Duration, threshold, buffer *big.Int)
	Stop()
}

// interface for the (delayed) incoming payment system with autocash
type InPayment interface {
	Receive(promise Promise) (*big.Int, error)
	AutoCash(cashInterval time.Duration, maxUncashed *big.Int)
	Stop()
}

// swap is the swarm accounting protocol instance
// * pairwise accounting and payments
type Swap struct {
	lock    sync.Mutex // mutex for balance access
	balance int        // units of chunk/retrieval request
	local   *Params    // local peer's swap parameters
	remote  *Profile   // remote peer's swap profile
	proto   Protocol   // peer communication protocol
	Payment
}

type Payment struct {
	Out         OutPayment // outgoing payment handler
	In          InPayment  // incoming payment handler
	Buys, Sells bool
}

// swap constructor
func New(local *Params, pm Payment, proto Protocol) (self *Swap, err error) {

	self = &Swap{
		local:   local,
		Payment: pm,
		proto:   proto,
	}

	self.SetParams(local)

	return
}

// entry point for setting remote swap profile (e.g. from handshake or other message)
func (self *Swap) SetRemote(remote *Profile) {
	defer self.lock.Unlock()
	self.lock.Lock()

	self.remote = remote
	if self.Sells && (remote.BuyAt.Cmp(common.Big0) <= 0 || self.local.SellAt.Cmp(common.Big0) <= 0 || remote.BuyAt.Cmp(self.local.SellAt) < 0) {
		self.Out.Stop()
		self.Sells = false
	}
	if self.Buys && (remote.SellAt.Cmp(common.Big0) <= 0 || self.local.BuyAt.Cmp(common.Big0) <= 0 || self.local.BuyAt.Cmp(self.remote.SellAt) < 0) {
		self.In.Stop()
		self.Buys = false
	}

	glog.V(logger.Debug).Infof("<%v> remote profile set: pay at: %v, drop at: %v, buy at: %v, sell at: %v", self.proto, remote.PayAt, remote.DropAt, remote.BuyAt, remote.SellAt)

}

// to set strategy dynamically
func (self *Swap) SetParams(local *Params) {
	defer self.lock.Unlock()
	self.lock.Lock()
	self.local = local
	self.setParams(local)
}

// caller holds the lock

func (self *Swap) setParams(local *Params) {

	if self.Sells {
		self.In.AutoCash(local.AutoCashInterval, local.AutoCashThreshold)
		glog.V(logger.Info).Infof("<%v> set autocash to every %v, max uncashed limit: %v", self.proto, local.AutoCashInterval, local.AutoCashThreshold)
	} else {
		glog.V(logger.Info).Infof("<%v> autocash off (not selling)", self.proto)
	}
	if self.Buys {
		self.Out.AutoDeposit(local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer)
		glog.V(logger.Info).Infof("<%v> set autodeposit to every %v, pay at: %v, buffer: %v", self.proto, local.AutoDepositInterval, local.AutoDepositThreshold, local.AutoDepositBuffer)
	} else {
		glog.V(logger.Info).Infof("<%v> autodeposit off (not buying)", self.proto)
	}
}

// Add(n)
// n > 0 called when promised/provided n units of service
// n < 0 called when used/requested n units of service
func (self *Swap) Add(n int) error {
	defer self.lock.Unlock()
	self.lock.Lock()
	self.balance += n
	if !self.Sells && self.balance > 0 {
		glog.V(logger.Detail).Infof("<%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance)
		self.proto.Drop()
		return fmt.Errorf("[SWAP] <%v> remote peer cannot have debt (balance: %v)", self.proto, self.balance)
	}
	if !self.Buys && self.balance < 0 {
		glog.V(logger.Detail).Infof("<%v> we cannot have debt (balance: %v)", self.proto, self.balance)
		return fmt.Errorf("[SWAP] <%v> we cannot have debt (balance: %v)", self.proto, self.balance)
	}
	if self.balance >= int(self.local.DropAt) {
		glog.V(logger.Detail).Infof("<%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt)
		self.proto.Drop()
		return fmt.Errorf("[SWAP] <%v> remote peer has too much debt (balance: %v, disconnect threshold: %v)", self.proto, self.balance, self.local.DropAt)
	} else if self.balance <= -int(self.remote.PayAt) {
		self.send()
	}
	return nil
}

func (self *Swap) Balance() int {
	defer self.lock.Unlock()
	self.lock.Lock()
	return self.balance
}

// send(units) is called when payment is due
// In case of insolvency no promise is issued and sent, safe against fraud
// No return value: no error = payment is opportunistic = hang in till dropped
func (self *Swap) send() {
	if self.local.BuyAt != nil && self.balance < 0 {
		amount := big.NewInt(int64(-self.balance))
		amount.Mul(amount, self.remote.SellAt)
		promise, err := self.Out.Issue(amount)
		if err != nil {
			glog.V(logger.Warn).Infof("<%v> cannot issue cheque (amount: %v, channel: %v): %v", self.proto, amount, self.Out, err)
		} else {
			glog.V(logger.Warn).Infof("<%v> cheque issued (amount: %v, channel: %v)", self.proto, amount, self.Out)
			self.proto.Pay(-self.balance, promise)
			self.balance = 0
		}
	}
}

// Receive(units, promise) is called by the protocol when a payment msg is received
// returns error if promise is invalid.
func (self *Swap) Receive(units int, promise Promise) error {
	if units <= 0 {
		return fmt.Errorf("invalid units: %v <= 0", units)
	}

	price := new(big.Int).SetInt64(int64(units))
	price.Mul(price, self.local.SellAt)

	amount, err := self.In.Receive(promise)

	if err != nil {
		err = fmt.Errorf("invalid promise: %v", err)
	} else if price.Cmp(amount) != 0 {
		// verify amount = units * unit sale price
		return fmt.Errorf("invalid amount: %v = %v * %v (units sent in msg * agreed sale unit price) != %v (signed in cheque)", price, units, self.local.SellAt, amount)
	}
	if err != nil {
		glog.V(logger.Detail).Infof("<%v> invalid promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, err)
		return err
	}

	// credit remote peer with units
	self.Add(-units)
	glog.V(logger.Detail).Infof("<%v> received promise (amount: %v, channel: %v): %v", self.proto, amount, self.In, promise)

	return nil
}

// Stop() causes autocash loop to terminate.
// Called after protocol handle loop terminates.
func (self *Swap) Stop() {
	defer self.lock.Unlock()
	self.lock.Lock()
	if self.Buys {
		self.Out.Stop()
	}
	if self.Sells {
		self.In.Stop()
	}
}
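A minimal sketch of how a protocol layer is expected to drive a Swap instance; `localParams`, `pm`, `proto` and `remoteProfile` are assumed to be supplied by the caller (the names are illustrative, not from this file):

	sw, err := New(localParams, pm, proto) // pm wires the In/Out payment handlers
	if err != nil {
		return err
	}
	sw.SetRemote(remoteProfile) // prices received in the bzz handshake
	sw.Add(1)                   // credit: we served one unit to the peer
	sw.Add(-1)                  // debit: we consumed one unit from the peer
	defer sw.Stop()             // terminates the autocash/autodeposit loops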
194	swarm/services/swap/swap/swap_test.go	Normal file
@ -0,0 +1,194 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package swap

import (
	"math/big"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

type testInPayment struct {
	received         []*testPromise
	autocashInterval time.Duration
	autocashLimit    *big.Int
}

type testPromise struct {
	amount *big.Int
}

func (self *testInPayment) Receive(promise Promise) (*big.Int, error) {
	p := promise.(*testPromise)
	self.received = append(self.received, p)
	return p.amount, nil
}

func (self *testInPayment) AutoCash(interval time.Duration, limit *big.Int) {
	self.autocashInterval = interval
	self.autocashLimit = limit
}

func (self *testInPayment) Cash() (string, error) { return "", nil }

func (self *testInPayment) Stop() {}

type testOutPayment struct {
	deposits             []*big.Int
	autodepositInterval  time.Duration
	autodepositThreshold *big.Int
	autodepositBuffer    *big.Int
}

func (self *testOutPayment) Issue(amount *big.Int) (promise Promise, err error) {
	return &testPromise{amount}, nil
}

func (self *testOutPayment) Deposit(amount *big.Int) (string, error) {
	self.deposits = append(self.deposits, amount)
	return "", nil
}

func (self *testOutPayment) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) {
	self.autodepositInterval = interval
	self.autodepositThreshold = threshold
	self.autodepositBuffer = buffer
}

func (self *testOutPayment) Stop() {}

type testProtocol struct {
	drop     bool
	amounts  []int
	promises []*testPromise
}

func (self *testProtocol) Drop() {
	self.drop = true
}

func (self *testProtocol) String() string {
	return ""
}

func (self *testProtocol) Pay(amount int, promise Promise) {
	p := promise.(*testPromise)
	self.promises = append(self.promises, p)
	self.amounts = append(self.amounts, amount)
}

func TestSwap(t *testing.T) {

	strategy := &Strategy{
		AutoCashInterval:     1 * time.Second,
		AutoCashThreshold:    big.NewInt(20),
		AutoDepositInterval:  1 * time.Second,
		AutoDepositThreshold: big.NewInt(20),
		AutoDepositBuffer:    big.NewInt(40),
	}

	local := &Params{
		Profile: &Profile{
			PayAt:  5,
			DropAt: 10,
			BuyAt:  common.Big3,
			SellAt: common.Big2,
		},
		Strategy: strategy,
	}

	in := &testInPayment{}
	out := &testOutPayment{}
	proto := &testProtocol{}

	swap, _ := New(local, Payment{In: in, Out: out, Buys: true, Sells: true}, proto)

	if in.autocashInterval != strategy.AutoCashInterval {
		t.Fatalf("autocash interval not properly set, expect %v, got %v", strategy.AutoCashInterval, in.autocashInterval)
	}
	if out.autodepositInterval != strategy.AutoDepositInterval {
		t.Fatalf("autodeposit interval not properly set, expect %v, got %v", strategy.AutoDepositInterval, out.autodepositInterval)
	}

	remote := &Profile{
		PayAt:  3,
		DropAt: 10,
		BuyAt:  common.Big2,
		SellAt: common.Big3,
	}
	swap.SetRemote(remote)

	swap.Add(9)
	if proto.drop {
		t.Fatalf("did not expect peer to be dropped")
	}
	swap.Add(1)
	if !proto.drop {
		t.Fatalf("expected peer to be dropped")
	}
	proto.drop = false

	swap.Receive(10, &testPromise{big.NewInt(20)})
	if swap.balance != 0 {
		t.Fatalf("expected zero balance, got %v", swap.balance)
	}

	if len(proto.amounts) != 0 {
		t.Fatalf("expected no payments, got %v", len(proto.amounts))
	}

	swap.Add(-2)
	if len(proto.amounts) > 0 {
		t.Fatalf("expected no payments yet, got %v", proto.amounts)
	}

	swap.Add(-1)
	if len(proto.amounts) != 1 {
		t.Fatalf("expected one payment, got %v", len(proto.amounts))
	}

	if proto.amounts[0] != 3 {
		t.Fatalf("expected payment for %v units, got %v", 3, proto.amounts[0])
	}

	exp := new(big.Int).Mul(big.NewInt(int64(proto.amounts[0])), remote.SellAt)
	if proto.promises[0].amount.Cmp(exp) != 0 {
		t.Fatalf("expected payment amount %v, got %v", exp, proto.promises[0].amount)
	}

	swap.SetParams(&Params{
		Profile: &Profile{
			PayAt:  5,
			DropAt: 10,
			BuyAt:  common.Big3,
			SellAt: common.Big2,
		},
		Strategy: &Strategy{
			AutoCashInterval:     2 * time.Second,
			AutoCashThreshold:    big.NewInt(40),
			AutoDepositInterval:  2 * time.Second,
			AutoDepositThreshold: big.NewInt(40),
			AutoDepositBuffer:    big.NewInt(60),
		},
	})

}
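To make the payment arithmetic in TestSwap explicit: the balance only reaches the remote's PayAt threshold (3) after Receive zeroes it and two debits follow, so a single cheque is issued for 3 units at the remote's sale price of 3 wei per unit (a worked restatement of the assertions above, not additional test code):

	units := 3
	expected := new(big.Int).Mul(big.NewInt(int64(units)), remote.SellAt) // 3 * 3 = 9 wei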
509	swarm/storage/chunker.go	Normal file
@ -0,0 +1,509 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"io"
	"sync"
	// "github.com/ethereum/go-ethereum/logger"
	// "github.com/ethereum/go-ethereum/logger/glog"
)

/*
The distributed storage implemented in this package requires fixed-size chunks of content.

Chunker is the interface to a component that is responsible for disassembling and assembling larger data.

TreeChunker implements a Chunker based on a tree structure defined as follows:

1 each node in the tree, including the root and other branching nodes, is stored as a chunk.

2 branching nodes encode data contents that include the size of the data slice covered by its entire subtree under the node as well as the hash keys of all its children:
data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1}

3 leaf nodes encode an actual subslice of the input data.

4 if the data size is not more than the maximum chunksize, the data is stored in a single chunk
  key = hash(int64(size) + data)

5 if the data size is more than chunksize*branches^l, but no more than chunksize*branches^(l+1), the data vector is split into slices of chunksize*branches^l length (except the last one).
  key = hash(int64(size) + key(slice0) + key(slice1) + ...)

The underlying hash function is configurable.
*/
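// A worked example of the geometry above, assuming a 32-byte hash and the
// default 128 branches, so chunkSize = 32*128 = 4096 bytes: a 1,000,000-byte
// document needs a tree of depth 2, since 4096*128 < 1e6 <= 4096*128^2.
//
//	depth, treeSize := 0, int64(4096)
//	for ; treeSize < 1000000; treeSize *= 128 {
//		depth++
//	}
//	// depth == 2, treeSize == 67108864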

const (
	defaultHash = "SHA3" // http://golang.org/pkg/hash/#Hash
	// defaultHash = "SHA256" // http://golang.org/pkg/hash/#Hash
	defaultBranches int64 = 128
	// hashSize int64 = hasherfunc.New().Size() // hasher knows about its own length in bytes
	// chunksize int64 = branches * hashSize // chunk is defined as this
)

/*
Tree chunker is a concrete implementation of data chunking.
This chunker works in a simple way: it builds a tree out of the document so that each node either represents a chunk of real data or a chunk of data representing a branching non-leaf node of the tree. In particular, each such non-leaf chunk is a concatenation of the hashes of its respective children. This scheme simultaneously guarantees data integrity as well as self-addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree.

If all is well, it is possible to implement this by simply composing readers so that no extra allocation or buffering is necessary for the data splitting and joining. This means that in principle there can be direct IO between: memory, file system, network socket (a bzz peer's storage request is read from the socket). In practice there may be a need for several stages of internal buffering.
The hashing itself does use extra copies and allocation though, since it does need it.
*/

type ChunkerParams struct {
	Branches int64
	Hash     string
}

func NewChunkerParams() *ChunkerParams {
	return &ChunkerParams{
		Branches: defaultBranches,
		Hash:     defaultHash,
	}
}

type TreeChunker struct {
	branches int64
	hashFunc Hasher
	// calculated
	hashSize    int64 // self.hashFunc.New().Size()
	chunkSize   int64 // hashSize * branches
	workerCount int
}

func NewTreeChunker(params *ChunkerParams) (self *TreeChunker) {
	self = &TreeChunker{}
	self.hashFunc = MakeHashFunc(params.Hash)
	self.branches = params.Branches
	self.hashSize = int64(self.hashFunc().Size())
	self.chunkSize = self.hashSize * self.branches
	self.workerCount = 1
	return
}

// func (self *TreeChunker) KeySize() int64 {
// 	return self.hashSize
// }

// String() for pretty printing
func (self *Chunk) String() string {
	return fmt.Sprintf("Key: %v TreeSize: %v Chunksize: %v", self.Key.Log(), self.Size, len(self.SData))
}

type hashJob struct {
	key      Key
	chunk    []byte
	size     int64
	parentWg *sync.WaitGroup
}

func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, swg, wwg *sync.WaitGroup) (Key, error) {

	if self.chunkSize <= 0 {
		panic("chunker must be initialised")
	}

	jobC := make(chan *hashJob, 2*processors)
	wg := &sync.WaitGroup{}
	errC := make(chan error)

	// wwg = workers waitgroup keeps track of hashworkers spawned by this split call
	if wwg != nil {
		wwg.Add(1)
	}
	go self.hashWorker(jobC, chunkC, errC, swg, wwg)

	depth := 0
	treeSize := self.chunkSize

	// takes lowest depth such that chunksize*HashCount^(depth+1) > size
	// power series, will find the order of magnitude of the data size in base hashCount or numbers of levels of branching in the resulting tree.
	for ; treeSize < size; treeSize *= self.branches {
		depth++
	}

	key := make([]byte, self.hashFunc().Size())
	// glog.V(logger.Detail).Infof("split request received for data (%v bytes, depth: %v)", size, depth)
	// this waitgroup member is released after the root hash is calculated
	wg.Add(1)
	// launch actual recursive function passing the waitgroups
	go self.split(depth, treeSize/self.branches, key, data, size, jobC, chunkC, errC, wg, swg, wwg)

	// closes internal error channel if all subprocesses in the workgroup finished
	go func() {
		// waiting for all threads to finish
		wg.Wait()
		// if storage waitgroup is non-nil, we wait for storage to finish too
		if swg != nil {
			// glog.V(logger.Detail).Infof("Waiting for storage to finish")
			swg.Wait()
		}
		close(errC)
	}()

	select {
	case err := <-errC:
		if err != nil {
			return nil, err
		}
	}
	return key, nil
}

func (self *TreeChunker) split(depth int, treeSize int64, key Key, data io.Reader, size int64, jobC chan *hashJob, chunkC chan *Chunk, errC chan error, parentWg, swg, wwg *sync.WaitGroup) {

	for depth > 0 && size < treeSize {
		treeSize /= self.branches
		depth--
	}

	if depth == 0 {
		// leaf nodes -> content chunks
		chunkData := make([]byte, size+8)
		binary.LittleEndian.PutUint64(chunkData[0:8], uint64(size))
		data.Read(chunkData[8:])
		select {
		case jobC <- &hashJob{key, chunkData, size, parentWg}:
		case <-errC:
		}
		// glog.V(logger.Detail).Infof("read %v", size)
		return
	}
	// intermediate chunk containing child nodes hashes
	branchCnt := int64((size + treeSize - 1) / treeSize)
	// glog.V(logger.Detail).Infof("intermediate node: setting branches: %v, depth: %v, max subtree size: %v, data size: %v", branches, depth, treeSize, size)

	chunk := make([]byte, branchCnt*self.hashSize+8)
	var pos, i int64

	binary.LittleEndian.PutUint64(chunk[0:8], uint64(size))

	childrenWg := &sync.WaitGroup{}
	var secSize int64
	for i < branchCnt {
		// the last item can have shorter data
		if size-pos < treeSize {
			secSize = size - pos
		} else {
			secSize = treeSize
		}
		// the hash of that data
		subTreeKey := chunk[8+i*self.hashSize : 8+(i+1)*self.hashSize]

		childrenWg.Add(1)
		self.split(depth-1, treeSize/self.branches, subTreeKey, data, secSize, jobC, chunkC, errC, childrenWg, swg, wwg)

		i++
		pos += treeSize
	}
	// wait for all the children to complete calculating their hashes and copying them onto sections of the chunk
	// parentWg.Add(1)
	// go func() {
	childrenWg.Wait()
	if len(jobC) > self.workerCount && self.workerCount < processors {
		if wwg != nil {
			wwg.Add(1)
		}
		self.workerCount++
		go self.hashWorker(jobC, chunkC, errC, swg, wwg)
	}
	select {
	case jobC <- &hashJob{key, chunk, size, parentWg}:
	case <-errC:
	}
}

func (self *TreeChunker) hashWorker(jobC chan *hashJob, chunkC chan *Chunk, errC chan error, swg, wwg *sync.WaitGroup) {
	hasher := self.hashFunc()
	if wwg != nil {
		defer wwg.Done()
	}
	for {
		select {

		case job, ok := <-jobC:
			if !ok {
				return
			}
			// now we have the hashes in the chunk, so hash the chunk itself
			hasher.Reset()
			self.hashChunk(hasher, job, chunkC, swg)
			// glog.V(logger.Detail).Infof("hash chunk (%v)", job.size)
		case <-errC:
			return
		}
	}
}

// The TreeChunker's own hash hashes together
// - the size (of the subtree encoded in the Chunk)
// - the Chunk, ie. the contents read from the input reader
func (self *TreeChunker) hashChunk(hasher hash.Hash, job *hashJob, chunkC chan *Chunk, swg *sync.WaitGroup) {
	hasher.Write(job.chunk)
	h := hasher.Sum(nil)
	newChunk := &Chunk{
		Key:   h,
		SData: job.chunk,
		Size:  job.size,
		wg:    swg,
	}

	// report hash of this chunk one level up (keys correspond to the proper subslice of the parent chunk)
	copy(job.key, h)
	// send off new chunk to storage
	if chunkC != nil {
		if swg != nil {
			swg.Add(1)
		}
	}
	job.parentWg.Done()
	if chunkC != nil {
		chunkC <- newChunk
	}
}

// LazyChunkReader implements LazySectionReader
type LazyChunkReader struct {
	key       Key         // root key
	chunkC    chan *Chunk // chunk channel to send retrieve requests on
	chunk     *Chunk      // size of the entire subtree
	off       int64       // offset
	chunkSize int64       // inherit from chunker
	branches  int64       // inherit from chunker
	hashSize  int64       // inherit from chunker
}

// implements the Joiner interface
func (self *TreeChunker) Join(key Key, chunkC chan *Chunk) LazySectionReader {

	return &LazyChunkReader{
		key:       key,
		chunkC:    chunkC,
		chunkSize: self.chunkSize,
		branches:  self.branches,
		hashSize:  self.hashSize,
	}
}

// Size is meant to be called on the LazySectionReader
func (self *LazyChunkReader) Size(quitC chan bool) (n int64, err error) {
	if self.chunk != nil {
		return self.chunk.Size, nil
	}
	chunk := retrieve(self.key, self.chunkC, quitC)
	if chunk == nil {
		select {
		case <-quitC:
			return 0, errors.New("aborted")
		default:
			return 0, fmt.Errorf("root chunk not found for %v", self.key.Hex())
		}
	}
	self.chunk = chunk
	return chunk.Size, nil
}

// ReadAt can be called numerous times
// concurrent reads are allowed
// Size() needs to be called synchronously on the LazyChunkReader first
func (self *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) {
	// this is correct, a swarm doc cannot be zero length, so no EOF is expected
	if len(b) == 0 {
		// glog.V(logger.Detail).Infof("Size query for %v", chunk.Key.Log())
		return 0, nil
	}
	quitC := make(chan bool)
	size, err := self.Size(quitC)
	if err != nil {
		return 0, err
	}
	// glog.V(logger.Detail).Infof("readAt: len(b): %v, off: %v, size: %v ", len(b), off, size)

	errC := make(chan error)
	// glog.V(logger.Detail).Infof("readAt: reading %v into %d bytes at offset %d.", self.chunk.Key.Log(), len(b), off)

	// glog.V(logger.Detail).Infof("-> want: %v, off: %v size: %v ", want, off, self.size)
	var treeSize int64
	var depth int
	// calculate depth and max treeSize
	treeSize = self.chunkSize
	for ; treeSize < size; treeSize *= self.branches {
		depth++
	}
	wg := sync.WaitGroup{}
	wg.Add(1)
	go self.join(b, off, off+int64(len(b)), depth, treeSize/self.branches, self.chunk, &wg, errC, quitC)
	go func() {
		wg.Wait()
		close(errC)
	}()

	err = <-errC
	if err != nil {
		close(quitC)

		return 0, err
	}
	// glog.V(logger.Detail).Infof("ReadAt received %v", err)
	// glog.V(logger.Detail).Infof("end: len(b): %v, off: %v, size: %v ", len(b), off, size)
	if off+int64(len(b)) >= size {
		// glog.V(logger.Detail).Infof(" len(b): %v EOF", len(b))
		return len(b), io.EOF
	}
	// glog.V(logger.Detail).Infof("ReadAt returning at %d: %v", read, err)
	return len(b), nil
}

func (self *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunk *Chunk, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) {
	defer parentWg.Done()
	// glog.V(logger.Detail).Infof("inh len(b): %v, off: %v eoff: %v ", len(b), off, eoff)

	// glog.V(logger.Detail).Infof("depth: %v, loff: %v, eoff: %v, chunk.Size: %v, treeSize: %v", depth, off, eoff, chunk.Size, treeSize)

	// chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))

	// find appropriate block level
	for chunk.Size < treeSize && depth > 0 {
		treeSize /= self.branches
		depth--
	}

	// leaf chunk found
	if depth == 0 {
		// glog.V(logger.Detail).Infof("depth: %v, len(b): %v, off: %v, eoff: %v, chunk.Size: %v %v, treeSize: %v", depth, len(b), off, eoff, chunk.Size, len(chunk.SData), treeSize)
		extra := 8 + eoff - int64(len(chunk.SData))
		if extra > 0 {
			eoff -= extra
		}
		copy(b, chunk.SData[8+off:8+eoff])
		return // simply give back the chunk's reader for content chunks
	}

	// subtree
	start := off / treeSize
	end := (eoff + treeSize - 1) / treeSize

	wg := &sync.WaitGroup{}
	defer wg.Wait()
	// glog.V(logger.Detail).Infof("start %v,end %v", start, end)

	for i := start; i < end; i++ {
		soff := i * treeSize
		roff := soff
		seoff := soff + treeSize

		if soff < off {
			soff = off
		}
		if seoff > eoff {
			seoff = eoff
		}
		if depth > 1 {
			wg.Wait()
		}
		wg.Add(1)
		go func(j int64) {
			childKey := chunk.SData[8+j*self.hashSize : 8+(j+1)*self.hashSize]
			// glog.V(logger.Detail).Infof("subtree index: %v -> %v", j, childKey.Log())
			chunk := retrieve(childKey, self.chunkC, quitC)
			if chunk == nil {
				select {
				case errC <- fmt.Errorf("chunk %v-%v not found", off, off+treeSize):
				case <-quitC:
				}
				return
			}
			if soff < off {
				soff = off
			}
			self.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/self.branches, chunk, wg, errC, quitC)
		}(i)
	}
}

// the helper method submits chunks for a key to a queue (DPA) and
// blocks until they time out or arrive
// abort if quitC is readable
func retrieve(key Key, chunkC chan *Chunk, quitC chan bool) *Chunk {
	chunk := &Chunk{
		Key: key,
		C:   make(chan bool), // close channel to signal data delivery
	}
	// glog.V(logger.Detail).Infof("chunk data sent for %v (key interval in chunk %v-%v)", ch.Key.Log(), j*self.chunker.hashSize, (j+1)*self.chunker.hashSize)
	// submit chunk for retrieval
	select {
	case chunkC <- chunk: // submit retrieval request, someone should be listening on the other side (or we will time out globally)
	case <-quitC:
		return nil
	}
	// waiting for the chunk retrieval
	select { // chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))

	case <-quitC:
		// this is how we control process leakage (quitC is closed once join is finished (after timeout))
		return nil
	case <-chunk.C: // bells are ringing, data have been delivered
		// glog.V(logger.Detail).Infof("chunk data received")
	}
	if len(chunk.SData) == 0 {
		return nil // chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))

	}
	return chunk
}

// Read keeps a cursor so it cannot be called simultaneously; see ReadAt
func (self *LazyChunkReader) Read(b []byte) (read int, err error) {
	read, err = self.ReadAt(b, self.off)
	// glog.V(logger.Detail).Infof("read: %v, off: %v, error: %v", read, self.off, err)

	self.off += int64(read)
	return
}

// completely analogous to standard SectionReader implementation
var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")

func (s *LazyChunkReader) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	default:
		return 0, errWhence
	case 0:
		offset += 0
	case 1:
		offset += s.off
	case 2:
		if s.chunk == nil {
			return 0, fmt.Errorf("seek from the end requires rootchunk for size. call Size first")
		}
		offset += s.chunk.Size
	}

	if offset < 0 {
		return 0, errOffset
	}
	s.off = offset
	return offset, nil
}
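A minimal sketch of the lazy read path, assuming some component is serving retrieval requests on chunkC (as the DPA does); rootKey and chunkC are placeholders supplied by the caller:

	chunker := NewTreeChunker(NewChunkerParams())
	reader := chunker.Join(rootKey, chunkC)
	quitC := make(chan bool)
	size, err := reader.Size(quitC) // Size must be called before ReadAt
	if err != nil {
		return err
	}
	buf := make([]byte, size)
	n, err := reader.ReadAt(buf, 0) // returns io.EOF once the end is reached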
303	swarm/storage/chunker_test.go	Normal file
@ -0,0 +1,303 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"runtime"
	"sync"
	"testing"
	"time"
)

/*
Tests TreeChunker by splitting and joining a random byte slice
*/

type test interface {
	Fatalf(string, ...interface{})
	Logf(string, ...interface{})
}

type chunkerTester struct {
	inputs map[uint64][]byte
	chunks map[string]*Chunk
	t      test
}

func (self *chunkerTester) checkChunks(t *testing.T, want int) {
	l := len(self.chunks)
	if l != want {
		t.Errorf("expected %v chunks, got %v", want, l)
	}
}

func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup) (key Key) {
	// reset
	self.chunks = make(map[string]*Chunk)

	if self.inputs == nil {
		self.inputs = make(map[uint64][]byte)
	}

	quitC := make(chan bool)
	timeout := time.After(600 * time.Second)
	if chunkC != nil {
		go func() {
			for {
				select {
				case <-timeout:
					self.t.Fatalf("Split timeout error")

				case chunk, ok := <-chunkC:
					if !ok {
						// glog.V(logger.Info).Infof("chunkC closed quitting")
						close(quitC)
						return
					}
					// glog.V(logger.Info).Infof("chunk %v received", len(self.chunks))
					// self.chunks = append(self.chunks, chunk)
					self.chunks[chunk.Key.String()] = chunk
					if chunk.wg != nil {
						chunk.wg.Done()
					}
				}
			}
		}()
	}
	key, err := chunker.Split(data, size, chunkC, swg, nil)
	if err != nil {
		self.t.Fatalf("Split error: %v", err)
	}
	if chunkC != nil {
		if swg != nil {
			// glog.V(logger.Info).Infof("Waiting for storage to finish")
			swg.Wait()
			// glog.V(logger.Info).Infof("Storage finished")
		}
		close(chunkC)
	}
	if chunkC != nil {
		// glog.V(logger.Info).Infof("waiting for splitter finished")
		<-quitC
		// glog.V(logger.Info).Infof("Splitter finished")
	}
	return
}

func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Chunk, quitC chan bool) LazySectionReader {
	// reset but not the chunks

	// glog.V(logger.Info).Infof("Splitter finished")
	reader := chunker.Join(key, chunkC)

	timeout := time.After(600 * time.Second)
	// glog.V(logger.Info).Infof("Splitter finished")
	i := 0
	go func() {
		for {
			select {
			case <-timeout:
				self.t.Fatalf("Join timeout error")

			case chunk, ok := <-chunkC:
				if !ok {
					close(quitC)
					return
				}
				// glog.V(logger.Info).Infof("chunk %v: %v", i, chunk.Key.String())
				// this just mocks the behaviour of a chunk store retrieval
				stored, success := self.chunks[chunk.Key.String()]
				// glog.V(logger.Info).Infof("chunk %v, success: %v", chunk.Key.String(), success)
				if !success {
					self.t.Fatalf("not found")
					return
				}
				// glog.V(logger.Info).Infof("chunk %v: %v", i, chunk.Key.String())
				chunk.SData = stored.SData
				chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
				close(chunk.C)
				i++
			}
		}
	}()
	return reader
}

func testRandomData(splitter Splitter, n int, tester *chunkerTester) {
	if tester.inputs == nil {
		tester.inputs = make(map[uint64][]byte)
	}
	input, found := tester.inputs[uint64(n)]
	var data io.Reader
	if !found {
		data, input = testDataReaderAndSlice(n)
		tester.inputs[uint64(n)] = input
	} else {
		data = limitReader(bytes.NewReader(input), n)
	}

	chunkC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}

	key := tester.Split(splitter, data, int64(n), chunkC, swg)
	tester.t.Logf(" Key = %v\n", key)

	chunkC = make(chan *Chunk, 1000)
	quitC := make(chan bool)

	chunker := NewTreeChunker(NewChunkerParams())
	reader := tester.Join(chunker, key, 0, chunkC, quitC)
	output := make([]byte, n)
	// glog.V(logger.Info).Infof(" Key = %v\n", key)
	r, err := reader.Read(output)
	// glog.V(logger.Info).Infof(" read = %v %v\n", r, err)
	if r != n || err != io.EOF {
		tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err)
	}
	if input != nil {
		if !bytes.Equal(output, input) {
			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input, output)
		}
	}
	close(chunkC)
	<-quitC
}

func TestRandomData(t *testing.T) {
	// sizes := []int{123456}
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 123456}
	tester := &chunkerTester{t: t}
	chunker := NewTreeChunker(NewChunkerParams())
	for _, s := range sizes {
		testRandomData(chunker, s, tester)
	}
	pyramid := NewPyramidChunker(NewChunkerParams())
	for _, s := range sizes {
		testRandomData(pyramid, s, tester)
	}
}

func readAll(reader LazySectionReader, result []byte) {
	size := int64(len(result))

	var end int64
	for pos := int64(0); pos < size; pos += 1000 {
		if pos+1000 > size {
			end = size
		} else {
			end = pos + 1000
		}
		reader.ReadAt(result[pos:end], pos)
	}
}

func benchReadAll(reader LazySectionReader) {
	size, _ := reader.Size(nil)
	output := make([]byte, 1000)
	for pos := int64(0); pos < size; pos += 1000 {
		reader.ReadAt(output, pos)
	}
}

func benchmarkJoin(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewTreeChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)

		chunkC := make(chan *Chunk, 1000)
		swg := &sync.WaitGroup{}

		key := tester.Split(chunker, data, int64(n), chunkC, swg)
		// t.StartTimer()
		chunkC = make(chan *Chunk, 1000)
		quitC := make(chan bool)
		reader := tester.Join(chunker, key, i, chunkC, quitC)
		benchReadAll(reader)
		close(chunkC)
		<-quitC
		// t.StopTimer()
	}
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	fmt.Println(stats.Sys)
}

func benchmarkSplitTree(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewTreeChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		// glog.V(logger.Info).Infof("splitting data of length %v", n)
		tester.Split(chunker, data, int64(n), nil, nil)
	}
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	fmt.Println(stats.Sys)
}

func benchmarkSplitPyramid(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		splitter := NewPyramidChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		// glog.V(logger.Info).Infof("splitting data of length %v", n)
		tester.Split(splitter, data, int64(n), nil, nil)
	}
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	fmt.Println(stats.Sys)
}

func BenchmarkJoin_2(t *testing.B) { benchmarkJoin(100, t) }
func BenchmarkJoin_3(t *testing.B) { benchmarkJoin(1000, t) }
func BenchmarkJoin_4(t *testing.B) { benchmarkJoin(10000, t) }
func BenchmarkJoin_5(t *testing.B) { benchmarkJoin(100000, t) }
func BenchmarkJoin_6(t *testing.B) { benchmarkJoin(1000000, t) }
func BenchmarkJoin_7(t *testing.B) { benchmarkJoin(10000000, t) }
func BenchmarkJoin_8(t *testing.B) { benchmarkJoin(100000000, t) }

func BenchmarkSplitTree_2(t *testing.B)  { benchmarkSplitTree(100, t) }
func BenchmarkSplitTree_2h(t *testing.B) { benchmarkSplitTree(500, t) }
func BenchmarkSplitTree_3(t *testing.B)  { benchmarkSplitTree(1000, t) }
func BenchmarkSplitTree_3h(t *testing.B) { benchmarkSplitTree(5000, t) }
func BenchmarkSplitTree_4(t *testing.B)  { benchmarkSplitTree(10000, t) }
func BenchmarkSplitTree_4h(t *testing.B) { benchmarkSplitTree(50000, t) }
func BenchmarkSplitTree_5(t *testing.B)  { benchmarkSplitTree(100000, t) }
func BenchmarkSplitTree_6(t *testing.B)  { benchmarkSplitTree(1000000, t) }
func BenchmarkSplitTree_7(t *testing.B)  { benchmarkSplitTree(10000000, t) }
func BenchmarkSplitTree_8(t *testing.B)  { benchmarkSplitTree(100000000, t) }

func BenchmarkSplitPyramid_2(t *testing.B)  { benchmarkSplitPyramid(100, t) }
func BenchmarkSplitPyramid_2h(t *testing.B) { benchmarkSplitPyramid(500, t) }
func BenchmarkSplitPyramid_3(t *testing.B)  { benchmarkSplitPyramid(1000, t) }
func BenchmarkSplitPyramid_3h(t *testing.B) { benchmarkSplitPyramid(5000, t) }
func BenchmarkSplitPyramid_4(t *testing.B)  { benchmarkSplitPyramid(10000, t) }
func BenchmarkSplitPyramid_4h(t *testing.B) { benchmarkSplitPyramid(50000, t) }
func BenchmarkSplitPyramid_5(t *testing.B)  { benchmarkSplitPyramid(100000, t) }
func BenchmarkSplitPyramid_6(t *testing.B)  { benchmarkSplitPyramid(1000000, t) }
func BenchmarkSplitPyramid_7(t *testing.B)  { benchmarkSplitPyramid(10000000, t) }
func BenchmarkSplitPyramid_8(t *testing.B)  { benchmarkSplitPyramid(100000000, t) }

// godep go test -bench ./swarm/storage -cpuprofile cpu.out -memprofile mem.out
117	swarm/storage/common_test.go	Normal file
@ -0,0 +1,117 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"crypto/rand"
	"io"
	"sync"
	"testing"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

type limitedReader struct {
	r    io.Reader
	off  int64
	size int64
}

func limitReader(r io.Reader, size int) *limitedReader {
	return &limitedReader{r, 0, int64(size)}
}

func (self *limitedReader) Read(buf []byte) (int, error) {
	limit := int64(len(buf))
	left := self.size - self.off
	if limit >= left {
		limit = left
	}
	n, err := self.r.Read(buf[:limit])
	if err == nil && limit == left {
		err = io.EOF
	}
	self.off += int64(n)
	return n, err
}

func testDataReader(l int) (r io.Reader) {
	return limitReader(rand.Reader, l)
}

func testDataReaderAndSlice(l int) (r io.Reader, slice []byte) {
	slice = make([]byte, l)
	if _, err := rand.Read(slice); err != nil {
		panic("rand error")
	}
	r = limitReader(bytes.NewReader(slice), l)
	return
}

func testStore(m ChunkStore, l int64, branches int64, t *testing.T) {

	chunkC := make(chan *Chunk)
	go func() {
		for chunk := range chunkC {
			m.Put(chunk)
			if chunk.wg != nil {
				chunk.wg.Done()
			}
		}
	}()
	chunker := NewTreeChunker(&ChunkerParams{
		Branches: branches,
		Hash:     defaultHash,
	})
	swg := &sync.WaitGroup{}
	key, err := chunker.Split(rand.Reader, l, chunkC, swg, nil)
	swg.Wait()
	close(chunkC)
	chunkC = make(chan *Chunk)

	quit := make(chan bool)

	go func() {
		for ch := range chunkC {
			go func(chunk *Chunk) {
				storedChunk, err := m.Get(chunk.Key)
				if err == notFound {
					glog.V(logger.Detail).Infof("chunk '%v' not found", chunk.Key.Log())
				} else if err != nil {
					glog.V(logger.Detail).Infof("error retrieving chunk %v: %v", chunk.Key.Log(), err)
				} else {
					chunk.SData = storedChunk.SData
					chunk.Size = storedChunk.Size
				}
				close(chunk.C)
			}(ch)
		}
		close(quit)
	}()
	r := chunker.Join(key, chunkC)

	b := make([]byte, l)
	n, err := r.ReadAt(b, 0)
	if err != io.EOF {
		t.Fatalf("read error (%v/%v) %v", n, l, err)
	}
	close(chunkC)
	<-quit
}
99	swarm/storage/database.go	Normal file
@ -0,0 +1,99 @@
|
|||||||
|
// Copyright 2016 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
// this is a clone of an earlier state of the ethereum ethdb/database
|
||||||
|
// no need for queueing/caching
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/compression/rle"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||||
|
)
|
||||||
|
|
||||||
|
const openFileLimit = 128
|
||||||
|
|
||||||
|
type LDBDatabase struct {
|
||||||
|
db *leveldb.DB
|
||||||
|
comp bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLDBDatabase(file string) (*LDBDatabase, error) {
|
||||||
|
// Open the db
|
||||||
|
db, err := leveldb.OpenFile(file, &opt.Options{OpenFilesCacheCapacity: openFileLimit})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
database := &LDBDatabase{db: db, comp: false}
|
||||||
|
|
||||||
|
return database, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) Put(key []byte, value []byte) {
|
||||||
|
if self.comp {
|
||||||
|
value = rle.Compress(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := self.db.Put(key, value, nil)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error put", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) Get(key []byte) ([]byte, error) {
|
||||||
|
dat, err := self.db.Get(key, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.comp {
|
||||||
|
return rle.Decompress(dat)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dat, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) Delete(key []byte) error {
|
||||||
|
return self.db.Delete(key, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) LastKnownTD() []byte {
|
||||||
|
data, _ := self.Get([]byte("LTD"))
|
||||||
|
|
||||||
|
if len(data) == 0 {
|
||||||
|
data = []byte{0x0}
|
||||||
|
}
|
||||||
|
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) NewIterator() iterator.Iterator {
|
||||||
|
return self.db.NewIterator(nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) Write(batch *leveldb.Batch) error {
|
||||||
|
return self.db.Write(batch, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *LDBDatabase) Close() {
|
||||||
|
// Close the leveldb database
|
||||||
|
self.db.Close()
|
||||||
|
}
|
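As a quick orientation for the wrapper above, here is a minimal round-trip sketch. It is not part of this diff; the path and the error handling are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// open (or create) a LevelDB instance at an illustrative path
	db, err := storage.NewLDBDatabase("/tmp/bzz-ldb-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// note: Put only prints write errors, it does not return them
	db.Put([]byte("k"), []byte("v"))
	val, err := db.Get([]byte("k"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", val) // prints: v
}

With comp hard-wired to false in the constructor, the rle compression branch is effectively disabled by default.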
473  swarm/storage/dbstore.go  Normal file
@ -0,0 +1,473 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// disk storage layer for the package bzz
// DbStore implements the ChunkStore interface and is used by the DPA as
// persistent storage of chunks
// it implements purging based on access count allowing for external control of
// max capacity

package storage

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

const (
	defaultDbCapacity = 5000000
	defaultRadius     = 0 // not yet used

	gcArraySize      = 10000
	gcArrayFreeRatio = 0.1

	// key prefixes for leveldb storage
	kpIndex = 0
	kpData  = 1
)

var (
	keyAccessCnt = []byte{2}
	keyEntryCnt  = []byte{3}
	keyDataIdx   = []byte{4}
	keyGCPos     = []byte{5}
)

type gcItem struct {
	idx    uint64
	value  uint64
	idxKey []byte
}

type DbStore struct {
	db *LDBDatabase

	// this should be stored in db, accessed transactionally
	entryCnt, accessCnt, dataIdx, capacity uint64

	gcPos, gcStartPos []byte
	gcArray           []*gcItem

	hashfunc Hasher

	lock sync.Mutex
}

func NewDbStore(path string, hash Hasher, capacity uint64, radius int) (s *DbStore, err error) {
	s = new(DbStore)

	s.hashfunc = hash

	s.db, err = NewLDBDatabase(path)
	if err != nil {
		return
	}

	s.setCapacity(capacity)

	s.gcStartPos = make([]byte, 1)
	s.gcStartPos[0] = kpIndex
	s.gcArray = make([]*gcItem, gcArraySize)

	data, _ := s.db.Get(keyEntryCnt)
	s.entryCnt = BytesToU64(data)
	data, _ = s.db.Get(keyAccessCnt)
	s.accessCnt = BytesToU64(data)
	data, _ = s.db.Get(keyDataIdx)
	s.dataIdx = BytesToU64(data)
	s.gcPos, _ = s.db.Get(keyGCPos)
	if s.gcPos == nil {
		s.gcPos = s.gcStartPos
	}
	return
}

type dpaDBIndex struct {
	Idx    uint64
	Access uint64
}

func BytesToU64(data []byte) uint64 {
	if len(data) < 8 {
		return 0
	}
	return binary.LittleEndian.Uint64(data)
}

func U64ToBytes(val uint64) []byte {
	data := make([]byte, 8)
	binary.LittleEndian.PutUint64(data, val)
	return data
}

func getIndexGCValue(index *dpaDBIndex) uint64 {
	return index.Access
}

func (s *DbStore) updateIndexAccess(index *dpaDBIndex) {
	index.Access = s.accessCnt
}

func getIndexKey(hash Key) []byte {
	HashSize := len(hash)
	key := make([]byte, HashSize+1)
	key[0] = 0
	copy(key[1:], hash[:])
	return key
}

func getDataKey(idx uint64) []byte {
	key := make([]byte, 9)
	key[0] = 1
	binary.BigEndian.PutUint64(key[1:9], idx)

	return key
}

func encodeIndex(index *dpaDBIndex) []byte {
	data, _ := rlp.EncodeToBytes(index)
	return data
}

func encodeData(chunk *Chunk) []byte {
	return chunk.SData
}

func decodeIndex(data []byte, index *dpaDBIndex) {
	dec := rlp.NewStream(bytes.NewReader(data), 0)
	dec.Decode(index)
}

func decodeData(data []byte, chunk *Chunk) {
	chunk.SData = data
	chunk.Size = int64(binary.LittleEndian.Uint64(data[0:8]))
}

func gcListPartition(list []*gcItem, left int, right int, pivotIndex int) int {
	pivotValue := list[pivotIndex].value
	dd := list[pivotIndex]
	list[pivotIndex] = list[right]
	list[right] = dd
	storeIndex := left
	for i := left; i < right; i++ {
		if list[i].value < pivotValue {
			dd = list[storeIndex]
			list[storeIndex] = list[i]
			list[i] = dd
			storeIndex++
		}
	}
	dd = list[storeIndex]
	list[storeIndex] = list[right]
	list[right] = dd
	return storeIndex
}

func gcListSelect(list []*gcItem, left int, right int, n int) int {
	if left == right {
		return left
	}
	pivotIndex := (left + right) / 2
	pivotIndex = gcListPartition(list, left, right, pivotIndex)
	if n == pivotIndex {
		return n
	} else {
		if n < pivotIndex {
			return gcListSelect(list, left, pivotIndex-1, n)
		} else {
			return gcListSelect(list, pivotIndex+1, right, n)
		}
	}
}
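gcListPartition/gcListSelect above are a textbook in-place quickselect; collectGarbage below uses them to find the access-count value that separates the coldest slice of candidates. A standalone sketch of the same selection on plain uint64 values, for illustration only (not part of the diff):

package main

import "fmt"

// partition mirrors gcListPartition on bare values
func partition(list []uint64, left, right, pivotIndex int) int {
	pivotValue := list[pivotIndex]
	list[pivotIndex], list[right] = list[right], list[pivotIndex]
	storeIndex := left
	for i := left; i < right; i++ {
		if list[i] < pivotValue {
			list[storeIndex], list[i] = list[i], list[storeIndex]
			storeIndex++
		}
	}
	list[storeIndex], list[right] = list[right], list[storeIndex]
	return storeIndex
}

// sel mirrors gcListSelect: index of the n-th smallest value
func sel(list []uint64, left, right, n int) int {
	if left == right {
		return left
	}
	pivotIndex := partition(list, left, right, (left+right)/2)
	switch {
	case n == pivotIndex:
		return n
	case n < pivotIndex:
		return sel(list, left, pivotIndex-1, n)
	default:
		return sel(list, pivotIndex+1, right, n)
	}
}

func main() {
	access := []uint64{42, 7, 19, 3, 88, 25, 11, 60, 5, 30}
	// with ratio 0.1 and 10 candidates, the cut index is int(10*0.1) = 1,
	// i.e. the second smallest access value; everything <= it gets evicted
	cut := access[sel(access, 0, len(access)-1, 1)]
	fmt.Println(cut) // prints: 5 (so the entries with access 3 and 5 go)
}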
func (s *DbStore) collectGarbage(ratio float32) {
	it := s.db.NewIterator()
	it.Seek(s.gcPos)
	if it.Valid() {
		s.gcPos = it.Key()
	} else {
		s.gcPos = nil
	}
	gcnt := 0

	for (gcnt < gcArraySize) && (uint64(gcnt) < s.entryCnt) {

		if (s.gcPos == nil) || (s.gcPos[0] != kpIndex) {
			it.Seek(s.gcStartPos)
			if it.Valid() {
				s.gcPos = it.Key()
			} else {
				s.gcPos = nil
			}
		}

		if (s.gcPos == nil) || (s.gcPos[0] != kpIndex) {
			break
		}

		gci := new(gcItem)
		gci.idxKey = s.gcPos
		var index dpaDBIndex
		decodeIndex(it.Value(), &index)
		gci.idx = index.Idx
		// the smaller, the more likely to be gc'd
		gci.value = getIndexGCValue(&index)
		s.gcArray[gcnt] = gci
		gcnt++
		it.Next()
		if it.Valid() {
			s.gcPos = it.Key()
		} else {
			s.gcPos = nil
		}
	}
	it.Release()

	cutidx := gcListSelect(s.gcArray, 0, gcnt-1, int(float32(gcnt)*ratio))
	cutval := s.gcArray[cutidx].value

	// fmt.Print(gcnt, " ", s.entryCnt, " ")

	// actual gc
	for i := 0; i < gcnt; i++ {
		if s.gcArray[i].value <= cutval {
			batch := new(leveldb.Batch)
			batch.Delete(s.gcArray[i].idxKey)
			batch.Delete(getDataKey(s.gcArray[i].idx))
			s.entryCnt--
			batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
			s.db.Write(batch)
		}
	}

	// fmt.Println(s.entryCnt)

	s.db.Put(keyGCPos, s.gcPos)
}

func (s *DbStore) Counter() uint64 {
	s.lock.Lock()
	defer s.lock.Unlock()
	return s.dataIdx
}

func (s *DbStore) Put(chunk *Chunk) {
	s.lock.Lock()
	defer s.lock.Unlock()

	ikey := getIndexKey(chunk.Key)
	var index dpaDBIndex

	if s.tryAccessIdx(ikey, &index) {
		if chunk.dbStored != nil {
			close(chunk.dbStored)
		}
		return // already exists, only update access
	}

	data := encodeData(chunk)
	//data := ethutil.Encode([]interface{}{entry})

	if s.entryCnt >= s.capacity {
		s.collectGarbage(gcArrayFreeRatio)
	}

	batch := new(leveldb.Batch)

	batch.Put(getDataKey(s.dataIdx), data)

	index.Idx = s.dataIdx
	s.updateIndexAccess(&index)

	idata := encodeIndex(&index)
	batch.Put(ikey, idata)

	batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
	s.entryCnt++
	batch.Put(keyDataIdx, U64ToBytes(s.dataIdx))
	s.dataIdx++
	batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
	s.accessCnt++

	s.db.Write(batch)
	if chunk.dbStored != nil {
		close(chunk.dbStored)
	}
	glog.V(logger.Detail).Infof("DbStore.Put: %v. db storage counter: %v ", chunk.Key.Log(), s.dataIdx)
}

// try to find index; if found, update access cnt and return true
func (s *DbStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
	idata, err := s.db.Get(ikey)
	if err != nil {
		return false
	}
	decodeIndex(idata, index)

	batch := new(leveldb.Batch)

	batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
	s.accessCnt++
	s.updateIndexAccess(index)
	idata = encodeIndex(index)
	batch.Put(ikey, idata)

	s.db.Write(batch)

	return true
}

func (s *DbStore) Get(key Key) (chunk *Chunk, err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	var index dpaDBIndex

	if s.tryAccessIdx(getIndexKey(key), &index) {
		var data []byte
		data, err = s.db.Get(getDataKey(index.Idx))
		if err != nil {
			return
		}

		hasher := s.hashfunc()
		hasher.Write(data)
		hash := hasher.Sum(nil)
		if bytes.Compare(hash, key) != 0 {
			s.db.Delete(getDataKey(index.Idx))
			err = fmt.Errorf("invalid chunk. hash=%x, key=%v", hash, key[:])
			return
		}

		chunk = &Chunk{
			Key: key,
		}
		decodeData(data, chunk)
	} else {
		err = notFound
	}

	return
}

func (s *DbStore) updateAccessCnt(key Key) {
	s.lock.Lock()
	defer s.lock.Unlock()

	var index dpaDBIndex
	s.tryAccessIdx(getIndexKey(key), &index) // result_chn == nil, only update access cnt
}

func (s *DbStore) setCapacity(c uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.capacity = c

	if s.entryCnt > c {
		var ratio float32
		ratio = float32(1.01) - float32(c)/float32(s.entryCnt)
		if ratio < gcArrayFreeRatio {
			ratio = gcArrayFreeRatio
		}
		if ratio > 1 {
			ratio = 1
		}
		for s.entryCnt > c {
			s.collectGarbage(ratio)
		}
	}
}

func (s *DbStore) getEntryCnt() uint64 {
	return s.entryCnt
}

func (s *DbStore) close() {
	s.db.Close()
}
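Putting the pieces together: once entryCnt reaches capacity, every further Put triggers collectGarbage(gcArrayFreeRatio) and evicts roughly the coldest 10% of entries. A hedged usage sketch; the path, key scheme and counts are illustrative, and since close is unexported the handle is simply dropped:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// a DbStore capped at 100 entries
	store, err := storage.NewDbStore("/tmp/bzz-dbstore-demo", storage.MakeHashFunc("SHA256"), 100, 0)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 200; i++ {
		key := make([]byte, 32)
		binary.BigEndian.PutUint64(key[24:], uint64(i))
		// keys are synthetic here, so Get (which re-hashes the data) is not exercised
		store.Put(storage.NewChunk(storage.Key(key), nil))
	}
	// the data index keeps growing monotonically, while the entry count is
	// held near 100 by the access-count based garbage collection
	fmt.Println(store.Counter())
}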
// DbSyncState describes a section of the DbStore representing the unsynced
// domain relevant to a peer.
// Start - Stop designate a continuous area of Keys in an address space,
// typically the addresses closer to us than to the peer, with no closer
// peer in between.
// First - Last designates a time interval, typically from the last disconnect
// till the latest connection (real-time traffic is relayed).
type DbSyncState struct {
	Start, Stop Key
	First, Last uint64
}

// implements the syncer iterator interface
// iterates by storage index (~ time of storage = first entry to db)
type dbSyncIterator struct {
	it iterator.Iterator
	DbSyncState
}

// initialises a sync iterator from a syncToken (passed in with the handshake)
func (self *DbStore) NewSyncIterator(state DbSyncState) (si *dbSyncIterator, err error) {
	if state.First > state.Last {
		return nil, fmt.Errorf("no entries found")
	}
	si = &dbSyncIterator{
		it:          self.db.NewIterator(),
		DbSyncState: state,
	}
	si.it.Seek(getIndexKey(state.Start))
	return si, nil
}

// Next walks the area from Start (exclusive) to Stop (inclusive) and returns
// the next key whose storage index falls within [First, Last)
func (self *dbSyncIterator) Next() (key Key) {
	for self.it.Valid() {
		dbkey := self.it.Key()
		if dbkey[0] != 0 {
			break
		}
		key = Key(make([]byte, len(dbkey)-1))
		copy(key[:], dbkey[1:])
		if bytes.Compare(key[:], self.Start) <= 0 {
			self.it.Next()
			continue
		}
		if bytes.Compare(key[:], self.Stop) > 0 {
			break
		}
		var index dpaDBIndex
		decodeIndex(self.it.Value(), &index)
		self.it.Next()
		if (index.Idx >= self.First) && (index.Idx < self.Last) {
			return
		}
	}
	self.it.Release()
	return nil
}
191  swarm/storage/dbstore_test.go  Normal file
@ -0,0 +1,191 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"io/ioutil"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func initDbStore(t *testing.T) *DbStore {
	dir, err := ioutil.TempDir("", "bzz-storage-test")
	if err != nil {
		t.Fatal(err)
	}
	m, err := NewDbStore(dir, MakeHashFunc(defaultHash), defaultDbCapacity, defaultRadius)
	if err != nil {
		t.Fatal("can't create store:", err)
	}
	return m
}

func testDbStore(l int64, branches int64, t *testing.T) {
	m := initDbStore(t)
	defer m.close()
	testStore(m, l, branches, t)
}

func TestDbStore128_0x1000000(t *testing.T) {
	testDbStore(0x1000000, 128, t)
}

func TestDbStore128_10000_(t *testing.T) {
	testDbStore(10000, 128, t)
}

func TestDbStore128_1000_(t *testing.T) {
	testDbStore(1000, 128, t)
}

func TestDbStore128_100_(t *testing.T) {
	testDbStore(100, 128, t)
}

func TestDbStore2_100_(t *testing.T) {
	testDbStore(100, 2, t)
}

func TestDbStoreNotFound(t *testing.T) {
	m := initDbStore(t)
	defer m.close()
	_, err := m.Get(ZeroKey)
	if err != notFound {
		t.Errorf("Expected notFound, got %v", err)
	}
}

func TestDbStoreSyncIterator(t *testing.T) {
	m := initDbStore(t)
	defer m.close()
	keys := []Key{
		Key(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")),
		Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")),
		Key(common.Hex2Bytes("5000000000000000000000000000000000000000000000000000000000000000")),
		Key(common.Hex2Bytes("3000000000000000000000000000000000000000000000000000000000000000")),
		Key(common.Hex2Bytes("2000000000000000000000000000000000000000000000000000000000000000")),
		Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")),
	}
	for _, key := range keys {
		m.Put(NewChunk(key, nil))
	}
	it, err := m.NewSyncIterator(DbSyncState{
		Start: Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")),
		Stop:  Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")),
		First: 2,
		Last:  4,
	})
	if err != nil {
		t.Fatalf("unexpected error creating NewSyncIterator")
	}

	var chunk Key
	var res []Key
	for {
		chunk = it.Next()
		if chunk == nil {
			break
		}
		res = append(res, chunk)
	}
	if len(res) != 1 {
		t.Fatalf("Expected 1 chunk, got %v: %v", len(res), res)
	}
	if !bytes.Equal(res[0][:], keys[3]) {
		t.Fatalf("Expected %v chunk, got %v", keys[3], res[0])
	}

	if err != nil {
		t.Fatalf("unexpected error creating NewSyncIterator")
	}

	it, err = m.NewSyncIterator(DbSyncState{
		Start: Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")),
		Stop:  Key(common.Hex2Bytes("5000000000000000000000000000000000000000000000000000000000000000")),
		First: 2,
		Last:  4,
	})

	res = nil
	for {
		chunk = it.Next()
		if chunk == nil {
			break
		}
		res = append(res, chunk)
	}
	if len(res) != 2 {
		t.Fatalf("Expected 2 chunks, got %v: %v", len(res), res)
	}
	if !bytes.Equal(res[0][:], keys[3]) {
		t.Fatalf("Expected %v chunk, got %v", keys[3], res[0])
	}
	if !bytes.Equal(res[1][:], keys[2]) {
		t.Fatalf("Expected %v chunk, got %v", keys[2], res[1])
	}

	if err != nil {
		t.Fatalf("unexpected error creating NewSyncIterator")
	}

	it, err = m.NewSyncIterator(DbSyncState{
		Start: Key(common.Hex2Bytes("1000000000000000000000000000000000000000000000000000000000000000")),
		Stop:  Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")),
		First: 2,
		Last:  5,
	})
	res = nil
	for {
		chunk = it.Next()
		if chunk == nil {
			break
		}
		res = append(res, chunk)
	}
	if len(res) != 2 {
		t.Fatalf("Expected 2 chunks, got %v", len(res))
	}
	if !bytes.Equal(res[0][:], keys[4]) {
		t.Fatalf("Expected %v chunk, got %v", keys[4], res[0])
	}
	if !bytes.Equal(res[1][:], keys[3]) {
		t.Fatalf("Expected %v chunk, got %v", keys[3], res[1])
	}

	it, err = m.NewSyncIterator(DbSyncState{
		Start: Key(common.Hex2Bytes("2000000000000000000000000000000000000000000000000000000000000000")),
		Stop:  Key(common.Hex2Bytes("4000000000000000000000000000000000000000000000000000000000000000")),
		First: 2,
		Last:  5,
	})
	res = nil
	for {
		chunk = it.Next()
		if chunk == nil {
			break
		}
		res = append(res, chunk)
	}
	if len(res) != 1 {
		t.Fatalf("Expected 1 chunk, got %v", len(res))
	}
	if !bytes.Equal(res[0][:], keys[3]) {
		t.Fatalf("Expected %v chunk, got %v", keys[3], res[0])
	}
}
239  swarm/storage/dpa.go  Normal file
@ -0,0 +1,239 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"errors"
	"io"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

/*
DPA provides the client API entrypoints Store and Retrieve to store and retrieve data.
It can store anything that has a byte slice representation, so files or serialised objects etc.

Storage: DPA calls the Chunker to segment the input datastream of any size to a merkle hashed tree of chunks. The key of the root block is returned to the client.

Retrieval: given the key of the root block, the DPA retrieves the block chunks and reconstructs the original data and passes it back as a lazy reader. A lazy reader is a reader with on-demand delayed processing, i.e. the chunks needed to reconstruct a large file are only fetched and processed if that particular part of the document is actually read.

As the chunker produces chunks, DPA dispatches them to its own chunk store
implementation for storage or retrieval.
*/

const (
	storeChanCapacity           = 100
	retrieveChanCapacity        = 100
	singletonSwarmDbCapacity    = 50000
	singletonSwarmCacheCapacity = 500
	maxStoreProcesses           = 8
	maxRetrieveProcesses        = 8
)

var (
	notFound = errors.New("not found")
)

type DPA struct {
	ChunkStore
	storeC    chan *Chunk
	retrieveC chan *Chunk
	Chunker   Chunker

	lock    sync.Mutex
	running bool
	wg      *sync.WaitGroup
	quitC   chan bool
}

// for testing locally
func NewLocalDPA(datadir string) (*DPA, error) {

	hash := MakeHashFunc("SHA256")

	dbStore, err := NewDbStore(datadir, hash, singletonSwarmDbCapacity, 0)
	if err != nil {
		return nil, err
	}

	return NewDPA(&LocalStore{
		NewMemStore(dbStore, singletonSwarmCacheCapacity),
		dbStore,
	}, NewChunkerParams()), nil
}

func NewDPA(store ChunkStore, params *ChunkerParams) *DPA {
	chunker := NewTreeChunker(params)
	return &DPA{
		Chunker:    chunker,
		ChunkStore: store,
	}
}

// Public API. Main entry point for document retrieval directly. Used by the
// FS-aware API and httpaccess.
// Chunk retrieval blocks on netStore requests with a timeout, so the reader
// will report an error if retrieval of chunks within the requested range times out.
func (self *DPA) Retrieve(key Key) LazySectionReader {
	return self.Chunker.Join(key, self.retrieveC)
}

// Public API. Main entry point for document storage directly. Used by the
// FS-aware API and httpaccess
func (self *DPA) Store(data io.Reader, size int64, swg *sync.WaitGroup, wwg *sync.WaitGroup) (key Key, err error) {
	return self.Chunker.Split(data, size, self.storeC, swg, wwg)
}

func (self *DPA) Start() {
	self.lock.Lock()
	defer self.lock.Unlock()
	if self.running {
		return
	}
	self.running = true
	self.retrieveC = make(chan *Chunk, retrieveChanCapacity)
	self.storeC = make(chan *Chunk, storeChanCapacity)
	self.quitC = make(chan bool)
	self.storeLoop()
	self.retrieveLoop()
}

func (self *DPA) Stop() {
	self.lock.Lock()
	defer self.lock.Unlock()
	if !self.running {
		return
	}
	self.running = false
	close(self.quitC)
}

// retrieveLoop dispatches the parallel chunk retrieval requests received on the
// retrieve channel to its ChunkStore (NetStore or LocalStore)
func (self *DPA) retrieveLoop() {
	for i := 0; i < maxRetrieveProcesses; i++ {
		go self.retrieveWorker()
	}
	glog.V(logger.Detail).Infof("dpa: retrieve loop spawning %v workers", maxRetrieveProcesses)
}

func (self *DPA) retrieveWorker() {
	for chunk := range self.retrieveC {
		glog.V(logger.Detail).Infof("dpa: retrieve loop : chunk %v", chunk.Key.Log())
		storedChunk, err := self.Get(chunk.Key)
		if err == notFound {
			glog.V(logger.Detail).Infof("chunk %v not found", chunk.Key.Log())
		} else if err != nil {
			glog.V(logger.Detail).Infof("error retrieving chunk %v: %v", chunk.Key.Log(), err)
		} else {
			chunk.SData = storedChunk.SData
			chunk.Size = storedChunk.Size
		}
		close(chunk.C)

		select {
		case <-self.quitC:
			return
		default:
		}
	}
}

// storeLoop dispatches the parallel chunk store request processors
// received on the store channel to its ChunkStore (NetStore or LocalStore)
func (self *DPA) storeLoop() {
	for i := 0; i < maxStoreProcesses; i++ {
		go self.storeWorker()
	}
	glog.V(logger.Detail).Infof("dpa: store spawning %v workers", maxStoreProcesses)
}

func (self *DPA) storeWorker() {
	for chunk := range self.storeC {
		self.Put(chunk)
		if chunk.wg != nil {
			glog.V(logger.Detail).Infof("dpa: store processor %v", chunk.Key.Log())
			chunk.wg.Done()
		}
		select {
		case <-self.quitC:
			return
		default:
		}
	}
}

// dpaChunkStore implements the ChunkStore interface.
// This chunk access layer assumes two chunk stores:
// local storage (e.g. LocalStore) and network storage (e.g. NetStore).
// Access by calling the network is blocking, with a timeout.

type dpaChunkStore struct {
	n          int
	localStore ChunkStore
	netStore   ChunkStore
}

func NewDpaChunkStore(localStore, netStore ChunkStore) *dpaChunkStore {
	return &dpaChunkStore{0, localStore, netStore}
}

// Get is the entrypoint for local retrieve requests
// waits for response or times out
func (self *dpaChunkStore) Get(key Key) (chunk *Chunk, err error) {
	chunk, err = self.netStore.Get(key)
	// timeout := time.Now().Add(searchTimeout)
	if chunk.SData != nil {
		glog.V(logger.Detail).Infof("DPA.Get: %v found locally, %d bytes", key.Log(), len(chunk.SData))
		return
	}
	// TODO: use self.timer time.Timer and reset with defer disableTimer
	timer := time.After(searchTimeout)
	select {
	case <-timer:
		glog.V(logger.Detail).Infof("DPA.Get: %v request time out ", key.Log())
		err = notFound
	case <-chunk.Req.C:
		glog.V(logger.Detail).Infof("DPA.Get: %v retrieved, %d bytes (%p)", key.Log(), len(chunk.SData), chunk)
	}
	return
}

// Put is the entrypoint for local store requests coming from storeLoop
func (self *dpaChunkStore) Put(entry *Chunk) {
	chunk, err := self.localStore.Get(entry.Key)
	if err != nil {
		glog.V(logger.Detail).Infof("DPA.Put: %v new chunk. call netStore.Put", entry.Key.Log())
		chunk = entry
	} else if chunk.SData == nil {
		glog.V(logger.Detail).Infof("DPA.Put: %v request entry found", entry.Key.Log())
		chunk.SData = entry.SData
		chunk.Size = entry.Size
	} else {
		glog.V(logger.Detail).Infof("DPA.Put: %v chunk already known", entry.Key.Log())
		return
	}
	// from this point on the storage logic is the same with network storage requests
	glog.V(logger.Detail).Infof("DPA.Put %v: %v", self.n, chunk.Key.Log())
	self.n++
	self.netStore.Put(chunk)
}
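The "lazy reader" described in the doc comment is the point of the Join interface: ReadAt only triggers retrieval of the chunks covering the requested byte range. A sketch under the assumption that NewLocalDPA from this diff is used; the path, payload and offset are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	dpa, err := storage.NewLocalDPA("/tmp/bzz-dpa-demo")
	if err != nil {
		panic(err)
	}
	dpa.Start()
	defer dpa.Stop()

	data := bytes.Repeat([]byte("swarm"), 1<<16) // 320 KiB payload
	wg := &sync.WaitGroup{}
	key, err := dpa.Store(bytes.NewReader(data), int64(len(data)), wg, nil)
	if err != nil {
		panic(err)
	}
	wg.Wait()

	// read 16 bytes from the middle: only the chunks on the tree path
	// covering this range are fetched, not the whole document
	section := make([]byte, 16)
	reader := dpa.Retrieve(key)
	if _, err := reader.ReadAt(section, 100000); err != nil && err != io.EOF {
		panic(err)
	}
	fmt.Printf("%s\n", section)
}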
144  swarm/storage/dpa_test.go  Normal file
@ -0,0 +1,144 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"sync"
	"testing"
)

const testDataSize = 0x1000000

func TestDPArandom(t *testing.T) {
	dbStore := initDbStore(t)
	dbStore.setCapacity(50000)
	memStore := NewMemStore(dbStore, defaultCacheCapacity)
	localStore := &LocalStore{
		memStore,
		dbStore,
	}
	chunker := NewTreeChunker(NewChunkerParams())
	dpa := &DPA{
		Chunker:    chunker,
		ChunkStore: localStore,
	}
	dpa.Start()
	defer dpa.Stop()
	defer os.RemoveAll("/tmp/bzz")

	reader, slice := testDataReaderAndSlice(testDataSize)
	wg := &sync.WaitGroup{}
	key, err := dpa.Store(reader, testDataSize, wg, nil)
	if err != nil {
		t.Errorf("Store error: %v", err)
	}
	wg.Wait()
	resultReader := dpa.Retrieve(key)
	resultSlice := make([]byte, len(slice))
	n, err := resultReader.ReadAt(resultSlice, 0)
	if err != io.EOF {
		t.Errorf("Retrieve error: %v", err)
	}
	if n != len(slice) {
		t.Errorf("Slice size error got %d, expected %d.", n, len(slice))
	}
	if !bytes.Equal(slice, resultSlice) {
		t.Errorf("Comparison error.")
	}
	ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
	ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
	localStore.memStore = NewMemStore(dbStore, defaultCacheCapacity)
	resultReader = dpa.Retrieve(key)
	for i := range resultSlice {
		resultSlice[i] = 0
	}
	n, err = resultReader.ReadAt(resultSlice, 0)
	if err != io.EOF {
		t.Errorf("Retrieve error after removing memStore: %v", err)
	}
	if n != len(slice) {
		t.Errorf("Slice size error after removing memStore got %d, expected %d.", n, len(slice))
	}
	if !bytes.Equal(slice, resultSlice) {
		t.Errorf("Comparison error after removing memStore.")
	}
}

func TestDPA_capacity(t *testing.T) {
	dbStore := initDbStore(t)
	memStore := NewMemStore(dbStore, defaultCacheCapacity)
	localStore := &LocalStore{
		memStore,
		dbStore,
	}
	memStore.setCapacity(0)
	chunker := NewTreeChunker(NewChunkerParams())
	dpa := &DPA{
		Chunker:    chunker,
		ChunkStore: localStore,
	}
	dpa.Start()
	reader, slice := testDataReaderAndSlice(testDataSize)
	wg := &sync.WaitGroup{}
	key, err := dpa.Store(reader, testDataSize, wg, nil)
	if err != nil {
		t.Errorf("Store error: %v", err)
	}
	wg.Wait()
	resultReader := dpa.Retrieve(key)
	resultSlice := make([]byte, len(slice))
	n, err := resultReader.ReadAt(resultSlice, 0)
	if err != io.EOF {
		t.Errorf("Retrieve error: %v", err)
	}
	if n != len(slice) {
		t.Errorf("Slice size error got %d, expected %d.", n, len(slice))
	}
	if !bytes.Equal(slice, resultSlice) {
		t.Errorf("Comparison error.")
	}
	// Clear memStore
	memStore.setCapacity(0)
	// check whether it is, indeed, empty
	dpa.ChunkStore = memStore
	resultReader = dpa.Retrieve(key)
	n, err = resultReader.ReadAt(resultSlice, 0)
	if err == nil {
		t.Errorf("Was able to read %d bytes from an empty memStore.", len(slice))
	}
	// check how it works with localStore
	dpa.ChunkStore = localStore
	// localStore.dbStore.setCapacity(0)
	resultReader = dpa.Retrieve(key)
	for i := range resultSlice {
		resultSlice[i] = 0
	}
	n, err = resultReader.ReadAt(resultSlice, 0)
	if err != io.EOF {
		t.Errorf("Retrieve error after clearing memStore: %v", err)
	}
	if n != len(slice) {
		t.Errorf("Slice size error after clearing memStore got %d, expected %d.", n, len(slice))
	}
	if !bytes.Equal(slice, resultSlice) {
		t.Errorf("Comparison error after clearing memStore.")
	}
}
74  swarm/storage/localstore.go  Normal file
@ -0,0 +1,74 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"encoding/binary"
)

// LocalStore is a combination of an in-memory db over a disk-persisted db.
// It implements Get/Put with fallback (caching) logic using any two ChunkStores.
type LocalStore struct {
	memStore ChunkStore
	DbStore  ChunkStore
}

// This constructor uses MemStore and DbStore as components
func NewLocalStore(hash Hasher, params *StoreParams) (*LocalStore, error) {
	dbStore, err := NewDbStore(params.ChunkDbPath, hash, params.DbCapacity, params.Radius)
	if err != nil {
		return nil, err
	}
	return &LocalStore{
		memStore: NewMemStore(dbStore, params.CacheCapacity),
		DbStore:  dbStore,
	}, nil
}

// LocalStore is itself a chunk store
// unsafe, in that the data is not integrity checked
func (self *LocalStore) Put(chunk *Chunk) {
	chunk.dbStored = make(chan bool)
	self.memStore.Put(chunk)
	if chunk.wg != nil {
		chunk.wg.Add(1)
	}
	go func() {
		self.DbStore.Put(chunk)
		if chunk.wg != nil {
			chunk.wg.Done()
		}
	}()
}

// Get looks up a chunk in the local stores.
// This method blocks until the chunk is retrieved, so an additional timeout
// may be needed to wrap this call if ChunkStores are remote and can have
// long latency.
func (self *LocalStore) Get(key Key) (chunk *Chunk, err error) {
	chunk, err = self.memStore.Get(key)
	if err == nil {
		return
	}
	chunk, err = self.DbStore.Get(key)
	if err != nil {
		return
	}
	chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
	self.memStore.Put(chunk)
	return
}
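The Get above is a classic read-through cache: try the fast store, fall back to the slow one, and repopulate the fast store on the way out. The same shape, reduced to hypothetical toy types purely for illustration:

package main

import "fmt"

type store interface {
	get(key string) (string, bool)
	put(key, val string)
}

type mapStore map[string]string

func (m mapStore) get(k string) (string, bool) { v, ok := m[k]; return v, ok }
func (m mapStore) put(k, v string)             { m[k] = v }

// tiered mirrors LocalStore: a fast store backed by a slow one
type tiered struct{ fast, slow store }

func (t tiered) get(k string) (string, bool) {
	if v, ok := t.fast.get(k); ok {
		return v, true // cache hit
	}
	v, ok := t.slow.get(k)
	if ok {
		t.fast.put(k, v) // read-through: populate the cache
	}
	return v, ok
}

func main() {
	ls := tiered{fast: mapStore{}, slow: mapStore{"chunk": "data"}}
	v, _ := ls.get("chunk") // miss in fast, hit in slow, now cached
	fmt.Println(v)
}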
316  swarm/storage/memstore.go  Normal file
@ -0,0 +1,316 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// memory storage layer for the package blockhash

package storage

import (
	"sync"
)

const (
	memTreeLW              = 2  // log2(subtree count) of the subtrees
	memTreeFLW             = 14 // log2(subtree count) of the root layer
	dbForceUpdateAccessCnt = 1000
	defaultCacheCapacity   = 5000
)

type MemStore struct {
	memtree            *memTree
	entryCnt, capacity uint   // stored entries
	accessCnt          uint64 // access counter; oldest is thrown away when full
	dbAccessCnt        uint64
	dbStore            *DbStore
	lock               sync.Mutex
}

/*
a hash prefix subtree containing subtrees or one storage entry (but never both)

- access[0] stores the smallest (oldest) access count value in this subtree
- if it contains more subtrees and its subtree count is at least 4, access[1]
  and access[2] store the smallest access count in the first and second halves
  of the subtrees (so that access[0] = min(access[1], access[2]))
- likewise, if the subtree count is at least 8,
  access[1] = min(access[3], access[4])
  access[2] = min(access[5], access[6])
  (access[] is a binary tree inside the multi-bit leveled hash tree)
*/

func NewMemStore(d *DbStore, capacity uint) (m *MemStore) {
	m = &MemStore{}
	m.memtree = newMemTree(memTreeFLW, nil, 0)
	m.dbStore = d
	m.setCapacity(capacity)
	return
}

type memTree struct {
	subtree   []*memTree
	parent    *memTree
	parentIdx uint

	bits  uint // log2(subtree count)
	width uint // subtree count

	entry        *Chunk // if subtrees are present, entry should be nil
	lastDBaccess uint64
	access       []uint64
}

func newMemTree(b uint, parent *memTree, pidx uint) (node *memTree) {
	node = new(memTree)
	node.bits = b
	node.width = 1 << uint(b)
	node.subtree = make([]*memTree, node.width)
	node.access = make([]uint64, node.width-1)
	node.parent = parent
	node.parentIdx = pidx
	if parent != nil {
		parent.subtree[pidx] = node
	}

	return node
}

func (node *memTree) updateAccess(a uint64) {
	aidx := uint(0)
	var aa uint64
	oa := node.access[0]
	for node.access[aidx] == oa {
		node.access[aidx] = a
		if aidx > 0 {
			aa = node.access[((aidx-1)^1)+1]
			aidx = (aidx - 1) >> 1
		} else {
			pidx := node.parentIdx
			node = node.parent
			if node == nil {
				return
			}
			nn := node.subtree[pidx^1]
			if nn != nil {
				aa = nn.access[0]
			} else {
				aa = 0
			}
			aidx = (node.width + pidx - 2) >> 1
		}

		if (aa != 0) && (aa < a) {
			a = aa
		}
	}
}

func (s *MemStore) setCapacity(c uint) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for c < s.entryCnt {
		s.removeOldest()
	}
	s.capacity = c
}

func (s *MemStore) getEntryCnt() uint {
	return s.entryCnt
}

// entry (not its copy) is going to be in MemStore
func (s *MemStore) Put(entry *Chunk) {
	if s.capacity == 0 {
		return
	}

	s.lock.Lock()
	defer s.lock.Unlock()

	if s.entryCnt >= s.capacity {
		s.removeOldest()
	}

	s.accessCnt++

	node := s.memtree
	bitpos := uint(0)
	for node.entry == nil {
		l := entry.Key.bits(bitpos, node.bits)
		st := node.subtree[l]
		if st == nil {
			st = newMemTree(memTreeLW, node, l)
			bitpos += node.bits
			node = st
			break
		}
		bitpos += node.bits
		node = st
	}

	if node.entry != nil {

		if node.entry.Key.isEqual(entry.Key) {
			node.updateAccess(s.accessCnt)
			if entry.SData == nil {
				entry.Size = node.entry.Size
				entry.SData = node.entry.SData
			}
			if entry.Req == nil {
				entry.Req = node.entry.Req
			}
			entry.C = node.entry.C
			node.entry = entry
			return
		}

		for node.entry != nil {

			l := node.entry.Key.bits(bitpos, node.bits)
			st := node.subtree[l]
			if st == nil {
				st = newMemTree(memTreeLW, node, l)
			}
			st.entry = node.entry
			node.entry = nil
			st.updateAccess(node.access[0])

			l = entry.Key.bits(bitpos, node.bits)
			st = node.subtree[l]
			if st == nil {
				st = newMemTree(memTreeLW, node, l)
			}
			bitpos += node.bits
			node = st

		}
	}

	node.entry = entry
	node.lastDBaccess = s.dbAccessCnt
	node.updateAccess(s.accessCnt)
	s.entryCnt++

	return
}

func (s *MemStore) Get(hash Key) (chunk *Chunk, err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	node := s.memtree
	bitpos := uint(0)
	for node.entry == nil {
		l := hash.bits(bitpos, node.bits)
		st := node.subtree[l]
		if st == nil {
			return nil, notFound
		}
		bitpos += node.bits
		node = st
	}

	if node.entry.Key.isEqual(hash) {
		s.accessCnt++
		node.updateAccess(s.accessCnt)
		chunk = node.entry
		if s.dbAccessCnt-node.lastDBaccess > dbForceUpdateAccessCnt {
			s.dbAccessCnt++
			node.lastDBaccess = s.dbAccessCnt
			if s.dbStore != nil {
				s.dbStore.updateAccessCnt(hash)
			}
		}
	} else {
		err = notFound
	}

	return
}

func (s *MemStore) removeOldest() {
	node := s.memtree

	for node.entry == nil {

		aidx := uint(0)
		av := node.access[aidx]

		for aidx < node.width/2-1 {
			if av == node.access[aidx*2+1] {
				node.access[aidx] = node.access[aidx*2+2]
				aidx = aidx*2 + 1
			} else if av == node.access[aidx*2+2] {
				node.access[aidx] = node.access[aidx*2+1]
				aidx = aidx*2 + 2
			} else {
				panic(nil)
			}
		}
		pidx := aidx*2 + 2 - node.width
		if (node.subtree[pidx] != nil) && (av == node.subtree[pidx].access[0]) {
			if node.subtree[pidx+1] != nil {
				node.access[aidx] = node.subtree[pidx+1].access[0]
			} else {
				node.access[aidx] = 0
			}
		} else if (node.subtree[pidx+1] != nil) && (av == node.subtree[pidx+1].access[0]) {
			if node.subtree[pidx] != nil {
				node.access[aidx] = node.subtree[pidx].access[0]
			} else {
				node.access[aidx] = 0
			}
			pidx++
		} else {
			panic(nil)
		}

		//fmt.Println(pidx)
		node = node.subtree[pidx]

	}

	if node.entry.dbStored != nil {
		<-node.entry.dbStored
	}

	if node.entry.SData != nil {
		node.entry = nil
		s.entryCnt--
	}

	node.access[0] = 0

	//---

	aidx := uint(0)
	for {
		aa := node.access[aidx]
		if aidx > 0 {
			aidx = (aidx - 1) >> 1
		} else {
			pidx := node.parentIdx
			node = node.parent
			if node == nil {
				return
			}
			aidx = (node.width + pidx - 2) >> 1
		}
		if (aa != 0) && ((aa < node.access[aidx]) || (node.access[aidx] == 0)) {
			node.access[aidx] = aa
		}
	}
}
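The access[] bookkeeping above is the subtle part: each node carries an implicit binary min-tree over the access counters of its subtrees, so removeOldest can walk straight down to the least recently used entry. A toy version for a single node of width 4 (bits = 2), with hypothetical helper names, illustrative only:

package main

import "fmt"

// rebuild fills the 3-element implicit binary min-tree for a width-4 node:
// access[1], access[2] hold the minima of the two subtree halves, and
// access[0] = min(access[1], access[2]) is the oldest access in the node.
func rebuild(access []uint64, leafMin [4]uint64) {
	access[1] = minU64(leafMin[0], leafMin[1])
	access[2] = minU64(leafMin[2], leafMin[3])
	access[0] = minU64(access[1], access[2])
}

func minU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	access := make([]uint64, 3)
	rebuild(access, [4]uint64{17, 9, 42, 30})
	fmt.Println(access) // [9 9 30]: the oldest entry lives in subtree 1
}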
50  swarm/storage/memstore_test.go  Normal file
@ -0,0 +1,50 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"testing"
)

func testMemStore(l int64, branches int64, t *testing.T) {
	m := NewMemStore(nil, defaultCacheCapacity)
	testStore(m, l, branches, t)
}

func TestMemStore128_10000(t *testing.T) {
	testMemStore(10000, 128, t)
}

func TestMemStore128_1000(t *testing.T) {
	testMemStore(1000, 128, t)
}

func TestMemStore128_100(t *testing.T) {
	testMemStore(100, 128, t)
}

func TestMemStore2_100(t *testing.T) {
	testMemStore(100, 2, t)
}

func TestMemStoreNotFound(t *testing.T) {
	m := NewMemStore(nil, defaultCacheCapacity)
	_, err := m.Get(ZeroKey)
	if err != notFound {
		t.Errorf("Expected notFound, got %v", err)
	}
}
134
swarm/storage/netstore.go
Normal file
134
swarm/storage/netstore.go
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
// Copyright 2016 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
NetStore is a cloud storage access abstaction layer for swarm
|
||||||
|
it contains the shared logic of network served chunk store/retrieval requests
|
||||||
|
both local (coming from DPA api) and remote (coming from peers via bzz protocol)
|
||||||
|
it implements the ChunkStore interface and embeds LocalStore
|
||||||
|
|
||||||
|
It is called by the bzz protocol instances via Depo (the store/retrieve request handler)
|
||||||
|
a protocol instance is running on each peer, so this is heavily parallelised.
|
||||||
|
NetStore falls back to a backend (CloudStorage interface)
|
||||||
|
implemented by bzz/network/forwarder. forwarder or IPFS or IPΞS
|
||||||
|
*/
|
||||||
|
type NetStore struct {
|
||||||
|
hashfunc Hasher
|
||||||
|
localStore *LocalStore
|
||||||
|
cloud CloudStore
|
||||||
|
lock sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// backend engine for cloud store
|
||||||
|
// It can be aggregate dispatching to several parallel implementations:
|
||||||
|
// bzz/network/forwarder. forwarder or IPFS or IPΞS
|
||||||
|
type CloudStore interface {
|
||||||
|
Store(*Chunk)
|
||||||
|
Deliver(*Chunk)
|
||||||
|
Retrieve(*Chunk)
|
||||||
|
}

type StoreParams struct {
    ChunkDbPath   string
    DbCapacity    uint64
    CacheCapacity uint
    Radius        int
}

func NewStoreParams(path string) (self *StoreParams) {
    return &StoreParams{
        ChunkDbPath:   filepath.Join(path, "chunks"),
        DbCapacity:    defaultDbCapacity,
        CacheCapacity: defaultCacheCapacity,
        Radius:        defaultRadius,
    }
}

// NetStore constructor: lstore is the local storage component (whose persistent
// disk component, dbStore, was initialised from a path); cloud is the
// connection/logistics backend used for network store and retrieval requests.
func NewNetStore(hash Hasher, lstore *LocalStore, cloud CloudStore, params *StoreParams) *NetStore {
    return &NetStore{
        hashfunc:   hash,
        localStore: lstore,
        cloud:      cloud,
    }
}

const (
    // maximum number of peers that a retrieved message is delivered to
    requesterCount = 3
)

var (
    // timeout interval before retrieval is timed out
    searchTimeout = 3 * time.Second
)

// Put: store logic common to local and network chunk store requests.
// ~ unsafe put in localdb: no check whether the chunk exists, no extra copy,
// no hash validation. The chunk is forced to propagate (Cloud.Store) even if
// it is found locally; the caller needs to make sure that this is wanted.
func (self *NetStore) Put(entry *Chunk) {
    self.localStore.Put(entry)

    // handle deliveries
    if entry.Req != nil {
        glog.V(logger.Detail).Infof("NetStore.Put: localStore.Put %v hit existing request...delivering", entry.Key.Log())
        // closing C signals to other routines (local requests)
        // that the chunk has been retrieved
        close(entry.Req.C)
        // deliver the chunk to requesters upstream
        go self.cloud.Deliver(entry)
    } else {
        glog.V(logger.Detail).Infof("NetStore.Put: localStore.Put %v stored locally", entry.Key.Log())
        // handle propagating store requests
        go self.cloud.Store(entry)
    }
}

// Get: retrieve logic common to local and network chunk retrieval requests
func (self *NetStore) Get(key Key) (*Chunk, error) {
    var err error
    chunk, err := self.localStore.Get(key)
    if err == nil {
        if chunk.Req == nil {
            glog.V(logger.Detail).Infof("NetStore.Get: %v found locally", key)
        } else {
            glog.V(logger.Detail).Infof("NetStore.Get: %v hit on an existing request", key)
            // no need to launch the retrieval again
        }
        return chunk, err
    }
    // no data and no request status
    glog.V(logger.Detail).Infof("NetStore.Get: %v not found locally. open new request", key)
    chunk = NewChunk(key, newRequestStatus(key))
    self.localStore.memStore.Put(chunk)
    go self.cloud.Retrieve(chunk)
    return chunk, nil
}
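
// Editorial caller-side sketch (an assumption about intended use, mirroring how
// the DPA consumes Get; waitForChunk is a hypothetical helper): block until the
// chunk is delivered or give up after searchTimeout. NetStore.Put closes Req.C
// on delivery, waking every waiting goroutine at once.
func waitForChunk(netStore *NetStore, key Key) *Chunk {
    chunk, _ := netStore.Get(key) // Get never blocks; it may return a pending request
    if chunk.Req == nil {
        return chunk // served from the local store, data already present
    }
    select {
    case <-chunk.Req.C: // closed by NetStore.Put when the chunk arrives
        return chunk
    case <-time.After(searchTimeout):
        return nil // not delivered within searchTimeout
    }
}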

211  swarm/storage/pyramid.go  Normal file
@ -0,0 +1,211 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
    "encoding/binary"
    "fmt"
    "io"
    "math"
    "strings"
    "sync"

    "github.com/ethereum/go-ethereum/common"
)

const (
    processors = 8
)

type Tree struct {
    Chunks int64
    Levels []map[int64]*Node
    Lock   sync.RWMutex
}

type Node struct {
    Pending  int64
    Size     uint64
    Children []common.Hash
    Last     bool
}

func (self *Node) String() string {
    var children []string
    for _, node := range self.Children {
        children = append(children, node.Hex())
    }
    return fmt.Sprintf("pending: %v, size: %v, last: %v, children: %v", self.Pending, self.Size, self.Last, strings.Join(children, ", "))
}

type Task struct {
    Index int64  // Index of the chunk being processed
    Size  uint64
    Data  []byte // Binary blob of the chunk
    Last  bool
}

type PyramidChunker struct {
    hashFunc    Hasher
    chunkSize   int64
    hashSize    int64
    branches    int64
    workerCount int
}

func NewPyramidChunker(params *ChunkerParams) (self *PyramidChunker) {
    self = &PyramidChunker{}
    self.hashFunc = MakeHashFunc(params.Hash)
    self.branches = params.Branches
    self.hashSize = int64(self.hashFunc().Size())
    self.chunkSize = self.hashSize * self.branches
    self.workerCount = 1
    return
}

func (self *PyramidChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, swg, wwg *sync.WaitGroup) (Key, error) {

    chunks := (size + self.chunkSize - 1) / self.chunkSize
    depth := int(math.Ceil(math.Log(float64(chunks))/math.Log(float64(self.branches)))) + 1

    results := Tree{
        Chunks: chunks,
        Levels: make([]map[int64]*Node, depth),
    }
    for i := 0; i < depth; i++ {
        results.Levels[i] = make(map[int64]*Node)
    }
    // Create a pool of workers to crunch through the file
    tasks := make(chan *Task, 2*processors)
    pend := new(sync.WaitGroup)
    abortC := make(chan bool)
    for i := 0; i < processors; i++ {
        pend.Add(1)
        go self.processor(pend, swg, tasks, chunkC, &results)
    }
    // Feed the chunks into the task pool
    for index := 0; ; index++ {
        // each task carries an 8-byte little-endian size prefix followed by the payload
        buffer := make([]byte, self.chunkSize+8)
        n, err := data.Read(buffer[8:])
        last := err == io.ErrUnexpectedEOF || err == io.EOF
        if err != nil && !last {
            close(abortC)
            break
        }
        binary.LittleEndian.PutUint64(buffer[:8], uint64(n))
        pend.Add(1)
        select {
        case tasks <- &Task{Index: int64(index), Size: uint64(n), Data: buffer[:n+8], Last: last}:
        case <-abortC:
            return nil, err
        }
        if last {
            break
        }
    }
    // Wait for the workers and return
    close(tasks)
    pend.Wait()

    key := results.Levels[0][0].Children[0][:]
    return key, nil
}
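
// Editorial illustration (hypothetical helper, not in the PR): the layout of
// each leaf task built by Split above, shown in isolation.
func frameLeaf(payload []byte) []byte {
    framed := make([]byte, 8+len(payload))
    binary.LittleEndian.PutUint64(framed[:8], uint64(len(payload))) // size prefix
    copy(framed[8:], payload)                                       // chunk payload
    return framed
}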

func (self *PyramidChunker) processor(pend, swg *sync.WaitGroup, tasks chan *Task, chunkC chan *Chunk, results *Tree) {
    defer pend.Done()

    // Start processing leaf chunks ad infinitum
    hasher := self.hashFunc()
    for task := range tasks {
        depth, pow := len(results.Levels)-1, self.branches
        size := task.Size
        data := task.Data
        var node *Node
        for depth >= 0 {
            // New chunk received, reset the hasher and start processing
            hasher.Reset()
            if node == nil { // Leaf node, hash the data chunk
                hasher.Write(task.Data)
            } else { // Internal node, hash the children
                size = node.Size
                data = make([]byte, hasher.Size()*len(node.Children)+8)
                binary.LittleEndian.PutUint64(data[:8], size)

                hasher.Write(data[:8])
                for i, hash := range node.Children {
                    copy(data[i*hasher.Size()+8:], hash[:])
                    hasher.Write(hash[:])
                }
            }
            hash := hasher.Sum(nil)
            last := task.Last || (node != nil) && node.Last
            // Insert the subresult into the memoization tree
            results.Lock.Lock()
            if node = results.Levels[depth][task.Index/pow]; node == nil {
                // Figure out the pending tasks
                pending := self.branches
                if task.Index/pow == results.Chunks/pow {
                    pending = (results.Chunks + pow/self.branches - 1) / (pow / self.branches) % self.branches
                }
                node = &Node{pending, 0, make([]common.Hash, pending), last}
                results.Levels[depth][task.Index/pow] = node
            }
            node.Pending--
            i := task.Index / (pow / self.branches) % self.branches
            if last {
                node.Last = true
            }
            copy(node.Children[i][:], hash)
            node.Size += size
            left := node.Pending
            if chunkC != nil {
                if swg != nil {
                    swg.Add(1)
                }
                select {
                case chunkC <- &Chunk{Key: hash, SData: data, wg: swg}:
                    // case <-self.quitC
                }
            }
            if depth+1 < len(results.Levels) {
                delete(results.Levels[depth+1], task.Index/(pow/self.branches))
            }

            results.Lock.Unlock()
            // If there's more work to be done, leave it for the other workers
            if left > 0 {
                break
            }
            // We're the last ones in this batch, merge the children together
            depth--
            pow *= self.branches
        }
        pend.Done()
    }
}
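
// Editorial worked example (assuming the usual SHA3 parameters: 32-byte hashes
// and 128 branches, so chunkSize = 32*128 = 4096 bytes): a 1 MiB input yields
// chunks = 256 leaves and depth = ceil(log(256)/log(128)) + 1 = 3 levels.
// In processor, leaf task 130 (pow = 128) then updates parent node
// 130/128 = 1 on the level above, at child slot 130 % 128 = 2.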

232  swarm/storage/types.go  Normal file
@ -0,0 +1,232 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
    "bytes"
    "crypto"
    "fmt"
    "hash"
    "io"
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/crypto/sha3"
)

type Hasher func() hash.Hash

// Peer is recorded as Source on the chunk.
// It should probably not live here; the network layer ought to wrap the chunk object.
type Peer interface{}

type Key []byte

func (x Key) Size() uint {
    return uint(len(x))
}

func (x Key) isEqual(y Key) bool {
    return bytes.Compare(x, y) == 0
}

// bits extracts j bits of the key starting at bit position i (least significant bit first)
func (h Key) bits(i, j uint) uint {
    ii := i >> 3
    jj := i & 7
    if ii >= h.Size() {
        return 0
    }

    if jj+j <= 8 {
        return uint((h[ii] >> jj) & ((1 << j) - 1))
    }

    res := uint(h[ii] >> jj)
    jj = 8 - jj
    j -= jj
    for j != 0 {
        ii++
        if j < 8 {
            res += uint(h[ii]&((1<<j)-1)) << jj
            return res
        }
        res += uint(h[ii]) << jj
        jj += 8
        j -= 8
    }
    return res
}
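
// Editorial worked example for bits (behaviour inferred from the code above):
//
//    key := Key{0xb4}   // 0b10110100: bit 2 = 1, bit 3 = 0, bit 4 = 1
//    key.bits(2, 3)     // == 0b101 == 5, bits 2..4 with bit 2 as the LSB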

func IsZeroKey(key Key) bool {
    return len(key) == 0 || bytes.Equal(key, ZeroKey)
}

var ZeroKey = Key(common.Hash{}.Bytes())

func MakeHashFunc(hash string) Hasher {
    switch hash {
    case "SHA256":
        return crypto.SHA256.New
    case "SHA3":
        return sha3.NewKeccak256
    }
    return nil
}
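
// Editorial sketch (hypothetical helper): deriving a chunk key with the
// configured hash scheme. Note that MakeHashFunc returns nil for an unknown
// scheme name, so real callers should guard against that.
func chunkKey(data []byte) Key {
    h := MakeHashFunc("SHA3")() // sha3.NewKeccak256 under the hood
    h.Write(data)
    return Key(h.Sum(nil)) // 32-byte key
}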

func (key Key) Hex() string {
    return fmt.Sprintf("%064x", []byte(key[:]))
}

func (key Key) Log() string {
    if len(key[:]) < 4 {
        return fmt.Sprintf("%x", []byte(key[:]))
    }
    return fmt.Sprintf("%08x", []byte(key[:4]))
}

func (key Key) String() string {
    return fmt.Sprintf("%064x", []byte(key)[:])
}

func (key Key) MarshalJSON() (out []byte, err error) {
    return []byte(`"` + key.String() + `"`), nil
}

func (key *Key) UnmarshalJSON(value []byte) error {
    s := string(value)
    *key = make([]byte, 32)
    h := common.Hex2Bytes(s[1 : len(s)-1]) // strip the surrounding JSON quotes
    copy(*key, h)
    return nil
}

// When a chunk is first requested, a record is opened and associated with the
// request; the next time a request for the same chunk arrives, this record is
// updated. The request status keeps track of the request IDs as well as the
// requesting peers, and has a channel that is closed when the chunk is
// retrieved. Multiple local callers can wait on this channel (or, combined
// with a timeout, block with a select).
type RequestStatus struct {
    Key        Key
    Source     Peer
    C          chan bool
    Requesters map[uint64][]interface{}
}

func newRequestStatus(key Key) *RequestStatus {
    return &RequestStatus{
        Key:        key,
        Requesters: make(map[uint64][]interface{}),
        C:          make(chan bool),
    }
}

// Chunk also serves as a request object passed to ChunkStores:
// in the case of a retrieval request, Data is nil and Size is 0.
// Note that Size is not the size of the data chunk, which is Data.Size(),
// but the size of the subtree encoded in the chunk
// (0 if it is a request, to be supplied by the dpa).
type Chunk struct {
    Key      Key             // always
    SData    []byte          // nil if request, to be supplied by dpa
    Size     int64           // size of the data covered by the subtree encoded in this chunk
    Source   Peer            // peer
    C        chan bool       // to signal data delivery by the dpa
    Req      *RequestStatus  // request status needed by netStore
    wg       *sync.WaitGroup // wg to synchronize
    dbStored chan bool       // never remove a chunk from memStore before it is written to dbStore
}

func NewChunk(key Key, rs *RequestStatus) *Chunk {
    return &Chunk{Key: key, Req: rs}
}

/*
The ChunkStore interface is implemented by:

- MemStore: a memory cache
- DbStore: local disk/db store
- LocalStore: a combination (sequence) of memStore and dbStore
- NetStore: cloud storage abstraction layer
- DPA: local requests for swarm storage and retrieval
*/
type ChunkStore interface {
    Put(*Chunk) // effectively fire-and-forget: no error is reported even if storage fails
    Get(Key) (*Chunk, error)
}

/*
Chunker is the interface to a component that is responsible for disassembling
and assembling larger data. It is intended to be the dependency of a DPA
storage system with a fixed maximum chunk size.

It relies on the underlying chunking model.

When calling Split, the caller provides a channel (chan *Chunk) on which it
receives chunks to store. The DPA delegates to storage layers (implementing
the ChunkStore interface).

Split returns an error channel, which the caller can monitor.
After being notified that all the data has been split (the error channel is
closed), the caller can safely read or save the root key. Optionally it times
out if not all chunks get stored or the entire stream of data has not been
processed. By inspecting the errc channel the caller can check whether any
explicit errors (typically IO read/write failures) occurred during splitting.

When calling Join with a root key, the caller gets returned a seekable lazy
reader. The caller again provides a channel on which it receives placeholder
chunks with missing data. The DPA is supposed to forward these to the chunk
stores and notify the chunker once the data has been delivered (i.e. retrieved
from the memory cache, the disk-persisted db or cloud-based swarm delivery).
As the seekable reader is used, the chunker then assembles the relevant parts
on demand. See the caller-side sketch after the Chunker interface below.
*/
type Splitter interface {
    /*
        When splitting, data is given as a SectionReader, and the key is a
        hashSize-long byte slice (Key); the root hash of the entire content
        will fill this once processing finishes.
        New chunks to store are passed to the caller via the chunk storage
        channel, which the caller provides.
        wg is a WaitGroup (can be nil) that can be used to block until the
        local storage finishes.
        The caller gets returned an error channel; if an error is encountered
        during splitting, it is fed to the errC error channel.
        A closed error channel signals process completion, at which point the
        key can be considered final if there were no errors.
    */
    Split(io.Reader, int64, chan *Chunk, *sync.WaitGroup, *sync.WaitGroup) (Key, error)
}

type Joiner interface {
    /*
        Join reconstructs the original content based on a root key.
        When joining, the caller gets returned a lazy SectionReader, which is
        seekable and implements on-demand fetching of chunks as and where it
        is read.
        New chunks to retrieve are passed to the caller via the Chunk channel,
        which the caller provides.
        If an error is encountered during joining, it appears as a reader
        error on the SectionReader.
        As a result, partial reads from a document are possible even if other
        parts are corrupt or lost.
        The chunks are not meant to be validated by the chunker when joining.
        This is because it is left to the DPA to decide which sources are
        trusted.
    */
    Join(key Key, chunkC chan *Chunk) LazySectionReader
}

type Chunker interface {
    Joiner
    Splitter
    // returns the key length
    // KeySize() int64
}
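
// Editorial caller-side sketch (an assumption about intended use, not part of
// the PR): driving a Splitter and persisting every produced chunk into a
// ChunkStore. The helper name storeAll is hypothetical.
func storeAll(splitter Splitter, r io.Reader, size int64, store ChunkStore) (Key, error) {
    chunkC := make(chan *Chunk)
    swg := new(sync.WaitGroup) // the splitter bumps this once per emitted chunk
    go func() {
        for chunk := range chunkC {
            store.Put(chunk) // the store is expected to call chunk.wg.Done() once persisted
        }
    }()
    key, err := splitter.Split(r, size, chunkC, swg, nil)
    swg.Wait()    // block until local storage has confirmed every chunk
    close(chunkC) // shut down the consumer
    return key, err
}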

// Size, Seek, Read, ReadAt
type LazySectionReader interface {
    Size(chan bool) (int64, error)
    io.Seeker
    io.Reader
    io.ReaderAt
}

type LazyTestSectionReader struct {
    *io.SectionReader
}

func (self *LazyTestSectionReader) Size(chan bool) (int64, error) {
    return self.SectionReader.Size(), nil
}
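
// Editorial sketch (hypothetical helper): reading a byte range through a
// Joiner. In a real deployment the DPA services chunkC, filling in SData and
// signalling delivery; without that, the read below would block.
func readRange(joiner Joiner, key Key, off int64, n int) ([]byte, error) {
    chunkC := make(chan *Chunk)
    reader := joiner.Join(key, chunkC) // lazy, seekable reader over the content
    buf := make([]byte, n)
    _, err := reader.ReadAt(buf, off) // fetches only the chunks this range needs
    return buf, err
}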

309  swarm/swarm.go  Normal file
@ -0,0 +1,309 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package swarm

import (
    "bytes"
    "crypto/ecdsa"
    "fmt"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/contracts/chequebook"
    "github.com/ethereum/go-ethereum/contracts/ens"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/logger"
    "github.com/ethereum/go-ethereum/logger/glog"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/p2p/discover"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/swarm/api"
    httpapi "github.com/ethereum/go-ethereum/swarm/api/http"
    "github.com/ethereum/go-ethereum/swarm/network"
    "github.com/ethereum/go-ethereum/swarm/storage"
    "golang.org/x/net/context"
)

// the swarm stack
type Swarm struct {
    config      *api.Config            // swarm configuration
    api         *api.Api               // high level api layer (fs/manifest)
    dns         api.Resolver           // DNS registrar
    dbAccess    *network.DbAccess      // access to local chunk db iterator and storage counter
    storage     storage.ChunkStore     // internal access to storage, common interface to cloud storage backends
    dpa         *storage.DPA           // distributed preimage archive, the local API to the storage with document level storage/retrieval support
    depo        network.StorageHandler // remote request handler, interface between bzz protocol and the storage
    cloud       storage.CloudStore     // procurement, cloud storage backend (can be multi-cloud)
    hive        *network.Hive          // the logistic manager
    backend     chequebook.Backend     // simple blockchain Backend
    privateKey  *ecdsa.PrivateKey
    swapEnabled bool
}

type SwarmAPI struct {
    Api     *api.Api
    Backend chequebook.Backend
    PrvKey  *ecdsa.PrivateKey
}

func (self *Swarm) API() *SwarmAPI {
    return &SwarmAPI{
        Api:     self.api,
        Backend: self.backend,
        PrvKey:  self.privateKey,
    }
}

// NewSwarm creates a new swarm service instance;
// implements node.Service
func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, config *api.Config, swapEnabled, syncEnabled bool) (self *Swarm, err error) {
    if bytes.Equal(common.FromHex(config.PublicKey), storage.ZeroKey) {
        return nil, fmt.Errorf("empty public key")
    }
    if bytes.Equal(common.FromHex(config.BzzKey), storage.ZeroKey) {
        return nil, fmt.Errorf("empty bzz key")
    }

    self = &Swarm{
        config:      config,
        swapEnabled: swapEnabled,
        backend:     backend,
        privateKey:  config.Swap.PrivateKey(),
    }
    glog.V(logger.Debug).Infof("Setting up Swarm service components")

    hash := storage.MakeHashFunc(config.ChunkerParams.Hash)
    lstore, err := storage.NewLocalStore(hash, config.StoreParams)
    if err != nil {
        return
    }

    // set up local store
    glog.V(logger.Debug).Infof("Set up local storage")

    self.dbAccess = network.NewDbAccess(lstore)
    glog.V(logger.Debug).Infof("Set up local db access (iterator/counter)")

    // set up the kademlia hive
    self.hive = network.NewHive(
        common.HexToHash(self.config.BzzKey), // key to hive (kademlia base address)
        config.HiveParams,                    // configuration parameters
        swapEnabled,                          // SWAP enabled
        syncEnabled,                          // synchronisation enabled
    )
    glog.V(logger.Debug).Infof("Set up swarm network with Kademlia hive")

    // set up the cloud storage backend
    cloud := network.NewForwarder(self.hive)
    glog.V(logger.Debug).Infof("-> set swarm forwarder as cloud storage backend")
    // set up the cloud storage internal access layer

    self.storage = storage.NewNetStore(hash, lstore, cloud, config.StoreParams)
    glog.V(logger.Debug).Infof("-> swarm net store shared access layer to Swarm Chunk Store")

    // set up Depo (storage handler = cloud storage access layer for incoming remote requests)
    self.depo = network.NewDepo(hash, lstore, self.storage)
    glog.V(logger.Debug).Infof("-> REmote Access to CHunks")

    // set up DPA, the cloud storage local access layer
    dpaChunkStore := storage.NewDpaChunkStore(lstore, self.storage)
    glog.V(logger.Debug).Infof("-> Local Access to Swarm")
    // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
    self.dpa = storage.NewDPA(dpaChunkStore, self.config.ChunkerParams)
    glog.V(logger.Debug).Infof("-> Content Store API")

    // set up the high level api
    transactOpts := bind.NewKeyedTransactor(self.privateKey)

    self.dns, err = ens.NewENS(transactOpts, config.EnsRoot, self.backend)
    if err != nil {
        return nil, err
    }
    glog.V(logger.Debug).Infof("-> Swarm Domain Name Registrar @ address %v", config.EnsRoot.Hex())

    self.api = api.NewApi(self.dpa, self.dns)
    // Manifests for Smart Hosting
    glog.V(logger.Debug).Infof("-> Web3 virtual server API")

    return self, nil
}
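
// Editorial overview (summarising the constructor above, not part of the PR):
//
//    LocalStore (memory cache + chunk db)
//       └─ NetStore  adds the CloudStore fallback (network.Forwarder)
//            ├─ Depo     serves incoming remote bzz store/retrieve requests
//            └─ DPA      chunker-backed document storage for the local API
//                 └─ api.Api (+ ENS resolver)  feeds the http proxy and RPC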

/*
Start is called when the stack is started:
* starts the network kademlia hive peer management
* (starts the netStore level 0 api)
* starts the DPA level 1 api (chunking -> store/retrieve requests)
* (starts the level 2 api)
* starts the http proxy server
* registers url scheme handlers for bzz, etc.
* TODO: start subservices like sword, swear, swarmdns
*/
// implements the node.Service interface
func (self *Swarm) Start(net *p2p.Server) error {
    connectPeer := func(url string) error {
        node, err := discover.ParseNode(url)
        if err != nil {
            return fmt.Errorf("invalid node URL: %v", err)
        }
        net.AddPeer(node)
        return nil
    }
    // set up the chequebook
    if self.swapEnabled {
        ctx := context.Background() // The initial setup has no deadline.
        err := self.SetChequebook(ctx)
        if err != nil {
            return fmt.Errorf("Unable to set chequebook for SWAP: %v", err)
        }
        glog.V(logger.Debug).Infof("-> cheque book for SWAP: %v", self.config.Swap.Chequebook())
    } else {
        glog.V(logger.Debug).Infof("SWAP disabled: no cheque book set")
    }

    glog.V(logger.Warn).Infof("Starting Swarm service")
    self.hive.Start(
        discover.PubkeyID(&net.PrivateKey.PublicKey),
        func() string { return net.ListenAddr },
        connectPeer,
    )
    glog.V(logger.Info).Infof("Swarm network started on bzz address: %v", self.hive.Addr())

    self.dpa.Start()
    glog.V(logger.Debug).Infof("Swarm DPA started")

    // start the swarm http proxy server
    if self.config.Port != "" {
        go httpapi.StartHttpServer(self.api, self.config.Port)
        glog.V(logger.Debug).Infof("Swarm http proxy started on port: %v", self.config.Port)
    }

    return nil
}

// Stop implements the node.Service interface:
// it stops all component services.
func (self *Swarm) Stop() error {
    self.dpa.Stop()
    self.hive.Stop()
    if ch := self.config.Swap.Chequebook(); ch != nil {
        ch.Stop()
        ch.Save()
    }
    return self.config.Save()
}

// Protocols implements the node.Service interface
func (self *Swarm) Protocols() []p2p.Protocol {
    proto, err := network.Bzz(self.depo, self.backend, self.hive, self.dbAccess, self.config.Swap, self.config.SyncParams)
    if err != nil {
        return nil
    }
    return []p2p.Protocol{proto}
}

// APIs implements node.Service:
// it returns the RPC API descriptors the Swarm implementation offers
func (self *Swarm) APIs() []rpc.API {
    return []rpc.API{
        // public APIs
        {
            Namespace: "bzz",
            Version:   "0.1",
            Service:   api.NewStorage(self.api),
            Public:    true,
        },
        {
            Namespace: "bzz",
            Version:   "0.1",
            Service:   &Info{self.config, chequebook.ContractParams},
            Public:    true,
        },
        // admin APIs
        {
            Namespace: "bzz",
            Version:   "0.1",
            Service:   api.NewFileSystem(self.api),
            Public:    false,
        },
        {
            Namespace: "bzz",
            Version:   "0.1",
            Service:   api.NewControl(self.api, self.hive),
            Public:    false,
        },
        {
            Namespace: "chequebook",
            Version:   chequebook.Version,
            Service:   chequebook.NewApi(self.config.Swap.Chequebook),
            Public:    false,
        },
        // {Namespace, Version, api.NewAdmin(self), false},
    }
}

func (self *Swarm) Api() *api.Api {
    return self.api
}

// SetChequebook ensures that the local chequebook is set up on chain.
func (self *Swarm) SetChequebook(ctx context.Context) error {
    err := self.config.Swap.SetChequebook(ctx, self.backend, self.config.Path)
    if err != nil {
        return err
    }
    glog.V(logger.Info).Infof("new chequebook set (%v): saving config file, resetting all connections in the hive", self.config.Swap.Contract.Hex())
    self.config.Save()
    self.hive.DropAll()
    return nil
}

// NewLocalSwarm: a local swarm without a netStore
func NewLocalSwarm(datadir, port string) (self *Swarm, err error) {

    prvKey, err := crypto.GenerateKey()
    if err != nil {
        return
    }

    config, err := api.NewConfig(datadir, common.Address{}, prvKey)
    if err != nil {
        return
    }
    config.Port = port

    dpa, err := storage.NewLocalDPA(datadir)
    if err != nil {
        return
    }

    self = &Swarm{
        api:    api.NewApi(dpa, nil),
        config: config,
    }

    return
}
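
// Editorial usage sketch (the datadir and port values are illustrative):
func runLocalSwarm() error {
    sw, err := NewLocalSwarm("/tmp/bzz-local", "8500") // hypothetical paths
    if err != nil {
        return err
    }
    _ = sw.Api() // local-only api: no netStore fallback, no ENS resolution
    return nil
}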

// serialisable info about swarm
type Info struct {
    *api.Config
    *chequebook.Params
}

func (self *Info) Info() *Info {
    return self
}