Merge pull request #17231 from ethersphere/develop
swarm: client-side MRU signatures; BMT fixes; network simulation tests
Commit b536460f8e
@@ -182,6 +182,18 @@ var (
Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
SwarmResourceMultihashFlag = cli.BoolFlag{
Name: "multihash",
Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
}
SwarmResourceNameFlag = cli.StringFlag{
Name: "name",
Usage: "User-defined name for the new resource",
}
SwarmResourceDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
}
)

//declare a few constant error messages, useful for later error check comparisons in test
@@ -190,6 +202,15 @@ var (
SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set"
)

// this help command gets added to any subcommand that does not define it explicitly
var defaultSubcommandHelp = cli.Command{
Action: func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) },
CustomHelpTemplate: helpTemplate,
Name: "help",
Usage: "shows this help",
Hidden: true,
}

var defaultNodeConfig = node.DefaultConfig

// This init function sets defaults so cmd/swarm can run alongside geth.
@@ -226,6 +247,41 @@ func init() {
Flags: []cli.Flag{SwarmEncryptedFlag},
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
CustomHelpTemplate: helpTemplate,
Name: "resource",
Usage: "(Advanced) Create and update Mutable Resources",
ArgsUsage: "<create|update|info>",
Description: "Works with Mutable Resource Updates",
Subcommands: []cli.Command{
{
Action: resourceCreate,
CustomHelpTemplate: helpTemplate,
Name: "create",
Usage: "creates a new Mutable Resource",
ArgsUsage: "<frequency>",
Description: "creates a new Mutable Resource",
Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
},
{
Action: resourceUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "updates the content of an existing Mutable Resource",
ArgsUsage: "<Manifest Address or ENS domain> <0x Hex data>",
Description: "updates the content of an existing Mutable Resource",
Flags: []cli.Flag{SwarmResourceMultihashFlag},
},
{
Action: resourceInfo,
CustomHelpTemplate: helpTemplate,
Name: "info",
Usage: "obtains information about an existing Mutable Resource",
ArgsUsage: "<Manifest Address or ENS domain>",
Description: "obtains information about an existing Mutable Resource",
},
},
},
{
Action: list,
CustomHelpTemplate: helpTemplate,
@@ -377,6 +433,11 @@ pv(1) tool to get a progress bar:
// See config.go
DumpConfigCommand,
}

// append a hidden help subcommand to all commands that have subcommands
// if a help command was already defined above, that one will take precedence.
addDefaultHelpSubcommands(app.Commands)

sort.Sort(cli.CommandsByName(app.Commands))

app.Flags = []cli.Flag{
@@ -549,6 +610,26 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx))
}

// getPrivKey returns the private key of the specified bzzaccount
// Used only by client commands, such as `resource`
func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
// booting up the swarm node just as we do in bzzd action
bzzconfig, err := buildConfig(ctx)
if err != nil {
utils.Fatalf("unable to configure swarm: %v", err)
}
cfg := defaultNodeConfig
if _, err := os.Stat(bzzconfig.Path); err == nil {
cfg.DataDir = bzzconfig.Path
}
utils.SetNodeConfig(ctx, &cfg)
stack, err := node.New(&cfg)
if err != nil {
utils.Fatalf("can't create node: %v", err)
}
return getAccount(bzzconfig.BzzAccount, ctx, stack)
}

func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey {
var a accounts.Account
var err error
@@ -613,3 +694,16 @@ func injectBootnodes(srv *p2p.Server, nodes []string) {
srv.AddPeer(n)
}
}

// addDefaultHelpSubcommand scans through defined CLI commands and adds
// a basic help subcommand to each
// if a help command is already defined, it will take precedence over the default.
func addDefaultHelpSubcommands(commands []cli.Command) {
for i := range commands {
cmd := &commands[i]
if cmd.Subcommands != nil {
cmd.Subcommands = append(cmd.Subcommands, defaultSubcommandHelp)
addDefaultHelpSubcommands(cmd.Subcommands)
}
}
}
cmd/swarm/mru.go (new file, 169 lines)
@@ -0,0 +1,169 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Command resource allows the user to create and update signed mutable resource updates
package main

import (
"fmt"
"strconv"
"strings"

"github.com/ethereum/go-ethereum/common/hexutil"

"github.com/ethereum/go-ethereum/cmd/utils"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/storage/mru"
"gopkg.in/urfave/cli.v1"
)

func NewGenericSigner(ctx *cli.Context) mru.Signer {
return mru.NewGenericSigner(getPrivKey(ctx))
}

// swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
// swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
// swarm resource info <Manifest Address or ENS domain>

func resourceCreate(ctx *cli.Context) {
args := ctx.Args()

var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
name = ctx.String(SwarmResourceNameFlag.Name)
)

if len(args) < 1 {
fmt.Println("Incorrect number of arguments")
cli.ShowCommandHelpAndExit(ctx, "create", 1)
return
}
signer := NewGenericSigner(ctx)
frequency, err := strconv.ParseUint(args[0], 10, 64)
if err != nil {
fmt.Printf("Frequency formatting error: %s\n", err.Error())
cli.ShowCommandHelpAndExit(ctx, "create", 1)
return
}

metadata := mru.ResourceMetadata{
Name: name,
Frequency: frequency,
Owner: signer.Address(),
}

var newResourceRequest *mru.Request
if initialData != "" {
initialDataBytes, err := hexutil.Decode(initialData)
if err != nil {
fmt.Printf("Error parsing data: %s\n", err.Error())
cli.ShowCommandHelpAndExit(ctx, "create", 1)
return
}
newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
if err != nil {
utils.Fatalf("Error creating new resource request: %s", err)
}
newResourceRequest.SetData(initialDataBytes, multihash)
if err = newResourceRequest.Sign(signer); err != nil {
utils.Fatalf("Error signing resource update: %s", err.Error())
}
} else {
newResourceRequest, err = mru.NewCreateRequest(&metadata)
if err != nil {
utils.Fatalf("Error creating new resource request: %s", err)
}
}

manifestAddress, err := client.CreateResource(newResourceRequest)
if err != nil {
utils.Fatalf("Error creating resource: %s", err.Error())
return
}
fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)

}

func resourceUpdate(ctx *cli.Context) {
args := ctx.Args()

var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
)

if len(args) < 2 {
fmt.Println("Incorrect number of arguments")
cli.ShowCommandHelpAndExit(ctx, "update", 1)
return
}
signer := NewGenericSigner(ctx)
manifestAddressOrDomain := args[0]
data, err := hexutil.Decode(args[1])
if err != nil {
utils.Fatalf("Error parsing data: %s", err.Error())
return
}

// Retrieve resource status and metadata out of the manifest
updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
if err != nil {
utils.Fatalf("Error retrieving resource status: %s", err.Error())
}

// set the new data
updateRequest.SetData(data, multihash)

// sign update
if err = updateRequest.Sign(signer); err != nil {
utils.Fatalf("Error signing resource update: %s", err.Error())
}

// post update
err = client.UpdateResource(updateRequest)
if err != nil {
utils.Fatalf("Error updating resource: %s", err.Error())
return
}
}

func resourceInfo(ctx *cli.Context) {
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = swarm.NewClient(bzzapi)
)
args := ctx.Args()
if len(args) < 1 {
fmt.Println("Incorrect number of arguments.")
cli.ShowCommandHelpAndExit(ctx, "info", 1)
return
}
manifestAddressOrDomain := args[0]
metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
if err != nil {
utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
return
}
encodedMetadata, err := metadata.MarshalJSON()
if err != nil {
utils.Fatalf("Error encoding metadata to JSON for display:%s", err)
}
fmt.Println(string(encodedMetadata))
}
@@ -296,6 +296,13 @@ func (sn *SimNode) Stop() error {
return sn.node.Stop()
}

// Service returns a running service by name
func (sn *SimNode) Service(name string) node.Service {
sn.lock.RLock()
defer sn.lock.RUnlock()
return sn.running[name]
}

// Services returns a copy of the underlying services
func (sn *SimNode) Services() []node.Service {
sn.lock.RLock()
@@ -307,6 +314,17 @@ func (sn *SimNode) Services() []node.Service {
return services
}

// ServiceMap returns a map by names of the underlying services
func (sn *SimNode) ServiceMap() map[string]node.Service {
sn.lock.RLock()
defer sn.lock.RUnlock()
services := make(map[string]node.Service, len(sn.running))
for name, service := range sn.running {
services[name] = service
}
return services
}

// Server returns the underlying p2p.Server
func (sn *SimNode) Server() *p2p.Server {
return sn.node.Server()
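For illustration only (not part of this commit): a sketch of how a simulation test might use the new accessors; sn is assumed to be a running *SimNode and "bzz" is an assumed service name.

func checkServices(t *testing.T, sn *SimNode) {
	if svc := sn.Service("bzz"); svc == nil { // lookup by name instead of iterating Services()
		t.Fatal("service not registered")
	}
	for name, svc := range sn.ServiceMap() { // copy of the name -> service map
		t.Logf("running service %q: %T", name, svc)
	}
}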
@@ -351,11 +351,12 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
// we need to do some extra work if this is a mutable resource manifest
if entry.ContentType == ResourceContentType {

// get the resource root chunk key
log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash)
// get the resource rootAddr
log.Trace("resource type", "menifestAddr", manifestAddr, "hash", entry.Hash)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
rsrc, err := a.resource.Load(ctx, storage.Address(common.FromHex(entry.Hash)))
rootAddr := storage.Address(common.FromHex(entry.Hash))
rsrc, err := a.resource.Load(ctx, rootAddr)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -364,7 +365,8 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
}

// use this key to retrieve the latest update
rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{})
params := mru.LookupLatest(rootAddr)
rsrc, err = a.resource.Lookup(ctx, params)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -374,10 +376,10 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string

// if it's multihash, we will transparently serve the content this multihash points to
// \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper
if rsrc.Multihash {
if rsrc.Multihash() {

// get the data of the update
_, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex())
_, rsrcData, err := a.resource.GetContent(rootAddr)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -888,66 +890,39 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver
return addr, manifestEntryMap, nil
}

// ResourceLookup Looks up mutable resource updates at specific periods and versions
func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) {
// ResourceLookup finds mutable resource updates at specific periods and versions
func (a *API) ResourceLookup(ctx context.Context, params *mru.LookupParams) (string, []byte, error) {
var err error
rsrc, err := a.resource.Load(ctx, addr)
rsrc, err := a.resource.Load(ctx, params.RootAddr())
if err != nil {
return "", nil, err
}
if version != 0 {
if period == 0 {
return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0")
}
_, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup)
} else if period != 0 {
_, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup)
} else {
_, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup)
}
_, err = a.resource.Lookup(ctx, params)
if err != nil {
return "", nil, err
}
var data []byte
_, data, err = a.resource.GetContent(rsrc.NameHash().Hex())
_, data, err = a.resource.GetContent(params.RootAddr())
if err != nil {
return "", nil, err
}
return rsrc.Name(), data, nil
}

// ResourceCreate creates Resource and returns its key
func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) {
key, _, err := a.resource.New(ctx, name, frequency)
if err != nil {
return nil, err
}
return key, nil
// Create Mutable resource
func (a *API) ResourceCreate(ctx context.Context, request *mru.Request) error {
return a.resource.New(ctx, request)
}

// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval.
// It will fail if the data is not a valid multihash.
func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
return a.resourceUpdate(ctx, name, data, true)
// ResourceNewRequest creates a Request object to update a specific mutable resource
func (a *API) ResourceNewRequest(ctx context.Context, rootAddr storage.Address) (*mru.Request, error) {
return a.resource.NewUpdateRequest(ctx, rootAddr)
}

// ResourceUpdate updates a Mutable Resource with arbitrary data.
// Upon retrieval the update will be retrieved verbatim as bytes.
func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
return a.resourceUpdate(ctx, name, data, false)
}

func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) {
var addr storage.Address
var err error
if multihash {
addr, err = a.resource.UpdateMultihash(ctx, name, data)
} else {
addr, err = a.resource.Update(ctx, name, data)
}
period, _ := a.resource.GetLastPeriod(name)
version, _ := a.resource.GetVersion(name)
return addr, period, version, err
func (a *API) ResourceUpdate(ctx context.Context, request *mru.SignedResourceUpdate) (storage.Address, error) {
return a.resource.Update(ctx, request)
}

// ResourceHashSize returned the size of the digest produced by the Mutable Resource hashing function
@@ -955,11 +930,6 @@ func (a *API) ResourceHashSize() int {
return a.resource.HashSize
}

// ResourceIsValidated checks if the Mutable Resource has an active content validator.
func (a *API) ResourceIsValidated() bool {
return a.resource.IsValidated()
}

// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk.
func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) {
trie, err := loadManifest(ctx, a.fileStore, addr, nil)
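For illustration only (not part of this commit): the reworked ResourceLookup takes a *mru.LookupParams built with the constructor helpers referenced above instead of raw period/version numbers. A minimal sketch, where rootAddr stands for the resource's metadata (root) chunk address:

// sketch: fetch the latest update of a mutable resource through the new API
func latestUpdate(ctx context.Context, a *API, rootAddr storage.Address) ([]byte, error) {
	_, data, err := a.ResourceLookup(ctx, mru.LookupLatest(rootAddr))
	if err != nil {
		return nil, err
	}
	// a specific update would use mru.LookupVersion(rootAddr, period, version) instead
	return data, nil
}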
@@ -35,6 +35,7 @@ import (
"strings"

"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

var (
@@ -562,3 +563,89 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error)
}
return string(data), nil
}

// CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided
// data. Data is interpreted as multihash or not depending on the multihash parameter.
// startTime=0 means "now"
// Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent)
// or reference future updates (Client.UpdateResource)
func (c *Client) CreateResource(request *mru.Request) (string, error) {
responseStream, err := c.updateResource(request)
if err != nil {
return "", err
}
defer responseStream.Close()

body, err := ioutil.ReadAll(responseStream)
if err != nil {
return "", err
}

var manifestAddress string
if err = json.Unmarshal(body, &manifestAddress); err != nil {
return "", err
}
return manifestAddress, nil
}

// UpdateResource allows you to set a new version of your content
func (c *Client) UpdateResource(request *mru.Request) error {
_, err := c.updateResource(request)
return err
}

func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) {
body, err := request.MarshalJSON()
if err != nil {
return nil, err
}

req, err := http.NewRequest("POST", c.Gateway+"/bzz-resource:/", bytes.NewBuffer(body))
if err != nil {
return nil, err
}

res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}

return res.Body, nil

}

// GetResource returns a byte stream with the raw content of the resource
// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
// points to that address
func (c *Client) GetResource(manifestAddressOrDomain string) (io.ReadCloser, error) {

res, err := http.Get(c.Gateway + "/bzz-resource:/" + manifestAddressOrDomain)
if err != nil {
return nil, err
}
return res.Body, nil

}

// GetResourceMetadata returns a structure that describes the Mutable Resource
// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
// points to that address
func (c *Client) GetResourceMetadata(manifestAddressOrDomain string) (*mru.Request, error) {

responseStream, err := c.GetResource(manifestAddressOrDomain + "/meta")
if err != nil {
return nil, err
}
defer responseStream.Close()

body, err := ioutil.ReadAll(responseStream)
if err != nil {
return nil, err
}

var metadata mru.Request
if err := metadata.UnmarshalJSON(body); err != nil {
return nil, err
}
return &metadata, nil
}
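For illustration only (not part of this commit): the new client methods compose into a create, read-metadata, update loop. A minimal sketch, assuming a signer that implements mru.Signer and a local gateway URL (both placeholders):

// sketch: create a resource, then publish a second version, using only the methods added above
func createAndUpdate(signer mru.Signer) error {
	c := NewClient("http://localhost:8500") // placeholder gateway URL
	req, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
		Name:      "example", // illustrative metadata
		Frequency: 13,
		Owner:     signer.Address(),
	})
	if err != nil {
		return err
	}
	req.SetData([]byte("v1"), false) // false: raw data, not a multihash
	if err := req.Sign(signer); err != nil {
		return err
	}
	manifest, err := c.CreateResource(req)
	if err != nil {
		return err
	}
	// fetch the current metadata as an update template, set new data, re-sign, post
	update, err := c.GetResourceMetadata(manifest)
	if err != nil {
		return err
	}
	update.SetData([]byte("v2"), false)
	if err := update.Sign(signer); err != nil {
		return err
	}
	return c.UpdateResource(update)
}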
@@ -25,8 +25,12 @@ import (
"sort"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
"github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage/mru"
"github.com/ethereum/go-ethereum/swarm/testutil"
)

@@ -354,3 +358,159 @@ func TestClientMultipartUpload(t *testing.T) {
checkDownloadFile(file)
}
}

func newTestSigner() (*mru.GenericSigner, error) {
privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
if err != nil {
return nil, err
}
return mru.NewGenericSigner(privKey), nil
}

// test the transparent resolving of multihash resource types with bzz:// scheme
//
// first upload data, and store the multihash to the resulting manifest in a resource update
// retrieving the update with the multihash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
func TestClientCreateResourceMultihash(t *testing.T) {

signer, _ := newTestSigner()

srv := testutil.NewTestSwarmServer(t, serverFunc)
client := NewClient(srv.URL)
defer srv.Close()

// add the data our multihash aliased manifest will point to
databytes := []byte("bar")

swarmHash, err := client.UploadRaw(bytes.NewReader(databytes), int64(len(databytes)), false)
if err != nil {
t.Fatalf("Error uploading raw test data: %s", err)
}

s := common.FromHex(swarmHash)
mh := multihash.ToMultihash(s)

// our mutable resource "name"
resourceName := "foo.eth"

createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
Name: resourceName,
Frequency: 13,
StartTime: srv.GetCurrentTime(),
Owner: signer.Address(),
})
if err != nil {
t.Fatal(err)
}
createRequest.SetData(mh, true)
if err := createRequest.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}

resourceManifestHash, err := client.CreateResource(createRequest)

if err != nil {
t.Fatalf("Error creating resource: %s", err)
}

correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
if resourceManifestHash != correctManifestAddrHex {
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
}

reader, err := client.GetResource(correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving resource: %s", err)
}
defer reader.Close()
gotData, err := ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(mh, gotData) {
t.Fatalf("Expected: %v, got %v", mh, gotData)
}

}

// TestClientCreateUpdateResource will check that mutable resources can be created and updated via the HTTP client.
func TestClientCreateUpdateResource(t *testing.T) {

signer, _ := newTestSigner()

srv := testutil.NewTestSwarmServer(t, serverFunc)
client := NewClient(srv.URL)
defer srv.Close()

// set raw data for the resource
databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...")

// our mutable resource name
resourceName := "El Quijote"

createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
Name: resourceName,
Frequency: 13,
StartTime: srv.GetCurrentTime(),
Owner: signer.Address(),
})
if err != nil {
t.Fatal(err)
}
createRequest.SetData(databytes, false)
if err := createRequest.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}

resourceManifestHash, err := client.CreateResource(createRequest)

correctManifestAddrHex := "cc7904c17b49f9679e2d8006fe25e87e3f5c2072c2b49cab50f15e544471b30a"
if resourceManifestHash != correctManifestAddrHex {
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
}

reader, err := client.GetResource(correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving resource: %s", err)
}
defer reader.Close()
gotData, err := ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(databytes, gotData) {
t.Fatalf("Expected: %v, got %v", databytes, gotData)
}

// define different data
databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...")

updateRequest, err := client.GetResourceMetadata(correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving update request template: %s", err)
}

updateRequest.SetData(databytes, false)
if err := updateRequest.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}

if err = client.UpdateResource(updateRequest); err != nil {
t.Fatalf("Error updating resource: %s", err)
}

reader, err = client.GetResource(correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving resource: %s", err)
}
defer reader.Close()
gotData, err = ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(databytes, gotData) {
t.Fatalf("Expected: %v, got %v", databytes, gotData)
}

}
@@ -38,7 +38,6 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/log"
@@ -101,9 +100,11 @@ func NewServer(api *api.API, corsString string) *Server {
server.Handler = c.Handler(mux)
return server
}

func (s *Server) ListenAndServe(addr string) error {
return http.ListenAndServe(addr, s)
}

func (s *Server) HandleRootPaths(w http.ResponseWriter, r *Request) {
switch r.Method {
case http.MethodGet:
@@ -133,6 +134,7 @@ func (s *Server) HandleRootPaths(w http.ResponseWriter, r *Request) {
Respond(w, r, "Not Found", http.StatusNotFound)
}
}

func (s *Server) HandleBzz(w http.ResponseWriter, r *Request) {
switch r.Method {
case http.MethodGet:
@@ -240,12 +242,6 @@ func (s *Server) WrapHandler(parseBzzUri bool, h func(http.ResponseWriter, *Requ
// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
// electron (chromium) api for registering bzz url scheme handlers:
// https://github.com/atom/electron/blob/master/docs/api/protocol.md

// browser API for registering bzz url scheme handlers:
// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
// electron (chromium) api for registering bzz url scheme handlers:
// https://github.com/atom/electron/blob/master/docs/api/protocol.md

type Server struct {
http.Handler
api *api.API
@@ -340,7 +336,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) {

var addr storage.Address
if r.uri.Addr != "" && r.uri.Addr != "encrypt" {
addr, err = s.api.Resolve(ctx, r.uri)
addr, err = s.api.Resolve(r.Context(), r.uri)
if err != nil {
postFilesFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError)
@@ -348,7 +344,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) {
}
log.Debug("resolved key", "ruid", r.ruid, "key", addr)
} else {
addr, err = s.api.NewManifest(ctx, toEncrypt)
addr, err = s.api.NewManifest(r.Context(), toEncrypt)
if err != nil {
postFilesFail.Inc(1)
Respond(w, r, err.Error(), http.StatusInternalServerError)
@@ -521,9 +517,8 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) {
// If the latter is used, a subsequent bzz:// GET call to the manifest of the resource will return
// the page that the multihash is pointing to, as if it held a normal swarm content manifest
//
// The resource name will be verbatim what is passed as the address part of the url.
// For example, if a POST is made to /bzz-resource:/foo.eth/raw/13 a new resource with frequency 13
// and name "foo.eth" will be created
// The POST request admits a JSON structure as defined in the mru package: `mru.updateRequestJSON`
// The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content
func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
log.Debug("handle.post.resource", "ruid", r.ruid)

@@ -535,33 +530,54 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
defer sp.Finish()

var err error
var addr storage.Address
var name string
var outdata []byte
isRaw, frequency, err := resourcePostMode(r.uri.Path)

// Creation and update must send mru.updateRequestJSON JSON structure
body, err := ioutil.ReadAll(r.Body)
if err != nil {
Respond(w, r, err.Error(), http.StatusBadRequest)
Respond(w, r, err.Error(), http.StatusInternalServerError)
return
}
var updateRequest mru.Request
if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON
Respond(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error
return
}

// new mutable resource creation will always have a frequency field larger than 0
if frequency > 0 {
if updateRequest.IsUpdate() {
// Verify that the signature is intact and that the signer is authorized
// to update this resource
// Check this early, to avoid creating a resource and then not being able to set its first update.
if err = updateRequest.Verify(); err != nil {
Respond(w, r, err.Error(), http.StatusForbidden)
return
}
}

name = r.uri.Addr

// the key is the content addressed root chunk holding mutable resource metadata information
addr, err = s.api.ResourceCreate(ctx, name, frequency)
if updateRequest.IsNew() {
err = s.api.ResourceCreate(r.Context(), &updateRequest)
if err != nil {
code, err2 := s.translateResourceError(w, r, "resource creation fail", err)

Respond(w, r, err2.Error(), code)
return
}
}

if updateRequest.IsUpdate() {
_, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate)
if err != nil {
Respond(w, r, err.Error(), http.StatusInternalServerError)
return
}
}

// at this point both possible operations (create, update or both) were successful
// so in case it was a new resource, then create a manifest and send it over.

if updateRequest.IsNew() {
// we create a manifest so we can retrieve the resource with bzz:// later
// this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource
// root chunk
m, err := s.api.NewResourceManifest(ctx, addr.Hex())
// metadata chunk (rootAddr)
m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex())
if err != nil {
Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError)
return
@@ -571,85 +587,21 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
// the client can access the root chunk key directly through its Hash member
// the manifest key should be set as content in the resolver of the ENS name
// \TODO update manifest key automatically in ENS
outdata, err = json.Marshal(m)
outdata, err := json.Marshal(m)
if err != nil {
Respond(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError)
return
}
} else {
// to update the resource through http we need to retrieve the key for the mutable resource root chunk
// that means that we retrieve the manifest and inspect its Hash member.
manifestAddr := r.uri.Address()
if manifestAddr == nil {
manifestAddr, err = s.api.Resolve(ctx, r.uri)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
return
}
} else {
w.Header().Set("Cache-Control", "max-age=2147483648")
}

// get the root chunk key from the manifest
addr, err = s.api.ResolveResourceManifest(ctx, manifestAddr)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound)
return
}

log.Debug("handle.post.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunkkey", addr)

name, _, err = s.api.ResourceLookup(ctx, addr, 0, 0, &mru.LookupParams{})
if err != nil {
Respond(w, r, err.Error(), http.StatusNotFound)
return
}
}

// Creation and update must send data aswell. This data constitutes the update data itself.
data, err := ioutil.ReadAll(r.Body)
if err != nil {
Respond(w, r, err.Error(), http.StatusInternalServerError)
return
}

// Multihash will be passed as hex-encoded data, so we need to parse this to bytes
if isRaw {
_, _, _, err = s.api.ResourceUpdate(ctx, name, data)
if err != nil {
Respond(w, r, err.Error(), http.StatusBadRequest)
return
}
} else {
bytesdata, err := hexutil.Decode(string(data))
if err != nil {
Respond(w, r, err.Error(), http.StatusBadRequest)
return
}
_, _, _, err = s.api.ResourceUpdateMultihash(ctx, name, bytesdata)
if err != nil {
Respond(w, r, err.Error(), http.StatusBadRequest)
return
}
}

// If we have data to return, write this now
// \TODO there should always be data to return here
if len(outdata) > 0 {
w.Header().Add("Content-type", "text/plain")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(outdata))
return
}
w.WriteHeader(http.StatusOK)
w.Header().Add("Content-type", "application/json")
}

// Retrieve mutable resource updates:
// bzz-resource://<id> - get latest update
// bzz-resource://<id>/<n> - get latest update on period n
// bzz-resource://<id>/<n>/<m> - get update version m of period n
// bzz-resource://<id>/meta - get metadata and next version information
// <id> = ens name or hash
// TODO: Enable pass maxPeriod parameter
func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
@@ -669,31 +621,51 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
w.Header().Set("Cache-Control", "max-age=2147483648")
}

// get the root chunk key from the manifest
key, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
// get the root chunk rootAddr from the manifest
rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound)
return
}

log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk key", key)
log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr)

// determine if the query specifies period and version
// determine if the query specifies period and version or it is a metadata query
var params []string
if len(r.uri.Path) > 0 {
if r.uri.Path == "meta" {
unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound)
return
}
rawResponse, err := unsignedUpdateRequest.MarshalJSON()
if err != nil {
Respond(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError)
return
}
w.Header().Add("Content-type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(rawResponse))
return

}

params = strings.Split(r.uri.Path, "/")

}
var name string
var period uint64
var version uint64
var data []byte
now := time.Now()

switch len(params) {
case 0: // latest only
name, data, err = s.api.ResourceLookup(r.Context(), key, 0, 0, nil)
name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr))
case 2: // specific period and version
var version uint64
var period uint64
version, err = strconv.ParseUint(params[1], 10, 32)
if err != nil {
break
@@ -702,13 +674,14 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
if err != nil {
break
}
name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil)
name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version)))
case 1: // last version of specific period
var period uint64
period, err = strconv.ParseUint(params[0], 10, 32)
if err != nil {
break
}
name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil)
name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period)))
default: // bogus
err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request")
}
@@ -766,7 +739,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) {
var err error
addr := r.uri.Address()
if addr == nil {
addr, err = s.api.Resolve(ctx, r.uri)
addr, err = s.api.Resolve(r.Context(), r.uri)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
@@ -781,7 +754,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) {
// if path is set, interpret <key> as a manifest and return the
// raw entry at the given path
if r.uri.Path != "" {
walker, err := s.api.NewManifestWalker(ctx, addr, nil)
walker, err := s.api.NewManifestWalker(r.Context(), addr, nil)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest)
@@ -875,7 +848,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) {
return
}

addr, err := s.api.Resolve(ctx, r.uri)
addr, err := s.api.Resolve(r.Context(), r.uri)
if err != nil {
getListFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
@@ -935,7 +908,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) {
manifestAddr := r.uri.Address()

if manifestAddr == nil {
manifestAddr, err = s.api.Resolve(ctx, r.uri)
manifestAddr, err = s.api.Resolve(r.Context(), r.uri)
if err != nil {
getFileFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
@@ -947,8 +920,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) {
}

log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr)

reader, contentType, status, contentKey, err := s.api.Get(ctx, manifestAddr, r.uri.Path)
reader, contentType, status, contentKey, err := s.api.Get(r.Context(), manifestAddr, r.uri.Path)

etag := common.Bytes2Hex(contentKey)
noneMatchEtag := r.Header.Get("If-None-Match")
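For illustration only (not part of this commit): at the HTTP level the reworked handlers expect a JSON-encoded signed request on POST, while GET serves the latest update, a specific period/version, or the unsigned metadata template. A minimal sketch; the gateway URL, the period/version values and the helper name are placeholders, and imports (net/http, bytes, mru) are elided:

// sketch: exercise the bzz-resource endpoints described above
func postAndQuery(gatewayURL, idOrName string, signed *mru.Request) error {
	body, err := signed.MarshalJSON()
	if err != nil {
		return err
	}
	// create or update: POST the signed request to the collection URL
	if _, err = http.Post(gatewayURL+"/bzz-resource:/", "application/json", bytes.NewReader(body)); err != nil {
		return err
	}
	// latest update, a specific period/version, and the unsigned metadata template
	for _, path := range []string{"", "/3/1", "/meta"} {
		resp, err := http.Get(gatewayURL + "/bzz-resource:/" + idOrName + path)
		if err != nil {
			return err
		}
		resp.Body.Close()
	}
	return nil
}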
@@ -34,12 +34,13 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/mru"
"github.com/ethereum/go-ethereum/swarm/testutil"
)

@@ -94,6 +95,14 @@ func serverFunc(api *api.API) testutil.TestServer {
return NewServer(api, "")
}

func newTestSigner() (*mru.GenericSigner, error) {
privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
if err != nil {
return nil, err
}
return mru.NewGenericSigner(privKey), nil
}

// test the transparent resolving of multihash resource types with bzz:// scheme
//
// first upload data, and store the multihash to the resulting manifest in a resource update
@@ -101,6 +110,8 @@ func serverFunc(api *api.API) testutil.TestServer {
// and raw retrieve of that hash should return the data
func TestBzzResourceMultihash(t *testing.T) {

signer, _ := newTestSigner()

srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()

@@ -123,15 +134,35 @@ func TestBzzResourceMultihash(t *testing.T) {
s := common.FromHex(string(b))
mh := multihash.ToMultihash(s)

mhHex := hexutil.Encode(mh)
log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))

// our mutable resource "name"
keybytes := "foo.eth"

updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
Name: keybytes,
Frequency: 13,
StartTime: srv.GetCurrentTime(),
Owner: signer.Address(),
})
if err != nil {
t.Fatal(err)
}
updateRequest.SetData(mh, true)

if err := updateRequest.Sign(signer); err != nil {
t.Fatal(err)
}
log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))

body, err := updateRequest.MarshalJSON()
if err != nil {
t.Fatal(err)
}

// create the multihash update
url = fmt.Sprintf("%s/bzz-resource:/%s/13", srv.URL, keybytes)
resp, err = http.Post(url, "application/octet-stream", bytes.NewReader([]byte(mhHex)))
url = fmt.Sprintf("%s/bzz-resource:/", srv.URL)
resp, err = http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -149,9 +180,9 @@ func TestBzzResourceMultihash(t *testing.T) {
t.Fatalf("data %s could not be unmarshaled: %v", b, err)
}

correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0"
correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
if rsrcResp.Hex() != correctManifestAddrHex {
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp)
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
}

// get bzz manifest transparent resource resolve
@@ -176,6 +207,8 @@ func TestBzzResourceMultihash(t *testing.T) {
// Test resource updates using the raw update methods
func TestBzzResource(t *testing.T) {
srv := testutil.NewTestSwarmServer(t, serverFunc)
signer, _ := newTestSigner()

defer srv.Close()

// our mutable resource "name"
@@ -188,9 +221,29 @@ func TestBzzResource(t *testing.T) {
t.Fatal(err)
}

updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
Name: keybytes,
Frequency: 13,
StartTime: srv.GetCurrentTime(),
Owner: signer.Address(),
})
if err != nil {
t.Fatal(err)
}
updateRequest.SetData(databytes, false)

if err := updateRequest.Sign(signer); err != nil {
t.Fatal(err)
}

body, err := updateRequest.MarshalJSON()
if err != nil {
t.Fatal(err)
}

// creates resource and sets update 1
url := fmt.Sprintf("%s/bzz-resource:/%s/raw/13", srv.URL, []byte(keybytes))
resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(databytes))
url := fmt.Sprintf("%s/bzz-resource:/", srv.URL)
resp, err := http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -208,7 +261,7 @@ func TestBzzResource(t *testing.T) {
t.Fatalf("data %s could not be unmarshaled: %v", b, err)
}

correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0"
correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
if rsrcResp.Hex() != correctManifestAddrHex {
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
}
@@ -235,8 +288,7 @@ func TestBzzResource(t *testing.T) {
if len(manifest.Entries) != 1 {
t.Fatalf("Manifest has %d entries", len(manifest.Entries))
}

correctRootKeyHex := "f667277e004e8486c7a3631fd226802430e84e9a81b6085d31f512a591ae0065"
correctRootKeyHex := "68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562"
if manifest.Entries[0].Hash != correctRootKeyHex {
t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash)
}
@@ -262,6 +314,11 @@ func TestBzzResource(t *testing.T) {
if err != nil {
t.Fatal(err)
}

if resp.StatusCode != http.StatusNotFound {
t.Fatalf("Expected get non-existent resource to fail with StatusNotFound (404), got %d", resp.StatusCode)
}

resp.Body.Close()

// get latest update (1.1) through resource directly
@@ -285,9 +342,36 @@ func TestBzzResource(t *testing.T) {

// update 2
log.Info("update 2")
url = fmt.Sprintf("%s/bzz-resource:/%s/raw", srv.URL, correctManifestAddrHex)

// 1.- get metadata about this resource
url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex)
resp, err = http.Get(url + "meta")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Get resource metadata returned %s", resp.Status)
}
b, err = ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
updateRequest = &mru.Request{}
if err = updateRequest.UnmarshalJSON(b); err != nil {
t.Fatalf("Error decoding resource metadata: %s", err)
}
data := []byte("foo")
resp, err = http.Post(url, "application/octet-stream", bytes.NewReader(data))
updateRequest.SetData(data, false)
if err = updateRequest.Sign(signer); err != nil {
t.Fatal(err)
}
body, err = updateRequest.MarshalJSON()
if err != nil {
t.Fatal(err)
}

resp, err = http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
swarm/bmt/bmt.go (366 changed lines)
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt provides a binary merkle tree implementation
// Package bmt provides a binary merkle tree implementation used for swarm chunk hash
package bmt

import (
@@ -26,16 +26,16 @@ import (
)

/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size.
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g keccak 256 SHA3).
Chunk with data shorter than the fixed size are hashed as if they had zero padding
of the underlying chunk using any base hash function (e.g., keccak 256 SHA3).
Chunks with data shorter than the fixed size are hashed as if they had zero padding.

BMT hash is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash

The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed to the size of the base hash (called the resolution
of the BMT hash), Using Keccak256 SHA3 hash is 32 bytes, the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
@@ -46,11 +46,12 @@ Two implementations are provided:
that is simple to understand
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
control structure to coordinate the concurrent routines
It implements the following interfaces
* standard golang hash.Hash
* SwarmHash
* io.Writer
* TODO: SegmentWriter

BMT Hasher implements the following interfaces
* standard golang hash.Hash - synchronous, reusable
* SwarmHash - SumWithSpan provided
* io.Writer - synchronous left-to-right datawriter
* AsyncWriter - concurrent section writes and asynchronous Sum call
*/
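For illustration only (not part of this commit): a sketch of hashing one chunk payload through the hash.Hash interface listed above; the segment count and pool capacity are assumed values, and the base hash constructor is passed in rather than named.

// sketch: hash a payload with a pooled BMT hasher
func bmtDigest(payload []byte, hashFunc BaseHasherFunc) []byte {
	pool := NewTreePool(hashFunc, 128, 32) // 128 segments, pool capacity 32 (illustrative)
	h := New(pool)
	h.Reset()
	h.Write(payload) // data shorter than the full chunk is treated as zero padded
	return h.Sum(nil)
}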

const (
@ -69,7 +70,7 @@ type BaseHasherFunc func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// - implements the hash.Hash interface
// - reuses a pool of trees for amortised memory allocation and resource control
// - supports order-agnostic concurrent segment writes (TODO:)
// - supports order-agnostic concurrent segment writes and section (double segment) writes
// as well as sequential read and write
// - the same hasher instance must not be called concurrently on more than one chunk
// - the same hasher instance is synchronously reuseable
@ -81,8 +82,7 @@ type Hasher struct {
bmt *tree // prebuilt BMT resource for flowcontrol and proofs
}

// New creates a reusable Hasher
// implements the hash.Hash interface
// New creates a reusable BMT Hasher that
// pulls a new tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
@ -90,9 +90,9 @@ func New(p *TreePool) *Hasher {
}
}

// TreePool provides a pool of trees used as resources by Hasher
// a tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// TreePool provides a pool of trees used as resources by the BMT Hasher.
// A tree popped from the pool is guaranteed to have a clean state ready
// for hashing a new chunk.
type TreePool struct {
lock sync.Mutex
c chan *tree // the channel to obtain a resource from the pool
@ -101,7 +101,7 @@ type TreePool struct {
SegmentCount int // the number of segments on the base level of the BMT
Capacity int // pool capacity, controls concurrency
Depth int // depth of the bmt trees = int(log2(segmentCount))+1
Datalength int // the total length of the data (count * size)
Size int // the total length of the data (count * size)
count int // current count of (ever) allocated resources
zerohashes [][]byte // lookup table for predictable padding subtrees for all levels
}
@ -112,12 +112,12 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
// initialises the zerohashes lookup table
depth := calculateDepthFor(segmentCount)
segmentSize := hasher().Size()
zerohashes := make([][]byte, depth)
zerohashes := make([][]byte, depth+1)
zeros := make([]byte, segmentSize)
zerohashes[0] = zeros
h := hasher()
for i := 1; i < depth; i++ {
zeros = doHash(h, nil, zeros, zeros)
for i := 1; i < depth+1; i++ {
zeros = doSum(h, nil, zeros, zeros)
zerohashes[i] = zeros
}
return &TreePool{
@ -126,7 +126,7 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
SegmentSize: segmentSize,
SegmentCount: segmentCount,
Capacity: capacity,
Datalength: segmentCount * segmentSize,
Size: segmentCount * segmentSize,
Depth: depth,
zerohashes: zerohashes,
}
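The pool is the unit of reuse and concurrency control: Capacity bounds how many trees (and therefore how many chunks) can be hashed at once, and each Hasher borrows a tree for the duration of one chunk. A hedged sketch of sharing one pool across goroutines, in the spirit of the benchmarkPool function later in this diff (same assumed imports as the earlier sketch, plus "sync"):

// hashManyChunks hashes many chunks concurrently while sharing one TreePool;
// the pool capacity (here bmt.PoolSize) bounds how many trees are in use at once.
func hashManyChunks(chunks [][]byte) {
	pool := bmt.NewTreePool(sha3.NewKeccak256, bmt.SegmentCount, bmt.PoolSize)
	var wg sync.WaitGroup
	for _, chunk := range chunks {
		wg.Add(1)
		go func(chunk []byte) {
			defer wg.Done()
			h := bmt.New(pool)      // cheap: a tree is only reserved when hashing starts
			h.ResetWithLength(nil)  // nil span: return the plain BMT root, no span prefix
			h.Write(chunk)
			_ = h.Sum(nil)
		}(chunk)
	}
	wg.Wait()
}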
@ -155,7 +155,7 @@ func (p *TreePool) reserve() *tree {
select {
case t = <-p.c:
default:
t = newTree(p.SegmentSize, p.Depth)
t = newTree(p.SegmentSize, p.Depth, p.hasher)
p.count++
}
return t
@ -173,29 +173,28 @@ func (p *TreePool) release(t *tree) {
// the tree is 'locked' while not in the pool
type tree struct {
leaves []*node // leaf nodes of the tree, other nodes accessible via parent links
cur int // index of rightmost currently open segment
cursor int // index of rightmost currently open segment
offset int // offset (cursor position) within currently open segment
segment []byte // the rightmost open segment (not complete)
section []byte // the rightmost open section (double segment)
depth int // number of levels
result chan []byte // result channel
hash []byte // to record the result
span []byte // The span of the data subsumed under the chunk
}

// node is a reuseable segment hasher representing a node in a BMT
type node struct {
isLeft bool // whether it is left side of the parent double segment
parent *node // pointer to parent node in the BMT
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte // this is where the content segment is set
isLeft bool // whether it is left side of the parent double segment
parent *node // pointer to parent node in the BMT
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte // this is where the two children sections are written
hasher hash.Hash // preconstructed hasher on nodes
}

// newNode constructs a segment hasher node in the BMT (used by newTree)
func newNode(index int, parent *node) *node {
func newNode(index int, parent *node, hasher hash.Hash) *node {
return &node{
parent: parent,
isLeft: index%2 == 0,
hasher: hasher,
}
}

@ -253,16 +252,21 @@ func (t *tree) draw(hash []byte) string {

// newTree initialises a tree by building up the nodes of a BMT
// - segment size is stipulated to be the size of the hash
func newTree(segmentSize, depth int) *tree {
n := newNode(0, nil)
func newTree(segmentSize, depth int, hashfunc func() hash.Hash) *tree {
n := newNode(0, nil, hashfunc())
prevlevel := []*node{n}
// iterate over levels and creates 2^(depth-level) nodes
// the 0 level is on double segment sections so we start at depth - 2 since
count := 2
for level := depth - 2; level >= 0; level-- {
nodes := make([]*node, count)
for i := 0; i < count; i++ {
parent := prevlevel[i/2]
nodes[i] = newNode(i, parent)
var hasher hash.Hash
if level == 0 {
hasher = hashfunc()
}
nodes[i] = newNode(i, parent, hasher)
}
prevlevel = nodes
count *= 2
@ -270,13 +274,12 @@ func newTree(segmentSize, depth int) *tree {
// the datanode level is the nodes on the last level
return &tree{
leaves: prevlevel,
result: make(chan []byte, 1),
segment: make([]byte, segmentSize),
result: make(chan []byte),
section: make([]byte, 2*segmentSize),
}
}

// methods needed by hash.Hash
// methods needed to implement hash.Hash

// Size returns the size
func (h *Hasher) Size() int {
@ -285,63 +288,40 @@ func (h *Hasher) Size() int {

// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
return h.pool.SegmentSize
return 2 * h.pool.SegmentSize
}

// Hash hashes the data and the span using the bmt hasher
func Hash(h *Hasher, span, data []byte) []byte {
h.ResetWithLength(span)
h.Write(data)
return h.Sum(nil)
}

// Datalength returns the maximum data size that is hashed by the hasher =
// segment count times segment size
func (h *Hasher) DataLength() int {
return h.pool.Datalength
}

// Sum returns the hash of the buffer
// Sum returns the BMT root hash of the buffer
// using Sum presupposes sequential synchronous writes (io.Writer interface)
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
// caller must make sure Sum is not called concurrently with Write, writeSection
// and WriteSegment (TODO:)
func (h *Hasher) Sum(b []byte) (r []byte) {
return h.sum(b, true, true)
}

// sum implements Sum taking parameters
// * if the tree is released right away
// * if sequential write is used (can read sections)
func (h *Hasher) sum(b []byte, release, section bool) (r []byte) {
t := h.bmt
bh := h.pool.hasher()
go h.writeSection(t.cur, t.section, true)
bmtHash := <-t.result
func (h *Hasher) Sum(b []byte) (s []byte) {
t := h.getTree()
// write the last section with final flag set to true
go h.writeSection(t.cursor, t.section, true, true)
// wait for the result
s = <-t.result
span := t.span
// fmt.Println(t.draw(bmtHash))
if release {
h.releaseTree()
}
// release the tree resource back to the pool
h.releaseTree()
// b + sha3(span + BMT(pure_chunk))
if span == nil {
return append(b, bmtHash...)
if len(span) == 0 {
return append(b, s...)
}
return doHash(bh, b, span, bmtHash)
return doSum(h.pool.hasher(), b, span, s)
}

// Hasher implements the SwarmHash interface
// methods needed to implement the SwarmHash and the io.Writer interfaces

// Hasher implements the io.Writer interface

// Write fills the buffer to hash,
// with every full segment calls writeSection
// Write calls sequentially add to the buffer to be hashed,
// with every full segment calls writeSection in a go routine
func (h *Hasher) Write(b []byte) (int, error) {
l := len(b)
if l <= 0 {
if l == 0 {
return 0, nil
}
t := h.bmt
t := h.getTree()
secsize := 2 * h.pool.SegmentSize
// calculate length of missing bit to complete current open section
smax := secsize - t.offset
@ -359,20 +339,21 @@ func (h *Hasher) Write(b []byte) (int, error) {
return l, nil
}
} else {
if t.cur == h.pool.SegmentCount*2 {
// if end of a section
if t.cursor == h.pool.SegmentCount*2 {
return 0, nil
}
}
// read full segments and the last possibly partial segment from the input buffer
// read full sections and the last possibly partial section from the input buffer
for smax < l {
// section complete; push to tree asynchronously
go h.writeSection(t.cur, t.section, false)
go h.writeSection(t.cursor, t.section, true, false)
// reset section
t.section = make([]byte, secsize)
// copy from imput buffer at smax to right half of section
// copy from input buffer at smax to right half of section
copy(t.section, b[smax:])
// advance cursor
t.cur++
t.cursor++
// smax here represents successive offsets in the input buffer
smax += secsize
}
@ -382,83 +363,225 @@ func (h *Hasher) Write(b []byte) (int, error) {

// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
h.getTree()
h.releaseTree()
}

// Hasher implements the SwarmHash interface
// methods needed to implement the SwarmHash interface

// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash, i.e., span
func (h *Hasher) ResetWithLength(span []byte) {
h.Reset()
h.bmt.span = span
h.getTree().span = span
}

// releaseTree gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
t := h.bmt
if t != nil {
t.cur = 0
if t == nil {
return
}
h.bmt = nil
go func() {
t.cursor = 0
t.offset = 0
t.span = nil
t.hash = nil
h.bmt = nil
t.section = make([]byte, h.pool.SegmentSize*2)
t.segment = make([]byte, h.pool.SegmentSize)
select {
case <-t.result:
default:
}
h.pool.release(t)
}()
}

// NewAsyncWriter extends Hasher with an interface for concurrent segment/section writes
func (h *Hasher) NewAsyncWriter(double bool) *AsyncHasher {
secsize := h.pool.SegmentSize
if double {
secsize *= 2
}
write := func(i int, section []byte, final bool) {
h.writeSection(i, section, double, final)
}
return &AsyncHasher{
Hasher: h,
double: double,
secsize: secsize,
write: write,
}
}

// TODO: writeSegment writes the ith segment into the BMT tree
// func (h *Hasher) writeSegment(i int, s []byte) {
// go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s)
// }
// SectionWriter is an asynchronous segment/section writer interface
type SectionWriter interface {
Reset() // standard init to be called before reuse
Write(index int, data []byte) // write into section of index
Sum(b []byte, length int, span []byte) []byte // returns the hash of the buffer
SectionSize() int // size of the async section unit to use
}
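The asynchronous path is driven through this interface: sections may be written in any order, and Sum may be issued before all sections have arrived, blocking until the tree is complete. A rough usage sketch, following the asyncHash helper in the test file below (the idxs/segments arguments are assumed to be the section indexes and payloads of a single chunk):

// asyncChunkHash writes the sections of one chunk in arbitrary order and
// then asks for the digest; Sum blocks until every section has been written.
func asyncChunkHash(pool *bmt.TreePool, idxs []int, segments [][]byte, length int, span []byte) []byte {
	sw := bmt.New(pool).NewAsyncWriter(false) // false: single-segment sections
	sw.Reset()
	for _, i := range idxs {
		sw.Write(i, segments[i]) // order-agnostic; may also be issued from several goroutines
	}
	return sw.Sum(nil, length, span)
}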

// AsyncHasher extends BMT Hasher with an asynchronous segment/section writer interface
// AsyncHasher is unsafe and does not check indexes and section data lengths
// it must be used with the right indexes and length and the right number of sections
//
// behaviour is undefined if
// * non-final sections are shorter or longer than secsize
// * if final section does not match length
// * write a section with index that is higher than length/secsize
// * set length in Sum call when length/secsize < maxsec
//
// * if Sum() is not called on a Hasher that is fully written
// a process will block, can be terminated with Reset
// * it will not leak processes if not all sections are written but it blocks
// and keeps the resource which can be released calling Reset()
type AsyncHasher struct {
*Hasher // extends the Hasher
mtx sync.Mutex // to lock the cursor access
double bool // whether to use double segments (call Hasher.writeSection)
secsize int // size of base section (size of hash or double)
write func(i int, section []byte, final bool)
}

// methods needed to implement AsyncWriter

// SectionSize returns the size of async section unit to use
func (sw *AsyncHasher) SectionSize() int {
return sw.secsize
}

// Write writes the i-th section of the BMT base
// this function can and is meant to be called concurrently
// it sets max segment threadsafely
func (sw *AsyncHasher) Write(i int, section []byte) {
sw.mtx.Lock()
defer sw.mtx.Unlock()
t := sw.getTree()
// cursor keeps track of the rightmost section written so far
// if index is lower than cursor then just write non-final section as is
if i < t.cursor {
// if index is not the rightmost, safe to write section
go sw.write(i, section, false)
return
}
// if there is a previous rightmost section safe to write section
if t.offset > 0 {
if i == t.cursor {
// i==cursor implies cursor was set by Hash call so we can write section as final one
// since it can be shorter, first we copy it to the padded buffer
t.section = make([]byte, sw.secsize)
copy(t.section, section)
go sw.write(i, t.section, true)
return
}
// the rightmost section just changed, so we write the previous one as non-final
go sw.write(t.cursor, t.section, false)
}
// set i as the index of the rightmost section written so far
// set t.offset to cursor*secsize+1
t.cursor = i
t.offset = i*sw.secsize + 1
t.section = make([]byte, sw.secsize)
copy(t.section, section)
}

// Sum can be called any time once the length and the span is known
// potentially even before all segments have been written
// in such cases Sum will block until all segments are present and
// the hash for the length can be calculated.
//
// b: digest is appended to b
// length: known length of the input (unsafe; undefined if out of range)
// meta: metadata to hash together with BMT root for the final digest
// e.g., span for protection against existential forgery
func (sw *AsyncHasher) Sum(b []byte, length int, meta []byte) (s []byte) {
sw.mtx.Lock()
t := sw.getTree()
if length == 0 {
sw.mtx.Unlock()
s = sw.pool.zerohashes[sw.pool.Depth]
} else {
// for non-zero input the rightmost section is written to the tree asynchronously
// if the actual last section has been written (t.cursor == length/t.secsize)
maxsec := (length - 1) / sw.secsize
if t.offset > 0 {
go sw.write(t.cursor, t.section, maxsec == t.cursor)
}
// set cursor to maxsec so final section is written when it arrives
t.cursor = maxsec
t.offset = length
result := t.result
sw.mtx.Unlock()
// wait for the result or reset
s = <-result
}
// release the tree back to the pool
sw.releaseTree()
// if no meta is given just append digest to b
if len(meta) == 0 {
return append(b, s...)
}
// hash together meta and BMT root hash using the pools
return doSum(sw.pool.hasher(), b, meta, s)
}

// writeSection writes the hash of i-th section into level 1 node of the BMT tree
func (h *Hasher) writeSection(i int, section []byte, final bool) {
func (h *Hasher) writeSection(i int, section []byte, double bool, final bool) {
// select the leaf node for the section
n := h.bmt.leaves[i]
isLeft := n.isLeft
n = n.parent
bh := h.pool.hasher()
// hash the section
s := doHash(bh, nil, section)
var n *node
var isLeft bool
var hasher hash.Hash
var level int
t := h.getTree()
if double {
level++
n = t.leaves[i]
hasher = n.hasher
isLeft = n.isLeft
n = n.parent
// hash the section
section = doSum(hasher, nil, section)
} else {
n = t.leaves[i/2]
hasher = n.hasher
isLeft = i%2 == 0
}
// write hash into parent node
if final {
// for the last segment use writeFinalNode
h.writeFinalNode(1, n, bh, isLeft, s)
h.writeFinalNode(level, n, hasher, isLeft, section)
} else {
h.writeNode(n, bh, isLeft, s)
h.writeNode(n, hasher, isLeft, section)
}
}

// writeNode pushes the data to the node
// if it is the first of 2 sisters written the routine returns
// if it is the first of 2 sisters written, the routine terminates
// if it is the second, it calculates the hash and writes it
// to the parent node recursively
// since hashing the parent is synchronous the same hasher can be used
func (h *Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) {
level := 1
for {
// at the root of the bmt just write the result to the result channel
if n == nil {
h.bmt.result <- s
h.getTree().result <- s
return
}
// otherwise assign child hash to branc
// otherwise assign child hash to left or right segment
if isLeft {
n.left = s
} else {
n.right = s
}
// the child-thread first arriving will quit
// the child-thread first arriving will terminate
if n.toggle() {
return
}
// the thread coming later now can be sure both left and right children are written
// it calculates the hash of left|right and pushes it to the parent
s = doHash(bh, nil, n.left, n.right)
// the thread coming second now can be sure both left and right children are written
// so it calculates the hash of left|right and pushes it to the parent
s = doSum(bh, nil, n.left, n.right)
isLeft = n.isLeft
n = n.parent
level++
@ -476,7 +599,7 @@ func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s
// at the root of the bmt just write the result to the result channel
if n == nil {
if s != nil {
h.bmt.result <- s
h.getTree().result <- s
}
return
}
@ -485,25 +608,28 @@ func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s
// coming from left sister branch
// when the final section's path is going via left child node
// we include an all-zero subtree hash for the right level and toggle the node.
// when the path is going through right child node, nothing to do
n.right = h.pool.zerohashes[level]
if s != nil {
n.left = s
// if a left final node carries a hash, it must be the first (and only thread)
// so the toggle is already in passive state, no need to call it
// yet thread needs to carry on pushing hash to parent
noHash = false
} else {
// if again first thread then propagate nil and calculate no hash
noHash = n.toggle()
}
} else {
// right sister branch
// if s is nil, then thread arrived first at previous node and here there will be two,
// so no need to do anything
if s != nil {
// if hash was pushed from right child node, write right segment change state
n.right = s
// if toggle is true, we arrived first so no hashing just push nil to parent
noHash = n.toggle()

} else {
// if s is nil, then thread arrived first at previous node and here there will be two,
// so no need to do anything and keep s = nil for parent
noHash = true
}
}
@ -513,15 +639,16 @@ func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s
if noHash {
s = nil
} else {
s = doHash(bh, nil, n.left, n.right)
s = doSum(bh, nil, n.left, n.right)
}
// iterate to parent
isLeft = n.isLeft
n = n.parent
level++
}
}

// getTree obtains a BMT resource by reserving one from the pool
// getTree obtains a BMT resource by reserving one from the pool and assigns it to the bmt field
func (h *Hasher) getTree() *tree {
if h.bmt != nil {
return h.bmt
@ -539,7 +666,7 @@ func (n *node) toggle() bool {
}

// calculates the hash of the data using hash.Hash
func doHash(h hash.Hash, b []byte, data ...[]byte) []byte {
func doSum(h hash.Hash, b []byte, data ...[]byte) []byte {
h.Reset()
for _, v := range data {
h.Write(v)
@ -547,6 +674,7 @@ func doHash(h hash.Hash, b []byte, data ...[]byte) []byte {
return h.Sum(b)
}

// hashstr is a pretty printer for bytes used in tree.draw
func hashstr(b []byte) string {
end := len(b)
if end > 4 {

@ -39,13 +39,12 @@ var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65,
// calculates the Keccak256 SHA3 hash of the data
func sha3hash(data ...[]byte) []byte {
h := sha3.NewKeccak256()
return doHash(h, nil, data...)
return doSum(h, nil, data...)
}

// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
// some small data lengths
func TestRefHasher(t *testing.T) {

// the test struct is used to specify the expected BMT hash for
// segment counts between from and to and lengths from 1 to datalength
type test struct {
@ -129,7 +128,7 @@ func TestRefHasher(t *testing.T) {
}
}

// tests if hasher responds with correct hash
// tests if hasher responds with correct hash comparing the reference implementation return value
func TestHasherEmptyData(t *testing.T) {
hasher := sha3.NewKeccak256
var data []byte
@ -140,7 +139,7 @@ func TestHasherEmptyData(t *testing.T) {
bmt := New(pool)
rbmt := NewRefHasher(hasher, count)
refHash := rbmt.Hash(data)
expHash := Hash(bmt, nil, data)
expHash := syncHash(bmt, nil, data)
if !bytes.Equal(expHash, refHash) {
t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
}
@ -148,7 +147,8 @@ func TestHasherEmptyData(t *testing.T) {
}
}

func TestHasherCorrectness(t *testing.T) {
// tests sequential write with entire max size written in one go
func TestSyncHasherCorrectness(t *testing.T) {
data := newData(BufferSize)
hasher := sha3.NewKeccak256
size := hasher().Size()
@ -157,7 +157,7 @@ func TestHasherCorrectness(t *testing.T) {
for _, count := range counts {
t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) {
max := count * size
incr := 1
var incr int
capacity := 1
pool := NewTreePool(hasher, count, capacity)
defer pool.Drain(0)
@ -173,6 +173,44 @@ func TestHasherCorrectness(t *testing.T) {
}
}

// tests order-neutral concurrent writes with entire max size written in one go
func TestAsyncCorrectness(t *testing.T) {
data := newData(BufferSize)
hasher := sha3.NewKeccak256
size := hasher().Size()
whs := []whenHash{first, last, random}

for _, double := range []bool{false, true} {
for _, wh := range whs {
for _, count := range counts {
t.Run(fmt.Sprintf("double_%v_hash_when_%v_segments_%v", double, wh, count), func(t *testing.T) {
max := count * size
var incr int
capacity := 1
pool := NewTreePool(hasher, count, capacity)
defer pool.Drain(0)
for n := 1; n <= max; n += incr {
incr = 1 + rand.Intn(5)
bmt := New(pool)
d := data[:n]
rbmt := NewRefHasher(hasher, count)
exp := rbmt.Hash(d)
got := syncHash(bmt, nil, d)
if !bytes.Equal(got, exp) {
t.Fatalf("wrong sync hash for datalength %v: expected %x (ref), got %x", n, exp, got)
}
sw := bmt.NewAsyncWriter(double)
got = asyncHashRandom(sw, nil, d, wh)
if !bytes.Equal(got, exp) {
t.Fatalf("wrong async hash for datalength %v: expected %x, got %x", n, exp, got)
}
}
})
}
}
}
}

// Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize
func TestHasherReuse(t *testing.T) {
t.Run(fmt.Sprintf("poolsize_%d", 1), func(t *testing.T) {
@ -183,6 +221,7 @@ func TestHasherReuse(t *testing.T) {
})
}

// tests if bmt reuse is not corrupting result
func testHasherReuse(poolsize int, t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, poolsize)
@ -191,7 +230,7 @@ func testHasherReuse(poolsize int, t *testing.T) {

for i := 0; i < 100; i++ {
data := newData(BufferSize)
n := rand.Intn(bmt.DataLength())
n := rand.Intn(bmt.Size())
err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount)
if err != nil {
t.Fatal(err)
@ -199,8 +238,8 @@ func testHasherReuse(poolsize int, t *testing.T) {
}
}

// Tests if pool can be cleanly reused even in concurrent use
func TestBMTHasherConcurrentUse(t *testing.T) {
// Tests if pool can be cleanly reused even in concurrent use by several hasher
func TestBMTConcurrentUse(t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
defer pool.Drain(0)
@ -211,7 +250,7 @@ func TestBMTHasherConcurrentUse(t *testing.T) {
go func() {
bmt := New(pool)
data := newData(BufferSize)
n := rand.Intn(bmt.DataLength())
n := rand.Intn(bmt.Size())
errc <- testHasherCorrectness(bmt, hasher, data, n, 128)
}()
}
@ -234,7 +273,7 @@ LOOP:

// Tests BMT Hasher io.Writer interface is working correctly
// even multiple short random write buffers
func TestBMTHasherWriterBuffers(t *testing.T) {
func TestBMTWriterBuffers(t *testing.T) {
hasher := sha3.NewKeccak256

for _, count := range counts {
@ -247,7 +286,7 @@ func TestBMTHasherWriterBuffers(t *testing.T) {
data := newData(n)
rbmt := NewRefHasher(hasher, count)
refHash := rbmt.Hash(data)
expHash := Hash(bmt, nil, data)
expHash := syncHash(bmt, nil, data)
if !bytes.Equal(expHash, refHash) {
t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
}
@ -308,57 +347,65 @@ func testHasherCorrectness(bmt *Hasher, hasher BaseHasherFunc, d []byte, n, coun
data := d[:n]
rbmt := NewRefHasher(hasher, count)
exp := sha3hash(span, rbmt.Hash(data))
got := Hash(bmt, span, data)
got := syncHash(bmt, span, data)
if !bytes.Equal(got, exp) {
return fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
}
return err
}

func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
//
func BenchmarkBMT(t *testing.B) {
for size := 4096; size >= 128; size /= 2 {
t.Run(fmt.Sprintf("%v_size_%v", "SHA3", size), func(t *testing.B) {
benchmarkSHA3(t, size)
})
t.Run(fmt.Sprintf("%v_size_%v", "Baseline", size), func(t *testing.B) {
benchmarkBMTBaseline(t, size)
})
t.Run(fmt.Sprintf("%v_size_%v", "REF", size), func(t *testing.B) {
benchmarkRefHasher(t, size)
})
t.Run(fmt.Sprintf("%v_size_%v", "BMT", size), func(t *testing.B) {
benchmarkBMT(t, size)
})
}
}

func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
type whenHash = int

func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
const (
first whenHash = iota
last
random
)

func BenchmarkBMTHasher_4k(t *testing.B) { benchmarkBMTHasher(4096, t) }
func BenchmarkBMTHasher_2k(t *testing.B) { benchmarkBMTHasher(4096/2, t) }
func BenchmarkBMTHasher_1k(t *testing.B) { benchmarkBMTHasher(4096/4, t) }
func BenchmarkBMTHasher_512b(t *testing.B) { benchmarkBMTHasher(4096/8, t) }
func BenchmarkBMTHasher_256b(t *testing.B) { benchmarkBMTHasher(4096/16, t) }
func BenchmarkBMTHasher_128b(t *testing.B) { benchmarkBMTHasher(4096/32, t) }
func BenchmarkBMTAsync(t *testing.B) {
whs := []whenHash{first, last, random}
for size := 4096; size >= 128; size /= 2 {
for _, wh := range whs {
for _, double := range []bool{false, true} {
t.Run(fmt.Sprintf("double_%v_hash_when_%v_size_%v", double, wh, size), func(t *testing.B) {
benchmarkBMTAsync(t, size, wh, double)
})
}
}
}
}

func BenchmarkBMTHasherNoPool_4k(t *testing.B) { benchmarkBMTHasherPool(1, 4096, t) }
func BenchmarkBMTHasherNoPool_2k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/2, t) }
func BenchmarkBMTHasherNoPool_1k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/4, t) }
func BenchmarkBMTHasherNoPool_512b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/8, t) }
func BenchmarkBMTHasherNoPool_256b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/16, t) }
func BenchmarkBMTHasherNoPool_128b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/32, t) }

func BenchmarkBMTHasherPool_4k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096, t) }
func BenchmarkBMTHasherPool_2k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/2, t) }
func BenchmarkBMTHasherPool_1k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/4, t) }
func BenchmarkBMTHasherPool_512b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/8, t) }
func BenchmarkBMTHasherPool_256b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/16, t) }
func BenchmarkBMTHasherPool_128b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/32, t) }
func BenchmarkPool(t *testing.B) {
caps := []int{1, PoolSize}
for size := 4096; size >= 128; size /= 2 {
for _, c := range caps {
t.Run(fmt.Sprintf("poolsize_%v_size_%v", c, size), func(t *testing.B) {
benchmarkPool(t, c, size)
})
}
}
}

// benchmarks simple sha3 hash on chunks
func benchmarkSHA3(n int, t *testing.B) {
func benchmarkSHA3(t *testing.B, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
h := hasher()
@ -366,9 +413,7 @@ func benchmarkSHA3(n int, t *testing.B) {
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
h.Reset()
h.Write(data)
h.Sum(nil)
doSum(h, nil, data)
}
}

@ -377,7 +422,7 @@ func benchmarkSHA3(n int, t *testing.B) {
// doing it on n PoolSize each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
func benchmarkBMTBaseline(t *testing.B, n int) {
hasher := sha3.NewKeccak256
hashSize := hasher().Size()
data := newData(hashSize)
@ -394,9 +439,7 @@ func benchmarkBMTBaseline(n int, t *testing.B) {
defer wg.Done()
h := hasher()
for atomic.AddInt32(&i, 1) < count {
h.Reset()
h.Write(data)
h.Sum(nil)
doSum(h, nil, data)
}
}()
}
@ -405,21 +448,39 @@ func benchmarkBMTBaseline(n int, t *testing.B) {
}

// benchmarks BMT Hasher
func benchmarkBMTHasher(n int, t *testing.B) {
func benchmarkBMT(t *testing.B, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
bmt := New(pool)

t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
bmt := New(pool)
Hash(bmt, nil, data)
syncHash(bmt, nil, data)
}
}

// benchmarks BMT hasher with asynchronous concurrent segment/section writes
func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
bmt := New(pool).NewAsyncWriter(double)
idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
shuffle(len(idxs), func(i int, j int) {
idxs[i], idxs[j] = idxs[j], idxs[i]
})

t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
asyncHash(bmt, nil, n, wh, idxs, segments)
}
}

// benchmarks 100 concurrent bmt hashes with pool capacity
func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
func benchmarkPool(t *testing.B, poolsize, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, poolsize)
@ -434,7 +495,7 @@ func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
go func() {
defer wg.Done()
bmt := New(pool)
Hash(bmt, nil, data)
syncHash(bmt, nil, data)
}()
}
wg.Wait()
@ -442,7 +503,7 @@ func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
}

// benchmarks the reference hasher
func benchmarkRefHasher(n int, t *testing.B) {
func benchmarkRefHasher(t *testing.B, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
rbmt := NewRefHasher(hasher, 128)
@ -462,3 +523,93 @@ func newData(bufferSize int) []byte {
}
return data
}

// Hash hashes the data and the span using the bmt hasher
func syncHash(h *Hasher, span, data []byte) []byte {
h.ResetWithLength(span)
h.Write(data)
return h.Sum(nil)
}

func splitAndShuffle(secsize int, data []byte) (idxs []int, segments [][]byte) {
l := len(data)
n := l / secsize
if l%secsize > 0 {
n++
}
for i := 0; i < n; i++ {
idxs = append(idxs, i)
end := (i + 1) * secsize
if end > l {
end = l
}
section := data[i*secsize : end]
segments = append(segments, section)
}
shuffle(n, func(i int, j int) {
idxs[i], idxs[j] = idxs[j], idxs[i]
})
return idxs, segments
}

// splits the input data performs a random shuffle to mock async section writes
func asyncHashRandom(bmt SectionWriter, span []byte, data []byte, wh whenHash) (s []byte) {
idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
return asyncHash(bmt, span, len(data), wh, idxs, segments)
}

// mock for async section writes for BMT SectionWriter
// requires a permutation (a random shuffle) of list of all indexes of segments
// and writes them in order to the appropriate section
// the Sum function is called according to the wh parameter (first, last, random [relative to segment writes])
func asyncHash(bmt SectionWriter, span []byte, l int, wh whenHash, idxs []int, segments [][]byte) (s []byte) {
bmt.Reset()
if l == 0 {
return bmt.Sum(nil, l, span)
}
c := make(chan []byte, 1)
hashf := func() {
c <- bmt.Sum(nil, l, span)
}
maxsize := len(idxs)
var r int
if wh == random {
r = rand.Intn(maxsize)
}
for i, idx := range idxs {
bmt.Write(idx, segments[idx])
if (wh == first || wh == random) && i == r {
go hashf()
}
}
if wh == last {
return bmt.Sum(nil, l, span)
}
return <-c
}

// this is also in swarm/network_test.go
// shuffle pseudo-randomizes the order of elements.
// n is the number of elements. Shuffle panics if n < 0.
// swap swaps the elements with indexes i and j.
func shuffle(n int, swap func(i, j int)) {
if n < 0 {
panic("invalid argument to Shuffle")
}

// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
// Shuffle really ought not be called with n that doesn't fit in 32 bits.
// Not only will it take a very long time, but with 2³¹! possible permutations,
// there's no way that any PRNG can have a big enough internal state to
// generate even a minuscule percentage of the possible permutations.
// Nevertheless, the right API signature accepts an int n, so handle it as best we can.
i := n - 1
for ; i > 1<<31-1-1; i-- {
j := int(rand.Int63n(int64(i + 1)))
swap(i, j)
}
for ; i > 0; i-- {
j := int(rand.Int31n(int32(i + 1)))
swap(i, j)
}
}
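This helper mirrors math/rand.Shuffle, which is available from Go 1.10 onwards; on a new enough toolchain the same permutation can be expressed directly (a hedged note only, since the toolchain targeted by this tree may predate it):

rand.Shuffle(len(idxs), func(i, j int) {
	idxs[i], idxs[j] = idxs[j], idxs[i]
})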

@ -120,6 +120,10 @@ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {

log.Trace("swarmfs mount: traversing manifest map")
for suffix, entry := range manifestEntryMap {
if suffix == "" { //empty suffix means that the file has no name - i.e. this is the default entry in a manifest. Since we cannot have files without a name, let us ignore this entry
log.Warn("Manifest has an empty-path (default) entry which will be ignored in FUSE mount.")
continue
}
addr := common.Hex2Bytes(entry.Hash)
fullpath := "/" + suffix
basepath := filepath.Dir(fullpath)

81 swarm/network/simulation/bucket.go Normal file
@ -0,0 +1,81 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
"github.com/ethereum/go-ethereum/p2p/discover"
)

// BucketKey is the type that should be used for keys in simulation buckets.
type BucketKey string

// NodeItem returns an item set in ServiceFunc function for a particular node.
func (s *Simulation) NodeItem(id discover.NodeID, key interface{}) (value interface{}, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()

if _, ok := s.buckets[id]; !ok {
return nil, false
}
return s.buckets[id].Load(key)
}

// SetNodeItem sets a new item associated with the node with provided NodeID.
// Buckets should be used to avoid managing separate simulation global state.
func (s *Simulation) SetNodeItem(id discover.NodeID, key interface{}, value interface{}) {
s.mu.Lock()
defer s.mu.Unlock()

s.buckets[id].Store(key, value)
}
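In practice items usually enter the bucket inside the ServiceFunc that constructs each node's service and are read back with NodeItem, as the test file below demonstrates. A condensed, hedged sketch of that flow; the "bzz" key and the newClient/newService helpers are hypothetical placeholders, not part of this package:

sim := simulation.New(map[string]simulation.ServiceFunc{
	"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
		// store anything the test will need later under a BucketKey
		b.Store(simulation.BucketKey("client"), newClient(ctx.Config.ID))
		return newService(), nil, nil
	},
})
defer sim.Close()
id, _ := sim.AddNode()
v, ok := sim.NodeItem(id, simulation.BucketKey("client")) // retrieve the stored client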

// NodesItems returns a map of items from all nodes that are all set under the
// same BucketKey.
func (s *Simulation) NodesItems(key interface{}) (values map[discover.NodeID]interface{}) {
s.mu.RLock()
defer s.mu.RUnlock()

ids := s.NodeIDs()
values = make(map[discover.NodeID]interface{}, len(ids))
for _, id := range ids {
if _, ok := s.buckets[id]; !ok {
continue
}
if v, ok := s.buckets[id].Load(key); ok {
values[id] = v
}
}
return values
}

// UpNodesItems returns a map of items with the same BucketKey from all nodes that are up.
func (s *Simulation) UpNodesItems(key interface{}) (values map[discover.NodeID]interface{}) {
s.mu.RLock()
defer s.mu.RUnlock()

ids := s.UpNodeIDs()
values = make(map[discover.NodeID]interface{})
for _, id := range ids {
if _, ok := s.buckets[id]; !ok {
continue
}
if v, ok := s.buckets[id].Load(key); ok {
values[id] = v
}
}
return values
}

155 swarm/network/simulation/bucket_test.go Normal file
@ -0,0 +1,155 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
"sync"
"testing"

"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

// TestServiceBucket tests all bucket functionalities using subtests.
// It constructs a simulation of two nodes by adding items to their buckets
// in ServiceFunc constructor, then by SetNodeItem. Testing UpNodesItems
// is done by stopping one node and validating availability of its items.
func TestServiceBucket(t *testing.T) {
testKey := "Key"
testValue := "Value"

sim := New(map[string]ServiceFunc{
"noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
b.Store(testKey, testValue+ctx.Config.ID.String())
return newNoopService(), nil, nil
},
})
defer sim.Close()

id1, err := sim.AddNode()
if err != nil {
t.Fatal(err)
}

id2, err := sim.AddNode()
if err != nil {
t.Fatal(err)
}

t.Run("ServiceFunc bucket Store", func(t *testing.T) {
v, ok := sim.NodeItem(id1, testKey)
if !ok {
t.Fatal("bucket item not found")
}
s, ok := v.(string)
if !ok {
t.Fatal("bucket item value is not string")
}
if s != testValue+id1.String() {
t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
}

v, ok = sim.NodeItem(id2, testKey)
if !ok {
t.Fatal("bucket item not found")
}
s, ok = v.(string)
if !ok {
t.Fatal("bucket item value is not string")
}
if s != testValue+id2.String() {
t.Fatalf("expected %q, got %q", testValue+id2.String(), s)
}
})

customKey := "anotherKey"
customValue := "anotherValue"

t.Run("SetNodeItem", func(t *testing.T) {
sim.SetNodeItem(id1, customKey, customValue)

v, ok := sim.NodeItem(id1, customKey)
if !ok {
t.Fatal("bucket item not found")
}
s, ok := v.(string)
if !ok {
t.Fatal("bucket item value is not string")
}
if s != customValue {
t t.Fatalf("expected %q, got %q", customValue, s)
}

v, ok = sim.NodeItem(id2, customKey)
if ok {
t.Fatal("bucket item should not be found")
}
})

if err := sim.StopNode(id2); err != nil {
t.Fatal(err)
}

t.Run("UpNodesItems", func(t *testing.T) {
items := sim.UpNodesItems(testKey)

v, ok := items[id1]
if !ok {
t.Errorf("node 1 item not found")
}
s, ok := v.(string)
if !ok {
t.Fatal("node 1 item value is not string")
}
if s != testValue+id1.String() {
t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
}

v, ok = items[id2]
if ok {
t.Errorf("node 2 item should not be found")
}
})

t.Run("NodeItems", func(t *testing.T) {
items := sim.NodesItems(testKey)

v, ok := items[id1]
if !ok {
t.Errorf("node 1 item not found")
}
s, ok := v.(string)
if !ok {
t.Fatal("node 1 item value is not string")
}
if s != testValue+id1.String() {
t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
}

v, ok = items[id2]
if !ok {
t.Errorf("node 2 item not found")
}
s, ok = v.(string)
if !ok {
t.Fatal("node 1 item value is not string")
}
if s != testValue+id2.String() {
t.Fatalf("expected %q, got %q", testValue+id2.String(), s)
}
})
}
159 swarm/network/simulation/connect.go Normal file
@ -0,0 +1,159 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
"strings"

"github.com/ethereum/go-ethereum/p2p/discover"
)

// ConnectToPivotNode connects the node with provided NodeID
// to the pivot node, already set by Simulation.SetPivotNode method.
// It is useful when constructing a star network topology
// when simulation adds and removes nodes dynamically.
func (s *Simulation) ConnectToPivotNode(id discover.NodeID) (err error) {
pid := s.PivotNodeID()
if pid == nil {
return ErrNoPivotNode
}
return s.connect(*pid, id)
}

// ConnectToLastNode connects the node with provided NodeID
// to the last node that is up, and avoiding connection to self.
// It is useful when constructing a chain network topology
// when simulation adds and removes nodes dynamically.
func (s *Simulation) ConnectToLastNode(id discover.NodeID) (err error) {
ids := s.UpNodeIDs()
l := len(ids)
if l < 2 {
return nil
}
lid := ids[l-1]
if lid == id {
lid = ids[l-2]
}
return s.connect(lid, id)
}

// ConnectToRandomNode connects the node with provided NodeID
// to a random node that is up.
func (s *Simulation) ConnectToRandomNode(id discover.NodeID) (err error) {
n := s.randomUpNode(id)
if n == nil {
return ErrNodeNotFound
}
return s.connect(n.ID, id)
}

// ConnectNodesFull connects all nodes one to another.
// It provides a complete connectivity in the network
// which should be rarely needed.
func (s *Simulation) ConnectNodesFull(ids []discover.NodeID) (err error) {
if ids == nil {
ids = s.UpNodeIDs()
}
l := len(ids)
for i := 0; i < l; i++ {
for j := i + 1; j < l; j++ {
err = s.connect(ids[i], ids[j])
if err != nil {
return err
}
}
}
return nil
}

// ConnectNodesChain connects all nodes in a chain topology.
// If ids argument is nil, all nodes that are up will be connected.
func (s *Simulation) ConnectNodesChain(ids []discover.NodeID) (err error) {
if ids == nil {
ids = s.UpNodeIDs()
}
l := len(ids)
for i := 0; i < l-1; i++ {
err = s.connect(ids[i], ids[i+1])
if err != nil {
return err
}
}
return nil
}

// ConnectNodesRing connects all nodes in a ring topology.
// If ids argument is nil, all nodes that are up will be connected.
func (s *Simulation) ConnectNodesRing(ids []discover.NodeID) (err error) {
if ids == nil {
ids = s.UpNodeIDs()
}
l := len(ids)
if l < 2 {
return nil
}
for i := 0; i < l-1; i++ {
err = s.connect(ids[i], ids[i+1])
if err != nil {
return err
}
}
return s.connect(ids[l-1], ids[0])
}

// ConnectNodesStar connects all nodes in a star topology
// with the center at provided NodeID.
// If ids argument is nil, all nodes that are up will be connected.
func (s *Simulation) ConnectNodesStar(id discover.NodeID, ids []discover.NodeID) (err error) {
if ids == nil {
ids = s.UpNodeIDs()
}
l := len(ids)
for i := 0; i < l; i++ {
if id == ids[i] {
continue
}
err = s.connect(id, ids[i])
if err != nil {
return err
}
}
return nil
}

// ConnectNodesStarPivot connects all nodes in a star topology
// with the center at already set pivot node.
// If ids argument is nil, all nodes that are up will be connected.
func (s *Simulation) ConnectNodesStarPivot(ids []discover.NodeID) (err error) {
id := s.PivotNodeID()
if id == nil {
return ErrNoPivotNode
}
return s.ConnectNodesStar(*id, ids)
}

// connect connects two nodes but ignores already connected error.
func (s *Simulation) connect(oneID, otherID discover.NodeID) error {
return ignoreAlreadyConnectedErr(s.Net.Connect(oneID, otherID))
}

func ignoreAlreadyConnectedErr(err error) error {
if err == nil || strings.Contains(err.Error(), "already connected") {
return nil
}
return err
}
306 swarm/network/simulation/connect_test.go Normal file
@ -0,0 +1,306 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
"testing"

"github.com/ethereum/go-ethereum/p2p/discover"
)

func TestConnectToPivotNode(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

pid, err := sim.AddNode()
if err != nil {
t.Fatal(err)
}

sim.SetPivotNode(pid)

id, err := sim.AddNode()
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

err = sim.ConnectToPivotNode(id)
if err != nil {
t.Fatal(err)
}

if sim.Net.GetConn(id, pid) == nil {
t.Error("node did not connect to pivot node")
}
}

func TestConnectToLastNode(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

n := 10

ids, err := sim.AddNodes(n)
if err != nil {
t.Fatal(err)
}

id, err := sim.AddNode()
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

err = sim.ConnectToLastNode(id)
if err != nil {
t.Fatal(err)
}

for _, i := range ids[:n-2] {
if sim.Net.GetConn(id, i) != nil {
t.Error("node connected to the node that is not the last")
}
}

if sim.Net.GetConn(id, ids[n-1]) == nil {
t.Error("node did not connect to the last node")
}
}

func TestConnectToRandomNode(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

n := 10

ids, err := sim.AddNodes(n)
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

err = sim.ConnectToRandomNode(ids[0])
if err != nil {
t.Fatal(err)
}

var cc int
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
if sim.Net.GetConn(ids[i], ids[j]) != nil {
cc++
}
}
}

if cc != 1 {
t.Errorf("expected one connection, got %v", cc)
}
}

func TestConnectNodesFull(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

ids, err := sim.AddNodes(12)
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

err = sim.ConnectNodesFull(ids)
if err != nil {
t.Fatal(err)
}

testFull(t, sim, ids)
}

func testFull(t *testing.T, sim *Simulation, ids []discover.NodeID) {
n := len(ids)
var cc int
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
if sim.Net.GetConn(ids[i], ids[j]) != nil {
cc++
}
}
}

want := n * (n - 1) / 2

if cc != want {
t.Errorf("expected %v connection, got %v", want, cc)
}
}

func TestConnectNodesChain(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

ids, err := sim.AddNodes(10)
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

err = sim.ConnectNodesChain(ids)
if err != nil {
t.Fatal(err)
}

testChain(t, sim, ids)
}

func testChain(t *testing.T, sim *Simulation, ids []discover.NodeID) {
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := sim.Net.GetConn(ids[i], ids[j])
if i == j-1 {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}

func TestConnectNodesRing(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

ids, err := sim.AddNodes(10)
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

err = sim.ConnectNodesRing(ids)
if err != nil {
t.Fatal(err)
}

testRing(t, sim, ids)
}

func testRing(t *testing.T, sim *Simulation, ids []discover.NodeID) {
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := sim.Net.GetConn(ids[i], ids[j])
if i == j-1 || (i == 0 && j == n-1) {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}

func TestConnectToNodesStar(t *testing.T) {
sim := New(noopServiceFuncMap)
defer sim.Close()

ids, err := sim.AddNodes(10)
if err != nil {
t.Fatal(err)
}

if len(sim.Net.Conns) > 0 {
t.Fatal("no connections should exist after just adding nodes")
}

centerIndex := 2

err = sim.ConnectNodesStar(ids[centerIndex], ids)
if err != nil {
t.Fatal(err)
}

testStar(t, sim, ids, centerIndex)
}

func testStar(t *testing.T, sim *Simulation, ids []discover.NodeID, centerIndex int) {
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := sim.Net.GetConn(ids[i], ids[j])
if i == centerIndex || j == centerIndex {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectToNodesStarPivot(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
ids, err := sim.AddNodes(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(sim.Net.Conns) > 0 {
|
||||
t.Fatal("no connections should exist after just adding nodes")
|
||||
}
|
||||
|
||||
pivotIndex := 4
|
||||
|
||||
sim.SetPivotNode(ids[pivotIndex])
|
||||
|
||||
err = sim.ConnectNodesStarPivot(ids)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testStar(t, sim, ids, pivotIndex)
|
||||
}
|
swarm/network/simulation/events.go (new file, 157 lines)
@ -0,0 +1,157 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
// PeerEvent is the type of the channel returned by Simulation.PeerEvents.
|
||||
type PeerEvent struct {
|
||||
// NodeID is the ID of node that the event is caught on.
|
||||
NodeID discover.NodeID
|
||||
// Event is the event that is caught.
|
||||
Event *p2p.PeerEvent
|
||||
// Error is the error that may have happened during event watching.
|
||||
Error error
|
||||
}
|
||||
|
||||
// PeerEventsFilter defines a filter on PeerEvents so that only events with
// the defined properties are delivered. Use PeerEventsFilter methods to set the required options.
|
||||
type PeerEventsFilter struct {
|
||||
t *p2p.PeerEventType
|
||||
protocol *string
|
||||
msgCode *uint64
|
||||
}
|
||||
|
||||
// NewPeerEventsFilter returns a new PeerEventsFilter instance.
|
||||
func NewPeerEventsFilter() *PeerEventsFilter {
|
||||
return &PeerEventsFilter{}
|
||||
}
|
||||
|
||||
// Type sets the filter to only one peer event type.
|
||||
func (f *PeerEventsFilter) Type(t p2p.PeerEventType) *PeerEventsFilter {
|
||||
f.t = &t
|
||||
return f
|
||||
}
|
||||
|
||||
// Protocol sets the filter to only one message protocol.
|
||||
func (f *PeerEventsFilter) Protocol(p string) *PeerEventsFilter {
|
||||
f.protocol = &p
|
||||
return f
|
||||
}
|
||||
|
||||
// MsgCode sets the filter to only one msg code.
|
||||
func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter {
|
||||
f.msgCode = &c
|
||||
return f
|
||||
}
|
||||
|
||||
// PeerEvents returns a channel of events that are captured by the admin peerEvents
// subscription on nodes with the provided NodeIDs. Additional filters can be set to ignore
// events that are not relevant.
|
||||
func (s *Simulation) PeerEvents(ctx context.Context, ids []discover.NodeID, filters ...*PeerEventsFilter) <-chan PeerEvent {
|
||||
eventC := make(chan PeerEvent)
|
||||
|
||||
for _, id := range ids {
|
||||
s.shutdownWG.Add(1)
|
||||
go func(id discover.NodeID) {
|
||||
defer s.shutdownWG.Done()
|
||||
|
||||
client, err := s.Net.GetNode(id).Client()
|
||||
if err != nil {
|
||||
eventC <- PeerEvent{NodeID: id, Error: err}
|
||||
return
|
||||
}
|
||||
events := make(chan *p2p.PeerEvent)
|
||||
sub, err := client.Subscribe(ctx, "admin", events, "peerEvents")
|
||||
if err != nil {
|
||||
eventC <- PeerEvent{NodeID: id, Error: err}
|
||||
return
|
||||
}
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if err := ctx.Err(); err != nil {
|
||||
select {
|
||||
case eventC <- PeerEvent{NodeID: id, Error: err}:
|
||||
case <-s.Done():
|
||||
}
|
||||
}
|
||||
return
|
||||
case <-s.Done():
|
||||
return
|
||||
case e := <-events:
|
||||
match := len(filters) == 0 // if there are no filters match all events
|
||||
for _, f := range filters {
|
||||
if f.t != nil && *f.t != e.Type {
|
||||
continue
|
||||
}
|
||||
if f.protocol != nil && *f.protocol != e.Protocol {
|
||||
continue
|
||||
}
|
||||
if f.msgCode != nil && e.MsgCode != nil && *f.msgCode != *e.MsgCode {
|
||||
continue
|
||||
}
|
||||
// all filter parameters matched, break the loop
|
||||
match = true
|
||||
break
|
||||
}
|
||||
if match {
|
||||
select {
|
||||
case eventC <- PeerEvent{NodeID: id, Event: e}:
|
||||
case <-ctx.Done():
|
||||
if err := ctx.Err(); err != nil {
|
||||
select {
|
||||
case eventC <- PeerEvent{NodeID: id, Error: err}:
|
||||
case <-s.Done():
|
||||
}
|
||||
}
|
||||
return
|
||||
case <-s.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
case err := <-sub.Err():
|
||||
if err != nil {
|
||||
select {
|
||||
case eventC <- PeerEvent{NodeID: id, Error: err}:
|
||||
case <-ctx.Done():
|
||||
if err := ctx.Err(); err != nil {
|
||||
select {
|
||||
case eventC <- PeerEvent{NodeID: id, Error: err}:
|
||||
case <-s.Done():
|
||||
}
|
||||
}
|
||||
return
|
||||
case <-s.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}(id)
|
||||
}
|
||||
|
||||
return eventC
|
||||
}
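
The snippet below is an illustrative sketch and not part of this commit: it shows one way the PeerEvents API above could be consumed from within the simulation package, filtering for "peer added" events only. It assumes an existing *Simulation value, the p2p.PeerEventTypeAdd constant from the p2p package, and the usual context, time and log imports; the committed examples in example_test.go further down cover similar usage.

func watchPeerAdds(sim *Simulation) {
    // Watch only "peer added" events on every node for up to ten seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    events := sim.PeerEvents(ctx, sim.NodeIDs(),
        NewPeerEventsFilter().Type(p2p.PeerEventTypeAdd),
    )
    for e := range events {
        if e.Error != nil {
            if e.Error == context.Canceled || e.Error == context.DeadlineExceeded {
                // The watching context expired; no further events will arrive.
                return
            }
            log.Error("peer event", "err", e.Error)
            continue
        }
        log.Info("peer added", "node", e.NodeID, "peer", e.Event.Peer)
    }
}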
|
swarm/network/simulation/events_test.go (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestPeerEvents creates a simulation, adds two nodes,
// registers for peer events, connects the nodes in a chain
// and waits for the number of connection events to
// be received.
|
||||
func TestPeerEvents(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodes(2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
events := sim.PeerEvents(ctx, sim.NodeIDs())
|
||||
|
||||
// two nodes -> two connection events
|
||||
expectedEventCount := 2
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(expectedEventCount)
|
||||
|
||||
go func() {
|
||||
for e := range events {
|
||||
if e.Error != nil {
|
||||
if e.Error == context.Canceled {
|
||||
return
|
||||
}
|
||||
t.Error(e.Error)
|
||||
continue
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
}()
|
||||
|
||||
err = sim.ConnectNodesChain(sim.NodeIDs())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestPeerEventsTimeout(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodes(2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
events := sim.PeerEvents(ctx, sim.NodeIDs())
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for e := range events {
|
||||
if e.Error == context.Canceled {
|
||||
return
|
||||
}
|
||||
if e.Error == context.DeadlineExceeded {
|
||||
close(done)
|
||||
return
|
||||
} else {
|
||||
t.Fatal(e.Error)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
t.Error("no context deadline received")
|
||||
case <-done:
|
||||
// all good, context deadline detected
|
||||
}
|
||||
}
|
swarm/network/simulation/example_test.go (new file, 140 lines)
@ -0,0 +1,140 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
||||
)
|
||||
|
||||
// Every node can have a Kademlia associated with it using the node bucket under
// the BucketKeyKademlia key. This allows WaitTillHealthy to be used to block until
// all nodes have healthy Kademlias.
|
||||
func ExampleSimulation_WaitTillHealthy() {
|
||||
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
|
||||
addr := network.NewAddrFromNodeID(ctx.Config.ID)
|
||||
hp := network.NewHiveParams()
|
||||
hp.Discovery = false
|
||||
config := &network.BzzConfig{
|
||||
OverlayAddr: addr.Over(),
|
||||
UnderlayAddr: addr.Under(),
|
||||
HiveParams: hp,
|
||||
}
|
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||
// store kademlia in node's bucket under BucketKeyKademlia
|
||||
// so that it can be found by WaitTillHealthy method.
|
||||
b.Store(simulation.BucketKeyKademlia, kad)
|
||||
return network.NewBzz(config, kad, nil, nil, nil), nil, nil
|
||||
},
|
||||
})
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodesAndConnectRing(10)
|
||||
if err != nil {
|
||||
// handle error properly...
|
||||
panic(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
ill, err := sim.WaitTillHealthy(ctx, 2)
|
||||
if err != nil {
|
||||
// inspect the latest detected not healthy kademlias
|
||||
for id, kad := range ill {
|
||||
fmt.Println("Node", id)
|
||||
fmt.Println(kad.String())
|
||||
}
|
||||
// handle error...
|
||||
}
|
||||
|
||||
// continue with the test
|
||||
}
|
||||
|
||||
// Watch all peer events in the simulation network, by receiving from a channel.
|
||||
func ExampleSimulation_PeerEvents() {
|
||||
sim := simulation.New(nil)
|
||||
defer sim.Close()
|
||||
|
||||
events := sim.PeerEvents(context.Background(), sim.NodeIDs())
|
||||
|
||||
go func() {
|
||||
for e := range events {
|
||||
if e.Error != nil {
|
||||
log.Error("peer event", "err", e.Error)
|
||||
continue
|
||||
}
|
||||
log.Info("peer event", "node", e.NodeID, "peer", e.Event.Peer, "msgcode", e.Event.MsgCode)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Detect when a node drops a peer.
|
||||
func ExampleSimulation_PeerEvents_disconnections() {
|
||||
sim := simulation.New(nil)
|
||||
defer sim.Close()
|
||||
|
||||
disconnections := sim.PeerEvents(
|
||||
context.Background(),
|
||||
sim.NodeIDs(),
|
||||
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
|
||||
)
|
||||
|
||||
go func() {
|
||||
for d := range disconnections {
|
||||
if d.Error != nil {
|
||||
log.Error("peer drop", "err", d.Error)
|
||||
continue
|
||||
}
|
||||
log.Warn("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Watch multiple types of events or messages. In this case, they differ only
|
||||
// by MsgCode, but filters can be set for different types or protocols, too.
|
||||
func ExampleSimulation_PeerEvents_multipleFilters() {
|
||||
sim := simulation.New(nil)
|
||||
defer sim.Close()
|
||||
|
||||
msgs := sim.PeerEvents(
|
||||
context.Background(),
|
||||
sim.NodeIDs(),
|
||||
// Watch when bzz messages 1 and 4 are received.
|
||||
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(1),
|
||||
simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(4),
|
||||
)
|
||||
|
||||
go func() {
|
||||
for m := range msgs {
|
||||
if m.Error != nil {
|
||||
log.Error("bzz message", "err", m.Error)
|
||||
continue
|
||||
}
|
||||
log.Info("bzz message", "node", m.NodeID, "peer", m.Event.Peer)
|
||||
}
|
||||
}()
|
||||
}
|
swarm/network/simulation/http.go (new file, 63 lines)
@ -0,0 +1,63 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
)
|
||||
|
||||
// Package defaults.
|
||||
var (
|
||||
DefaultHTTPSimAddr = ":8888"
|
||||
)
|
||||
|
||||
//WithServer is a `With` (builder) pattern constructor for Simulation that
//starts it with an HTTP server
|
||||
func (s *Simulation) WithServer(addr string) *Simulation {
|
||||
//assign default addr if nothing provided
|
||||
if addr == "" {
|
||||
addr = DefaultHTTPSimAddr
|
||||
}
|
||||
log.Info(fmt.Sprintf("Initializing simulation server on %s...", addr))
|
||||
//initialize the HTTP server
|
||||
s.handler = simulations.NewServer(s.Net)
|
||||
s.runC = make(chan struct{})
|
||||
//add swarm specific routes to the HTTP server
|
||||
s.addSimulationRoutes()
|
||||
s.httpSrv = &http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.handler,
|
||||
}
|
||||
go s.httpSrv.ListenAndServe()
|
||||
return s
|
||||
}
|
||||
|
||||
//register additional HTTP routes
|
||||
func (s *Simulation) addSimulationRoutes() {
|
||||
s.handler.POST("/runsim", s.RunSimulation)
|
||||
}
|
||||
|
||||
// RunSimulation is the handler for the POST /runsim route; it signals a waiting simulation run to proceed
|
||||
func (s *Simulation) RunSimulation(w http.ResponseWriter, req *http.Request) {
|
||||
log.Debug("RunSimulation endpoint running")
|
||||
s.runC <- struct{}{}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
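
As a hedged sketch of the frontend side of this endpoint (not part of the commit): once a simulation process is running with WithServer, an external client releases the blocked run by POSTing to /runsim. The address is assumed to be the same value passed to WithServer, e.g. the package default DefaultHTTPSimAddr; the committed http_test.go below exercises the same flow end to end.

func triggerRun(addr string) error {
    // Release a simulation that is blocked in Run, waiting on the /runsim signal.
    resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", addr), "application/json", nil)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("unexpected status: %s", resp.Status)
    }
    return nil
}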
|
swarm/network/simulation/http_test.go (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
func TestSimulationWithHTTPServer(t *testing.T) {
|
||||
log.Debug("Init simulation")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
sim := New(
|
||||
map[string]ServiceFunc{
|
||||
"noop": func(_ *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
|
||||
return newNoopService(), nil, nil
|
||||
},
|
||||
}).WithServer(DefaultHTTPSimAddr)
|
||||
defer sim.Close()
|
||||
log.Debug("Done.")
|
||||
|
||||
_, err := sim.AddNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debug("Starting sim round and let it time out...")
|
||||
//first test that running without sending to the channel will actually
|
||||
//block the simulation, so let it time out
|
||||
result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
|
||||
log.Debug("Just start the sim without any action and wait for the timeout")
|
||||
//ensure with a Sleep that simulation doesn't terminate before the timeout
|
||||
time.Sleep(2 * time.Second)
|
||||
return nil
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
if result.Error.Error() == "context deadline exceeded" {
|
||||
log.Debug("Expected timeout error received")
|
||||
} else {
|
||||
t.Fatal(result.Error)
|
||||
}
|
||||
}
|
||||
|
||||
//now run it again and send the expected signal on the waiting channel,
|
||||
//then close the simulation
|
||||
log.Debug("Starting sim round and wait for frontend signal...")
|
||||
//this time the timeout should be long enough so that it doesn't kick in too early
|
||||
ctx, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel2()
|
||||
go sendRunSignal(t)
|
||||
result = sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
|
||||
log.Debug("This run waits for the run signal from `frontend`...")
|
||||
//ensure with a Sleep that simulation doesn't terminate before the signal is received
|
||||
time.Sleep(2 * time.Second)
|
||||
return nil
|
||||
})
|
||||
if result.Error != nil {
|
||||
t.Fatal(result.Error)
|
||||
}
|
||||
log.Debug("Test terminated successfully")
|
||||
}
|
||||
|
||||
func sendRunSignal(t *testing.T) {
|
||||
//We need to first wait for the sim HTTP server to start running...
|
||||
time.Sleep(2 * time.Second)
|
||||
//then we can send the signal
|
||||
|
||||
log.Debug("Sending run signal to simulation: POST /runsim...")
|
||||
resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", DefaultHTTPSimAddr), "application/json", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
log.Debug("Signal sent")
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("err %s", resp.Status)
|
||||
}
|
||||
}
|
swarm/network/simulation/kademlia.go (new file, 96 lines)
@ -0,0 +1,96 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
)
|
||||
|
||||
// BucketKeyKademlia is the key to be used for storing the kademlia
// instance for a particular node, usually inside the ServiceFunc function.
|
||||
var BucketKeyKademlia BucketKey = "kademlia"
|
||||
|
||||
// WaitTillHealthy blocks until all Kademlias in the simulation are healthy.
// If the returned error is not nil, a map of the Kademlias that were found not healthy is also returned.
|
||||
func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[discover.NodeID]*network.Kademlia, err error) {
|
||||
// Prepare PeerPot map for checking Kademlia health
|
||||
var ppmap map[string]*network.PeerPot
|
||||
kademlias := s.kademlias()
|
||||
addrs := make([][]byte, 0, len(kademlias))
|
||||
for _, k := range kademlias {
|
||||
addrs = append(addrs, k.BaseAddr())
|
||||
}
|
||||
ppmap = network.NewPeerPotMap(kadMinProxSize, addrs)
|
||||
|
||||
// Wait for healthy Kademlia on every node before checking files
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
ill = make(map[discover.NodeID]*network.Kademlia)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ill, ctx.Err()
|
||||
case <-ticker.C:
|
||||
for k := range ill {
|
||||
delete(ill, k)
|
||||
}
|
||||
log.Debug("kademlia health check", "addr count", len(addrs))
|
||||
for id, k := range kademlias {
|
||||
//PeerPot for this node
|
||||
addr := common.Bytes2Hex(k.BaseAddr())
|
||||
pp := ppmap[addr]
|
||||
//check the health of this node's kademlia against its PeerPot
|
||||
h := k.Healthy(pp)
|
||||
//print info
|
||||
log.Debug(k.String())
|
||||
log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full)
|
||||
log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
|
||||
log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
|
||||
if !h.GotNN || !h.Full {
|
||||
ill[id] = k
|
||||
}
|
||||
}
|
||||
if len(ill) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// kademlias returns all Kademlia instances that are set
// in the simulation bucket.
|
||||
func (s *Simulation) kademlias() (ks map[discover.NodeID]*network.Kademlia) {
|
||||
items := s.UpNodesItems(BucketKeyKademlia)
|
||||
ks = make(map[discover.NodeID]*network.Kademlia, len(items))
|
||||
for id, v := range items {
|
||||
k, ok := v.(*network.Kademlia)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ks[id] = k
|
||||
}
|
||||
return ks
|
||||
}
|
swarm/network/simulation/kademlia_test.go (new file, 67 lines)
@ -0,0 +1,67 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
)
|
||||
|
||||
func TestWaitTillHealthy(t *testing.T) {
|
||||
sim := New(map[string]ServiceFunc{
|
||||
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
|
||||
addr := network.NewAddrFromNodeID(ctx.Config.ID)
|
||||
hp := network.NewHiveParams()
|
||||
hp.Discovery = false
|
||||
config := &network.BzzConfig{
|
||||
OverlayAddr: addr.Over(),
|
||||
UnderlayAddr: addr.Under(),
|
||||
HiveParams: hp,
|
||||
}
|
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||
// store kademlia in node's bucket under BucketKeyKademlia
|
||||
// so that it can be found by WaitTillHealthy method.
|
||||
b.Store(BucketKeyKademlia, kad)
|
||||
return network.NewBzz(config, kad, nil, nil, nil), nil, nil
|
||||
},
|
||||
})
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodesAndConnectRing(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
|
||||
defer cancel()
|
||||
ill, err := sim.WaitTillHealthy(ctx, 2)
|
||||
if err != nil {
|
||||
for id, kad := range ill {
|
||||
t.Log("Node", id)
|
||||
t.Log(kad.String())
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
swarm/network/simulation/node.go (new file, 357 lines)
@ -0,0 +1,357 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
// NodeIDs returns NodeIDs for all nodes in the network.
|
||||
func (s *Simulation) NodeIDs() (ids []discover.NodeID) {
|
||||
nodes := s.Net.GetNodes()
|
||||
ids = make([]discover.NodeID, len(nodes))
|
||||
for i, node := range nodes {
|
||||
ids[i] = node.ID()
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// UpNodeIDs returns NodeIDs for nodes that are up in the network.
|
||||
func (s *Simulation) UpNodeIDs() (ids []discover.NodeID) {
|
||||
nodes := s.Net.GetNodes()
|
||||
for _, node := range nodes {
|
||||
if node.Up {
|
||||
ids = append(ids, node.ID())
|
||||
}
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// DownNodeIDs returns NodeIDs for nodes that are stopped in the network.
|
||||
func (s *Simulation) DownNodeIDs() (ids []discover.NodeID) {
|
||||
nodes := s.Net.GetNodes()
|
||||
for _, node := range nodes {
|
||||
if !node.Up {
|
||||
ids = append(ids, node.ID())
|
||||
}
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// AddNodeOption defines the option that can be passed
|
||||
// to Simulation.AddNode method.
|
||||
type AddNodeOption func(*adapters.NodeConfig)
|
||||
|
||||
// AddNodeWithMsgEvents sets the EnableMsgEvents option
|
||||
// to NodeConfig.
|
||||
func AddNodeWithMsgEvents(enable bool) AddNodeOption {
|
||||
return func(o *adapters.NodeConfig) {
|
||||
o.EnableMsgEvents = enable
|
||||
}
|
||||
}
|
||||
|
||||
// AddNodeWithService specifies a service that should be
// started on a node. This option can be repeated as a variadic
// argument to AddNode and other add node related methods.
// If AddNodeWithService is not specified, all services will be started.
|
||||
func AddNodeWithService(serviceName string) AddNodeOption {
|
||||
return func(o *adapters.NodeConfig) {
|
||||
o.Services = append(o.Services, serviceName)
|
||||
}
|
||||
}
|
||||
|
||||
// AddNode creates a new node with random configuration,
|
||||
// applies provided options to the config and adds the node to network.
|
||||
// By default all services will be started on a node. If one or more
|
||||
// AddNodeWithService option are provided, only specified services will be started.
|
||||
func (s *Simulation) AddNode(opts ...AddNodeOption) (id discover.NodeID, err error) {
|
||||
conf := adapters.RandomNodeConfig()
|
||||
for _, o := range opts {
|
||||
o(conf)
|
||||
}
|
||||
if len(conf.Services) == 0 {
|
||||
conf.Services = s.serviceNames
|
||||
}
|
||||
node, err := s.Net.NewNodeWithConfig(conf)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
return node.ID(), s.Net.Start(node.ID())
|
||||
}
|
||||
|
||||
// AddNodes creates new nodes with random configurations,
|
||||
// applies provided options to the config and adds nodes to network.
|
||||
func (s *Simulation) AddNodes(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
|
||||
ids = make([]discover.NodeID, 0, count)
|
||||
for i := 0; i < count; i++ {
|
||||
id, err := s.AddNode(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append(ids, id)
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// AddNodesAndConnectFull is a helper method that combines
// AddNodes and ConnectNodesFull. Only new nodes will be connected.
|
||||
func (s *Simulation) AddNodesAndConnectFull(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
|
||||
if count < 2 {
|
||||
return nil, errors.New("count of nodes must be at least 2")
|
||||
}
|
||||
ids, err = s.AddNodes(count, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = s.ConnectNodesFull(ids)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// AddNodesAndConnectChain is a helper method that combines
// AddNodes and ConnectNodesChain. If a node has already been added to the
// simulation, the chain is continued from that last node using the
// ConnectToLastNode method.
|
||||
func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
|
||||
if count < 2 {
|
||||
return nil, errors.New("count of nodes must be at least 2")
|
||||
}
|
||||
id, err := s.AddNode(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = s.ConnectToLastNode(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids, err = s.AddNodes(count-1, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append([]discover.NodeID{id}, ids...)
|
||||
err = s.ConnectNodesChain(ids)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// AddNodesAndConnectRing is a helper method that combines
// AddNodes and ConnectNodesRing.
|
||||
func (s *Simulation) AddNodesAndConnectRing(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
|
||||
if count < 2 {
|
||||
return nil, errors.New("count of nodes must be at least 2")
|
||||
}
|
||||
ids, err = s.AddNodes(count, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = s.ConnectNodesRing(ids)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// AddNodesAndConnectStar is a helper method that combines
// AddNodes and ConnectNodesStar.
|
||||
func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
|
||||
if count < 2 {
|
||||
return nil, errors.New("count of nodes must be at least 2")
|
||||
}
|
||||
ids, err = s.AddNodes(count, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = s.ConnectNodesStar(ids[0], ids[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
//UploadSnapshot uploads a snapshot.
//It tries to open the provided JSON file, applies the config options to all nodes
//and then loads the snapshot into the Simulation network.
|
||||
func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption) error {
|
||||
f, err := os.Open(snapshotFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
jsonbyte, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var snap simulations.Snapshot
|
||||
err = json.Unmarshal(jsonbyte, &snap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//the snapshot probably has the property EnableMsgEvents not set
|
||||
//just in case, set it to true!
|
||||
//(we need this to wait for messages before uploading)
|
||||
for _, n := range snap.Nodes {
|
||||
n.Node.Config.EnableMsgEvents = true
|
||||
n.Node.Config.Services = s.serviceNames
|
||||
for _, o := range opts {
|
||||
o(n.Node.Config)
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Waiting for p2p connections to be established...")
|
||||
|
||||
//now we can load the snapshot
|
||||
err = s.Net.Load(&snap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("Snapshot loaded")
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPivotNode sets the NodeID of the network's pivot node.
// The pivot node is just a specific node that should be treated
// differently than other nodes in a test. SetPivotNode and
// PivotNodeID are convenience functions to set and
// retrieve it.
|
||||
func (s *Simulation) SetPivotNode(id discover.NodeID) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.pivotNodeID = &id
|
||||
}
|
||||
|
||||
// PivotNodeID returns NodeID of the pivot node set by
|
||||
// Simulation.SetPivotNode method.
|
||||
func (s *Simulation) PivotNodeID() (id *discover.NodeID) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.pivotNodeID
|
||||
}
|
||||
|
||||
// StartNode starts a node by NodeID.
|
||||
func (s *Simulation) StartNode(id discover.NodeID) (err error) {
|
||||
return s.Net.Start(id)
|
||||
}
|
||||
|
||||
// StartRandomNode starts a random node.
|
||||
func (s *Simulation) StartRandomNode() (id discover.NodeID, err error) {
|
||||
n := s.randomDownNode()
|
||||
if n == nil {
|
||||
return id, ErrNodeNotFound
|
||||
}
|
||||
return n.ID, s.Net.Start(n.ID)
|
||||
}
|
||||
|
||||
// StartRandomNodes starts random nodes.
|
||||
func (s *Simulation) StartRandomNodes(count int) (ids []discover.NodeID, err error) {
|
||||
ids = make([]discover.NodeID, 0, count)
|
||||
downIDs := s.DownNodeIDs()
|
||||
for i := 0; i < count; i++ {
|
||||
n := s.randomNode(downIDs, ids...)
|
||||
if n == nil {
|
||||
return nil, ErrNodeNotFound
|
||||
}
|
||||
err = s.Net.Start(n.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append(ids, n.ID)
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// StopNode stops a node by NodeID.
|
||||
func (s *Simulation) StopNode(id discover.NodeID) (err error) {
|
||||
return s.Net.Stop(id)
|
||||
}
|
||||
|
||||
// StopRandomNode stops a random node.
|
||||
func (s *Simulation) StopRandomNode() (id discover.NodeID, err error) {
|
||||
n := s.randomUpNode()
|
||||
if n == nil {
|
||||
return id, ErrNodeNotFound
|
||||
}
|
||||
return n.ID, s.Net.Stop(n.ID)
|
||||
}
|
||||
|
||||
// StopRandomNodes stops random nodes.
|
||||
func (s *Simulation) StopRandomNodes(count int) (ids []discover.NodeID, err error) {
|
||||
ids = make([]discover.NodeID, 0, count)
|
||||
upIDs := s.UpNodeIDs()
|
||||
for i := 0; i < count; i++ {
|
||||
n := s.randomNode(upIDs, ids...)
|
||||
if n == nil {
|
||||
return nil, ErrNodeNotFound
|
||||
}
|
||||
err = s.Net.Stop(n.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ids = append(ids, n.ID)
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// seed the random generator for Simulation.randomNode.
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// randomUpNode returns a random SimNode that is up.
|
||||
// Arguments are NodeIDs for nodes that should not be returned.
|
||||
func (s *Simulation) randomUpNode(exclude ...discover.NodeID) *adapters.SimNode {
|
||||
return s.randomNode(s.UpNodeIDs(), exclude...)
|
||||
}
|
||||
|
||||
// randomDownNode returns a random SimNode that is not up.
|
||||
func (s *Simulation) randomDownNode(exclude ...discover.NodeID) *adapters.SimNode {
|
||||
return s.randomNode(s.DownNodeIDs(), exclude...)
|
||||
}
|
||||
|
||||
// randomNode returns a random SimNode from the provided NodeIDs, excluding the given IDs.
|
||||
func (s *Simulation) randomNode(ids []discover.NodeID, exclude ...discover.NodeID) *adapters.SimNode {
|
||||
for _, e := range exclude {
|
||||
var i int
|
||||
for _, id := range ids {
|
||||
if id == e {
|
||||
ids = append(ids[:i], ids[i+1:]...)
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
l := len(ids)
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
n := s.Net.GetNode(ids[rand.Intn(l)])
|
||||
node, _ := n.Node.(*adapters.SimNode)
|
||||
return node
|
||||
}
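
The AddNodeOption helpers earlier in this file compose as variadic arguments to AddNode and the AddNodesAndConnect* methods. The sketch below is illustrative only and not part of the commit; the service name "bzz" is an assumption standing in for whatever ServiceFunc names were registered with New.

func addInstrumentedNode(sim *Simulation) (discover.NodeID, error) {
    // Start only the (assumed) "bzz" service on the new node and enable
    // message events so that PeerEvents can observe its traffic.
    return sim.AddNode(
        AddNodeWithService("bzz"),
        AddNodeWithMsgEvents(true),
    )
}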
|
swarm/network/simulation/node_test.go (new file, 462 lines)
@ -0,0 +1,462 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
)
|
||||
|
||||
func TestUpDownNodeIDs(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
ids, err := sim.AddNodes(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
gotIDs := sim.NodeIDs()
|
||||
|
||||
if !equalNodeIDs(ids, gotIDs) {
|
||||
t.Error("returned nodes are not equal to added ones")
|
||||
}
|
||||
|
||||
stoppedIDs, err := sim.StopRandomNodes(3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
gotIDs = sim.UpNodeIDs()
|
||||
|
||||
for _, id := range gotIDs {
|
||||
if !sim.Net.GetNode(id).Up {
|
||||
t.Errorf("node %s should not be down", id)
|
||||
}
|
||||
}
|
||||
|
||||
if !equalNodeIDs(ids, append(gotIDs, stoppedIDs...)) {
|
||||
t.Error("returned nodes are not equal to added ones")
|
||||
}
|
||||
|
||||
gotIDs = sim.DownNodeIDs()
|
||||
|
||||
for _, id := range gotIDs {
|
||||
if sim.Net.GetNode(id).Up {
|
||||
t.Errorf("node %s should not be up", id)
|
||||
}
|
||||
}
|
||||
|
||||
if !equalNodeIDs(stoppedIDs, gotIDs) {
|
||||
t.Error("returned nodes are not equal to the stopped ones")
|
||||
}
|
||||
}
|
||||
|
||||
func equalNodeIDs(one, other []discover.NodeID) bool {
|
||||
if len(one) != len(other) {
|
||||
return false
|
||||
}
|
||||
var count int
|
||||
for _, a := range one {
|
||||
var found bool
|
||||
for _, b := range other {
|
||||
if a == b {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
count++
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return count == len(one)
|
||||
}
|
||||
|
||||
func TestAddNode(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
id, err := sim.AddNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n := sim.Net.GetNode(id)
|
||||
if n == nil {
|
||||
t.Fatal("node not found")
|
||||
}
|
||||
|
||||
if !n.Up {
|
||||
t.Error("node not started")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddNodeWithMsgEvents(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
id, err := sim.AddNode(AddNodeWithMsgEvents(true))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !sim.Net.GetNode(id).Config.EnableMsgEvents {
|
||||
t.Error("EnableMsgEvents is false")
|
||||
}
|
||||
|
||||
id, err = sim.AddNode(AddNodeWithMsgEvents(false))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if sim.Net.GetNode(id).Config.EnableMsgEvents {
|
||||
t.Error("EnableMsgEvents is true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddNodeWithService(t *testing.T) {
|
||||
sim := New(map[string]ServiceFunc{
|
||||
"noop1": noopServiceFunc,
|
||||
"noop2": noopServiceFunc,
|
||||
})
|
||||
defer sim.Close()
|
||||
|
||||
id, err := sim.AddNode(AddNodeWithService("noop1"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
|
||||
if n.Service("noop1") == nil {
|
||||
t.Error("service noop1 not found on node")
|
||||
}
|
||||
if n.Service("noop2") != nil {
|
||||
t.Error("service noop2 should not be found on node")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddNodes(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
nodesCount := 12
|
||||
|
||||
ids, err := sim.AddNodes(nodesCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
count := len(ids)
|
||||
if count != nodesCount {
|
||||
t.Errorf("expected %v nodes, got %v", nodesCount, count)
|
||||
}
|
||||
|
||||
count = len(sim.Net.GetNodes())
|
||||
if count != nodesCount {
|
||||
t.Errorf("expected %v nodes, got %v", nodesCount, count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddNodesAndConnectFull(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
n := 12
|
||||
|
||||
ids, err := sim.AddNodesAndConnectFull(n)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testFull(t, sim, ids)
|
||||
}
|
||||
|
||||
func TestAddNodesAndConnectChain(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodesAndConnectChain(12)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// add another set of nodes to test
|
||||
// if two chains are connected
|
||||
_, err = sim.AddNodesAndConnectChain(7)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testChain(t, sim, sim.UpNodeIDs())
|
||||
}
|
||||
|
||||
func TestAddNodesAndConnectRing(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
ids, err := sim.AddNodesAndConnectRing(12)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testRing(t, sim, ids)
|
||||
}
|
||||
|
||||
func TestAddNodesAndConnectStar(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
ids, err := sim.AddNodesAndConnectStar(12)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testStar(t, sim, ids, 0)
|
||||
}
|
||||
|
||||
//TestUploadSnapshot tests that uploading a snapshot works
|
||||
func TestUploadSnapshot(t *testing.T) {
|
||||
log.Debug("Creating simulation")
|
||||
s := New(map[string]ServiceFunc{
|
||||
"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
|
||||
addr := network.NewAddrFromNodeID(ctx.Config.ID)
|
||||
hp := network.NewHiveParams()
|
||||
hp.Discovery = false
|
||||
config := &network.BzzConfig{
|
||||
OverlayAddr: addr.Over(),
|
||||
UnderlayAddr: addr.Under(),
|
||||
HiveParams: hp,
|
||||
}
|
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||
return network.NewBzz(config, kad, nil, nil, nil), nil, nil
|
||||
},
|
||||
})
|
||||
defer s.Close()
|
||||
|
||||
nodeCount := 16
|
||||
log.Debug("Uploading snapshot")
|
||||
err := s.UploadSnapshot(fmt.Sprintf("../stream/testing/snapshot_%d.json", nodeCount))
|
||||
if err != nil {
|
||||
t.Fatalf("Error uploading snapshot to simulation network: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
log.Debug("Starting simulation...")
|
||||
s.Run(ctx, func(ctx context.Context, sim *Simulation) error {
|
||||
log.Debug("Checking")
|
||||
nodes := sim.UpNodeIDs()
|
||||
if len(nodes) != nodeCount {
|
||||
t.Fatal("Simulation network node number doesn't match snapshot node number")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
log.Debug("Done.")
|
||||
}
|
||||
|
||||
func TestPivotNode(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
id, err := sim.AddNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
id2, err := sim.AddNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if sim.PivotNodeID() != nil {
|
||||
t.Error("expected no pivot node")
|
||||
}
|
||||
|
||||
sim.SetPivotNode(id)
|
||||
|
||||
pid := sim.PivotNodeID()
|
||||
|
||||
if pid == nil {
|
||||
t.Error("pivot node not set")
|
||||
} else if *pid != id {
|
||||
t.Errorf("expected pivot node %s, got %s", id, *pid)
|
||||
}
|
||||
|
||||
sim.SetPivotNode(id2)
|
||||
|
||||
pid = sim.PivotNodeID()
|
||||
|
||||
if pid == nil {
|
||||
t.Error("pivot node not set")
|
||||
} else if *pid != id2 {
|
||||
t.Errorf("expected pivot node %s, got %s", id2, *pid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartStopNode(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
id, err := sim.AddNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n := sim.Net.GetNode(id)
|
||||
if n == nil {
|
||||
t.Fatal("node not found")
|
||||
}
|
||||
if !n.Up {
|
||||
t.Error("node not started")
|
||||
}
|
||||
|
||||
err = sim.StopNode(id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n.Up {
|
||||
t.Error("node not stopped")
|
||||
}
|
||||
|
||||
// Sleep here to ensure that Network.watchPeerEvents defer function
|
||||
// has set the `node.Up = false` before we start the node again.
|
||||
// p2p/simulations/network.go:215
|
||||
//
|
||||
// The same node is stopped and started again, and upon start
|
||||
// watchPeerEvents is started in a goroutine. If the node is stopped
|
||||
// and then very quickly started, that goroutine may be scheduled later
// than the start and force `node.Up = false` in its defer function.
|
||||
// This will make this test unreliable.
|
||||
time.Sleep(time.Second)
|
||||
|
||||
err = sim.StartNode(id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !n.Up {
|
||||
t.Error("node not started")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartStopRandomNode(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodes(3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
id, err := sim.StopRandomNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n := sim.Net.GetNode(id)
|
||||
if n == nil {
|
||||
t.Fatal("node not found")
|
||||
}
|
||||
if n.Up {
|
||||
t.Error("node not stopped")
|
||||
}
|
||||
|
||||
id2, err := sim.StopRandomNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sleep here to ensure that Network.watchPeerEvents defer function
|
||||
// has set the `node.Up = false` before we start the node again.
|
||||
// p2p/simulations/network.go:215
|
||||
//
|
||||
// The same node is stopped and started again, and upon start
|
||||
// watchPeerEvents is started in a goroutine. If the node is stopped
|
||||
// and then very quickly started, that goroutine may be scheduled later
// than the start and force `node.Up = false` in its defer function.
|
||||
// This will make this test unreliable.
|
||||
time.Sleep(time.Second)
|
||||
|
||||
idStarted, err := sim.StartRandomNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if idStarted != id && idStarted != id2 {
|
||||
t.Error("unexpected started node ID")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartStopRandomNodes(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
_, err := sim.AddNodes(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ids, err := sim.StopRandomNodes(3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
n := sim.Net.GetNode(id)
|
||||
if n == nil {
|
||||
t.Fatal("node not found")
|
||||
}
|
||||
if n.Up {
|
||||
t.Error("node not stopped")
|
||||
}
|
||||
}
|
||||
|
||||
// Sleep here to ensure that Network.watchPeerEvents defer function
|
||||
// has set the `node.Up = false` before we start the node again.
|
||||
// p2p/simulations/network.go:215
|
||||
//
|
||||
// The same node is stopped and started again, and upon start
|
||||
// watchPeerEvents is started in a goroutine. If the node is stopped
|
||||
// and then very quickly started, that goroutine may be scheduled later
// than the start and force `node.Up = false` in its defer function.
|
||||
// This will make this test unreliable.
|
||||
time.Sleep(time.Second)
|
||||
|
||||
ids, err = sim.StartRandomNodes(2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
n := sim.Net.GetNode(id)
|
||||
if n == nil {
|
||||
t.Fatal("node not found")
|
||||
}
|
||||
if !n.Up {
|
||||
t.Error("node not started")
|
||||
}
|
||||
}
|
||||
}
|
swarm/network/simulation/service.go (new file, 65 lines)
@ -0,0 +1,65 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
// Service returns a single Service by name on a particular node
|
||||
// with provided id.
|
||||
func (s *Simulation) Service(name string, id discover.NodeID) node.Service {
|
||||
simNode, ok := s.Net.GetNode(id).Node.(*adapters.SimNode)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
services := simNode.ServiceMap()
|
||||
if len(services) == 0 {
|
||||
return nil
|
||||
}
|
||||
return services[name]
|
||||
}
|
||||
|
||||
// RandomService returns a single Service by name on a
|
||||
// randomly chosen node that is up.
|
||||
func (s *Simulation) RandomService(name string) node.Service {
|
||||
n := s.randomUpNode()
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
return n.Service(name)
|
||||
}
|
||||
|
||||
// Services returns all services with a provided name
|
||||
// from nodes that are up.
|
||||
func (s *Simulation) Services(name string) (services map[discover.NodeID]node.Service) {
|
||||
nodes := s.Net.GetNodes()
|
||||
services = make(map[discover.NodeID]node.Service)
|
||||
for _, node := range nodes {
|
||||
if !node.Up {
|
||||
continue
|
||||
}
|
||||
simNode, ok := node.Node.(*adapters.SimNode)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
services[node.ID()] = simNode.Service(name)
|
||||
}
|
||||
return services
|
||||
}
|
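A brief usage sketch of these accessors, assuming a Simulation built as in the tests that follow (the "noop" service name and the *testing.T value t are carried over from those tests):

sim := New(noopServiceFuncMap)
defer sim.Close()

id, err := sim.AddNode()
if err != nil {
	t.Fatal(err)
}

// Access the service instance running on a specific node...
if svc := sim.Service("noop", id); svc == nil {
	t.Fatal("service not found on node")
}

// ...or collect the same service from every node that is up.
for nodeID, svc := range sim.Services("noop") {
	_, _ = nodeID, svc // type-assert to the concrete service type as needed
}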
46
swarm/network/simulation/service_test.go
Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestService(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
id, err := sim.AddNode()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, ok := sim.Service("noop", id).(*noopService)
|
||||
if !ok {
|
||||
t.Fatalf("service is not of %T type", &noopService{})
|
||||
}
|
||||
|
||||
_, ok = sim.RandomService("noop").(*noopService)
|
||||
if !ok {
|
||||
t.Fatalf("service is not of %T type", &noopService{})
|
||||
}
|
||||
|
||||
_, ok = sim.Services("noop")[id].(*noopService)
|
||||
if !ok {
|
||||
t.Fatalf("service is not of %T type", &noopService{})
|
||||
}
|
||||
}
|
201
swarm/network/simulation/simulation.go
Normal file
@ -0,0 +1,201 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
)
|
||||
|
||||
// Common errors that are returned by functions in this package.
|
||||
var (
|
||||
ErrNodeNotFound = errors.New("node not found")
|
||||
ErrNoPivotNode = errors.New("no pivot node set")
|
||||
)
|
||||
|
||||
// Simulation provides methods on network, nodes and services
|
||||
// to manage them.
|
||||
type Simulation struct {
|
||||
// Net is exposed as a way to access lower level functionalities
|
||||
// of p2p/simulations.Network.
|
||||
Net *simulations.Network
|
||||
|
||||
serviceNames []string
|
||||
cleanupFuncs []func()
|
||||
buckets map[discover.NodeID]*sync.Map
|
||||
pivotNodeID *discover.NodeID
|
||||
shutdownWG sync.WaitGroup
|
||||
done chan struct{}
|
||||
mu sync.RWMutex
|
||||
|
||||
httpSrv *http.Server //attach a HTTP server via SimulationOptions
|
||||
handler *simulations.Server //HTTP handler for the server
|
||||
runC chan struct{} //channel where frontend signals it is ready
|
||||
}
|
||||
|
||||
// ServiceFunc is used in New to declare new service constructor.
|
||||
// The first argument provides ServiceContext from the adapters package
|
||||
// giving for example the access to NodeID. Second argument is the sync.Map
|
||||
// where all "global" state related to the service should be kept.
|
||||
// All cleanups needed for constructed service and any other constructed
|
||||
// objects should be provided in a single returned cleanup function.
|
||||
type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error)
|
||||
|
||||
// New creates a new Simulation instance with new
|
||||
// simulations.Network initialized with provided services.
|
||||
func New(services map[string]ServiceFunc) (s *Simulation) {
|
||||
s = &Simulation{
|
||||
buckets: make(map[discover.NodeID]*sync.Map),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
adapterServices := make(map[string]adapters.ServiceFunc, len(services))
|
||||
for name, serviceFunc := range services {
|
||||
s.serviceNames = append(s.serviceNames, name)
|
||||
adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) {
|
||||
b := new(sync.Map)
|
||||
service, cleanup, err := serviceFunc(ctx, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if cleanup != nil {
|
||||
s.cleanupFuncs = append(s.cleanupFuncs, cleanup)
|
||||
}
|
||||
s.buckets[ctx.Config.ID] = b
|
||||
return service, nil
|
||||
}
|
||||
}
|
||||
|
||||
s.Net = simulations.NewNetwork(
|
||||
adapters.NewSimAdapter(adapterServices),
|
||||
&simulations.NetworkConfig{ID: "0"},
|
||||
)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// RunFunc is the function that will be called
|
||||
// on Simulation.Run method call.
|
||||
type RunFunc func(context.Context, *Simulation) error
|
||||
|
||||
// Result is the returned value of Simulation.Run method.
|
||||
type Result struct {
|
||||
Duration time.Duration
|
||||
Error error
|
||||
}
|
||||
|
||||
// Run calls the RunFunc function while taking care of
|
||||
// cancelation provided through the Context.
|
||||
func (s *Simulation) Run(ctx context.Context, f RunFunc) (r Result) {
|
||||
//if the option is set to run a HTTP server with the simulation,
|
||||
//init the server and start it
|
||||
start := time.Now()
|
||||
if s.httpSrv != nil {
|
||||
log.Info("Waiting for frontend to be ready...(send POST /runsim to HTTP server)")
|
||||
//wait for the frontend to connect
|
||||
select {
|
||||
case <-s.runC:
|
||||
case <-ctx.Done():
|
||||
return Result{
|
||||
Duration: time.Since(start),
|
||||
Error: ctx.Err(),
|
||||
}
|
||||
}
|
||||
log.Info("Received signal from frontend - starting simulation run.")
|
||||
}
|
||||
errc := make(chan error)
|
||||
quit := make(chan struct{})
|
||||
defer close(quit)
|
||||
go func() {
|
||||
select {
|
||||
case errc <- f(ctx, s):
|
||||
case <-quit:
|
||||
}
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
case err = <-errc:
|
||||
}
|
||||
return Result{
|
||||
Duration: time.Since(start),
|
||||
Error: err,
|
||||
}
|
||||
}
|
||||
|
||||
// Maximal number of parallel calls to cleanup functions on
|
||||
// Simulation.Close.
|
||||
var maxParallelCleanups = 10
|
||||
|
||||
// Close calls all cleanup functions that are returned by
|
||||
// ServiceFunc, waits for all of them to finish and other
|
||||
// functions that explicitly block shutdownWG
|
||||
// (like Simulation.PeerEvents) and shuts down the network
|
||||
// at the end. It is used to clean all resources from the
|
||||
// simulation.
|
||||
func (s *Simulation) Close() {
|
||||
close(s.done)
|
||||
sem := make(chan struct{}, maxParallelCleanups)
|
||||
s.mu.RLock()
|
||||
cleanupFuncs := make([]func(), len(s.cleanupFuncs))
|
||||
for i, f := range s.cleanupFuncs {
|
||||
if f != nil {
|
||||
cleanupFuncs[i] = f
|
||||
}
|
||||
}
|
||||
s.mu.RUnlock()
|
||||
for _, cleanup := range cleanupFuncs {
|
||||
s.shutdownWG.Add(1)
|
||||
sem <- struct{}{}
|
||||
go func(cleanup func()) {
|
||||
defer s.shutdownWG.Done()
|
||||
defer func() { <-sem }()
|
||||
|
||||
cleanup()
|
||||
}(cleanup)
|
||||
}
|
||||
if s.httpSrv != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
err := s.httpSrv.Shutdown(ctx)
|
||||
if err != nil {
|
||||
log.Error("Error shutting down HTTP server!", "err", err)
|
||||
}
|
||||
close(s.runC)
|
||||
}
|
||||
s.shutdownWG.Wait()
|
||||
s.Net.Shutdown()
|
||||
}
|
||||
|
||||
// Done returns a channel that is closed when the simulation
|
||||
// is closed by Close method. It is useful for signaling termination
|
||||
// of all possible goroutines that are created within the test.
|
||||
func (s *Simulation) Done() <-chan struct{} {
|
||||
return s.done
|
||||
}
|
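A minimal end-to-end sketch of how ServiceFunc, New, Run and Close fit together in a test; the "example" service name and the bucket key are illustrative, and newNoopService comes from this package's tests:

sim := New(map[string]ServiceFunc{
	"example": func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
		// keep per-node state in the bucket; the cleanup func is optional
		bucket.Store("created", time.Now())
		return newNoopService(), nil, nil
	},
})
defer sim.Close()

if _, err := sim.AddNodes(5); err != nil {
	t.Fatal(err)
}

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
	// inspect nodes, services or buckets here
	return nil
})
if result.Error != nil {
	t.Fatal(result.Error)
}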
207
swarm/network/simulation/simulation_test.go
Normal file
@ -0,0 +1,207 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package simulation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"flag"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
colorable "github.com/mattn/go-colorable"
|
||||
)
|
||||
|
||||
var (
|
||||
loglevel = flag.Int("loglevel", 2, "verbosity of logs")
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.Parse()
|
||||
log.PrintOrigins(true)
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
|
||||
}
|
||||
|
||||
// TestRun tests that the Run method calls RunFunc and that it handles the context properly.
|
||||
func TestRun(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
defer sim.Close()
|
||||
|
||||
t.Run("call", func(t *testing.T) {
|
||||
expect := "something"
|
||||
var got string
|
||||
r := sim.Run(context.Background(), func(ctx context.Context, sim *Simulation) error {
|
||||
got = expect
|
||||
return nil
|
||||
})
|
||||
|
||||
if r.Error != nil {
|
||||
t.Errorf("unexpected error: %v", r.Error)
|
||||
}
|
||||
if got != expect {
|
||||
t.Errorf("expected %q, got %q", expect, got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("cancelation", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
return nil
|
||||
})
|
||||
|
||||
if r.Error != context.DeadlineExceeded {
|
||||
t.Errorf("unexpected error: %v", r.Error)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("context value and duration", func(t *testing.T) {
|
||||
ctx := context.WithValue(context.Background(), "hey", "there")
|
||||
sleep := 50 * time.Millisecond
|
||||
|
||||
r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
|
||||
if ctx.Value("hey") != "there" {
|
||||
return errors.New("expected context value not passed")
|
||||
}
|
||||
time.Sleep(sleep)
|
||||
return nil
|
||||
})
|
||||
|
||||
if r.Error != nil {
|
||||
t.Errorf("unexpected error: %v", r.Error)
|
||||
}
|
||||
if r.Duration < sleep {
|
||||
t.Errorf("reported run duration less then expected: %s", r.Duration)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestClose tests whether the Close method triggers all cleanup functions and whether all nodes are stopped afterwards.
|
||||
func TestClose(t *testing.T) {
|
||||
var mu sync.Mutex
|
||||
var cleanupCount int
|
||||
|
||||
sleep := 50 * time.Millisecond
|
||||
|
||||
sim := New(map[string]ServiceFunc{
|
||||
"noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
|
||||
return newNoopService(), func() {
|
||||
time.Sleep(sleep)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
cleanupCount++
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
|
||||
nodeCount := 30
|
||||
|
||||
_, err := sim.AddNodes(nodeCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var upNodeCount int
|
||||
for _, n := range sim.Net.GetNodes() {
|
||||
if n.Up {
|
||||
upNodeCount++
|
||||
}
|
||||
}
|
||||
if upNodeCount != nodeCount {
|
||||
t.Errorf("all nodes should be up, insted only %v are up", upNodeCount)
|
||||
}
|
||||
|
||||
sim.Close()
|
||||
|
||||
if cleanupCount != nodeCount {
|
||||
t.Errorf("number of cleanups expected %v, got %v", nodeCount, cleanupCount)
|
||||
}
|
||||
|
||||
upNodeCount = 0
|
||||
for _, n := range sim.Net.GetNodes() {
|
||||
if n.Up {
|
||||
upNodeCount++
|
||||
}
|
||||
}
|
||||
if upNodeCount != 0 {
|
||||
t.Errorf("all nodes should be down, insted %v are up", upNodeCount)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDone checks that the Close method triggers the closing of the done channel.
|
||||
func TestDone(t *testing.T) {
|
||||
sim := New(noopServiceFuncMap)
|
||||
sleep := 50 * time.Millisecond
|
||||
timeout := 2 * time.Second
|
||||
|
||||
start := time.Now()
|
||||
go func() {
|
||||
time.Sleep(sleep)
|
||||
sim.Close()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
t.Error("done channel closing timmed out")
|
||||
case <-sim.Done():
|
||||
if d := time.Since(start); d < sleep {
|
||||
t.Errorf("done channel closed sooner then expected: %s", d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// a helper map for the usual services that do not do anything
|
||||
var noopServiceFuncMap = map[string]ServiceFunc{
|
||||
"noop": noopServiceFunc,
|
||||
}
|
||||
|
||||
// a helper function for the most basic noop service
|
||||
func noopServiceFunc(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
|
||||
return newNoopService(), nil, nil
|
||||
}
|
||||
|
||||
// noopService is the service that does not do anything
|
||||
// but implements node.Service interface.
|
||||
type noopService struct{}
|
||||
|
||||
func newNoopService() node.Service {
|
||||
return &noopService{}
|
||||
}
|
||||
|
||||
func (t *noopService) Protocols() []p2p.Protocol {
|
||||
return []p2p.Protocol{}
|
||||
}
|
||||
|
||||
func (t *noopService) APIs() []rpc.API {
|
||||
return []rpc.API{}
|
||||
}
|
||||
|
||||
func (t *noopService) Start(server *p2p.Server) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *noopService) Stop() error {
|
||||
return nil
|
||||
}
|
@ -28,15 +28,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
"github.com/ethereum/go-ethereum/swarm/api"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
colorable "github.com/mattn/go-colorable"
|
||||
)
|
||||
@ -261,16 +260,46 @@ type testSwarmNetworkOptions struct {
|
||||
// - May wait for Kademlia on every node to be healthy.
|
||||
// - Checking if a file is retrievable from all nodes.
|
||||
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) {
|
||||
dir, err := ioutil.TempDir("", "swarm-network-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
if o == nil {
|
||||
o = new(testSwarmNetworkOptions)
|
||||
}
|
||||
|
||||
sim := simulation.New(map[string]simulation.ServiceFunc{
|
||||
"swarm": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
|
||||
config := api.NewConfig()
|
||||
|
||||
dir, err := ioutil.TempDir("", "swarm-network-test-node")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cleanup = func() {
|
||||
err := os.RemoveAll(dir)
|
||||
if err != nil {
|
||||
log.Error("cleaning up swarm temp dir", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
config.Path = dir
|
||||
|
||||
privkey, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
return nil, cleanup, err
|
||||
}
|
||||
|
||||
config.Init(privkey)
|
||||
config.DeliverySkipCheck = o.SkipCheck
|
||||
|
||||
swarm, err := NewSwarm(config, nil)
|
||||
if err != nil {
|
||||
return nil, cleanup, err
|
||||
}
|
||||
bucket.Store(simulation.BucketKeyKademlia, swarm.bzz.Hive.Overlay.(*network.Kademlia))
|
||||
log.Info("new swarm", "bzzKey", config.BzzKey, "baseAddr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()))
|
||||
return swarm, cleanup, nil
|
||||
},
|
||||
})
|
||||
defer sim.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
if o.Timeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
@ -278,61 +307,20 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
swarms := make(map[discover.NodeID]*Swarm)
|
||||
files := make([]file, 0)
|
||||
|
||||
services := map[string]adapters.ServiceFunc{
|
||||
"swarm": func(ctx *adapters.ServiceContext) (node.Service, error) {
|
||||
config := api.NewConfig()
|
||||
|
||||
dir, err := ioutil.TempDir(dir, "node")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.Path = dir
|
||||
|
||||
privkey, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.Init(privkey)
|
||||
config.DeliverySkipCheck = o.SkipCheck
|
||||
|
||||
s, err := NewSwarm(config, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Info("new swarm", "bzzKey", config.BzzKey, "baseAddr", fmt.Sprintf("%x", s.bzz.BaseAddr()))
|
||||
swarms[ctx.Config.ID] = s
|
||||
return s, nil
|
||||
},
|
||||
}
|
||||
|
||||
a := adapters.NewSimAdapter(services)
|
||||
net := simulations.NewNetwork(a, &simulations.NetworkConfig{
|
||||
ID: "0",
|
||||
DefaultService: "swarm",
|
||||
})
|
||||
defer net.Shutdown()
|
||||
|
||||
trigger := make(chan discover.NodeID)
|
||||
|
||||
sim := simulations.NewSimulation(net)
|
||||
|
||||
for i, step := range steps {
|
||||
log.Debug("test sync step", "n", i+1, "nodes", step.nodeCount)
|
||||
|
||||
change := step.nodeCount - len(allNodeIDs(net))
|
||||
change := step.nodeCount - len(sim.UpNodeIDs())
|
||||
|
||||
if change > 0 {
|
||||
_, err := addNodes(change, net)
|
||||
_, err := sim.AddNodesAndConnectChain(change)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else if change < 0 {
|
||||
err := removeNodes(-change, net)
|
||||
_, err := sim.StopRandomNodes(-change)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -341,91 +329,43 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
|
||||
continue
|
||||
}
|
||||
|
||||
nodeIDs := allNodeIDs(net)
|
||||
shuffle(len(nodeIDs), func(i, j int) {
|
||||
nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i]
|
||||
})
|
||||
for _, id := range nodeIDs {
|
||||
key, data, err := uploadFile(swarms[id])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
log.Trace("file uploaded", "node", id, "key", key.String())
|
||||
files = append(files, file{
|
||||
addr: key,
|
||||
data: data,
|
||||
nodeID: id,
|
||||
})
|
||||
}
|
||||
|
||||
// Prepare PeerPot map for checking Kademlia health
|
||||
var ppmap map[string]*network.PeerPot
|
||||
nIDs := allNodeIDs(net)
|
||||
addrs := make([][]byte, len(nIDs))
|
||||
if *waitKademlia {
|
||||
for i, id := range nIDs {
|
||||
addrs[i] = swarms[id].bzz.BaseAddr()
|
||||
}
|
||||
ppmap = network.NewPeerPotMap(2, addrs)
|
||||
}
|
||||
|
||||
var checkStatusM sync.Map
|
||||
var nodeStatusM sync.Map
|
||||
var totalFoundCount uint64
|
||||
|
||||
result := sim.Run(ctx, &simulations.Step{
|
||||
Action: func(ctx context.Context) error {
|
||||
if *waitKademlia {
|
||||
// Wait for healthy Kademlia on every node before checking files
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
healthy := true
|
||||
log.Debug("kademlia health check", "node count", len(nIDs), "addr count", len(addrs))
|
||||
for i, id := range nIDs {
|
||||
swarm := swarms[id]
|
||||
//PeerPot for this node
|
||||
addr := common.Bytes2Hex(swarm.bzz.BaseAddr())
|
||||
pp := ppmap[addr]
|
||||
//call Healthy RPC
|
||||
h := swarm.bzz.Healthy(pp)
|
||||
//print info
|
||||
log.Debug(swarm.bzz.String())
|
||||
log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full)
|
||||
log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()), "id", id, "i", i)
|
||||
log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()), "id", id, "i", i)
|
||||
if !h.GotNN || !h.Full {
|
||||
healthy = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if healthy {
|
||||
break
|
||||
}
|
||||
}
|
||||
result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
|
||||
nodeIDs := sim.UpNodeIDs()
|
||||
shuffle(len(nodeIDs), func(i, j int) {
|
||||
nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i]
|
||||
})
|
||||
for _, id := range nodeIDs {
|
||||
key, data, err := uploadFile(sim.Service("swarm", id).(*Swarm))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Trace("file uploaded", "node", id, "key", key.String())
|
||||
files = append(files, file{
|
||||
addr: key,
|
||||
data: data,
|
||||
nodeID: id,
|
||||
})
|
||||
}
|
||||
|
||||
go func() {
|
||||
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
|
||||
// or until the timeout is reached.
|
||||
for {
|
||||
if retrieve(net, files, swarms, trigger, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
},
|
||||
Trigger: trigger,
|
||||
Expect: &simulations.Expectation{
|
||||
Nodes: allNodeIDs(net),
|
||||
Check: func(ctx context.Context, id discover.NodeID) (bool, error) {
|
||||
// The check is done by a goroutine in the action function.
|
||||
return true, nil
|
||||
},
|
||||
},
|
||||
if *waitKademlia {
|
||||
if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
|
||||
// or until the timeout is reached.
|
||||
for {
|
||||
if retrieve(sim, files, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
t.Fatal(result.Error)
|
||||
}
|
||||
@ -433,70 +373,6 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
|
||||
}
|
||||
}
|
||||
|
||||
// allNodeIDs is returning NodeID for every node that is Up.
|
||||
func allNodeIDs(net *simulations.Network) (nodes []discover.NodeID) {
|
||||
for _, n := range net.GetNodes() {
|
||||
if n.Up {
|
||||
nodes = append(nodes, n.ID())
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// addNodes adds a number of nodes to the network.
|
||||
func addNodes(count int, net *simulations.Network) (ids []discover.NodeID, err error) {
|
||||
for i := 0; i < count; i++ {
|
||||
nodeIDs := allNodeIDs(net)
|
||||
l := len(nodeIDs)
|
||||
nodeconf := adapters.RandomNodeConfig()
|
||||
node, err := net.NewNodeWithConfig(nodeconf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create node: %v", err)
|
||||
}
|
||||
err = net.Start(node.ID())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("start node: %v", err)
|
||||
}
|
||||
|
||||
log.Debug("created node", "id", node.ID())
|
||||
|
||||
// connect nodes in a chain
|
||||
if l > 0 {
|
||||
var otherNodeID discover.NodeID
|
||||
for i := l - 1; i >= 0; i-- {
|
||||
n := net.GetNode(nodeIDs[i])
|
||||
if n.Up {
|
||||
otherNodeID = n.ID()
|
||||
break
|
||||
}
|
||||
}
|
||||
log.Debug("connect nodes", "one", node.ID(), "other", otherNodeID)
|
||||
if err := net.Connect(node.ID(), otherNodeID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
ids = append(ids, node.ID())
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// removeNodes stops a random nodes in the network.
|
||||
func removeNodes(count int, net *simulations.Network) error {
|
||||
for i := 0; i < count; i++ {
|
||||
// allNodeIDs are returning only the Up nodes.
|
||||
nodeIDs := allNodeIDs(net)
|
||||
if len(nodeIDs) == 0 {
|
||||
break
|
||||
}
|
||||
node := net.GetNode(nodeIDs[rand.Intn(len(nodeIDs))])
|
||||
if err := node.Stop(); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("removed node", "id", node.ID())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// uploadFile, uploads a short file to the swarm instance
|
||||
// using the api.Put method.
|
||||
func uploadFile(swarm *Swarm) (storage.Address, string, error) {
|
||||
@ -522,10 +398,8 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) {
|
||||
// retrieve is the function that is used for checking the availability of
|
||||
// uploaded files in testSwarmNetwork test helper function.
|
||||
func retrieve(
|
||||
net *simulations.Network,
|
||||
sim *simulation.Simulation,
|
||||
files []file,
|
||||
swarms map[discover.NodeID]*Swarm,
|
||||
trigger chan discover.NodeID,
|
||||
checkStatusM *sync.Map,
|
||||
nodeStatusM *sync.Map,
|
||||
totalFoundCount *uint64,
|
||||
@ -537,7 +411,7 @@ func retrieve(
|
||||
var totalWg sync.WaitGroup
|
||||
errc := make(chan error)
|
||||
|
||||
nodeIDs := allNodeIDs(net)
|
||||
nodeIDs := sim.UpNodeIDs()
|
||||
|
||||
totalCheckCount := len(nodeIDs) * len(files)
|
||||
|
||||
@ -553,8 +427,8 @@ func retrieve(
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
swarm := sim.Service("swarm", id).(*Swarm)
|
||||
for _, f := range files {
|
||||
swarm := swarms[id]
|
||||
|
||||
checkKey := check{
|
||||
key: f.addr.String(),
|
||||
@ -601,7 +475,6 @@ func retrieve(
|
||||
if foundCount == checkCount {
|
||||
log.Info("all files are found for node", "id", id.String(), "duration", time.Since(start))
|
||||
nodeStatusM.Store(id, 0)
|
||||
trigger <- id
|
||||
return
|
||||
}
|
||||
log.Debug("files missing for node", "id", id.String(), "check", checkCount, "found", foundCount)
|
||||
|
61
swarm/storage/mru/doc.go
Normal file
@ -0,0 +1,61 @@
// Package mru defines Mutable Resource Updates.
// A Mutable Resource is an entity which allows updates to a resource
// without resorting to ENS on each update.
// The update scheme is built on swarm chunks with chunk keys following
// a predictable, versionable pattern.
//
// Updates are defined to be periodic in nature, where the update frequency
// is expressed in seconds.
//
// The root entry of a mutable resource is tied to a unique identifier that
// is deterministically generated out of the metadata content that describes
// the resource. This metadata includes a user-defined resource name, a resource
// start time that indicates when the resource becomes valid, and the frequency
// in seconds with which the resource is expected to be updated; the start time
// and frequency are stored as little-endian uint64 values in the database (for a
// total of 16 bytes). It also contains the owner's address (ownerAddr).
// This MRU info is stored in a separate content-addressed chunk
// (call it the metadata chunk), with the following layout:
//
// (00|length|startTime|frequency|name|ownerAddr)
//
// (The first two zero-value bytes are used for disambiguation by the chunk validator;
// an update chunk will always have a value > 0 there.)
//
// Each metadata chunk is identified by its rootAddr, calculated as follows:
// metaHash = H(len(metadata), startTime, frequency, name)
// rootAddr = H(metaHash, ownerAddr)
// where H is the SHA3 hash function.
// This scheme effectively locks the root chunk so that only the owner of the private key
// that ownerAddr was derived from can sign updates.
//
// The root entry tells the requester from when the mutable resource was
// first added (Unix time in seconds) and in which moments to look for the
// actual updates. Thus, a resource update for identifier "føø.bar"
// starting at Unix time 1528800000 with frequency 300 (every 5 minutes) will have updates on
// 1528800300, 1528800600, 1528800900 and so on.
//
// Actual data updates are also made in the form of swarm chunks. The keys
// of the updates are the hash of a concatenation of properties as follows:
//
// updateAddr = H(period, version, rootAddr)
// where H is the SHA3 hash function
// and the period is (currentTime - startTime) / frequency.
//
// Using our previous example, this means that period 3 will happen when the
// clock hits 1528800900.
//
// If more than one update is made in the same period, incremental
// version numbers are used successively.
//
// A user looking up a resource only needs to know the rootAddr in order to get the versions.
//
// The resource update data is:
// resourcedata = headerlength|period|version|rootAddr|flags|metaHash
// where flags is a 1-byte flags field. Flag 0 is set to 1 to indicate a multihash.
//
// The full update data that goes in the chunk payload is:
// resourcedata|sign(resourcedata)
//
// headerlength is a 16-bit value containing the byte length of period|version|rootAddr|flags|metaHash.
package mru
|
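As a worked restatement of the arithmetic in the package comment above (all values are taken from its example; the hash is only indicated in a comment):

package main

import "fmt"

func main() {
	startTime := uint64(1528800000)   // resource start time (Unix seconds)
	frequency := uint64(300)          // one expected update every 5 minutes
	currentTime := uint64(1528800935) // some time during period 3

	// "The period is (currentTime - startTime) / frequency"
	period := (currentTime - startTime) / frequency
	fmt.Println(period) // 3 — the period that starts when the clock hits 1528800900

	// The update chunk address for a given period/version is then
	// updateAddr = H(period, version, rootAddr),
	// serialized as LE32(period) | LE32(version) | rootAddr (see lookup.go).
}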
@ -16,6 +16,10 @@
|
||||
|
||||
package mru
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
ErrInit = iota
|
||||
ErrNotFound
|
||||
@ -30,3 +34,40 @@ const (
|
||||
ErrPeriodDepth
|
||||
ErrCnt
|
||||
)
|
||||
|
||||
// Error is the typed error object used for Mutable Resources
|
||||
type Error struct {
|
||||
code int
|
||||
err string
|
||||
}
|
||||
|
||||
// Error implements the error interface
|
||||
func (e *Error) Error() string {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// Code returns the error code
|
||||
// Error codes are enumerated in the error.go file within the mru package
|
||||
func (e *Error) Code() int {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// NewError creates a new Mutable Resource Error object with the specified code and custom error message
|
||||
func NewError(code int, s string) error {
|
||||
if code < 0 || code >= ErrCnt {
|
||||
panic("no such error code!")
|
||||
}
|
||||
r := &Error{
|
||||
err: s,
|
||||
}
|
||||
switch code {
|
||||
case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
|
||||
r.code = code
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// NewErrorf is a convenience version of NewError that incorporates printf-style formatting
|
||||
func NewErrorf(code int, format string, args ...interface{}) error {
|
||||
return NewError(code, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
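A short sketch of how code inside the package might use these helpers (the message text is illustrative):

err := NewErrorf(ErrNotFound, "resource %s has no updates", "example")
if e, ok := err.(*Error); ok && e.Code() == ErrNotFound {
	// handle the not-found case using the typed error code
}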
514
swarm/storage/mru/handler.go
Normal file
@ -0,0 +1,514 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Handler is the API for Mutable Resources
|
||||
// It enables creating, updating, syncing and retrieving resources and their update data
|
||||
package mru
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
const chunkSize = 4096 // temporary until we implement FileStore in the resourcehandler
|
||||
|
||||
type Handler struct {
|
||||
chunkStore *storage.NetStore
|
||||
HashSize int
|
||||
resources map[uint64]*resource
|
||||
resourceLock sync.RWMutex
|
||||
storeTimeout time.Duration
|
||||
queryMaxPeriods uint32
|
||||
}
|
||||
|
||||
// HandlerParams passes parameters to the Handler constructor NewHandler
|
||||
// Signer and TimestampProvider are mandatory parameters
|
||||
type HandlerParams struct {
|
||||
QueryMaxPeriods uint32
|
||||
}
|
||||
|
||||
// hashPool contains a pool of ready hashers
|
||||
var hashPool sync.Pool
|
||||
var minimumChunkLength int
|
||||
|
||||
// init initializes the package and hashPool
|
||||
func init() {
|
||||
hashPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return storage.MakeHashFunc(resourceHashAlgorithm)()
|
||||
},
|
||||
}
|
||||
if minimumMetadataLength < minimumUpdateDataLength {
|
||||
minimumChunkLength = minimumMetadataLength
|
||||
} else {
|
||||
minimumChunkLength = minimumUpdateDataLength
|
||||
}
|
||||
}
|
||||
|
||||
// NewHandler creates a new Mutable Resource API
|
||||
func NewHandler(params *HandlerParams) (*Handler, error) {
|
||||
|
||||
rh := &Handler{
|
||||
resources: make(map[uint64]*resource),
|
||||
storeTimeout: defaultStoreTimeout,
|
||||
queryMaxPeriods: params.QueryMaxPeriods,
|
||||
}
|
||||
|
||||
for i := 0; i < hasherCount; i++ {
|
||||
hashfunc := storage.MakeHashFunc(resourceHashAlgorithm)()
|
||||
if rh.HashSize == 0 {
|
||||
rh.HashSize = hashfunc.Size()
|
||||
}
|
||||
hashPool.Put(hashfunc)
|
||||
}
|
||||
|
||||
return rh, nil
|
||||
}
|
||||
|
||||
// SetStore sets the store backend for the Mutable Resource API
|
||||
func (h *Handler) SetStore(store *storage.NetStore) {
|
||||
h.chunkStore = store
|
||||
}
|
||||
|
||||
// Validate is a chunk validation method
|
||||
// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature
|
||||
// It implements the storage.ChunkValidator interface
|
||||
func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
|
||||
|
||||
dataLength := len(data)
|
||||
if dataLength < minimumChunkLength {
|
||||
return false
|
||||
}
|
||||
|
||||
//metadata chunks have the first two bytes set to zero
|
||||
if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength {
|
||||
//metadata chunk
|
||||
rootAddr, _ := metadataHash(data)
|
||||
valid := bytes.Equal(chunkAddr, rootAddr)
|
||||
if !valid {
|
||||
log.Debug(fmt.Sprintf("Invalid root metadata chunk with address: %s", chunkAddr.Hex()))
|
||||
}
|
||||
return valid
|
||||
}
|
||||
|
||||
// if it is not a metadata chunk, check if it is a properly formatted update chunk with
|
||||
// valid signature and proof of ownership of the resource it is trying
|
||||
// to update
|
||||
|
||||
// First, deserialize the chunk
|
||||
var r SignedResourceUpdate
|
||||
if err := r.fromChunk(chunkAddr, data); err != nil {
|
||||
log.Debug("Invalid resource chunk with address %s: %s ", chunkAddr.Hex(), err.Error())
|
||||
return false
|
||||
}
|
||||
|
||||
// check that the lookup information contained in the chunk matches the updateAddr (chunk search key)
|
||||
// that was used to retrieve this chunk
|
||||
// if this validation fails, someone forged a chunk.
|
||||
if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) {
|
||||
log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr %s", chunkAddr.Hex())
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify signatures and that the signer actually owns the resource
|
||||
// If it fails, it means either the signature is not valid, data is corrupted
|
||||
// or someone is trying to update someone else's resource.
|
||||
if err := r.Verify(); err != nil {
|
||||
log.Debug("Invalid signature: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// GetContent retrieves the data payload of the last synced update of the Mutable Resource
|
||||
func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) {
|
||||
rsrc := h.get(rootAddr)
|
||||
if rsrc == nil || !rsrc.isSynced() {
|
||||
return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
|
||||
}
|
||||
return rsrc.lastKey, rsrc.data, nil
|
||||
}
|
||||
|
||||
// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource
|
||||
func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) {
|
||||
rsrc := h.get(rootAddr)
|
||||
if rsrc == nil {
|
||||
return 0, NewError(ErrNotFound, " does not exist")
|
||||
} else if !rsrc.isSynced() {
|
||||
return 0, NewError(ErrNotSynced, " is not synced")
|
||||
}
|
||||
return rsrc.period, nil
|
||||
}
|
||||
|
||||
// GetVersion retrieves the version of the last synced update of the Mutable Resource
|
||||
func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) {
|
||||
rsrc := h.get(rootAddr)
|
||||
if rsrc == nil {
|
||||
return 0, NewError(ErrNotFound, " does not exist")
|
||||
} else if !rsrc.isSynced() {
|
||||
return 0, NewError(ErrNotSynced, " is not synced")
|
||||
}
|
||||
return rsrc.version, nil
|
||||
}
|
||||
|
||||
// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore
|
||||
func (h *Handler) chunkSize() int64 {
|
||||
return chunkSize
|
||||
}
|
||||
|
||||
// New creates a new metadata chunk out of the request passed in.
|
||||
func (h *Handler) New(ctx context.Context, request *Request) error {
|
||||
|
||||
// frequency 0 is invalid
|
||||
if request.metadata.Frequency == 0 {
|
||||
return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource")
|
||||
}
|
||||
|
||||
// make sure owner is set to something
|
||||
if request.metadata.Owner == zeroAddr {
|
||||
return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk")
|
||||
}
|
||||
|
||||
// create the meta chunk and store it in swarm
|
||||
chunk, metaHash, err := request.metadata.newChunk()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) ||
|
||||
request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Addr) {
|
||||
return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata")
|
||||
}
|
||||
|
||||
request.metaHash = metaHash
|
||||
request.rootAddr = chunk.Addr
|
||||
|
||||
h.chunkStore.Put(ctx, chunk)
|
||||
log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner)
|
||||
|
||||
// create the internal index for the resource and populate it with its metadata
|
||||
rsrc := &resource{
|
||||
resourceUpdate: resourceUpdate{
|
||||
updateHeader: updateHeader{
|
||||
UpdateLookup: UpdateLookup{
|
||||
rootAddr: chunk.Addr,
|
||||
},
|
||||
},
|
||||
},
|
||||
ResourceMetadata: request.metadata,
|
||||
updated: time.Now(),
|
||||
}
|
||||
h.set(chunk.Addr, rsrc)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to
|
||||
// just add the desired data and sign it.
|
||||
// The resulting structure can then be signed and passed to Handler.Update to be verified and sent
|
||||
func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) {
|
||||
|
||||
if rootAddr == nil {
|
||||
return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil")
|
||||
}
|
||||
|
||||
// Make sure we have a cache of the metadata chunk
|
||||
rsrc, err := h.Load(ctx, rootAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
now := TimestampProvider.Now()
|
||||
|
||||
updateRequest = new(Request)
|
||||
updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil {
|
||||
if err.(*Error).code != ErrNotFound {
|
||||
return nil, err
|
||||
}
|
||||
// not finding updates means that there is a network error
|
||||
// or that the resource really does not have updates in this period.
|
||||
}
|
||||
|
||||
updateRequest.multihash = rsrc.multihash
|
||||
updateRequest.rootAddr = rsrc.rootAddr
|
||||
updateRequest.metaHash = rsrc.metaHash
|
||||
updateRequest.metadata = rsrc.ResourceMetadata
|
||||
|
||||
// if we already have an update for this period then increment version
|
||||
// resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
|
||||
if h.hasUpdate(rootAddr, updateRequest.period) {
|
||||
updateRequest.version = rsrc.version + 1
|
||||
} else {
|
||||
updateRequest.version = 1
|
||||
}
|
||||
|
||||
return updateRequest, nil
|
||||
}
|
||||
|
||||
// Lookup retrieves a specific or latest version of the resource update with metadata chunk at params.Root
|
||||
// Lookup works differently depending on the configuration of `LookupParams`
|
||||
// See the `LookupParams` documentation and helper functions:
|
||||
// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion`
|
||||
// When looking for the latest update, it starts at the next period after the current time.
|
||||
// upon failure tries the corresponding keys of each previous period until one is found
|
||||
// (or startTime is reached, in which case there are no updates).
|
||||
func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) {
|
||||
|
||||
rsrc := h.get(params.rootAddr)
|
||||
if rsrc == nil {
|
||||
return nil, NewError(ErrNothingToReturn, "resource not loaded")
|
||||
}
|
||||
return h.lookup(rsrc, params)
|
||||
}
|
||||
|
||||
// LookupPrevious returns the resource before the one currently loaded in the resource cache
|
||||
// This is useful where resource updates are used incrementally in contrast to
|
||||
// merely replacing content.
|
||||
// Requires a cached resource object to determine the current state of the resource.
|
||||
func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) {
|
||||
rsrc := h.get(params.rootAddr)
|
||||
if rsrc == nil {
|
||||
return nil, NewError(ErrNothingToReturn, "resource not loaded")
|
||||
}
|
||||
if !rsrc.isSynced() {
|
||||
return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.")
|
||||
} else if rsrc.period == 0 {
|
||||
return nil, NewError(ErrNothingToReturn, " not found")
|
||||
}
|
||||
var version, period uint32
|
||||
if rsrc.version > 1 {
|
||||
version = rsrc.version - 1
|
||||
period = rsrc.period
|
||||
} else if rsrc.period == 1 {
|
||||
return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
|
||||
} else {
|
||||
version = 0
|
||||
period = rsrc.period - 1
|
||||
}
|
||||
return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit))
|
||||
}
|
||||
|
||||
// base code for public lookup methods
|
||||
func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) {
|
||||
|
||||
lp := *params
|
||||
// we can't look for anything without a store
|
||||
if h.chunkStore == nil {
|
||||
return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
|
||||
}
|
||||
|
||||
var specificperiod bool
|
||||
if lp.period > 0 {
|
||||
specificperiod = true
|
||||
} else {
|
||||
// get the current time and the next period
|
||||
now := TimestampProvider.Now()
|
||||
|
||||
var period uint32
|
||||
period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lp.period = period
|
||||
}
|
||||
|
||||
// start from the last possible period, and iterate previous ones
|
||||
// (unless we want a specific period only) until we find a match.
|
||||
// If we hit startTime we're out of options
|
||||
var specificversion bool
|
||||
if lp.version > 0 {
|
||||
specificversion = true
|
||||
} else {
|
||||
lp.version = 1
|
||||
}
|
||||
|
||||
var hops uint32
|
||||
if lp.Limit == 0 {
|
||||
lp.Limit = h.queryMaxPeriods
|
||||
}
|
||||
log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit)
|
||||
for lp.period > 0 {
|
||||
if lp.Limit != 0 && hops > lp.Limit {
|
||||
return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit)
|
||||
}
|
||||
updateAddr := lp.UpdateAddr()
|
||||
chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
|
||||
if err == nil {
|
||||
if specificversion {
|
||||
return h.updateIndex(rsrc, chunk)
|
||||
}
|
||||
// check if we have versions > 1. If a version fails, the previous version is used and returned.
|
||||
log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr)
|
||||
for {
|
||||
newversion := lp.version + 1
|
||||
updateAddr := lp.UpdateAddr()
|
||||
newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
|
||||
if err != nil {
|
||||
return h.updateIndex(rsrc, chunk)
|
||||
}
|
||||
chunk = newchunk
|
||||
lp.version = newversion
|
||||
log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr)
|
||||
}
|
||||
}
|
||||
if specificperiod {
|
||||
break
|
||||
}
|
||||
log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr)
|
||||
lp.period--
|
||||
hops++
|
||||
}
|
||||
return nil, NewError(ErrNotFound, "no updates found")
|
||||
}
|
||||
|
||||
// Load retrieves the Mutable Resource metadata chunk stored at rootAddr
|
||||
// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents
|
||||
func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) {
|
||||
chunk, err := h.chunkStore.GetWithTimeout(ctx, rootAddr, defaultRetrieveTimeout)
|
||||
if err != nil {
|
||||
return nil, NewError(ErrNotFound, err.Error())
|
||||
}
|
||||
|
||||
// create the index entry
|
||||
rsrc := &resource{}
|
||||
|
||||
if err := rsrc.ResourceMetadata.binaryGet(chunk.SData); err != nil { // Will fail if this is not really a metadata chunk
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.SData)
|
||||
if !bytes.Equal(rsrc.rootAddr, rootAddr) {
|
||||
return nil, NewError(ErrCorruptData, "Corrupt metadata chunk")
|
||||
}
|
||||
h.set(rootAddr, rsrc)
|
||||
log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency)
|
||||
return rsrc, nil
|
||||
}
|
||||
|
||||
// update mutable resource index map with specified content
|
||||
func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
|
||||
|
||||
// retrieve metadata from chunk data and check that it matches this mutable resource
|
||||
var r SignedResourceUpdate
|
||||
if err := r.fromChunk(chunk.Addr, chunk.SData); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Addr, "period", r.period, "version", r.version)
|
||||
|
||||
// update our rsrcs entry map
|
||||
rsrc.lastKey = chunk.Addr
|
||||
rsrc.period = r.period
|
||||
rsrc.version = r.version
|
||||
rsrc.updated = time.Now()
|
||||
rsrc.data = make([]byte, len(r.data))
|
||||
rsrc.multihash = r.multihash
|
||||
copy(rsrc.data, r.data)
|
||||
rsrc.Reader = bytes.NewReader(rsrc.data)
|
||||
log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Addr, "period", rsrc.period, "version", rsrc.version)
|
||||
h.set(chunk.Addr, rsrc)
|
||||
return rsrc, nil
|
||||
}
|
||||
|
||||
// Update adds an actual data update
|
||||
// Uses the Mutable Resource metadata currently loaded in the resources map entry.
|
||||
// It is the caller's responsibility to make sure that this data is not stale.
|
||||
// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit.
|
||||
// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
|
||||
// on the network.
|
||||
func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) {
|
||||
return h.update(ctx, r)
|
||||
}
|
||||
|
||||
// create and commit an update
|
||||
func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) {
|
||||
|
||||
// we can't update anything without a store
|
||||
if h.chunkStore == nil {
|
||||
return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
|
||||
}
|
||||
|
||||
rsrc := h.get(r.rootAddr)
|
||||
if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure
|
||||
rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks
|
||||
|
||||
return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist")
|
||||
}
|
||||
|
||||
chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// send the chunk
|
||||
h.chunkStore.Put(ctx, chunk)
|
||||
log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.SData, "multihash", r.multihash)
|
||||
|
||||
// update our resources map entry if the new update is newer than the one we have, if we have one.
|
||||
if rsrc != nil && r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version) {
|
||||
rsrc.period = r.period
|
||||
rsrc.version = r.version
|
||||
rsrc.data = make([]byte, len(r.data))
|
||||
rsrc.updated = time.Now()
|
||||
rsrc.lastKey = r.updateAddr
|
||||
rsrc.multihash = r.multihash
|
||||
copy(rsrc.data, r.data)
|
||||
rsrc.Reader = bytes.NewReader(rsrc.data)
|
||||
}
|
||||
return r.updateAddr, nil
|
||||
}
|
||||
|
||||
// Retrieves the resource index value for the given rootAddr
|
||||
func (h *Handler) get(rootAddr storage.Address) *resource {
|
||||
if len(rootAddr) < storage.KeyLength {
|
||||
log.Warn("Handler.get with invalid rootAddr")
|
||||
return nil
|
||||
}
|
||||
hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
|
||||
h.resourceLock.RLock()
|
||||
defer h.resourceLock.RUnlock()
|
||||
rsrc := h.resources[hashKey]
|
||||
return rsrc
|
||||
}
|
||||
|
||||
// Sets the resource index value for the given rootAddr
|
||||
func (h *Handler) set(rootAddr storage.Address, rsrc *resource) {
|
||||
if len(rootAddr) < storage.KeyLength {
|
||||
log.Warn("Handler.set with invalid rootAddr")
|
||||
return
|
||||
}
|
||||
hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
|
||||
h.resourceLock.Lock()
|
||||
defer h.resourceLock.Unlock()
|
||||
h.resources[hashKey] = rsrc
|
||||
}
|
||||
|
||||
// Checks if we already have an update on this resource, according to the value in the current state of the resource index
|
||||
func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool {
|
||||
rsrc := h.get(rootAddr)
|
||||
return rsrc != nil && rsrc.period == period
|
||||
}
|
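A hedged sketch of the retrieval side of this API from outside the package; netStore, ctx and rootAddr are placeholders, and creating or signing updates (via Request and SignedResourceUpdate) is omitted:

h, err := mru.NewHandler(&mru.HandlerParams{})
if err != nil {
	return err
}
h.SetStore(netStore) // lookups and updates fail with ErrInit without a store

// Cache the metadata chunk, then walk periods back from "now" for the latest update.
if _, err := h.Load(ctx, rootAddr); err != nil {
	return err
}
if _, err := h.Lookup(ctx, mru.LookupLatest(rootAddr)); err != nil {
	return err
}

// Read the payload of the last synced update.
lastKey, data, err := h.GetContent(rootAddr)
if err != nil {
	return err
}
_, _ = lastKey, data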
117
swarm/storage/mru/lookup.go
Normal file
@ -0,0 +1,117 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package mru
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
)
|
||||
|
||||
// LookupParams is used to specify constraints when performing an update lookup
|
||||
// Limit defines the maximum number of period hops the lookup may perform.
// If Limit is 0, the handler's default (queryMaxPeriods) is used.
|
||||
type LookupParams struct {
|
||||
UpdateLookup
|
||||
Limit uint32
|
||||
}
|
||||
|
||||
// RootAddr returns the metadata chunk address
|
||||
func (r *LookupParams) RootAddr() storage.Address {
|
||||
return r.rootAddr
|
||||
}
|
||||
|
||||
func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams {
|
||||
return &LookupParams{
|
||||
UpdateLookup: UpdateLookup{
|
||||
period: period,
|
||||
version: version,
|
||||
rootAddr: rootAddr,
|
||||
},
|
||||
Limit: limit,
|
||||
}
|
||||
}
|
||||
|
||||
// LookupLatest generates lookup parameters that look for the latest version of a resource
|
||||
func LookupLatest(rootAddr storage.Address) *LookupParams {
|
||||
return NewLookupParams(rootAddr, 0, 0, 0)
|
||||
}
|
||||
|
||||
// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period
|
||||
func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams {
|
||||
return NewLookupParams(rootAddr, period, 0, 0)
|
||||
}
|
||||
|
||||
// LookupVersion generates lookup parameters that look for a specific version of a resource
|
||||
func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams {
|
||||
return NewLookupParams(rootAddr, period, version, 0)
|
||||
}
|
||||
|
||||
// UpdateLookup represents the components of a resource update search key
|
||||
type UpdateLookup struct {
|
||||
period uint32
|
||||
version uint32
|
||||
rootAddr storage.Address
|
||||
}
|
||||
|
||||
// 4 bytes period
|
||||
// 4 bytes version
|
||||
// storage.Keylength for rootAddr
|
||||
const updateLookupLength = 4 + 4 + storage.KeyLength
|
||||
|
||||
// UpdateAddr calculates the resource update chunk address corresponding to this lookup key
|
||||
func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) {
|
||||
serializedData := make([]byte, updateLookupLength)
|
||||
u.binaryPut(serializedData)
|
||||
hasher := hashPool.Get().(hash.Hash)
|
||||
defer hashPool.Put(hasher)
|
||||
hasher.Reset()
|
||||
hasher.Write(serializedData)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
// binaryPut serializes this UpdateLookup instance into the provided slice
|
||||
func (u *UpdateLookup) binaryPut(serializedData []byte) error {
|
||||
if len(serializedData) != updateLookupLength {
|
||||
return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
|
||||
}
|
||||
if len(u.rootAddr) != storage.KeyLength {
|
||||
return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set")
|
||||
}
|
||||
binary.LittleEndian.PutUint32(serializedData[:4], u.period)
|
||||
binary.LittleEndian.PutUint32(serializedData[4:8], u.version)
|
||||
copy(serializedData[8:], u.rootAddr[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// binaryLength returns the expected size of this structure when serialized
|
||||
func (u *UpdateLookup) binaryLength() int {
|
||||
return updateLookupLength
|
||||
}
|
||||
|
||||
// binaryGet restores the current instance from the information contained in the passed slice
|
||||
func (u *UpdateLookup) binaryGet(serializedData []byte) error {
|
||||
if len(serializedData) != updateLookupLength {
|
||||
return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
|
||||
}
|
||||
u.period = binary.LittleEndian.Uint32(serializedData[:4])
|
||||
u.version = binary.LittleEndian.Uint32(serializedData[4:8])
|
||||
u.rootAddr = storage.Address(make([]byte, storage.KeyLength))
|
||||
copy(u.rootAddr[:], serializedData[8:])
|
||||
return nil
|
||||
}
|
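For orientation, the three lookup constructors above simply fill in the UpdateLookup constraints in different ways. A minimal usage sketch, assuming an already-known rootAddr and only the exported API shown in this diff (the period/version numbers are arbitrary examples):

package main

import (
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// buildLookups shows the three ways a client can constrain an update lookup.
func buildLookups(rootAddr storage.Address) []*mru.LookupParams {
	return []*mru.LookupParams{
		mru.LookupLatest(rootAddr),                    // latest version in the latest period
		mru.LookupLatestVersionInPeriod(rootAddr, 42), // latest version within period 42
		mru.LookupVersion(rootAddr, 42, 3),            // exactly period 42, version 3
	}
}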
swarm/storage/mru/lookup_test.go (new file, 85 lines)
@ -0,0 +1,85 @@
package mru

import (
	"bytes"
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func getTestUpdateLookup() *UpdateLookup {
	metadata := *getTestMetadata()
	rootAddr, _, _, _ := metadata.serializeAndHash()
	return &UpdateLookup{
		period:   79,
		version:  2010,
		rootAddr: rootAddr,
	}
}

func compareUpdateLookup(a, b *UpdateLookup) bool {
	return a.version == b.version &&
		a.period == b.period &&
		bytes.Equal(a.rootAddr, b.rootAddr)
}

func TestUpdateLookupUpdateAddr(t *testing.T) {
	ul := getTestUpdateLookup()
	updateAddr := ul.UpdateAddr()
	compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0")
}

func TestUpdateLookupSerializer(t *testing.T) {
	serializedUpdateLookup := make([]byte, updateLookupLength)
	ul := getTestUpdateLookup()
	if err := ul.binaryPut(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")

	// set receiving slice to the wrong size
	serializedUpdateLookup = make([]byte, updateLookupLength+7)
	if err := ul.binaryPut(serializedUpdateLookup); err == nil {
		t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength)
	}

	// set rootAddr to an invalid length
	ul.rootAddr = []byte{1, 2, 3, 4}
	serializedUpdateLookup = make([]byte, updateLookupLength)
	if err := ul.binaryPut(serializedUpdateLookup); err == nil {
		t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size")
	}
}

func TestUpdateLookupDeserializer(t *testing.T) {
	serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")
	var recoveredUpdateLookup UpdateLookup
	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	originalUpdateLookup := *getTestUpdateLookup()
	if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) {
		t.Fatalf("Expected recovered UpdateLookup to match")
	}

	// set source slice to the wrong size
	serializedUpdateLookup = make([]byte, updateLookupLength+4)
	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil {
		t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength)
	}
}

func TestUpdateLookupSerializeDeserialize(t *testing.T) {
	serializedUpdateLookup := make([]byte, updateLookupLength)
	originalUpdateLookup := getTestUpdateLookup()
	if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	var recoveredUpdateLookup UpdateLookup
	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) {
		t.Fatalf("Expected recovered UpdateLookup to match")
	}
}
swarm/storage/mru/metadata.go (new file, 189 lines)
@ -0,0 +1,189 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"encoding/binary"
	"hash"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// ResourceMetadata encapsulates the immutable information about a mutable resource :)
// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr
type ResourceMetadata struct {
	StartTime Timestamp      // time at which the resource starts to be valid
	Frequency uint64         // expected update frequency for the resource
	Name      string         // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr
	Owner     common.Address // public address of the resource owner
}

const frequencyLength = 8 // sizeof(uint64)
const nameLengthLength = 1

// Resource metadata chunk layout:
// 4 prefix bytes (chunkPrefixLength). The first two set to zero. The second two indicate the length
// Timestamp: timestampLength bytes
// frequency: frequencyLength bytes
// name length: nameLengthLength bytes
// name (variable length, can be empty, up to 255 bytes)
// ownerAddr: common.AddressLength
const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength

// binaryGet populates the resource metadata from a byte array
func (r *ResourceMetadata) binaryGet(serializedData []byte) error {
	if len(serializedData) < minimumMetadataLength {
		return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData))
	}

	// first two bytes must be set to zero to indicate metadata chunks, so enforce this.
	if serializedData[0] != 0 || serializedData[1] != 0 {
		return NewError(ErrCorruptData, "Invalid metadata chunk")
	}

	cursor := 2
	metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes
	if metadataLength+chunkPrefixLength != len(serializedData) {
		return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData))
	}

	cursor += 2

	if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil {
		return err
	}
	cursor += timestampLength

	r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength])
	cursor += frequencyLength

	nameLength := int(serializedData[cursor])
	if nameLength+minimumMetadataLength > len(serializedData) {
		return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData))
	}
	cursor++
	r.Name = string(serializedData[cursor : cursor+nameLength])
	cursor += nameLength

	copy(r.Owner[:], serializedData[cursor:])
	cursor += common.AddressLength
	if cursor != len(serializedData) {
		return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. %d left to read", len(serializedData)-cursor)
	}
	return nil
}

// binaryPut encodes the metadata into a byte array
func (r *ResourceMetadata) binaryPut(serializedData []byte) error {
	metadataChunkLength := r.binaryLength()
	if len(serializedData) != metadataChunkLength {
		return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData))
	}

	// root chunk has first two bytes both set to 0, which distinguishes from update bytes
	// therefore, skip the first two bytes of a zero-initialized array.
	cursor := 2
	binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes
	cursor += 2

	r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength])
	cursor += timestampLength

	binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency)
	cursor += frequencyLength

	// Encode the name string as a 1 byte length followed by the encoded string.
	// Longer strings will be truncated.
	nameLength := len(r.Name)
	if nameLength > 255 {
		nameLength = 255
	}
	serializedData[cursor] = uint8(nameLength)
	cursor++
	copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength]))
	cursor += nameLength

	copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:])
	cursor += common.AddressLength

	return nil
}

func (r *ResourceMetadata) binaryLength() int {
	return minimumMetadataLength + len(r.Name)
}

// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource
// returns the serialized metadata as a byproduct of having to hash it.
func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) {

	chunkData = make([]byte, r.binaryLength())
	if err := r.binaryPut(chunkData); err != nil {
		return nil, nil, nil, err
	}
	rootAddr, metaHash = metadataHash(chunkData)
	return rootAddr, metaHash, chunkData, nil

}

// creates a metadata chunk out of a resourceMetadata structure
func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []byte, err error) {
	// the metadata chunk contains a timestamp of when the resource starts to be valid
	// and also how frequently it is expected to be updated
	// from this we know at what time we should look for updates, and how often
	// it also contains the name of the resource, so we know what resource we are working with

	// the key (rootAddr) of the metadata chunk is content-addressed
	// if it wasn't we couldn't replace it later
	// resolving this relationship is left up to external agents (for example ENS)
	rootAddr, metaHash, chunkData, err := metadata.serializeAndHash()
	if err != nil {
		return nil, nil, err
	}

	// make the chunk and send it to swarm
	chunk = storage.NewChunk(rootAddr, nil)
	chunk.SData = chunkData
	chunk.Size = int64(len(chunkData))

	return chunk, metaHash, nil
}

// metadataHash returns the metadata chunk root address and metadata hash
// that help identify and ascertain ownership of this resource
// We compute it as rootAddr = H(ownerAddr, H(metadata))
// Where H() is SHA3
// metadata are all the metadata fields, except ownerAddr
// ownerAddr is the public address of the resource owner
// Update chunks must carry a rootAddr reference and metaHash in order to be verified
// This way, a node that receives an update can check the signature, recover the public address
// and check the ownership by computing H(ownerAddr, metaHash) and comparing it to the rootAddr
// the resource is claiming to update without having to lookup the metadata chunk.
// see verifyOwner in signedupdate.go
func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) {
	hasher := hashPool.Get().(hash.Hash)
	defer hashPool.Put(hasher)
	hasher.Reset()
	hasher.Write(chunkData[:len(chunkData)-common.AddressLength])
	metaHash = hasher.Sum(nil)
	hasher.Reset()
	hasher.Write(metaHash)
	hasher.Write(chunkData[len(chunkData)-common.AddressLength:])
	rootAddr = hasher.Sum(nil)
	return
}
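The ownership relation described in the metadataHash comment can be reproduced in isolation. A minimal sketch, assuming the hasher behind hashPool is Keccak-256 (the real code takes whatever hash resourceHashAlgorithm selects, so treat the hash choice here as an assumption); the input values are purely illustrative:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// ownsResource mirrors the check in verifyOwner (signedupdate.go): an update is
// only accepted if H(metaHash, ownerAddr) equals the rootAddr it claims to update.
func ownsResource(owner common.Address, metaHash, rootAddr []byte) bool {
	return bytes.Equal(crypto.Keccak256(metaHash, owner.Bytes()), rootAddr)
}

func main() {
	// hypothetical values, for illustration only
	metaHash := crypto.Keccak256([]byte("serialized metadata minus owner"))
	owner := common.HexToAddress("0x0000000000000000000000000000000000000001")
	rootAddr := crypto.Keccak256(metaHash, owner.Bytes())
	fmt.Println(ownsResource(owner, metaHash, rootAddr)) // true
}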
swarm/storage/mru/metadata_test.go (new file, 126 lines)
@ -0,0 +1,126 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package mru

import (
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) {
	if hexutil.Encode(actualValue) != expectedHex {
		t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue))
	}
}

func getTestMetadata() *ResourceMetadata {
	return &ResourceMetadata{
		Name: "world news report, every hour, on the hour",
		StartTime: Timestamp{
			Time: 1528880400,
		},
		Frequency: 3600,
		Owner:     newCharlieSigner().Address(),
	}
}

func TestMetadataSerializerDeserializer(t *testing.T) {
	metadata := *getTestMetadata()

	rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go
	if err != nil {
		t.Fatal(err)
	}
	const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb"
	const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0"
	const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c"

	compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr)
	compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash)
	compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData)

	recoveredMetadata := ResourceMetadata{}
	recoveredMetadata.binaryGet(chunkData)

	if recoveredMetadata != metadata {
		t.Fatalf("Expected that the recovered metadata equals the marshalled metadata")
	}

	// we are going to mess with the data, so create a backup to go back to it for the next test
	backup := make([]byte, len(chunkData))
	copy(backup, chunkData)

	chunkData = []byte{1, 2, 3}
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since chunk is too small")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with the prefix so it is not zero
	chunkData[0] = 7
	chunkData[1] = 9

	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since prefix bytes are not zero")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with the length header to trigger an error
	chunkData[2] = 255
	chunkData[3] = 44
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since header length does not match")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with name length header to trigger a chunk too short error
	chunkData[20] = 255
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since name length is incorrect")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with name length header to trigger a leftover-bytes-to-read error
	chunkData[20] = 3
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since name length is too small")
	}
}

func TestMetadataSerializerLengthCheck(t *testing.T) {
	metadata := *getTestMetadata()

	// make a slice that is too small to contain the metadata
	serializedMetadata := make([]byte, 4)

	if err := metadata.binaryPut(serializedMetadata); err == nil {
		t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small")
	}

}
swarm/storage/mru/request.go (new file, 297 lines)
@ -0,0 +1,297 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"bytes"
	"encoding/json"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// updateRequestJSON represents a JSON-serialized UpdateRequest
type updateRequestJSON struct {
	Name      string `json:"name,omitempty"`
	Frequency uint64 `json:"frequency,omitempty"`
	StartTime uint64 `json:"startTime,omitempty"`
	Owner     string `json:"ownerAddr,omitempty"`
	RootAddr  string `json:"rootAddr,omitempty"`
	MetaHash  string `json:"metaHash,omitempty"`
	Version   uint32 `json:"version,omitempty"`
	Period    uint32 `json:"period,omitempty"`
	Data      string `json:"data,omitempty"`
	Multihash bool   `json:"multiHash"`
	Signature string `json:"signature,omitempty"`
}

// Request represents an update and/or resource create message
type Request struct {
	SignedResourceUpdate
	metadata ResourceMetadata
	isNew    bool
}

var zeroAddr = common.Address{}

// NewCreateUpdateRequest returns a ready to sign request to create and initialize a resource with data
func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) {

	request, err := NewCreateRequest(metadata)
	if err != nil {
		return nil, err
	}

	// get the current time
	now := TimestampProvider.Now().Time

	request.version = 1
	request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency)
	if err != nil {
		return nil, err
	}
	return request, nil
}

// NewCreateRequest returns a request to create a new resource
func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) {
	if metadata.StartTime.Time == 0 { // get the current time
		metadata.StartTime = TimestampProvider.Now()
	}

	if metadata.Owner == zeroAddr {
		return nil, NewError(ErrInvalidValue, "OwnerAddr is not set")
	}

	request = &Request{
		metadata: *metadata,
	}
	request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash()
	request.isNew = true
	return request, nil
}

// Frequency returns the resource's expected update frequency
func (r *Request) Frequency() uint64 {
	return r.metadata.Frequency
}

// Name returns the resource human-readable name
func (r *Request) Name() string {
	return r.metadata.Name
}

// Multihash returns true if the resource data should be interpreted as a multihash
func (r *Request) Multihash() bool {
	return r.multihash
}

// Period returns in which period the resource will be published
func (r *Request) Period() uint32 {
	return r.period
}

// Version returns the resource version to publish
func (r *Request) Version() uint32 {
	return r.version
}

// RootAddr returns the metadata chunk address
func (r *Request) RootAddr() storage.Address {
	return r.rootAddr
}

// StartTime returns the time that the resource was/will be created at
func (r *Request) StartTime() Timestamp {
	return r.metadata.StartTime
}

// Owner returns the resource owner's address
func (r *Request) Owner() common.Address {
	return r.metadata.Owner
}

// Sign executes the signature to validate the resource and sets the owner address field
func (r *Request) Sign(signer Signer) error {
	if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() {
		return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource")
	}

	if err := r.SignedResourceUpdate.Sign(signer); err != nil {
		return err
	}
	r.metadata.Owner = signer.Address()
	return nil
}

// SetData stores the payload data the resource will be updated with
func (r *Request) SetData(data []byte, multihash bool) {
	r.data = data
	r.multihash = multihash
	r.signature = nil
	if !r.isNew {
		r.metadata.Frequency = 0 // mark as update
	}
}

func (r *Request) IsNew() bool {
	return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1)
}

func (r *Request) IsUpdate() bool {
	return r.signature != nil
}

// fromJSON takes an update request JSON and populates an UpdateRequest
func (r *Request) fromJSON(j *updateRequestJSON) error {

	r.version = j.Version
	r.period = j.Period
	r.multihash = j.Multihash
	r.metadata.Name = j.Name
	r.metadata.Frequency = j.Frequency
	r.metadata.StartTime.Time = j.StartTime

	if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil {
		return err
	}

	var err error
	if j.Data != "" {
		r.data, err = hexutil.Decode(j.Data)
		if err != nil {
			return NewError(ErrInvalidValue, "Cannot decode data")
		}
	}

	var declaredRootAddr storage.Address
	var declaredMetaHash []byte

	declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.KeyLength, "rootAddr")
	if err != nil {
		return err
	}
	declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash")
	if err != nil {
		return err
	}

	if r.IsNew() {
		// for new resource creation, rootAddr and metaHash are optional because
		// we can derive them from the content itself.
		// however, if the user sent them, we check them for consistency.

		r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash()
		if err != nil {
			return err
		}
		if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) {
			return NewError(ErrInvalidValue, "rootAddr does not match resource metadata")
		}
		if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) {
			return NewError(ErrInvalidValue, "metaHash does not match resource metadata")
		}

	} else {
		//Update message
		r.rootAddr = declaredRootAddr
		r.metaHash = declaredMetaHash
	}

	if j.Signature != "" {
		sigBytes, err := hexutil.Decode(j.Signature)
		if err != nil || len(sigBytes) != signatureLength {
			return NewError(ErrInvalidSignature, "Cannot decode signature")
		}
		r.signature = new(Signature)
		r.updateAddr = r.UpdateAddr()
		copy(r.signature[:], sigBytes)
	}
	return nil
}

func decodeHexArray(dst []byte, src, name string) error {
	bytes, err := decodeHexSlice(src, len(dst), name)
	if err != nil {
		return err
	}
	if bytes != nil {
		copy(dst, bytes)
	}
	return nil
}

func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) {
	if src != "" {
		bytes, err = hexutil.Decode(src)
		if err != nil || len(bytes) != expectedLength {
			return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name)
		}
	}
	return bytes, nil
}

// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object
// Implements json.Unmarshaler interface
func (r *Request) UnmarshalJSON(rawData []byte) error {
	var requestJSON updateRequestJSON
	if err := json.Unmarshal(rawData, &requestJSON); err != nil {
		return err
	}
	return r.fromJSON(&requestJSON)
}

// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array
// Implements json.Marshaler interface
func (r *Request) MarshalJSON() (rawData []byte, err error) {
	var signatureString, dataHashString, rootAddrString, metaHashString string
	if r.signature != nil {
		signatureString = hexutil.Encode(r.signature[:])
	}
	if r.data != nil {
		dataHashString = hexutil.Encode(r.data)
	}
	if r.rootAddr != nil {
		rootAddrString = hexutil.Encode(r.rootAddr)
	}
	if r.metaHash != nil {
		metaHashString = hexutil.Encode(r.metaHash)
	}
	var ownerAddrString string
	if r.metadata.Frequency == 0 {
		ownerAddrString = ""
	} else {
		ownerAddrString = hexutil.Encode(r.metadata.Owner[:])
	}

	requestJSON := &updateRequestJSON{
		Name:      r.metadata.Name,
		Frequency: r.metadata.Frequency,
		StartTime: r.metadata.StartTime.Time,
		Version:   r.version,
		Period:    r.period,
		Owner:     ownerAddrString,
		Data:      dataHashString,
		Multihash: r.multihash,
		Signature: signatureString,
		RootAddr:  rootAddrString,
		MetaHash:  metaHashString,
	}

	return json.Marshal(requestJSON)
}
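The test file that follows exercises this API end to end; as a quick orientation, here is a minimal client-side sketch built only from the exported functions shown in this diff (the resource name is hypothetical, and error handling is reduced to panics for brevity):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	key, _ := crypto.GenerateKey()
	signer := mru.NewGenericSigner(key)

	// build an unsigned creation request and attach the first update's payload
	req, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
		Name:      "example resource", // hypothetical name
		Frequency: 3600,
		Owner:     signer.Address(),
	})
	if err != nil {
		panic(err)
	}
	req.SetData([]byte("first update"), false)

	// the request can travel as JSON to wherever the private key lives, be signed there...
	if err := req.Sign(signer); err != nil {
		panic(err)
	}
	raw, _ := json.Marshal(req)
	fmt.Println(string(raw)) // ...and the signed JSON is what is then submitted over the HTTP API
}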
swarm/storage/mru/request_test.go (new file, 175 lines)
@ -0,0 +1,175 @@
package mru

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"reflect"
	"testing"
)

func areEqualJSON(s1, s2 string) (bool, error) {
	//credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67
	var o1 interface{}
	var o2 interface{}

	err := json.Unmarshal([]byte(s1), &o1)
	if err != nil {
		return false, fmt.Errorf("Error marshalling string 1 :: %s", err.Error())
	}
	err = json.Unmarshal([]byte(s2), &o2)
	if err != nil {
		return false, fmt.Errorf("Error marshalling string 2 :: %s", err.Error())
	}

	return reflect.DeepEqual(o1, o2), nil
}

// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly
// while also checking cryptographically that only the owner of a resource can update it.
func TestEncodingDecodingUpdateRequests(t *testing.T) {

	signer := newCharlieSigner()  //Charlie, our good guy
	falseSigner := newBobSigner() //Bob will play the bad guy again

	// Create a resource in our good guy Charlie's name
	createRequest, err := NewCreateRequest(&ResourceMetadata{
		Name:      "a good resource name",
		Frequency: 300,
		StartTime: Timestamp{Time: 1528900000},
		Owner:     signer.Address()})

	if err != nil {
		t.Fatalf("Error creating resource name: %s", err)
	}

	// We now encode the create message to simulate we send it over the wire
	messageRawData, err := createRequest.MarshalJSON()
	if err != nil {
		t.Fatalf("Error encoding create resource request: %s", err)
	}

	// ... the message arrives and is decoded...
	var recoveredCreateRequest Request
	if err := recoveredCreateRequest.UnmarshalJSON(messageRawData); err != nil {
		t.Fatalf("Error decoding create resource request: %s", err)
	}

	// ... but verification should fail because it is not signed!
	if err := recoveredCreateRequest.Verify(); err == nil {
		t.Fatal("Expected Verify to fail since the message is not signed")
	}

	// We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata
	// and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct
	// proof of ownership

	metaHash := createRequest.metaHash
	rootAddr := createRequest.rootAddr
	const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101"
	const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}`

	//Put together an unsigned update request that we will serialize to send it to the signer.
	data := []byte("This hour's update: Swarm 99.0 has been released!")
	request := &Request{
		SignedResourceUpdate: SignedResourceUpdate{
			resourceUpdate: resourceUpdate{
				updateHeader: updateHeader{
					UpdateLookup: UpdateLookup{
						period:   7,
						version:  1,
						rootAddr: rootAddr,
					},
					multihash: false,
					metaHash:  metaHash,
				},
				data: data,
			},
		},
	}

	messageRawData, err = request.MarshalJSON()
	if err != nil {
		t.Fatalf("Error encoding update request: %s", err)
	}

	equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON)
	if err != nil {
		t.Fatalf("Error decoding update request JSON: %s", err)
	}
	if !equalJSON {
		t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData))
	}

	// now the encoded message messageRawData is sent over the wire and arrives to the signer

	//Attempt to extract an UpdateRequest out of the encoded message
	var recoveredRequest Request
	if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
		t.Fatalf("Error decoding update request: %s", err)
	}

	//sign the request and see if it matches our predefined signature above.
	if err := recoveredRequest.Sign(signer); err != nil {
		t.Fatalf("Error signing request: %s", err)
	}

	compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature)

	// mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON
	// to alter the signature field.
	var j updateRequestJSON
	if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil {
		t.Fatal("Error unmarshalling test json, check expectedJSON constant")
	}
	j.Signature = "Certainly not a signature"
	corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature
	var corruptRequest Request
	if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil {
		t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
	}

	// Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource,
	// signing a message with his private key
	if err := request.Sign(falseSigner); err != nil {
		t.Fatalf("Error signing: %s", err)
	}

	// Now Bob encodes the message to send it over the wire...
	messageRawData, err = request.MarshalJSON()
	if err != nil {
		t.Fatalf("Error encoding message:%s", err)
	}

	// ... the message arrives at our Swarm node and it is decoded.
	recoveredRequest = Request{}
	if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
		t.Fatalf("Error decoding message:%s", err)
	}

	// Before discovering Bob's misdemeanor, let's see what would happen if we mess
	// with the signature big time to see if Verify catches it
	savedSignature := *recoveredRequest.signature                               // save the signature for later
	binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature
	if err = recoveredRequest.Verify(); err == nil {
		t.Fatal("Expected Verify to fail on corrupt signature")
	}

	// restore the Evil Bob's signature from corruption
	*recoveredRequest.signature = savedSignature

	// Now the signature is not corrupt, however Verify should now fail because Bob doesn't own the resource
	if err = recoveredRequest.Verify(); err == nil {
		t.Fatalf("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker:%s", err)
	}

	// Sign with our friend Charlie's private key
	if err := recoveredRequest.Sign(signer); err != nil {
		t.Fatalf("Error signing with the correct private key: %s", err)
	}

	// And now, Verify should work since this resource belongs to Charlie
	if err = recoveredRequest.Verify(); err != nil {
		t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err)
	}
}
File diff suppressed because it is too large.
@ -23,20 +23,44 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
)

// Signs resource updates
const signatureLength = 65

// Signature is an alias for a static byte array with the size of a signature
type Signature [signatureLength]byte

// Signer signs Mutable Resource update payloads
type Signer interface {
	Sign(common.Hash) (Signature, error)
	Address() common.Address
}

// GenericSigner implements the Signer interface
// It is the vanilla signer that probably should be used in most cases
type GenericSigner struct {
	PrivKey *ecdsa.PrivateKey
	address common.Address
}

func (self *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
	signaturebytes, err := crypto.Sign(data.Bytes(), self.PrivKey)
// NewGenericSigner builds a signer that will sign everything with the provided private key
func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner {
	return &GenericSigner{
		PrivKey: privKey,
		address: crypto.PubkeyToAddress(privKey.PublicKey),
	}
}

// Sign signs the supplied data
// It wraps the ethereum crypto.Sign() method
func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
	signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey)
	if err != nil {
		return
	}
	copy(signature[:], signaturebytes)
	return
}

// Address returns the public address that corresponds to the signer's private key
func (s *GenericSigner) Address() common.Address {
	return s.address
}
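A short usage sketch of the signer shown above. Only exported calls from this diff plus standard go-ethereum crypto helpers are used; the key and the digest are generated on the spot purely for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	key, err := crypto.GenerateKey() // throwaway key, for illustration only
	if err != nil {
		panic(err)
	}
	signer := mru.NewGenericSigner(key)

	digest := common.BytesToHash(crypto.Keccak256([]byte("some update digest"))) // hypothetical digest
	sig, err := signer.Sign(digest)
	if err != nil {
		panic(err)
	}
	fmt.Printf("owner %s produced a %d-byte signature\n", signer.Address().Hex(), len(sig))
}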
File diff suppressed because it is too large.
swarm/storage/mru/signedupdate.go (new file, 184 lines)
@ -0,0 +1,184 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"bytes"
	"hash"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource
type SignedResourceUpdate struct {
	resourceUpdate // actual content that will be put on the chunk, less signature
	signature      *Signature
	updateAddr     storage.Address // resulting chunk address for the update (not serialized, for internal use)
	binaryData     []byte          // resulting serialized data (not serialized, for efficiency/internal use)
}

// Verify checks that signatures are valid and that the signer owns the resource to be updated
func (r *SignedResourceUpdate) Verify() (err error) {
	if len(r.data) == 0 {
		return NewError(ErrInvalidValue, "Update does not contain data")
	}
	if r.signature == nil {
		return NewError(ErrInvalidSignature, "Missing signature field")
	}

	digest, err := r.GetDigest()
	if err != nil {
		return err
	}

	// get the address of the signer (which also checks that it's a valid signature)
	ownerAddr, err := getOwner(digest, *r.signature)
	if err != nil {
		return err
	}

	if !bytes.Equal(r.updateAddr, r.UpdateAddr()) {
		return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr")
	}

	// Check if who signed the resource update really owns the resource
	if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) {
		return NewErrorf(ErrUnauthorized, "signature is valid but signer does not own the resource: %v", err)
	}

	return nil
}

// Sign executes the signature to validate the resource
func (r *SignedResourceUpdate) Sign(signer Signer) error {

	r.binaryData = nil           //invalidate serialized data
	digest, err := r.GetDigest() // computes digest and serializes into .binaryData
	if err != nil {
		return err
	}

	signature, err := signer.Sign(digest)
	if err != nil {
		return err
	}

	// Although the Signer interface returns the public address of the signer,
	// recover it from the signature to see if they match
	ownerAddress, err := getOwner(digest, signature)
	if err != nil {
		return NewError(ErrInvalidSignature, "Error verifying signature")
	}

	if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign!
		return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr")
	}

	r.signature = &signature
	r.updateAddr = r.UpdateAddr()
	return nil
}

// create an update chunk.
func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) {

	// Check that the update is signed and serialized
	// For efficiency, data is serialized during signature and cached in
	// the binaryData field when computing the signature digest in .getDigest()
	if r.signature == nil || r.binaryData == nil {
		return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.")
	}

	chunk := storage.NewChunk(r.updateAddr, nil)
	resourceUpdateLength := r.resourceUpdate.binaryLength()
	chunk.SData = r.binaryData

	// signature is the last item in the chunk data
	copy(chunk.SData[resourceUpdateLength:], r.signature[:])

	chunk.Size = int64(len(chunk.SData))
	return chunk, nil
}

// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
	// for update chunk layout see SignedResourceUpdate definition

	//deserialize the resource update portion
	if err := r.resourceUpdate.binaryGet(chunkdata); err != nil {
		return err
	}

	// Extract the signature
	var signature *Signature
	cursor := r.resourceUpdate.binaryLength()
	sigdata := chunkdata[cursor : cursor+signatureLength]
	if len(sigdata) > 0 {
		signature = &Signature{}
		copy(signature[:], sigdata)
	}

	r.signature = signature
	r.updateAddr = updateAddr
	r.binaryData = chunkdata

	return nil

}

// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash)
// the serialized payload is cached in .binaryData
func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) {
	hasher := hashPool.Get().(hash.Hash)
	defer hashPool.Put(hasher)
	hasher.Reset()
	dataLength := r.resourceUpdate.binaryLength()
	if r.binaryData == nil {
		r.binaryData = make([]byte, dataLength+signatureLength)
		if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil {
			return result, err
		}
	}
	hasher.Write(r.binaryData[:dataLength]) //everything except the signature.

	return common.BytesToHash(hasher.Sum(nil)), nil
}

// getOwner extracts the address of the resource update signer
func getOwner(digest common.Hash, signature Signature) (common.Address, error) {
	pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
	if err != nil {
		return common.Address{}, err
	}
	return crypto.PubkeyToAddress(*pub), nil
}

// verifyOwner checks that the signer of the update actually owns the resource
// H(ownerAddr, metaHash) is computed. If it matches the rootAddr the update chunk is claiming
// to update, it is proven that the signer of the resource update owns the resource.
// See metadataHash in metadata.go for a more detailed explanation
func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool {
	hasher := hashPool.Get().(hash.Hash)
	defer hashPool.Put(hasher)
	hasher.Reset()
	hasher.Write(metaHash)
	hasher.Write(ownerAddr.Bytes())
	rootAddr2 := hasher.Sum(nil)
	return bytes.Equal(rootAddr2, rootAddr)
}
swarm/storage/mru/testutil.go (new file, 56 lines)
@ -0,0 +1,56 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"fmt"
	"path/filepath"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	testDbDirName = "mru"
)

type TestHandler struct {
	*Handler
}

func (t *TestHandler) Close() {
	t.chunkStore.Close()
}

// NewTestHandler creates Handler object to be used for testing purposes.
func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
	path := filepath.Join(datadir, testDbDirName)
	rh, err := NewHandler(params)
	if err != nil {
		return nil, fmt.Errorf("resource handler create fail: %v", err)
	}
	localstoreparams := storage.NewDefaultLocalStoreParams()
	localstoreparams.Init(path)
	localStore, err := storage.NewLocalStore(localstoreparams, nil)
	if err != nil {
		return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
	}
	localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm)))
	localStore.Validators = append(localStore.Validators, rh)
	netStore := storage.NewNetStore(localStore, nil)
	rh.SetStore(netStore)
	return &TestHandler{rh}, nil
}
swarm/storage/mru/timestampprovider.go (new file, 71 lines)
@ -0,0 +1,71 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"encoding/binary"
	"time"
)

// TimestampProvider sets the time source of the mru package
var TimestampProvider timestampProvider = NewDefaultTimestampProvider()

// Encodes a point in time as a Unix epoch
type Timestamp struct {
	Time uint64 // Unix epoch timestamp, in seconds
}

// 8 bytes uint64 Time
const timestampLength = 8

// timestampProvider interface describes a source of timestamp information
type timestampProvider interface {
	Now() Timestamp // returns the current timestamp information
}

// binaryGet populates the timestamp structure from the given byte slice
func (t *Timestamp) binaryGet(data []byte) error {
	if len(data) != timestampLength {
		return NewError(ErrCorruptData, "timestamp data has the wrong size")
	}
	t.Time = binary.LittleEndian.Uint64(data[:8])
	return nil
}

// binaryPut serializes a Timestamp to a byte slice
func (t *Timestamp) binaryPut(data []byte) error {
	if len(data) != timestampLength {
		return NewError(ErrCorruptData, "timestamp data has the wrong size")
	}
	binary.LittleEndian.PutUint64(data, t.Time)
	return nil
}

type DefaultTimestampProvider struct {
}

// NewDefaultTimestampProvider creates a system clock based timestamp provider
func NewDefaultTimestampProvider() *DefaultTimestampProvider {
	return &DefaultTimestampProvider{}
}

// Now returns the current time according to this provider
func (dtp *DefaultTimestampProvider) Now() Timestamp {
	return Timestamp{
		Time: uint64(time.Now().Unix()),
	}
}
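Because TimestampProvider is a package-level variable, tests and simulations can swap in a deterministic clock so that period and version calculations become reproducible. A minimal sketch of such a provider; the fixed epoch value is arbitrary and the type name is hypothetical:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// fixedTimestampProvider always reports the same instant.
type fixedTimestampProvider struct {
	time uint64
}

func (f *fixedTimestampProvider) Now() mru.Timestamp {
	return mru.Timestamp{Time: f.time}
}

func main() {
	mru.TimestampProvider = &fixedTimestampProvider{time: 1528900000}
	fmt.Println(mru.TimestampProvider.Now().Time) // always 1528900000
}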
swarm/storage/mru/update.go (new file, 147 lines)
@ -0,0 +1,147 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
    "encoding/binary"
    "errors"

    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/multihash"
)

// resourceUpdate encapsulates the information sent as part of a resource update
type resourceUpdate struct {
    updateHeader        // metainformation about this resource update
    data         []byte // actual data payload
}

// Update chunk layout
// Prefix:
// 2 bytes updateHeaderLength
// 2 bytes data length
const chunkPrefixLength = 2 + 2

// Header: (see updateHeader)
// Data:
// data (datalength bytes)
//
// Minimum size is Header + 1 (minimum data length, enforced)
const minimumUpdateDataLength = updateHeaderLength + 1
const maxUpdateDataLength = chunkSize - signatureLength - updateHeaderLength - chunkPrefixLength

// binaryPut serializes the resource update information into the given slice
func (r *resourceUpdate) binaryPut(serializedData []byte) error {
    datalength := len(r.data)
    if datalength == 0 {
        return NewError(ErrInvalidValue, "cannot update a resource with no data")
    }

    if datalength > maxUpdateDataLength {
        return NewErrorf(ErrInvalidValue, "data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength)
    }

    if len(serializedData) != r.binaryLength() {
        return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. Expected %d bytes", r.binaryLength())
    }

    if r.multihash {
        if _, _, err := multihash.GetMultihashLength(r.data); err != nil {
            return NewError(ErrInvalidValue, "Invalid multihash")
        }
    }

    // Add prefix: updateHeaderLength and actual data length
    cursor := 0
    binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength))
    cursor += 2

    // data length
    binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength))
    cursor += 2

    // serialize header (see updateHeader)
    if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
        return err
    }
    cursor += updateHeaderLength

    // add the data
    copy(serializedData[cursor:], r.data)
    cursor += datalength

    return nil
}

// binaryLength returns the expected number of bytes this structure will take to encode
func (r *resourceUpdate) binaryLength() int {
    return chunkPrefixLength + updateHeaderLength + len(r.data)
}

// binaryGet populates this instance from the information contained in the passed byte slice
func (r *resourceUpdate) binaryGet(serializedData []byte) error {
    if len(serializedData) < minimumUpdateDataLength {
        return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength)
    }
    cursor := 0
    declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])
    if declaredHeaderlength != updateHeaderLength {
        return NewErrorf(ErrCorruptData, "Invalid header length. Expected %d, got %d", updateHeaderLength, declaredHeaderlength)
    }

    cursor += 2
    datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]))
    cursor += 2

    if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) {
        return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size")
    }

    // at this point we can be satisfied that we have the correct data length to read
    if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
        return err
    }
    cursor += updateHeaderLength

    data := serializedData[cursor : cursor+datalength]
    cursor += datalength

    // if multihash content is indicated we check the validity of the multihash
    if r.updateHeader.multihash {
        mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data)
        if err != nil {
            log.Error("multihash parse error", "err", err)
            return err
        }
        if datalength != mhLength+mhHeaderLength {
            log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength)
            return errors.New("Corrupt multihash data")
        }
    }

    // now that all checks have passed, copy data into structure
    r.data = make([]byte, datalength)
    copy(r.data, data)

    return nil
}

// Multihash specifies whether the resource data should be interpreted as multihash
func (r *resourceUpdate) Multihash() bool {
    return r.multihash
}
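To make the chunk layout above concrete, here is a small sketch, not part of this diff, that builds just the four-byte prefix the way binaryPut does and adds up the sizes the way binaryLength does. The header length of 73 is used only as an example value; it happens to be the one declared in the test vectors in update_test.go below (0x0049 little-endian).

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    const exampleHeaderLength = 73 // stands in for updateHeaderLength
    data := []byte("hello")        // example payload

    // chunkPrefixLength = 2 + 2: header length, then data length, both little-endian.
    prefix := make([]byte, 4)
    binary.LittleEndian.PutUint16(prefix[0:], uint16(exampleHeaderLength))
    binary.LittleEndian.PutUint16(prefix[2:], uint16(len(data)))

    // Mirrors binaryLength(): prefix + header + data (the signature is appended elsewhere).
    total := len(prefix) + exampleHeaderLength + len(data)
    fmt.Printf("prefix=%x total=%d\n", prefix, total) // prefix=49000500 total=82
}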
72 swarm/storage/mru/update_test.go Normal file
@ -0,0 +1,72 @@
package mru

import (
    "bytes"
    "testing"
)

const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f"
const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20"

func getTestResourceUpdate() *resourceUpdate {
    return &resourceUpdate{
        updateHeader: *getTestUpdateHeader(false),
        data:         []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"),
    }
}

func getTestResourceUpdateMultihash() *resourceUpdate {
    return &resourceUpdate{
        updateHeader: *getTestUpdateHeader(true),
        data:         []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32},
    }
}

func compareResourceUpdate(a, b *resourceUpdate) bool {
    return compareUpdateHeader(&a.updateHeader, &b.updateHeader) &&
        bytes.Equal(a.data, b.data)
}

func TestResourceUpdateSerializer(t *testing.T) {
    var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation
    update := getTestResourceUpdate()
    serializedUpdate := make([]byte, serializedUpdateLength)
    if err := update.binaryPut(serializedUpdate); err != nil {
        t.Fatal(err)
    }
    compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex)

    // Test fail if update does not contain data
    update.data = nil
    if err := update.binaryPut(serializedUpdate); err == nil {
        t.Fatal("Expected resourceUpdate.binaryPut to fail since update does not contain data")
    }

    // Test fail if update is too big
    update.data = make([]byte, 10000)
    if err := update.binaryPut(serializedUpdate); err == nil {
        t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big")
    }

    // Test fail if passed slice is not of the exact size required for this update
    update.data = make([]byte, 1)
    if err := update.binaryPut(serializedUpdate); err == nil {
        t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size")
    }

    // Test serializing a multihash update
    var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation
    update = getTestResourceUpdateMultihash()
    serializedUpdate = make([]byte, serializedUpdateMultihashLength)
    if err := update.binaryPut(serializedUpdate); err != nil {
        t.Fatal(err)
    }
    compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex)

    // mess with the multihash to test it fails with a wrong multihash error
    update.data[1] = 79
    if err := update.binaryPut(serializedUpdate); err == nil {
        t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash")
    }
}
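As a cross-check on the constants, the prefix of serializedUpdateHex above can be decoded by hand. A sketch, not part of this diff:

package main

import (
    "encoding/binary"
    "fmt"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    // First four bytes of serializedUpdateHex: the chunk prefix.
    prefix, err := hexutil.Decode("0x49003400")
    if err != nil {
        panic(err)
    }

    headerLength := binary.LittleEndian.Uint16(prefix[0:2]) // 73, the declared updateHeaderLength
    dataLength := binary.LittleEndian.Uint16(prefix[2:4])   // 52

    payload := "El que lee mucho y anda mucho, ve mucho y sabe mucho"
    fmt.Println(headerLength, dataLength, len(payload)) // 73 52 52
}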
88 swarm/storage/mru/updateheader.go Normal file
@ -0,0 +1,88 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
    "github.com/ethereum/go-ethereum/swarm/storage"
)

// updateHeader models the non-payload components of a Resource Update
type updateHeader struct {
    UpdateLookup        // UpdateLookup contains the information required to locate this resource (components of the search key used to find it)
    multihash    bool   // Whether the data in this Resource Update should be interpreted as multihash
    metaHash     []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownership of the resource.
}

const metaHashLength = storage.KeyLength

// updateLookupLength bytes
// 1 byte flags (multihash bool for now)
// 32 bytes metaHash
const updateHeaderLength = updateLookupLength + 1 + metaHashLength

// binaryPut serializes the resource header information into the given slice
func (h *updateHeader) binaryPut(serializedData []byte) error {
    if len(serializedData) != updateHeaderLength {
        return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
    }
    if len(h.metaHash) != metaHashLength {
        return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set")
    }
    if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil {
        return err
    }
    cursor := updateLookupLength
    copy(serializedData[cursor:], h.metaHash[:metaHashLength])
    cursor += metaHashLength

    var flags byte
    if h.multihash {
        flags |= 0x01
    }

    serializedData[cursor] = flags
    cursor++

    return nil
}

// binaryLength returns the expected size of this structure when serialized
func (h *updateHeader) binaryLength() int {
    return updateHeaderLength
}

// binaryGet restores the current updateHeader instance from the information contained in the passed slice
func (h *updateHeader) binaryGet(serializedData []byte) error {
    if len(serializedData) != updateHeaderLength {
        return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
    }

    if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil {
        return err
    }
    cursor := updateLookupLength
    h.metaHash = make([]byte, metaHashLength)
    copy(h.metaHash[:storage.KeyLength], serializedData[cursor:cursor+storage.KeyLength])
    cursor += metaHashLength

    flags := serializedData[cursor]
    cursor++

    h.multihash = flags&0x01 != 0

    return nil
}
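Note that binaryPut writes the fields in the order UpdateLookup, then metaHash, then flags, so the single flags byte sits at the very end of the serialized header. A sketch, not part of this diff, of the flag round-trip; only bit 0 (multihash) is assigned so far:

package main

import "fmt"

func main() {
    // Encode: build the flags byte the way updateHeader.binaryPut does.
    multihash := true
    var flags byte
    if multihash {
        flags |= 0x01
    }

    // Decode: recover the boolean the way updateHeader.binaryGet does.
    decoded := flags&0x01 != 0
    fmt.Printf("flags=%08b multihash=%v\n", flags, decoded) // flags=00000001 multihash=true
}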
64 swarm/storage/mru/updateheader_test.go Normal file
@ -0,0 +1,64 @@
package mru

import (
    "bytes"
    "testing"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001"

func getTestUpdateHeader(multihash bool) (header *updateHeader) {
    _, metaHash, _, _ := getTestMetadata().serializeAndHash()
    return &updateHeader{
        UpdateLookup: *getTestUpdateLookup(),
        multihash:    multihash,
        metaHash:     metaHash,
    }
}

func compareUpdateHeader(a, b *updateHeader) bool {
    return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) &&
        a.multihash == b.multihash &&
        bytes.Equal(a.metaHash, b.metaHash)
}

func TestUpdateHeaderSerializer(t *testing.T) {
    header := getTestUpdateHeader(true)
    serializedHeader := make([]byte, updateHeaderLength)
    if err := header.binaryPut(serializedHeader); err != nil {
        t.Fatal(err)
    }
    compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex)

    // trigger incorrect slice length error passing a slice that is 1 byte too big
    if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil {
        t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length")
    }

    // trigger invalid metaHash error
    header.metaHash = nil
    if err := header.binaryPut(serializedHeader); err == nil {
        t.Fatal("Expected updateHeader.binaryPut to fail since metaHash is of incorrect length")
    }
}

func TestUpdateHeaderDeserializer(t *testing.T) {
    originalUpdate := getTestUpdateHeader(true)
    serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex)
    var retrievedUpdate updateHeader
    if err := retrievedUpdate.binaryGet(serializedData); err != nil {
        t.Fatal(err)
    }
    if !compareUpdateHeader(originalUpdate, &retrievedUpdate) {
        t.Fatalf("Expected deserialized structure to equal the original")
    }

    // mess with source slice to test length checks
    serializedData = []byte{1, 2, 3}
    if err := retrievedUpdate.binaryGet(serializedData); err == nil {
        t.Fatal("Expected retrievedUpdate.binaryGet to fail, since passed slice is too small")
    }
}
@ -192,25 +192,8 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
    self.fileStore = storage.NewFileStore(netStore, self.config.FileStoreParams)

    var resourceHandler *mru.Handler
    rhparams := &mru.HandlerParams{
        // TODO: config parameter to set limits
        QueryMaxPeriods: &mru.LookupParams{
            Limit: false,
        },
        Signer: &mru.GenericSigner{
            PrivKey: self.privateKey,
        },
    }
    if resolver != nil {
        resolver.SetNameHash(ens.EnsNode)
        // Set HeaderGetter and OwnerValidator interfaces to resolver only if it is not nil.
        rhparams.HeaderGetter = resolver
        rhparams.OwnerValidator = resolver
    } else {
        log.Warn("No ETH API specified, resource updates will use block height approximation")
        // TODO: blockestimator should use saved values derived from last time ethclient was connected
        rhparams.HeaderGetter = mru.NewBlockEstimator()
    }
    rhparams := &mru.HandlerParams{}

    resourceHandler, err = mru.NewHandler(rhparams)
    if err != nil {
        return nil, err
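The net effect of this hunk is that NewSwarm no longer wires a Signer, HeaderGetter or OwnerValidator into the handler; an empty HandlerParams is enough. A minimal sketch of the resulting construction, not part of this diff and with the surrounding NewSwarm plumbing omitted:

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
    // After this change the handler needs no node-side signer or header source.
    rhparams := &mru.HandlerParams{}
    resourceHandler, err := mru.NewHandler(rhparams)
    if err != nil {
        log.Fatal(err)
    }
    _ = resourceHandler // handed to the Swarm API elsewhere in NewSwarm
}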
@ -17,15 +17,12 @@
package testutil

import (
    "context"
    "io/ioutil"
    "math/big"
    "net/http"
    "net/http/httptest"
    "os"
    "testing"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/swarm/api"
    "github.com/ethereum/go-ethereum/swarm/storage"
    "github.com/ethereum/go-ethereum/swarm/storage/mru"
@ -35,16 +32,17 @@ type TestServer interface {
    ServeHTTP(http.ResponseWriter, *http.Request)
}

type fakeBackend struct {
    blocknumber int64
// simulated timeProvider
type fakeTimeProvider struct {
    currentTime uint64
}

func (f *fakeBackend) HeaderByNumber(context context.Context, _ string, bigblock *big.Int) (*types.Header, error) {
    f.blocknumber++
    biggie := big.NewInt(f.blocknumber)
    return &types.Header{
        Number: biggie,
    }, nil
func (f *fakeTimeProvider) Tick() {
    f.currentTime++
}

func (f *fakeTimeProvider) Now() mru.Timestamp {
    return mru.Timestamp{Time: f.currentTime}
}

func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *TestSwarmServer {
@ -68,24 +66,25 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes
    if err != nil {
        t.Fatal(err)
    }
    rhparams := &mru.HandlerParams{
        QueryMaxPeriods: &mru.LookupParams{},
        HeaderGetter: &fakeBackend{
            blocknumber: 42,
        },

    fakeTimeProvider := &fakeTimeProvider{
        currentTime: 42,
    }
    mru.TimestampProvider = fakeTimeProvider
    rhparams := &mru.HandlerParams{}
    rh, err := mru.NewTestHandler(resourceDir, rhparams)
    if err != nil {
        t.Fatal(err)
    }

    a := api.NewAPI(fileStore, nil, rh)
    a := api.NewAPI(fileStore, nil, rh.Handler)
    srv := httptest.NewServer(serverFunc(a))
    return &TestSwarmServer{
        Server:    srv,
        FileStore: fileStore,
        dir:       dir,
        Hasher:    storage.MakeHashFunc(storage.DefaultHash)(),
        Server:            srv,
        FileStore:         fileStore,
        dir:               dir,
        Hasher:            storage.MakeHashFunc(storage.DefaultHash)(),
        timestampProvider: fakeTimeProvider,
        cleanup: func() {
            srv.Close()
            rh.Close()
@ -97,12 +96,17 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes

type TestSwarmServer struct {
    *httptest.Server
    Hasher    storage.SwarmHash
    FileStore *storage.FileStore
    dir       string
    cleanup   func()
    Hasher            storage.SwarmHash
    FileStore         *storage.FileStore
    dir               string
    cleanup           func()
    timestampProvider *fakeTimeProvider
}

func (t *TestSwarmServer) Close() {
    t.cleanup()
}

func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp {
    return t.timestampProvider.Now()
}
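A hypothetical usage sketch, not part of this diff, showing how a test could read the simulated clock through the new GetCurrentTime helper. The no-op TestServer, the example package name and the swarm/testutil import path are assumptions made for illustration:

package example_test

import (
    "net/http"
    "testing"

    "github.com/ethereum/go-ethereum/swarm/api"
    "github.com/ethereum/go-ethereum/swarm/testutil"
)

// noopServer satisfies testutil.TestServer without serving anything useful.
type noopServer struct{}

func (noopServer) ServeHTTP(http.ResponseWriter, *http.Request) {}

func TestSimulatedClock(t *testing.T) {
    srv := testutil.NewTestSwarmServer(t, func(*api.API) testutil.TestServer { return noopServer{} })
    defer srv.Close()

    // The fake provider starts at 42, so the first reading is deterministic.
    if got := srv.GetCurrentTime(); got.Time != 42 {
        t.Fatalf("expected simulated time 42, got %d", got.Time)
    }
}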