Swarm MRUs: Adaptive frequency / Predictable lookups / API simplification (#17559)
* swarm/storage/mru: Adaptive Frequency
* swarm/storage/mru/lookup: fixed getBaseTime, added NewEpoch constructor
* swarm/api/client: better error handling in GetResource()
* swarm/storage/mru: Renamed structures. Renamed ResourceMetadata to ResourceID. Renamed ResourceID.Name to ResourceID.Topic
* swarm/storage/mru: Added binarySerializer interface and test tools
* swarm/storage/mru/lookup: Changed base time to time and + marshallers
* swarm/storage/mru: Added ResourceID (former resourceMetadata)
* swarm/storage/mru: Added ResourceViewId and serialization tests
* swarm/storage/mru/lookup: fixed epoch unmarshaller. Added Epoch Equals
* swarm/storage/mru: Fixes as per review comments
* cmd/swarm: reworded resource create/update help text regarding topic
* swarm/storage/mru: Added UpdateLookup and serializer tests
* swarm/storage/mru: Added UpdateHeader, serializers and tests
* swarm/storage/mru: changed UpdateAddr / epoch to Base()
* swarm/storage/mru: Added resourceUpdate serializer and tests
* swarm/storage/mru: Added SignedResourceUpdate tests and serializers
* swarm/storage/mru/lookup: fixed GetFirstEpoch bug
* swarm/storage/mru: refactor, comments, cleanup. Also added tests for Topic
* swarm/storage/mru: handler tests pass
* swarm/storage/mru: all resource package tests pass
* swarm/storage/mru: resource test pass after adding timestamp checking support
* swarm/storage/mru: Added JSON serializers to ResourceIDView structures
* swarm/storage/mru: Server, client, API tests pass
* swarm/storage/mru: server test pass
* swarm/storage/mru: Added topic length check
* swarm/storage/mru: removed some literals, improved "previous lookup" test case
* swarm/storage/mru: some fixes and comments as per review
* swarm/storage/mru: first working version without metadata chunk
* swarm/storage/mru: Various fixes as per review
* swarm/storage/mru: client test pass
* swarm/storage/mru: resource query strings and manifest-less queries
* swarm/storage/mru: simplify naming
* swarm/storage/mru: first autofreq working version
* swarm/storage/mru: renamed ToValues to AppendValues
* swarm/resource/mru: Added ToValues / FromValues for URL query strings
* swarm/storage/mru: Changed POST resource to work with query strings. No more JSON.
* swarm/storage/mru: removed resourceid
* swarm/storage/mru: Opened up structures
* swarm/storage/mru: Merged Request and SignedResourceUpdate
* swarm/storage/mru: removed initial data from CLI resource create
* swarm/storage/mru: Refactor Topic as a direct fixed-length array
* swarm/storage/mru/lookup: Comprehensive GetNextLevel tests
* swarm/storage/mru: Added comments. Added length checks in Topic
* swarm/storage/mru: fixes in tests and some code comments
* swarm/storage/mru/lookup: new optimized lookup algorithm
* swarm/api: moved getResourceView to api out of server
* swarm/storage/mru: Lookup algorithm working
* swarm/storage/mru: comments and renamed NewLookupParams. Deleted commented code
* swarm/storage/mru/lookup: renamed Epoch.LaterThan to After
* swarm/storage/mru/lookup: Comments and tidying naming
* swarm/storage/mru: fix lookup algorithm
* swarm/storage/mru: exposed lookup hint, removed updateheader
* swarm/storage/mru/lookup: changed GetNextEpoch for initial values
* swarm/storage/mru: resource tests pass
* swarm/storage/mru: valueSerializer interface and tests
* swarm/storage/mru/lookup: Comments, improvements, fixes, more tests
* swarm/storage/mru: renamed UpdateLookup to ID, LookupParams to Query
* swarm/storage/mru: renamed query receiver var
* swarm/cmd: MRU CLI tests
* cmd/swarm: remove rogue fmt
* swarm/storage/mru: Add version / header for future use
* swarm/storage/mru: Fixes/comments as per review; cmd/swarm: remove rogue fmt; swarm/storage/mru: Add version / header for future use
* swarm/storage/mru: fix linter errors
* cmd/swarm: Speeded up TestCLIResourceUpdate
parent 0da3b17a11, commit 2c110c81ee
@@ -203,21 +203,29 @@ var (
 		Usage:  "Number of recent chunks cached in memory (default 5000)",
 		EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
 	}
-	SwarmResourceMultihashFlag = cli.BoolFlag{
-		Name:  "multihash",
-		Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
+	SwarmCompressedFlag = cli.BoolFlag{
+		Name:  "compressed",
+		Usage: "Prints encryption keys in compressed form",
 	}
 	SwarmResourceNameFlag = cli.StringFlag{
 		Name:  "name",
-		Usage: "User-defined name for the new resource",
+		Usage: "User-defined name for the new resource, limited to 32 characters. If combined with topic, the resource will be a subtopic with this name",
+	}
+	SwarmResourceTopicFlag = cli.StringFlag{
+		Name:  "topic",
+		Usage: "User-defined topic this resource is tracking, hex encoded. Limited to 64 hexadecimal characters",
 	}
 	SwarmResourceDataOnCreateFlag = cli.StringFlag{
 		Name:  "data",
 		Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
 	}
-	SwarmCompressedFlag = cli.BoolFlag{
-		Name:  "compressed",
-		Usage: "Prints encryption keys in compressed form",
+	SwarmResourceManifestFlag = cli.StringFlag{
+		Name:  "manifest",
+		Usage: "Refers to the resource through a manifest",
+	}
+	SwarmResourceUserFlag = cli.StringFlag{
+		Name:  "user",
+		Usage: "Indicates the user who updates the resource",
 	}
 )
 
@@ -347,27 +355,53 @@ func init() {
 			{
 				Action:             resourceCreate,
 				CustomHelpTemplate: helpTemplate,
 				Name:               "create",
-				Usage:              "creates a new Mutable Resource",
-				ArgsUsage:          "<frequency>",
-				Description:        "creates a new Mutable Resource",
-				Flags:              []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
+				Usage:              "creates and publishes a new Mutable Resource manifest",
+				Description: `creates and publishes a new Mutable Resource manifest pointing to a specified user's updates about a particular topic.
+					The resource topic can be built in the following ways:
+					* use --topic to set the topic to an arbitrary binary hex string.
+					* use --name to set the topic to a human-readable name.
+					    For example --name could be set to "profile-picture", meaning this Mutable Resource allows to get this user's current profile picture.
+					* use both --topic and --name to create named subtopics.
+					    For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
+					    the Mutable Resource tracks a discussion about that contract.
+					The --user flag allows to have this manifest refer to a user other than yourself. If not specified,
+					it will then default to your local account (--bzzaccount)`,
+				Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceTopicFlag, SwarmResourceUserFlag},
 			},
 			{
 				Action:             resourceUpdate,
 				CustomHelpTemplate: helpTemplate,
 				Name:               "update",
 				Usage:              "updates the content of an existing Mutable Resource",
-				ArgsUsage:          "<Manifest Address or ENS domain> <0x Hex data>",
-				Description:        "updates the content of an existing Mutable Resource",
-				Flags:              []cli.Flag{SwarmResourceMultihashFlag},
+				ArgsUsage:          "<0x Hex data>",
+				Description: `publishes a new update on the specified topic
+					The resource topic can be built in the following ways:
+					* use --topic to set the topic to an arbitrary binary hex string.
+					* use --name to set the topic to a human-readable name.
+					    For example --name could be set to "profile-picture", meaning this Mutable Resource allows to get this user's current profile picture.
+					* use both --topic and --name to create named subtopics.
+					    For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
+					    the Mutable Resource tracks a discussion about that contract.
+
+					If you have a manifest, you can specify it with --manifest to refer to the resource,
+					instead of using --topic / --name
+					`,
+				Flags: []cli.Flag{SwarmResourceManifestFlag, SwarmResourceNameFlag, SwarmResourceTopicFlag},
 			},
 			{
 				Action:             resourceInfo,
 				CustomHelpTemplate: helpTemplate,
 				Name:               "info",
 				Usage:              "obtains information about an existing Mutable Resource",
-				ArgsUsage:          "<Manifest Address or ENS domain>",
-				Description:        "obtains information about an existing Mutable Resource",
+				Description: `obtains information about an existing Mutable Resource
+					The topic can be specified directly with the --topic flag as an hex string
+					If no topic is specified, the default topic (zero) will be used
+					The --name flag can be used to specify subtopics with a specific name.
+					The --user flag allows to refer to a user other than yourself. If not specified,
+					it will then default to your local account (--bzzaccount)
+					If you have a manifest, you can specify it with --manifest instead of --topic / --name / ---user
+					to refer to the resource`,
+				Flags: []cli.Flag{SwarmResourceManifestFlag, SwarmResourceNameFlag, SwarmResourceTopicFlag, SwarmResourceUserFlag},
 			},
 		},
 	},
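For illustration, a minimal Go sketch (not part of the diff) of how the --name / --topic combination described in the help text above can be turned into a single resource topic; it assumes only the mru.NewTopic and Topic.Hex APIs that appear later in this commit, and the hex value is a placeholder:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	// stand-ins for the --topic and --name flag values (illustrative only)
	relatedTopicHex := "0x1a2b3c4d5e6f708192a3b4c5d6e7f8091a2b3c4d" // e.g. a contract address
	name := "comments"

	relatedTopicBytes, err := hexutil.Decode(relatedTopicHex)
	if err != nil {
		panic(err)
	}

	// combine the human-readable name with the related topic bytes,
	// as the getTopic helper in cmd/swarm/mru.go (below) does
	topic, err := mru.NewTopic(name, relatedTopicBytes)
	if err != nil {
		panic(err)
	}
	fmt.Println("derived topic:", topic.Hex())
}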
cmd/swarm/mru.go (135 changed lines)
@@ -19,10 +19,11 @@ package main
 
 import (
 	"fmt"
-	"strconv"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
@@ -34,62 +35,38 @@ func NewGenericSigner(ctx *cli.Context) mru.Signer {
 	return mru.NewGenericSigner(getPrivKey(ctx))
 }
 
+func getTopic(ctx *cli.Context) (topic mru.Topic) {
+	var name = ctx.String(SwarmResourceNameFlag.Name)
+	var relatedTopic = ctx.String(SwarmResourceTopicFlag.Name)
+	var relatedTopicBytes []byte
+	var err error
+
+	if relatedTopic != "" {
+		relatedTopicBytes, err = hexutil.Decode(relatedTopic)
+		if err != nil {
+			utils.Fatalf("Error parsing topic: %s", err)
+		}
+	}
+
+	topic, err = mru.NewTopic(name, relatedTopicBytes)
+	if err != nil {
+		utils.Fatalf("Error parsing topic: %s", err)
+	}
+	return topic
+}
+
 // swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
 // swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
 // swarm resource info <Manifest Address or ENS domain>
 
 func resourceCreate(ctx *cli.Context) {
-	args := ctx.Args()
 
 	var (
 		bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
 		client      = swarm.NewClient(bzzapi)
-		multihash   = ctx.Bool(SwarmResourceMultihashFlag.Name)
-		initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
-		name        = ctx.String(SwarmResourceNameFlag.Name)
 	)
 
-	if len(args) < 1 {
-		fmt.Println("Incorrect number of arguments")
-		cli.ShowCommandHelpAndExit(ctx, "create", 1)
-		return
-	}
-	signer := NewGenericSigner(ctx)
-	frequency, err := strconv.ParseUint(args[0], 10, 64)
-	if err != nil {
-		fmt.Printf("Frequency formatting error: %s\n", err.Error())
-		cli.ShowCommandHelpAndExit(ctx, "create", 1)
-		return
-	}
-
-	metadata := mru.ResourceMetadata{
-		Name:      name,
-		Frequency: frequency,
-		Owner:     signer.Address(),
-	}
-
-	var newResourceRequest *mru.Request
-	if initialData != "" {
-		initialDataBytes, err := hexutil.Decode(initialData)
-		if err != nil {
-			fmt.Printf("Error parsing data: %s\n", err.Error())
-			cli.ShowCommandHelpAndExit(ctx, "create", 1)
-			return
-		}
-		newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
-		if err != nil {
-			utils.Fatalf("Error creating new resource request: %s", err)
-		}
-		newResourceRequest.SetData(initialDataBytes, multihash)
-		if err = newResourceRequest.Sign(signer); err != nil {
-			utils.Fatalf("Error signing resource update: %s", err.Error())
-		}
-	} else {
-		newResourceRequest, err = mru.NewCreateRequest(&metadata)
-		if err != nil {
-			utils.Fatalf("Error creating new resource request: %s", err)
-		}
-	}
+	newResourceRequest := mru.NewFirstRequest(getTopic(ctx))
+	newResourceRequest.View.User = resourceGetUser(ctx)
 
 	manifestAddress, err := client.CreateResource(newResourceRequest)
 	if err != nil {
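As a hedged sketch (not part of the commit), the equivalent programmatic flow of the simplified resourceCreate above — build a first update request for a topic, record the publishing user, and ask the node to create the manifest; the gateway URL and key handling are placeholders:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	// throwaway key for illustration; the CLI derives this from --bzzaccount
	privKey, _ := crypto.GenerateKey()
	signer := mru.NewGenericSigner(privKey)

	topic, _ := mru.NewTopic("profile-picture", nil)

	// first request for this topic; the view records topic + user
	request := mru.NewFirstRequest(topic)
	request.View.User = signer.Address()

	client := swarm.NewClient("http://localhost:8500") // assumed local gateway
	manifestAddress, err := client.CreateResource(request)
	if err != nil {
		panic(err)
	}
	fmt.Println("resource manifest:", manifestAddress)
}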
@@ -104,32 +81,43 @@ func resourceUpdate(ctx *cli.Context) {
 	args := ctx.Args()
 
 	var (
 		bzzapi                  = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
 		client                  = swarm.NewClient(bzzapi)
-		multihash               = ctx.Bool(SwarmResourceMultihashFlag.Name)
+		manifestAddressOrDomain = ctx.String(SwarmResourceManifestFlag.Name)
 	)
 
-	if len(args) < 2 {
+	if len(args) < 1 {
 		fmt.Println("Incorrect number of arguments")
 		cli.ShowCommandHelpAndExit(ctx, "update", 1)
 		return
 	}
 
 	signer := NewGenericSigner(ctx)
-	manifestAddressOrDomain := args[0]
-	data, err := hexutil.Decode(args[1])
+	data, err := hexutil.Decode(args[0])
 	if err != nil {
 		utils.Fatalf("Error parsing data: %s", err.Error())
 		return
 	}
 
+	var updateRequest *mru.Request
+	var query *mru.Query
+
+	if manifestAddressOrDomain == "" {
+		query = new(mru.Query)
+		query.User = signer.Address()
+		query.Topic = getTopic(ctx)
+	}
+
 	// Retrieve resource status and metadata out of the manifest
-	updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
+	updateRequest, err = client.GetResourceMetadata(query, manifestAddressOrDomain)
 	if err != nil {
 		utils.Fatalf("Error retrieving resource status: %s", err.Error())
 	}
 
 	// set the new data
-	updateRequest.SetData(data, multihash)
+	updateRequest.SetData(data)
 
 	// sign update
 	if err = updateRequest.Sign(signer); err != nil {
@@ -146,17 +134,19 @@ func resourceUpdate(ctx *cli.Context) {
 
 func resourceInfo(ctx *cli.Context) {
 	var (
 		bzzapi                  = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
 		client                  = swarm.NewClient(bzzapi)
+		manifestAddressOrDomain = ctx.String(SwarmResourceManifestFlag.Name)
 	)
-	args := ctx.Args()
-	if len(args) < 1 {
-		fmt.Println("Incorrect number of arguments.")
-		cli.ShowCommandHelpAndExit(ctx, "info", 1)
-		return
+
+	var query *mru.Query
+	if manifestAddressOrDomain == "" {
+		query = new(mru.Query)
+		query.Topic = getTopic(ctx)
+		query.User = resourceGetUser(ctx)
 	}
-	manifestAddressOrDomain := args[0]
-	metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
+
+	metadata, err := client.GetResourceMetadata(query, manifestAddressOrDomain)
 	if err != nil {
 		utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
 		return
|
|||||||
}
|
}
|
||||||
fmt.Println(string(encodedMetadata))
|
fmt.Println(string(encodedMetadata))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func resourceGetUser(ctx *cli.Context) common.Address {
|
||||||
|
var user = ctx.String(SwarmResourceUserFlag.Name)
|
||||||
|
if user != "" {
|
||||||
|
return common.HexToAddress(user)
|
||||||
|
}
|
||||||
|
pk := getPrivKey(ctx)
|
||||||
|
if pk == nil {
|
||||||
|
utils.Fatalf("Cannot read private key. Must specify --user or --bzzaccount")
|
||||||
|
}
|
||||||
|
return crypto.PubkeyToAddress(pk.PublicKey)
|
||||||
|
|
||||||
|
}
|
||||||
cmd/swarm/mru_test.go (new file, 182 lines)
@@ -0,0 +1,182 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/swarm/api"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
+	"github.com/ethereum/go-ethereum/swarm/testutil"
+
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/log"
+	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+)
+
+func TestCLIResourceUpdate(t *testing.T) {
+
+	srv := testutil.NewTestSwarmServer(t, func(api *api.API) testutil.TestServer {
+		return swarmhttp.NewServer(api, "")
+	}, nil)
+	log.Info("starting 1 node cluster")
+	defer srv.Close()
+
+	// create a private key file for signing
+	pkfile, err := ioutil.TempFile("", "swarm-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer pkfile.Close()
+	defer os.Remove(pkfile.Name())
+
+	privkeyHex := "0000000000000000000000000000000000000000000000000000000000001979"
+	privKey, _ := crypto.HexToECDSA(privkeyHex)
+	address := crypto.PubkeyToAddress(privKey.PublicKey)
+
+	// save the private key to a file
+	_, err = io.WriteString(pkfile, privkeyHex)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// compose a topic. We'll be doing quotes about Miguel de Cervantes
+	var topic mru.Topic
+	subject := []byte("Miguel de Cervantes")
+	copy(topic[:], subject[:])
+	name := "quotes"
+
+	// prepare some data for the update
+	data := []byte("En boca cerrada no entran moscas")
+	hexData := hexutil.Encode(data)
+
+	flags := []string{
+		"--bzzapi", srv.URL,
+		"--bzzaccount", pkfile.Name(),
+		"resource", "update",
+		"--topic", topic.Hex(),
+		"--name", name,
+		hexData}
+
+	// create an update and expect an exit without errors
+	log.Info(fmt.Sprintf("updating a resource with 'swarm resource update'"))
+	cmd := runSwarm(t, flags...)
+	cmd.ExpectExit()
+
+	// now try to get the update using the client
+	client := swarm.NewClient(srv.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// build the same topic as before, this time
+	// we use NewTopic to create a topic automatically.
+	topic, err = mru.NewTopic(name, subject)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// View configures whose updates we will be looking up.
+	view := mru.View{
+		Topic: topic,
+		User:  address,
+	}
+
+	// Build a query to get the latest update
+	query := mru.NewQueryLatest(&view, lookup.NoClue)
+
+	// retrieve content!
+	reader, err := client.GetResource(query, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	retrieved, err := ioutil.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check we retrieved the sent information
+	if !bytes.Equal(data, retrieved) {
+		t.Fatalf("Received %s, expected %s", retrieved, data)
+	}
+
+	// Now retrieve info for the next update
+	flags = []string{
+		"--bzzapi", srv.URL,
+		"resource", "info",
+		"--topic", topic.Hex(),
+		"--user", address.Hex(),
+	}
+
+	log.Info(fmt.Sprintf("getting resource info with 'swarm resource info'"))
+	cmd = runSwarm(t, flags...)
+	_, matches := cmd.ExpectRegexp(`.*`) // regex hack to extract stdout
+	cmd.ExpectExit()
+
+	// verify we can deserialize the result as a valid JSON
+	var request mru.Request
+	err = json.Unmarshal([]byte(matches[0]), &request)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// make sure the retrieved view is the same
+	if request.View != view {
+		t.Fatalf("Expected view to be: %s, got %s", view, request.View)
+	}
+
+	// test publishing a manifest
+	flags = []string{
+		"--bzzapi", srv.URL,
+		"--bzzaccount", pkfile.Name(),
+		"resource", "create",
+		"--topic", topic.Hex(),
+	}
+
+	log.Info(fmt.Sprintf("Publishing manifest with 'swarm resource create'"))
+	cmd = runSwarm(t, flags...)
+	_, matches = cmd.ExpectRegexp(`[a-f\d]{64}`) // regex hack to extract stdout
+	cmd.ExpectExit()
+
+	manifestAddress := matches[0] // read the received resource manifest
+
+	// now attempt to lookup the latest update using a manifest instead
+	reader, err = client.GetResource(nil, manifestAddress)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	retrieved, err = ioutil.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(data, retrieved) {
+		t.Fatalf("Received %s, expected %s", retrieved, data)
+	}
+}
cmd/swarm/swarm (BIN, executable file; binary file not shown)
swarm/api/api.go (176 changed lines)
@@ -29,6 +29,8 @@ import (
 	"path"
 	"strings"
 
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
+
 	"bytes"
 	"mime"
 	"path/filepath"
@@ -401,77 +403,54 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage
 
 	// we need to do some extra work if this is a mutable resource manifest
 	if entry.ContentType == ResourceContentType {
-		// get the resource rootAddr
-		log.Trace("resource type", "menifestAddr", manifestAddr, "hash", entry.Hash)
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-		rootAddr := storage.Address(common.FromHex(entry.Hash))
-		rsrc, err := a.resource.Load(ctx, rootAddr)
+		if entry.ResourceView == nil {
+			return reader, mimeType, status, nil, fmt.Errorf("Cannot decode ResourceView in manifest")
+		}
+		_, err := a.resource.Lookup(ctx, mru.NewQueryLatest(entry.ResourceView, lookup.NoClue))
 		if err != nil {
 			apiGetNotFound.Inc(1)
 			status = http.StatusNotFound
 			log.Debug(fmt.Sprintf("get resource content error: %v", err))
 			return reader, mimeType, status, nil, err
 		}
-
-		// use this key to retrieve the latest update
-		params := mru.LookupLatest(rootAddr)
-		rsrc, err = a.resource.Lookup(ctx, params)
+		// get the data of the update
+		_, rsrcData, err := a.resource.GetContent(entry.ResourceView)
 		if err != nil {
 			apiGetNotFound.Inc(1)
 			status = http.StatusNotFound
-			log.Debug(fmt.Sprintf("get resource content error: %v", err))
+			log.Warn(fmt.Sprintf("get resource content error: %v", err))
 			return reader, mimeType, status, nil, err
 		}
 
-		// if it's multihash, we will transparently serve the content this multihash points to
-		// \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper
-		if rsrc.Multihash() {
-
-			// get the data of the update
-			_, rsrcData, err := a.resource.GetContent(rootAddr)
-			if err != nil {
-				apiGetNotFound.Inc(1)
-				status = http.StatusNotFound
-				log.Warn(fmt.Sprintf("get resource content error: %v", err))
-				return reader, mimeType, status, nil, err
-			}
-
-			// validate that data as multihash
-			decodedMultihash, err := multihash.FromMultihash(rsrcData)
-			if err != nil {
-				apiGetInvalid.Inc(1)
-				status = http.StatusUnprocessableEntity
-				log.Warn("invalid resource multihash", "err", err)
-				return reader, mimeType, status, nil, err
-			}
-			manifestAddr = storage.Address(decodedMultihash)
-			log.Trace("resource is multihash", "key", manifestAddr)
-
-			// get the manifest the multihash digest points to
-			trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt)
-			if err != nil {
-				apiGetNotFound.Inc(1)
-				status = http.StatusNotFound
-				log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err))
-				return reader, mimeType, status, nil, err
-			}
-
-			// finally, get the manifest entry
-			// it will always be the entry on path ""
-			entry, _ = trie.getEntry(path)
-			if entry == nil {
-				status = http.StatusNotFound
-				apiGetNotFound.Inc(1)
-				err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path)
-				log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path)
-				return reader, mimeType, status, nil, err
-			}
-
-		} else {
-			// data is returned verbatim since it's not a multihash
-			return rsrc, "application/octet-stream", http.StatusOK, nil, nil
+		// extract multihash
+		decodedMultihash, err := multihash.FromMultihash(rsrcData)
+		if err != nil {
+			apiGetInvalid.Inc(1)
+			status = http.StatusUnprocessableEntity
+			log.Warn("invalid resource multihash", "err", err)
+			return reader, mimeType, status, nil, err
+		}
+		manifestAddr = storage.Address(decodedMultihash)
+		log.Trace("resource is multihash", "key", manifestAddr)
+
+		// get the manifest the multihash digest points to
+		trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
+		if err != nil {
+			apiGetNotFound.Inc(1)
+			status = http.StatusNotFound
+			log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err))
+			return reader, mimeType, status, nil, err
+		}
+
+		// finally, get the manifest entry
+		// it will always be the entry on path ""
+		entry, _ = trie.getEntry(path)
+		if entry == nil {
+			status = http.StatusNotFound
+			apiGetNotFound.Inc(1)
+			err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path)
+			log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path)
+			return reader, mimeType, status, nil, err
 		}
 	}
 
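A small hedged sketch of the multihash round trip this block relies on — an update's payload stores a swarm hash wrapped as a multihash, and Get unwraps it back into a manifest address; the import paths and hash value are assumptions based on the identifiers used above:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/multihash" // assumed path of the multihash helpers used above
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// placeholder 32-byte swarm hash, hex encoded
	swarmHash := "c9a5b1e1f6a2b3c4d5e6f708192a3b4c5d6e7f808192a3b4c5d6e7f8091a2b3c"
	s := common.FromHex(swarmHash)

	// wrap the hash as a multihash before storing it in a resource update
	mh := multihash.ToMultihash(s)

	// ...and unwrap it on retrieval, as api.Get does above
	decoded, err := multihash.FromMultihash(mh)
	if err != nil {
		panic(err)
	}
	fmt.Println("manifest address:", storage.Address(decoded).Hex())
}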
@@ -966,37 +945,27 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver
 }
 
 // ResourceLookup finds mutable resource updates at specific periods and versions
-func (a *API) ResourceLookup(ctx context.Context, params *mru.LookupParams) (string, []byte, error) {
-	var err error
-	rsrc, err := a.resource.Load(ctx, params.RootAddr())
-	if err != nil {
-		return "", nil, err
-	}
-	_, err = a.resource.Lookup(ctx, params)
+func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, error) {
+	_, err := a.resource.Lookup(ctx, query)
 	if err != nil {
-		return "", nil, err
+		return nil, err
 	}
 	var data []byte
-	_, data, err = a.resource.GetContent(params.RootAddr())
+	_, data, err = a.resource.GetContent(&query.View)
 	if err != nil {
-		return "", nil, err
+		return nil, err
 	}
-	return rsrc.Name(), data, nil
-}
-
-// Create Mutable resource
-func (a *API) ResourceCreate(ctx context.Context, request *mru.Request) error {
-	return a.resource.New(ctx, request)
+	return data, nil
 }
 
 // ResourceNewRequest creates a Request object to update a specific mutable resource
-func (a *API) ResourceNewRequest(ctx context.Context, rootAddr storage.Address) (*mru.Request, error) {
-	return a.resource.NewUpdateRequest(ctx, rootAddr)
+func (a *API) ResourceNewRequest(ctx context.Context, view *mru.View) (*mru.Request, error) {
+	return a.resource.NewRequest(ctx, view)
 }
 
 // ResourceUpdate updates a Mutable Resource with arbitrary data.
 // Upon retrieval the update will be retrieved verbatim as bytes.
-func (a *API) ResourceUpdate(ctx context.Context, request *mru.SignedResourceUpdate) (storage.Address, error) {
+func (a *API) ResourceUpdate(ctx context.Context, request *mru.Request) (storage.Address, error) {
 	return a.resource.Update(ctx, request)
 }
 
@@ -1005,17 +974,62 @@ func (a *API) ResourceHashSize() int {
 	return a.resource.HashSize
 }
 
-// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk.
-func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) {
+// ErrCannotLoadResourceManifest is returned when looking up a resource manifest fails
+var ErrCannotLoadResourceManifest = errors.New("Cannot load resource manifest")
+
+// ErrNotAResourceManifest is returned when the address provided returned something other than a valid manifest
+var ErrNotAResourceManifest = errors.New("Not a resource manifest")
+
+// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the Resource's view ID.
+func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.View, error) {
 	trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt)
 	if err != nil {
-		return nil, fmt.Errorf("cannot load resource manifest: %v", err)
+		return nil, ErrCannotLoadResourceManifest
 	}
 
 	entry, _ := trie.getEntry("")
 	if entry.ContentType != ResourceContentType {
-		return nil, fmt.Errorf("not a resource manifest: %s", addr)
+		return nil, ErrNotAResourceManifest
 	}
 
-	return storage.Address(common.FromHex(entry.Hash)), nil
+	return entry.ResourceView, nil
+}
+
+// ErrCannotResolveResourceURI is returned when the ENS resolver is not able to translate a name to a resource
+var ErrCannotResolveResourceURI = errors.New("Cannot resolve Resource URI")
+
+// ErrCannotResolveResourceView is returned when values provided are not enough or invalid to recreate a
+// resource view out of them.
+var ErrCannotResolveResourceView = errors.New("Cannot resolve resource view")
+
+// ResolveResourceView attempts to extract View information out of the manifest, if provided
+// If not, it attempts to extract the View out of a set of key-value pairs
+func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.View, error) {
+	var view *mru.View
+	var err error
+	if uri.Addr != "" {
+		// resolve the content key.
+		manifestAddr := uri.Address()
+		if manifestAddr == nil {
+			manifestAddr, err = a.Resolve(ctx, uri.Addr)
+			if err != nil {
+				return nil, ErrCannotResolveResourceURI
+			}
+		}
+
+		// get the resource view from the manifest
+		view, err = a.ResolveResourceManifest(ctx, manifestAddr)
+		if err != nil {
+			return nil, err
+		}
+		log.Debug("handle.get.resource: resolved", "manifestkey", manifestAddr, "view", view.Hex())
+	} else {
+		var v mru.View
+		if err := v.FromValues(values); err != nil {
+			return nil, ErrCannotResolveResourceView
+
+		}
+		view = &v
+	}
+	return view, nil
 }
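A hedged sketch of the key-value round trip ResolveResourceView expects when no manifest is given: the client serializes a Query into URL values with AppendValues, and the same values can rebuild a View via FromValues (the exact parameter names are defined inside the mru package and are not shown in this diff; the user address is a placeholder):

package main

import (
	"fmt"
	"net/url"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

func main() {
	topic, _ := mru.NewTopic("profile-picture", nil)
	view := mru.View{
		Topic: topic,
		User:  common.HexToAddress("0x1a2b3c4d5e6f708192a3b4c5d6e7f8091a2b3c4d"), // placeholder
	}

	// client side: serialize the query into URL parameters
	query := mru.NewQueryLatest(&view, lookup.NoClue)
	values := make(url.Values)
	query.AppendValues(values)
	fmt.Println("query string:", values.Encode())

	// server side (manifest-less case in ResolveResourceView):
	// rebuild the view from the same key-value pairs
	var recovered mru.View
	if err := recovered.FromValues(values); err != nil {
		panic(err)
	}
	fmt.Println("recovered view:", recovered.Hex())
}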
@@ -28,6 +28,7 @@ import (
 	"mime/multipart"
 	"net/http"
 	"net/textproto"
+	"net/url"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -595,13 +596,16 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error)
 	return string(data), nil
 }
 
+// ErrNoResourceUpdatesFound is returned when Swarm cannot find updates of the given resource
+var ErrNoResourceUpdatesFound = errors.New("No updates found for this resource")
+
 // CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided
 // data. Data is interpreted as multihash or not depending on the multihash parameter.
 // startTime=0 means "now"
 // Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent)
 // or reference future updates (Client.UpdateResource)
 func (c *Client) CreateResource(request *mru.Request) (string, error) {
-	responseStream, err := c.updateResource(request)
+	responseStream, err := c.updateResource(request, true)
 	if err != nil {
 		return "", err
 	}
@@ -621,17 +625,24 @@ func (c *Client) CreateResource(request *mru.Request) (string, error) {
 
 // UpdateResource allows you to set a new version of your content
 func (c *Client) UpdateResource(request *mru.Request) error {
-	_, err := c.updateResource(request)
+	_, err := c.updateResource(request, false)
 	return err
 }
 
-func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) {
-	body, err := request.MarshalJSON()
+func (c *Client) updateResource(request *mru.Request, createManifest bool) (io.ReadCloser, error) {
+	URL, err := url.Parse(c.Gateway)
 	if err != nil {
 		return nil, err
 	}
+	URL.Path = "/bzz-resource:/"
+	values := URL.Query()
+	body := request.AppendValues(values)
+	if createManifest {
+		values.Set("manifest", "1")
+	}
+	URL.RawQuery = values.Encode()
 
-	req, err := http.NewRequest("POST", c.Gateway+"/bzz-resource:/", bytes.NewBuffer(body))
+	req, err := http.NewRequest("POST", URL.String(), bytes.NewBuffer(body))
 	if err != nil {
 		return nil, err
 	}
@@ -642,28 +653,61 @@ func (c *Client) updateResource(request *mru.Request, createManifest bool) (io.ReadCloser, error) {
 	}
 
 	return res.Body, nil
 
 }
 
 // GetResource returns a byte stream with the raw content of the resource
 // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
 // points to that address
-func (c *Client) GetResource(manifestAddressOrDomain string) (io.ReadCloser, error) {
-	res, err := http.Get(c.Gateway + "/bzz-resource:/" + manifestAddressOrDomain)
+func (c *Client) GetResource(query *mru.Query, manifestAddressOrDomain string) (io.ReadCloser, error) {
+	return c.getResource(query, manifestAddressOrDomain, false)
+}
+
+// getResource returns a byte stream with the raw content of the resource
+// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
+// points to that address
+// meta set to true will instruct the node return resource metainformation instead
+func (c *Client) getResource(query *mru.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) {
+	URL, err := url.Parse(c.Gateway)
+	if err != nil {
+		return nil, err
+	}
+	URL.Path = "/bzz-resource:/" + manifestAddressOrDomain
+	values := URL.Query()
+	if query != nil {
+		query.AppendValues(values) //adds query parameters
+	}
+	if meta {
+		values.Set("meta", "1")
+	}
+	URL.RawQuery = values.Encode()
+	res, err := http.Get(URL.String())
 	if err != nil {
 		return nil, err
 	}
-	return res.Body, nil
+
+	if res.StatusCode != http.StatusOK {
+		if res.StatusCode == http.StatusNotFound {
+			return nil, ErrNoResourceUpdatesFound
+		}
+		errorMessageBytes, err := ioutil.ReadAll(res.Body)
+		var errorMessage string
+		if err != nil {
+			errorMessage = "cannot retrieve error message: " + err.Error()
+		} else {
+			errorMessage = string(errorMessageBytes)
+		}
+		return nil, fmt.Errorf("Error retrieving resource: %s", errorMessage)
+	}
+
+	return res.Body, nil
 }
 
 // GetResourceMetadata returns a structure that describes the Mutable Resource
 // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
 // points to that address
-func (c *Client) GetResourceMetadata(manifestAddressOrDomain string) (*mru.Request, error) {
-	responseStream, err := c.GetResource(manifestAddressOrDomain + "/meta")
+func (c *Client) GetResourceMetadata(query *mru.Query, manifestAddressOrDomain string) (*mru.Request, error) {
+
+	responseStream, err := c.getResource(query, manifestAddressOrDomain, true)
 	if err != nil {
 		return nil, err
 	}
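A usage sketch of the reworked client API above — build a View for a topic and user, ask for the latest update with a Query, and read the content; the gateway URL and user address are placeholders (this mirrors the flow exercised by the tests below):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/ethereum/go-ethereum/common"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

func main() {
	client := swarm.NewClient("http://localhost:8500") // assumed local gateway

	topic, _ := mru.NewTopic("quotes", nil)
	view := &mru.View{
		Topic: topic,
		User:  common.HexToAddress("0x1a2b3c4d5e6f708192a3b4c5d6e7f8091a2b3c4d"), // the publisher's address (placeholder)
	}

	// latest update, no lookup hint, no manifest: query parameters only
	query := mru.NewQueryLatest(view, lookup.NoClue)
	reader, err := client.GetResource(query, "")
	if err != nil {
		panic(err)
	}
	defer reader.Close()

	data, err := ioutil.ReadAll(reader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("latest update: %s\n", data)
}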
@@ -25,6 +25,8 @@ import (
 	"sort"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/swarm/api"
@@ -391,19 +393,12 @@ func TestClientCreateResourceMultihash(t *testing.T) {
 	s := common.FromHex(swarmHash)
 	mh := multihash.ToMultihash(s)
 
-	// our mutable resource "name"
-	resourceName := "foo.eth"
-
-	createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
-		Name:      resourceName,
-		Frequency: 13,
-		StartTime: srv.GetCurrentTime(),
-		Owner:     signer.Address(),
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	createRequest.SetData(mh, true)
+	// our mutable resource topic
+	topic, _ := mru.NewTopic("foo.eth", nil)
+	createRequest := mru.NewFirstRequest(topic)
+	createRequest.SetData(mh)
 	if err := createRequest.Sign(signer); err != nil {
 		t.Fatalf("Error signing update: %s", err)
 	}
@@ -414,12 +409,18 @@ func TestClientCreateResourceMultihash(t *testing.T) {
 		t.Fatalf("Error creating resource: %s", err)
 	}
 
-	correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
+	correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd"
 	if resourceManifestHash != correctManifestAddrHex {
-		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
+		t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
 	}
 
-	reader, err := client.GetResource(correctManifestAddrHex)
+	// Check we get a not found error when trying to get the resource with a made-up manifest
+	_, err = client.GetResource(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
+	if err != ErrNoResourceUpdatesFound {
+		t.Fatalf("Expected to receive ErrNoResourceUpdatesFound error. Got: %s", err)
+	}
+
+	reader, err := client.GetResource(nil, correctManifestAddrHex)
 	if err != nil {
 		t.Fatalf("Error retrieving resource: %s", err)
 	}
@@ -447,30 +448,22 @@ func TestClientCreateUpdateResource(t *testing.T) {
 	databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...")
 
 	// our mutable resource name
-	resourceName := "El Quijote"
-
-	createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
-		Name:      resourceName,
-		Frequency: 13,
-		StartTime: srv.GetCurrentTime(),
-		Owner:     signer.Address(),
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	createRequest.SetData(databytes, false)
+	topic, _ := mru.NewTopic("El Quijote", nil)
+	createRequest := mru.NewFirstRequest(topic)
+
+	createRequest.SetData(databytes)
 	if err := createRequest.Sign(signer); err != nil {
 		t.Fatalf("Error signing update: %s", err)
 	}
 
 	resourceManifestHash, err := client.CreateResource(createRequest)
 
-	correctManifestAddrHex := "cc7904c17b49f9679e2d8006fe25e87e3f5c2072c2b49cab50f15e544471b30a"
+	correctManifestAddrHex := "fcb8e75f53e480e197c083ad1976d265674d0ce776f2bf359c09c413fb5230b8"
 	if resourceManifestHash != correctManifestAddrHex {
-		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
+		t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
 	}
 
-	reader, err := client.GetResource(correctManifestAddrHex)
+	reader, err := client.GetResource(nil, correctManifestAddrHex)
 	if err != nil {
 		t.Fatalf("Error retrieving resource: %s", err)
 	}
@@ -486,12 +479,12 @@ func TestClientCreateUpdateResource(t *testing.T) {
 	// define different data
 	databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...")
 
-	updateRequest, err := client.GetResourceMetadata(correctManifestAddrHex)
+	updateRequest, err := client.GetResourceMetadata(nil, correctManifestAddrHex)
 	if err != nil {
 		t.Fatalf("Error retrieving update request template: %s", err)
 	}
 
-	updateRequest.SetData(databytes, false)
+	updateRequest.SetData(databytes)
 	if err := updateRequest.Sign(signer); err != nil {
 		t.Fatalf("Error signing update: %s", err)
 	}
@@ -500,7 +493,7 @@ func TestClientCreateUpdateResource(t *testing.T) {
 		t.Fatalf("Error updating resource: %s", err)
 	}
 
-	reader, err = client.GetResource(correctManifestAddrHex)
+	reader, err = client.GetResource(nil, correctManifestAddrHex)
 	if err != nil {
 		t.Fatalf("Error retrieving resource: %s", err)
 	}
@@ -513,4 +506,24 @@ func TestClientCreateUpdateResource(t *testing.T) {
 		t.Fatalf("Expected: %v, got %v", databytes, gotData)
 	}
 
+	// now try retrieving resource without a manifest
+
+	view := &mru.View{
+		Topic: topic,
+		User:  signer.Address(),
+	}
+
+	lookupParams := mru.NewQueryLatest(view, lookup.NoClue)
+	reader, err = client.GetResource(lookupParams, "")
+	if err != nil {
+		t.Fatalf("Error retrieving resource: %s", err)
+	}
+	defer reader.Close()
+	gotData, err = ioutil.ReadAll(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(databytes, gotData) {
+		t.Fatalf("Expected: %v, got %v", databytes, gotData)
+	}
 }
@ -487,6 +487,7 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) {
|
|||||||
// The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content
|
// The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content
|
||||||
func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) {
|
||||||
ruid := GetRUID(r.Context())
|
ruid := GetRUID(r.Context())
|
||||||
|
uri := GetURI(r.Context())
|
||||||
log.Debug("handle.post.resource", "ruid", ruid)
|
log.Debug("handle.post.resource", "ruid", ruid)
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@ -496,9 +497,24 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) {
|
|||||||
RespondError(w, r, err.Error(), http.StatusInternalServerError)
|
RespondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query())
|
||||||
|
if err != nil { // couldn't parse query string or retrieve manifest
|
||||||
|
getFail.Inc(1)
|
||||||
|
httpStatus := http.StatusBadRequest
|
||||||
|
if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI {
|
||||||
|
httpStatus = http.StatusNotFound
|
||||||
|
}
|
||||||
|
RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
var updateRequest mru.Request
|
var updateRequest mru.Request
|
||||||
if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON
|
updateRequest.View = *view
|
||||||
RespondError(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error
|
query := r.URL.Query()
|
||||||
|
|
||||||
|
if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters
|
||||||
|
RespondError(w, r, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -510,56 +526,40 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) {
 		RespondError(w, r, err.Error(), http.StatusForbidden)
 		return
 	}
-	}
-
-	if updateRequest.IsNew() {
-		err = s.api.ResourceCreate(r.Context(), &updateRequest)
-		if err != nil {
-			code, err2 := s.translateResourceError(w, r, "resource creation fail", err)
-			RespondError(w, r, err2.Error(), code)
-			return
-		}
-	}
-
-	if updateRequest.IsUpdate() {
-		_, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate)
-		if err != nil {
-			RespondError(w, r, err.Error(), http.StatusInternalServerError)
-			return
-		}
-	}
+	_, err = s.api.ResourceUpdate(r.Context(), &updateRequest)
+	if err != nil {
+		RespondError(w, r, err.Error(), http.StatusInternalServerError)
+		return
+	}
 
-	// at this point both possible operations (create, update or both) were successful
-	// so in case it was a new resource, then create a manifest and send it over.
-
-	if updateRequest.IsNew() {
+	if query.Get("manifest") == "1" {
 		// we create a manifest so we can retrieve the resource with bzz:// later
-		// this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource
-		// metadata chunk (rootAddr)
-		m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex())
+		// this manifest has a special "resource type" manifest, and saves the
+		// resource view ID used to retrieve the resource later
+		m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.View)
 		if err != nil {
 			RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError)
 			return
 		}
 
 		// the key to the manifest will be passed back to the client
-		// the client can access the root chunk key directly through its Hash member
-		// the manifest key should be set as content in the resolver of the ENS name
-		// \TODO update manifest key automatically in ENS
+		// the client can access the view directly through its resourceView member
+		// the manifest key can be set as content in the resolver of the ENS name
 		outdata, err := json.Marshal(m)
 		if err != nil {
 			RespondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError)
 			return
 		}
 		fmt.Fprint(w, string(outdata))
+		w.Header().Add("Content-type", "application/json")
 	}
-	w.Header().Add("Content-type", "application/json")
 }
 
 // Retrieve mutable resource updates:
 // bzz-resource://<id> - get latest update
-// bzz-resource://<id>/<n> - get latest update on period n
-// bzz-resource://<id>/<n>/<m> - get update version m of period n
+// bzz-resource://<id>/?period=n - get latest update on period n
+// bzz-resource://<id>/?period=n&version=m - get update version m of period n
 // bzz-resource://<id>/meta - get metadata and next version information
 // <id> = ens name or hash
 // TODO: Enable pass maxPeriod parameter
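For illustration, a minimal client-side sketch of the query-string based retrieval described above. This is not part of the change: the gateway address and the resource identifier are placeholders, and the metadata query uses the ?meta=1 form that the new handler and tests exercise.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	gateway := "http://localhost:8500"     // assumed local Swarm HTTP gateway
	id := "<manifest-address-or-ens-name>" // placeholder for <id>

	// latest update:                GET bzz-resource:/<id>
	// version 2 of period 1:        GET bzz-resource:/<id>/?period=1&version=2
	// unsigned update template:     GET bzz-resource:/<id>/?meta=1
	resp, err := http.Get(fmt.Sprintf("%s/bzz-resource:/%s/?period=1&version=2", gateway, id))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d bytes of update data\n", len(data))
}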
@@ -569,84 +569,44 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) {
 	log.Debug("handle.get.resource", "ruid", ruid)
 	var err error
 
-	// resolve the content key.
-	manifestAddr := uri.Address()
-	if manifestAddr == nil {
-		manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr)
-		if err != nil {
-			getFail.Inc(1)
-			RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
-			return
-		}
-	} else {
-		w.Header().Set("Cache-Control", "max-age=2147483648")
-	}
-
-	// get the root chunk rootAddr from the manifest
-	rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
-	if err != nil {
+	view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query())
+	if err != nil { // couldn't parse query string or retrieve manifest
 		getFail.Inc(1)
-		RespondError(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", uri.Addr, err), http.StatusNotFound)
+		httpStatus := http.StatusBadRequest
+		if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI {
+			httpStatus = http.StatusNotFound
+		}
+		RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus)
 		return
 	}
 
-	log.Debug("handle.get.resource: resolved", "ruid", ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr)
-
 	// determine if the query specifies period and version or it is a metadata query
-	var params []string
-	if len(uri.Path) > 0 {
-		if uri.Path == "meta" {
-			unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr)
-			if err != nil {
-				getFail.Inc(1)
-				RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound)
-				return
-			}
-			rawResponse, err := unsignedUpdateRequest.MarshalJSON()
-			if err != nil {
-				RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError)
-				return
-			}
-			w.Header().Add("Content-type", "application/json")
-			w.WriteHeader(http.StatusOK)
-			fmt.Fprint(w, string(rawResponse))
-			return
-		}
-
-		params = strings.Split(uri.Path, "/")
+	if r.URL.Query().Get("meta") == "1" {
+		unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), view)
+		if err != nil {
+			getFail.Inc(1)
+			RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for view=%s: %s", view.Hex(), err), http.StatusNotFound)
+			return
+		}
+		rawResponse, err := unsignedUpdateRequest.MarshalJSON()
+		if err != nil {
+			RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError)
+			return
+		}
+		w.Header().Add("Content-type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprint(w, string(rawResponse))
+		return
 	}
 
-	var name string
-	var data []byte
-	now := time.Now()
-
-	switch len(params) {
-	case 0: // latest only
-		name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr))
-	case 2: // specific period and version
-		var version uint64
-		var period uint64
-		version, err = strconv.ParseUint(params[1], 10, 32)
-		if err != nil {
-			break
-		}
-		period, err = strconv.ParseUint(params[0], 10, 32)
-		if err != nil {
-			break
-		}
-		name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version)))
-	case 1: // last version of specific period
-		var period uint64
-		period, err = strconv.ParseUint(params[0], 10, 32)
-		if err != nil {
-			break
-		}
-		name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period)))
-	default: // bogus
-		err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request")
+	lookupParams := &mru.Query{View: *view}
+	if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version
+		RespondError(w, r, fmt.Sprintf("invalid mutable resource request:%s", err), http.StatusBadRequest)
+		return
 	}
 
+	data, err := s.api.ResourceLookup(r.Context(), lookupParams)
+
 	// any error from the switch statement will end up here
 	if err != nil {
 		code, err2 := s.translateResourceError(w, r, "mutable resource lookup fail", err)
@@ -655,9 +615,9 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// All ok, serve the retrieved update
-	log.Debug("Found update", "name", name, "ruid", ruid)
+	log.Debug("Found update", "view", view.Hex(), "ruid", ruid)
 	w.Header().Set("Content-Type", "application/octet-stream")
-	http.ServeContent(w, r, "", now, bytes.NewReader(data))
+	http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data))
 }
 
 func (s *Server) translateResourceError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) {
@@ -30,12 +30,15 @@ import (
 	"math/big"
 	"mime/multipart"
 	"net/http"
+	"net/url"
 	"os"
 	"strconv"
 	"strings"
 	"testing"
 	"time"
 
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -121,8 +124,8 @@ func TestBzzResourceMultihash(t *testing.T) {
 
 	// add the data our multihash aliased manifest will point to
 	databytes := "bar"
-	url := fmt.Sprintf("%s/bzz:/", srv.URL)
-	resp, err := http.Post(url, "text/plain", bytes.NewReader([]byte(databytes)))
+	testBzzUrl := fmt.Sprintf("%s/bzz:/", srv.URL)
+	resp, err := http.Post(testBzzUrl, "text/plain", bytes.NewReader([]byte(databytes)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -140,33 +143,27 @@ func TestBzzResourceMultihash(t *testing.T) {
 	log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
 
-	// our mutable resource "name"
-	keybytes := "foo.eth"
-
-	updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
-		Name:      keybytes,
-		Frequency: 13,
-		StartTime: srv.GetCurrentTime(),
-		Owner:     signer.Address(),
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	updateRequest.SetData(mh, true)
+	topic, _ := mru.NewTopic("foo.eth", nil)
+	updateRequest := mru.NewFirstRequest(topic)
+
+	updateRequest.SetData(mh)
 
 	if err := updateRequest.Sign(signer); err != nil {
 		t.Fatal(err)
 	}
 	log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
 
-	body, err := updateRequest.MarshalJSON()
+	testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL))
 	if err != nil {
 		t.Fatal(err)
 	}
+	query := testUrl.Query()
+	body := updateRequest.AppendValues(query) // this adds all query parameters and returns the data to be posted
+	query.Set("manifest", "1")                // indicate we want a manifest back
+	testUrl.RawQuery = query.Encode()
 
 	// create the multihash update
-	url = fmt.Sprintf("%s/bzz-resource:/", srv.URL)
-	resp, err = http.Post(url, "application/json", bytes.NewReader(body))
+	resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -184,14 +181,14 @@ func TestBzzResourceMultihash(t *testing.T) {
 		t.Fatalf("data %s could not be unmarshaled: %v", b, err)
 	}
 
-	correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
+	correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd"
 	if rsrcResp.Hex() != correctManifestAddrHex {
 		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
 	}
 
 	// get bzz manifest transparent resource resolve
-	url = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
-	resp, err = http.Get(url)
+	testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
+	resp, err = http.Get(testBzzUrl)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -215,39 +212,38 @@ func TestBzzResource(t *testing.T) {
 	defer srv.Close()
 
-	// our mutable resource "name"
-	keybytes := "foo.eth"
-
 	// data of update 1
-	databytes := make([]byte, 666)
-	_, err := rand.Read(databytes)
+	update1Data := make([]byte, 666)
+	update1Timestamp := srv.CurrentTime
+	_, err := rand.Read(update1Data)
 	if err != nil {
 		t.Fatal(err)
 	}
+	//data for update 2
+	update2Data := []byte("foo")
 
-	updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
-		Name:      keybytes,
-		Frequency: 13,
-		StartTime: srv.GetCurrentTime(),
-		Owner:     signer.Address(),
-	})
+	topic, _ := mru.NewTopic("foo.eth", nil)
+	updateRequest := mru.NewFirstRequest(topic)
 	if err != nil {
 		t.Fatal(err)
 	}
-	updateRequest.SetData(databytes, false)
+	updateRequest.SetData(update1Data)
 
 	if err := updateRequest.Sign(signer); err != nil {
 		t.Fatal(err)
 	}
 
-	body, err := updateRequest.MarshalJSON()
+	// creates resource and sets update 1
+	testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL))
 	if err != nil {
 		t.Fatal(err)
 	}
+	urlQuery := testUrl.Query()
+	body := updateRequest.AppendValues(urlQuery) // this adds all query parameters
+	urlQuery.Set("manifest", "1")                // indicate we want a manifest back
+	testUrl.RawQuery = urlQuery.Encode()
 
-	// creates resource and sets update 1
-	url := fmt.Sprintf("%s/bzz-resource:/", srv.URL)
-	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
+	resp, err := http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -265,14 +261,14 @@ func TestBzzResource(t *testing.T) {
 		t.Fatalf("data %s could not be unmarshaled: %v", b, err)
 	}
 
-	correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
+	correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd"
 	if rsrcResp.Hex() != correctManifestAddrHex {
-		t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
+		t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
 	}
 
 	// get the manifest
-	url = fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, rsrcResp)
-	resp, err = http.Get(url)
+	testRawUrl := fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, rsrcResp)
+	resp, err = http.Get(testRawUrl)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -292,20 +288,20 @@ func TestBzzResource(t *testing.T) {
 	if len(manifest.Entries) != 1 {
 		t.Fatalf("Manifest has %d entries", len(manifest.Entries))
 	}
-	correctRootKeyHex := "68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562"
-	if manifest.Entries[0].Hash != correctRootKeyHex {
-		t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash)
+	correctViewHex := "0x666f6f2e65746800000000000000000000000000000000000000000000000000c96aaa54e2d44c299564da76e1cd3184a2386b8d"
+	if manifest.Entries[0].ResourceView.Hex() != correctViewHex {
+		t.Fatalf("Expected manifest Resource View '%s', got '%s'", correctViewHex, manifest.Entries[0].ResourceView.Hex())
 	}
 
 	// get bzz manifest transparent resource resolve
-	url = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
-	resp, err = http.Get(url)
+	testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp)
+	resp, err = http.Get(testBzzUrl)
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		t.Fatalf("err %s", resp.Status)
+	if resp.StatusCode == http.StatusOK {
+		t.Fatal("Expected error status since resource is not multihash. Received 200 OK")
 	}
 	b, err = ioutil.ReadAll(resp.Body)
 	if err != nil {
@@ -313,8 +309,8 @@ func TestBzzResource(t *testing.T) {
 	}
 
 	// get non-existent name, should fail
-	url = fmt.Sprintf("%s/bzz-resource:/bar", srv.URL)
-	resp, err = http.Get(url)
+	testBzzResUrl := fmt.Sprintf("%s/bzz-resource:/bar", srv.URL)
+	resp, err = http.Get(testBzzResUrl)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -327,8 +323,8 @@ func TestBzzResource(t *testing.T) {
 
 	// get latest update (1.1) through resource directly
 	log.Info("get update latest = 1.1", "addr", correctManifestAddrHex)
-	url = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex)
-	resp, err = http.Get(url)
+	testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex)
+	resp, err = http.Get(testBzzResUrl)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -340,16 +336,18 @@ func TestBzzResource(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !bytes.Equal(databytes, b) {
-		t.Fatalf("Expected body '%x', got '%x'", databytes, b)
+	if !bytes.Equal(update1Data, b) {
+		t.Fatalf("Expected body '%x', got '%x'", update1Data, b)
 	}
 
 	// update 2
+	// Move the clock ahead 1 second
+	srv.CurrentTime++
 	log.Info("update 2")
 
 	// 1.- get metadata about this resource
-	url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex)
-	resp, err = http.Get(url + "meta")
+	testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex)
+	resp, err = http.Get(testBzzResUrl + "?meta=1")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -365,17 +363,19 @@ func TestBzzResource(t *testing.T) {
 	if err = updateRequest.UnmarshalJSON(b); err != nil {
 		t.Fatalf("Error decoding resource metadata: %s", err)
 	}
-	data := []byte("foo")
-	updateRequest.SetData(data, false)
+	updateRequest.SetData(update2Data)
 	if err = updateRequest.Sign(signer); err != nil {
 		t.Fatal(err)
 	}
-	body, err = updateRequest.MarshalJSON()
+	testUrl, err = url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL))
 	if err != nil {
 		t.Fatal(err)
 	}
+	urlQuery = testUrl.Query()
+	body = updateRequest.AppendValues(urlQuery) // this adds all query parameters
+	testUrl.RawQuery = urlQuery.Encode()
 
-	resp, err = http.Post(url, "application/json", bytes.NewReader(body))
+	resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -386,8 +386,8 @@ func TestBzzResource(t *testing.T) {
 
 	// get latest update (1.2) through resource directly
 	log.Info("get update 1.2")
-	url = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex)
-	resp, err = http.Get(url)
+	testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex)
+	resp, err = http.Get(testBzzResUrl)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -399,33 +399,23 @@ func TestBzzResource(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !bytes.Equal(data, b) {
-		t.Fatalf("Expected body '%x', got '%x'", data, b)
+	if !bytes.Equal(update2Data, b) {
+		t.Fatalf("Expected body '%x', got '%x'", update2Data, b)
 	}
 
-	// get latest update (1.2) with specified period
-	log.Info("get update latest = 1.2")
-	url = fmt.Sprintf("%s/bzz-resource:/%s/1", srv.URL, correctManifestAddrHex)
-	resp, err = http.Get(url)
+	// test manifest-less queries
+	log.Info("get first update in update1Timestamp via direct query")
+	query := mru.NewQuery(&updateRequest.View, update1Timestamp, lookup.NoClue)
+
+	urlq, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL))
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		t.Fatalf("err %s", resp.Status)
-	}
-	b, err = ioutil.ReadAll(resp.Body)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !bytes.Equal(data, b) {
-		t.Fatalf("Expected body '%x', got '%x'", data, b)
-	}
 
-	// get first update (1.1) with specified period and version
-	log.Info("get first update 1.1")
-	url = fmt.Sprintf("%s/bzz-resource:/%s/1/1", srv.URL, correctManifestAddrHex)
-	resp, err = http.Get(url)
+	values := urlq.Query()
+	query.AppendValues(values) // this adds view query parameters
+	urlq.RawQuery = values.Encode()
+	resp, err = http.Get(urlq.String())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -437,9 +427,10 @@ func TestBzzResource(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !bytes.Equal(databytes, b) {
-		t.Fatalf("Expected body '%x', got '%x'", databytes, b)
+	if !bytes.Equal(update1Data, b) {
+		t.Fatalf("Expected body '%x', got '%x'", update1Data, b)
 	}
 
 }
 
 func TestBzzGetPath(t *testing.T) {
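For orientation, the write path that TestBzzResource exercises above can be condensed into a single helper. This is an illustrative sketch, not part of the change set: gatewayURL and the signer are caller-supplied placeholders, "foo.eth" is the same example topic the tests use, and mru.Signer is assumed here to be the exported signer interface accepted by Request.Sign.

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"

	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// postFirstUpdate publishes the first update of a brand new resource through a
// Swarm HTTP gateway, asking the server to create and return a manifest.
func postFirstUpdate(gatewayURL string, signer mru.Signer, data []byte) error {
	topic, err := mru.NewTopic("foo.eth", nil)
	if err != nil {
		return err
	}
	request := mru.NewFirstRequest(topic) // picks the first epoch for "now"
	request.SetData(data)
	if err := request.Sign(signer); err != nil {
		return err
	}

	u, err := url.Parse(gatewayURL + "/bzz-resource:/")
	if err != nil {
		return err
	}
	values := u.Query()
	body := request.AppendValues(values) // lookup/update metadata travels in the query string
	values.Set("manifest", "1")          // ask the server to create and return a manifest
	u.RawQuery = values.Encode()

	resp, err := http.Post(u.String(), "application/octet-stream", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}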
@@ -27,6 +27,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
@@ -46,14 +48,15 @@ type Manifest struct {
 
 // ManifestEntry represents an entry in a swarm manifest
 type ManifestEntry struct {
 	Hash        string       `json:"hash,omitempty"`
 	Path        string       `json:"path,omitempty"`
 	ContentType string       `json:"contentType,omitempty"`
 	Mode        int64        `json:"mode,omitempty"`
 	Size        int64        `json:"size,omitempty"`
 	ModTime     time.Time    `json:"mod_time,omitempty"`
 	Status      int          `json:"status,omitempty"`
 	Access      *AccessEntry `json:"access,omitempty"`
+	ResourceView *mru.View   `json:"resourceView,omitempty"`
 }
 
 // ManifestList represents the result of listing files in a manifest
@@ -79,11 +82,11 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address,
 
 // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme
 // see swarm/api/api.go:API.Get() for more information
-func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (storage.Address, error) {
+func (a *API) NewResourceManifest(ctx context.Context, view *mru.View) (storage.Address, error) {
 	var manifest Manifest
 	entry := ManifestEntry{
-		Hash:        resourceAddr,
+		ResourceView: view,
 		ContentType: ResourceContentType,
 	}
 	manifest.Entries = append(manifest.Entries, entry)
 	data, err := json.Marshal(&manifest)
swarm/storage/mru/binaryserializer.go (new file)
@@ -0,0 +1,44 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import "github.com/ethereum/go-ethereum/common/hexutil"
+
+type binarySerializer interface {
+	binaryPut(serializedData []byte) error
+	binaryLength() int
+	binaryGet(serializedData []byte) error
+}
+
+// Values interface represents a string key-value store
+// useful for building query strings
+type Values interface {
+	Get(key string) string
+	Set(key, value string)
+}
+
+type valueSerializer interface {
+	FromValues(values Values) error
+	AppendValues(values Values)
+}
+
+// Hex serializes the structure and converts it to a hex string
+func Hex(bin binarySerializer) string {
+	b := make([]byte, bin.binaryLength())
+	bin.binaryPut(b)
+	return hexutil.Encode(b)
+}

swarm/storage/mru/binaryserializer_test.go (new file)
@@ -0,0 +1,98 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+// KV mocks a key value store
+type KV map[string]string
+
+func (kv KV) Get(key string) string {
+	return kv[key]
+}
+func (kv KV) Set(key, value string) {
+	kv[key] = value
+}
+
+func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) {
+	if hexutil.Encode(actualValue) != expectedHex {
+		t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue))
+	}
+}
+
+func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) {
+	name := reflect.TypeOf(bin).Elem().Name()
+	serialized := make([]byte, bin.binaryLength())
+	if err := bin.binaryPut(serialized); err != nil {
+		t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err)
+	}
+
+	compareByteSliceToExpectedHex(t, name, serialized, expectedHex)
+
+	recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer)
+	if err := recovered.binaryGet(serialized); err != nil {
+		t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err)
+	}
+
+	if !reflect.DeepEqual(bin, recovered) {
+		t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name)
+	}
+
+	serializedWrongLength := make([]byte, 1)
+	copy(serializedWrongLength[:], serialized)
+	if err := recovered.binaryGet(serializedWrongLength); err == nil {
+		t.Fatalf("Expected %s.binaryGet to fail since data is too small", name)
+	}
+}
+
+func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) {
+	name := reflect.TypeOf(bin).Elem().Name()
+	// make a slice that is too small to contain the metadata
+	serialized := make([]byte, bin.binaryLength()-1)
+
+	if err := bin.binaryPut(serialized); err == nil {
+		t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name)
+	}
+}
+
+func testValueSerializer(t *testing.T, v valueSerializer, expected KV) {
+	name := reflect.TypeOf(v).Elem().Name()
+	kv := make(KV)
+
+	v.AppendValues(kv)
+	if !reflect.DeepEqual(expected, kv) {
+		expj, _ := json.Marshal(expected)
+		gotj, _ := json.Marshal(kv)
+		t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj))
+	}
+
+	recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer)
+	err := recovered.FromValues(kv)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(recovered, v) {
+		t.Fatalf("Expected recovered %s to be the same", name)
+	}
+}

swarm/storage/mru/cacheentry.go (new file)
@@ -0,0 +1,48 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+	"bytes"
+	"context"
+	"time"
+
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+const (
+	hasherCount            = 8
+	resourceHashAlgorithm  = storage.SHA3Hash
+	defaultRetrieveTimeout = 100 * time.Millisecond
+)
+
+// cacheEntry caches resource data and the metadata of its root chunk.
+type cacheEntry struct {
+	ResourceUpdate
+	*bytes.Reader
+	lastKey storage.Address
+}
+
+// implements storage.LazySectionReader
+func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) {
+	return int64(len(r.ResourceUpdate.data)), nil
+}
+
+//returns the resource's topic
+func (r *cacheEntry) Topic() Topic {
+	return r.View.Topic
+}

@@ -1,61 +1,44 @@
-// Package mru defines Mutable resource updates.
-// A Mutable Resource is an entity which allows updates to a resource
-// without resorting to ENS on each update.
-// The update scheme is built on swarm chunks with chunk keys following
-// a predictable, versionable pattern.
-//
-// Updates are defined to be periodic in nature, where the update frequency
-// is expressed in seconds.
-//
-// The root entry of a mutable resource is tied to a unique identifier that
-// is deterministically generated out of the metadata content that describes
-// the resource. This metadata includes a user-defined resource name, a resource
-// start time that indicates when the resource becomes valid,
-// the frequency in seconds with which the resource is expected to be updated, both of
-// which are stored as little-endian uint64 values in the database (for a
-// total of 16 bytes). It also contains the owner's address (ownerAddr)
-// This MRU info is stored in a separate content-addressed chunk
-// (call it the metadata chunk), with the following layout:
-//
-// (00|length|startTime|frequency|name|ownerAddr)
-//
-// (The two first zero-value bytes are used for disambiguation by the chunk validator,
-// and update chunk will always have a value > 0 there.)
-//
-// Each metadata chunk is identified by its rootAddr, calculated as follows:
-// metaHash=H(len(metadata), startTime, frequency,name)
-// rootAddr = H(metaHash, ownerAddr).
-// where H is the SHA3 hash function
-// This scheme effectively locks the root chunk so that only the owner of the private key
-// that ownerAddr was derived from can sign updates.
-//
-// The root entry tells the requester from when the mutable resource was
-// first added (Unix time in seconds) and in which moments to look for the
-// actual updates. Thus, a resource update for identifier "føø.bar"
-// starting at unix time 1528800000 with frequency 300 (every 5 mins) will have updates on 1528800300,
-// 1528800600, 1528800900 and so on.
-//
-// Actual data updates are also made in the form of swarm chunks. The keys
-// of the updates are the hash of a concatenation of properties as follows:
-//
-// updateAddr = H(period, version, rootAddr)
-// where H is the SHA3 hash function
-// The period is (currentTime - startTime) / frequency
-//
-// Using our previous example, this means that a period 3 will happen when the
-// clock hits 1528800900
-//
-// If more than one update is made in the same period, incremental
-// version numbers are used successively.
-//
-// A user looking up a resource would only need to know the rootAddr in order to get the versions
-//
-// the resource update data is:
-// resourcedata = headerlength|period|version|rootAddr|flags|metaHash
-// where flags is a 1-byte flags field. Flag 0 is set to 1 to indicate multihash
-//
-// the full update data that goes in the chunk payload is:
-// resourcedata|sign(resourcedata)
-//
-// headerlength is a 16 bit value containing the byte length of period|version|rootAddr|flags|metaHash
+/*
+Package mru defines Mutable resource updates.
+
+A Mutable Resource is an entity which allows updates to a resource
+without resorting to ENS on each update.
+The update scheme is built on swarm chunks with chunk keys following
+a predictable, versionable pattern.
+
+A Resource is tied to a unique identifier that is deterministically generated out of
+the chosen topic.
+
+A Resource View is defined as a specific user's point of view about a particular resource.
+Thus, a View is a Topic + the user's address (userAddr)
+
+Actual data updates are also made in the form of swarm chunks. The keys
+of the updates are the hash of a concatenation of properties as follows:
+
+updateAddr = H(View, Epoch ID)
+where H is the SHA3 hash function
+View is the combination of Topic and the user address
+Epoch ID is a time slot. See the lookup package for more information.
+
+A user looking up a resource would only need to know the View in order to
+another user's updates
+
+The resource update data is:
+resourcedata = View|Epoch|data
+
+the full update data that goes in the chunk payload is:
+resourcedata|sign(resourcedata)
+
+Structure Summary:
+
+Request: Resource update with signature
+	ResourceUpdate: headers + data
+		Header: Protocol version and reserved for future use placeholders
+		ID: Information about how to locate a specific update
+			View: Author of the update and what is updating
+				Topic: Item that the updates are about
+				User: User who updates the resource
+			Epoch: time slot where the update is stored
+
+*/
 package mru

@@ -21,11 +21,12 @@ package mru
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"sync"
 	"time"
-	"unsafe"
 
-	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
 
 	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
@@ -33,7 +34,7 @@ import (
 type Handler struct {
 	chunkStore *storage.NetStore
 	HashSize   int
-	resources       map[uint64]*resource
+	resources       map[uint64]*cacheEntry
 	resourceLock    sync.RWMutex
 	storeTimeout    time.Duration
 	queryMaxPeriods uint32
@@ -42,12 +43,10 @@ type Handler struct {
 // HandlerParams pass parameters to the Handler constructor NewHandler
 // Signer and TimestampProvider are mandatory parameters
 type HandlerParams struct {
-	QueryMaxPeriods uint32
 }
 
 // hashPool contains a pool of ready hashers
 var hashPool sync.Pool
-var minimumChunkLength int
 
 // init initializes the package and hashPool
 func init() {
@@ -56,19 +55,12 @@ func init() {
 			return storage.MakeHashFunc(resourceHashAlgorithm)()
 		},
 	}
-	if minimumMetadataLength < minimumUpdateDataLength {
-		minimumChunkLength = minimumMetadataLength
-	} else {
-		minimumChunkLength = minimumUpdateDataLength
-	}
 }
 
 // NewHandler creates a new Mutable Resource API
 func NewHandler(params *HandlerParams) *Handler {
 	rh := &Handler{
-		resources:       make(map[uint64]*resource),
-		storeTimeout:    defaultStoreTimeout,
-		queryMaxPeriods: params.QueryMaxPeriods,
+		resources: make(map[uint64]*cacheEntry),
 	}
 
 	for i := 0; i < hasherCount; i++ {
@@ -88,44 +80,25 @@ func (h *Handler) SetStore(store *storage.NetStore) {
 }
 
 // Validate is a chunk validation method
-// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature
+// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature
 // It implements the storage.ChunkValidator interface
 func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
 	dataLength := len(data)
-	if dataLength < minimumChunkLength || dataLength > chunk.DefaultSize+8 {
+	if dataLength < minimumSignedUpdateLength {
 		return false
 	}
 
-	//metadata chunks have the first two bytes set to zero
-	if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength {
-		//metadata chunk
-		rootAddr, _ := metadataHash(data)
-		valid := bytes.Equal(chunkAddr, rootAddr)
-		if !valid {
-			log.Debug("Invalid root metadata chunk with address", "addr", chunkAddr.Hex())
-		}
-		return valid
-	}
-
-	// if it is not a metadata chunk, check if it is a properly formatted update chunk with
+	// check if it is a properly formatted update chunk with
 	// valid signature and proof of ownership of the resource it is trying
 	// to update
 
 	// First, deserialize the chunk
-	var r SignedResourceUpdate
+	var r Request
 	if err := r.fromChunk(chunkAddr, data); err != nil {
 		log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error())
 		return false
 	}
 
-	// check that the lookup information contained in the chunk matches the updateAddr (chunk search key)
-	// that was used to retrieve this chunk
-	// if this validation fails, someone forged a chunk.
-	if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) {
-		log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr", "addr", chunkAddr.Hex())
-		return false
-	}
-
 	// Verify signatures and that the signer actually owns the resource
 	// If it fails, it means either the signature is not valid, data is corrupted
 	// or someone is trying to update someone else's resource.
@@ -138,301 +111,134 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
 }
 
 // GetContent retrieves the data payload of the last synced update of the Mutable Resource
-func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) {
-	rsrc := h.get(rootAddr)
-	if rsrc == nil || !rsrc.isSynced() {
-		return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
+func (h *Handler) GetContent(view *View) (storage.Address, []byte, error) {
+	if view == nil {
+		return nil, nil, NewError(ErrInvalidValue, "view is nil")
+	}
+	rsrc := h.get(view)
+	if rsrc == nil {
+		return nil, nil, NewError(ErrNotFound, "resource does not exist")
 	}
 	return rsrc.lastKey, rsrc.data, nil
 }
 
-// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource
-func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) {
-	rsrc := h.get(rootAddr)
-	if rsrc == nil {
-		return 0, NewError(ErrNotFound, " does not exist")
-	} else if !rsrc.isSynced() {
-		return 0, NewError(ErrNotSynced, " is not synced")
-	}
-	return rsrc.period, nil
-}
-
-// GetVersion retrieves the period of the last synced update of the Mutable Resource
-func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) {
-	rsrc := h.get(rootAddr)
-	if rsrc == nil {
-		return 0, NewError(ErrNotFound, " does not exist")
-	} else if !rsrc.isSynced() {
-		return 0, NewError(ErrNotSynced, " is not synced")
-	}
-	return rsrc.version, nil
-}
-
-// New creates a new metadata chunk out of the request passed in.
-func (h *Handler) New(ctx context.Context, request *Request) error {
-
-	// frequency 0 is invalid
-	if request.metadata.Frequency == 0 {
-		return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource")
-	}
-
-	// make sure owner is set to something
-	if request.metadata.Owner == zeroAddr {
-		return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk")
-	}
-
-	// create the meta chunk and store it in swarm
-	chunk, metaHash, err := request.metadata.newChunk()
-	if err != nil {
-		return err
-	}
-	if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) ||
-		request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Address()) {
-		return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata")
-	}
-
-	request.metaHash = metaHash
-	request.rootAddr = chunk.Address()
-
-	h.chunkStore.Put(ctx, chunk)
-	log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner)
-
-	// create the internal index for the resource and populate it with its metadata
-	rsrc := &resource{
-		resourceUpdate: resourceUpdate{
-			updateHeader: updateHeader{
-				UpdateLookup: UpdateLookup{
-					rootAddr: chunk.Address(),
-				},
-			},
-		},
-		ResourceMetadata: request.metadata,
-		updated:          time.Now(),
-	}
-	h.set(chunk.Address(), rsrc)
-
-	return nil
-}
-
-// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to
+// NewRequest prepares a Request structure with all the necessary information to
 // just add the desired data and sign it.
 // The resulting structure can then be signed and passed to Handler.Update to be verified and sent
-func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) {
-
-	if rootAddr == nil {
-		return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil")
-	}
-
-	// Make sure we have a cache of the metadata chunk
-	rsrc, err := h.Load(ctx, rootAddr)
-	if err != nil {
-		return nil, err
-	}
-
-	now := TimestampProvider.Now()
-
-	updateRequest = new(Request)
-	updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
-	if err != nil {
-		return nil, err
-	}
-
-	if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil {
+func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, err error) {
+	if view == nil {
+		return nil, NewError(ErrInvalidValue, "view cannot be nil")
+	}
+
+	now := TimestampProvider.Now().Time
+	request = new(Request)
+	request.Header.Version = ProtocolVersion
+
+	query := NewQueryLatest(view, lookup.NoClue)
+
+	rsrc, err := h.Lookup(ctx, query)
+	if err != nil {
 		if err.(*Error).code != ErrNotFound {
 			return nil, err
 		}
 		// not finding updates means that there is a network error
-		// or that the resource really does not have updates in this period.
+		// or that the resource really does not have updates
 	}
 
-	updateRequest.multihash = rsrc.multihash
-	updateRequest.rootAddr = rsrc.rootAddr
-	updateRequest.metaHash = rsrc.metaHash
-	updateRequest.metadata = rsrc.ResourceMetadata
+	request.View = *view
 
-	// if we already have an update for this period then increment version
-	// resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
-	if h.hasUpdate(rootAddr, updateRequest.period) {
-		updateRequest.version = rsrc.version + 1
+	// if we already have an update, then find next epoch
+	if rsrc != nil {
+		request.Epoch = lookup.GetNextEpoch(rsrc.Epoch, now)
 	} else {
-		updateRequest.version = 1
+		request.Epoch = lookup.GetFirstEpoch(now)
 	}
 
-	return updateRequest, nil
+	return request, nil
 }
 
-// Lookup retrieves a specific or latest version of the resource update with metadata chunk at params.Root
-// Lookup works differently depending on the configuration of `LookupParams`
-// See the `LookupParams` documentation and helper functions:
-// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion`
+// Lookup retrieves a specific or latest version of the resource
+// Lookup works differently depending on the configuration of `ID`
+// See the `ID` documentation and helper functions:
+// `LookupLatest` and `LookupBefore`
 // When looking for the latest update, it starts at the next period after the current time.
 // upon failure tries the corresponding keys of each previous period until one is found
 // (or startTime is reached, in which case there are no updates).
-func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) {
-
-	rsrc := h.get(params.rootAddr)
-	if rsrc == nil {
-		return nil, NewError(ErrNothingToReturn, "resource not loaded")
-	}
-	return h.lookup(rsrc, params)
-}
-
-// LookupPrevious returns the resource before the one currently loaded in the resource cache
-// This is useful where resource updates are used incrementally in contrast to
-// merely replacing content.
-// Requires a cached resource object to determine the current state of the resource.
-func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) {
-	rsrc := h.get(params.rootAddr)
-	if rsrc == nil {
-		return nil, NewError(ErrNothingToReturn, "resource not loaded")
-	}
-	if !rsrc.isSynced() {
-		return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.")
-	} else if rsrc.period == 0 {
-		return nil, NewError(ErrNothingToReturn, " not found")
-	}
-	var version, period uint32
-	if rsrc.version > 1 {
-		version = rsrc.version - 1
-		period = rsrc.period
-	} else if rsrc.period == 1 {
-		return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
-	} else {
-		version = 0
-		period = rsrc.period - 1
-	}
-	return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit))
-}
-
-// base code for public lookup methods
-func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) {
-
-	lp := *params
+func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) {
+
+	timeLimit := query.TimeLimit
+	if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update
+		timeLimit = TimestampProvider.Now().Time
+	}
+
+	if query.Hint == lookup.NoClue { // try to use our cache
+		entry := h.get(&query.View)
+		if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints
+			query.Hint = entry.Epoch
+		}
+	}
+
 	// we can't look for anything without a store
 	if h.chunkStore == nil {
 		return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
 	}
 
-	var specificperiod bool
-	if lp.period > 0 {
-		specificperiod = true
-	} else {
-		// get the current time and the next period
-		now := TimestampProvider.Now()
-
-		var period uint32
-		period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
-		if err != nil {
-			return nil, err
-		}
-		lp.period = period
-	}
-
-	// start from the last possible period, and iterate previous ones
-	// (unless we want a specific period only) until we find a match.
-	// If we hit startTime we're out of options
-	var specificversion bool
-	if lp.version > 0 {
-		specificversion = true
-	} else {
-		lp.version = 1
-	}
-
-	var hops uint32
-	if lp.Limit == 0 {
-		lp.Limit = h.queryMaxPeriods
-	}
-	log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit)
-	for lp.period > 0 {
-		if lp.Limit != 0 && hops > lp.Limit {
-			return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit)
-		}
-		updateAddr := lp.UpdateAddr()
-
-		ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout)
+	var ul ID
+	ul.View = query.View
+	var readCount int
+
+	// Invoke the lookup engine.
+	// The callback will be called every time the lookup algorithm needs to guess
+	requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) {
+		readCount++
+		ul.Epoch = epoch
+		ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
 		defer cancel()
 
-		chunk, err := h.chunkStore.Get(ctx, updateAddr)
-		if err == nil {
-			if specificversion {
-				return h.updateIndex(rsrc, chunk)
-			}
-			// check if we have versions > 1. If a version fails, the previous version is used and returned.
-			log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr)
-			for {
-				newversion := lp.version + 1
-				updateAddr := lp.UpdateAddr()
-
-				ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout)
-				defer cancel()
-
-				newchunk, err := h.chunkStore.Get(ctx, updateAddr)
-				if err != nil {
-					return h.updateIndex(rsrc, chunk)
-				}
-				chunk = newchunk
-				lp.version = newversion
-				log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr)
-			}
-		}
-		if specificperiod {
-			break
-		}
-		log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr)
+		chunk, err := h.chunkStore.Get(ctx, ul.Addr())
+		if err != nil { // TODO: check for catastrophic errors other than chunk not found
+			return nil, nil
|
|
||||||
lp.period--
|
|
||||||
hops++
|
|
||||||
}
|
|
||||||
return nil, NewError(ErrNotFound, "no updates found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load retrieves the Mutable Resource metadata chunk stored at rootAddr
|
var request Request
|
||||||
// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents
|
if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil {
|
||||||
func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) {
|
return nil, nil
|
||||||
//TODO: Maybe add timeout to context, defaultRetrieveTimeout?
|
}
|
||||||
ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
|
if request.Time <= timeLimit {
|
||||||
defer cancel()
|
return &request, nil
|
||||||
chunk, err := h.chunkStore.Get(ctx, rootAddr)
|
}
|
||||||
|
return nil, nil
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, NewError(ErrNotFound, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// create the index entry
|
|
||||||
rsrc := &resource{}
|
|
||||||
|
|
||||||
if err := rsrc.ResourceMetadata.binaryGet(chunk.Data()); err != nil { // Will fail if this is not really a metadata chunk
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.Data())
|
log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount))
|
||||||
if !bytes.Equal(rsrc.rootAddr, rootAddr) {
|
|
||||||
return nil, NewError(ErrCorruptData, "Corrupt metadata chunk")
|
request, _ := requestPtr.(*Request)
|
||||||
|
if request == nil {
|
||||||
|
return nil, NewError(ErrNotFound, "no updates found")
|
||||||
}
|
}
|
||||||
h.set(rootAddr, rsrc)
|
return h.updateCache(request)
|
||||||
log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency)
|
|
||||||
return rsrc, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// update mutable resource index map with specified content
|
// update mutable resource cache map with specified content
|
||||||
func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, error) {
|
func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {
|
||||||
|
|
||||||
// retrieve metadata from chunk data and check that it matches this mutable resource
|
updateAddr := request.Addr()
|
||||||
var r SignedResourceUpdate
|
log.Trace("resource cache update", "topic", request.Topic.Hex(), "updatekey", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level)
|
||||||
if err := r.fromChunk(chunk.Address(), chunk.Data()); err != nil {
|
|
||||||
return nil, err
|
rsrc := h.get(&request.View)
|
||||||
|
if rsrc == nil {
|
||||||
|
rsrc = &cacheEntry{}
|
||||||
|
h.set(&request.View, rsrc)
|
||||||
}
|
}
|
||||||
log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Address(), "period", r.period, "version", r.version)
|
|
||||||
|
|
||||||
// update our rsrcs entry map
|
// update our rsrcs entry map
|
||||||
rsrc.lastKey = chunk.Address()
|
rsrc.lastKey = updateAddr
|
||||||
rsrc.period = r.period
|
rsrc.ResourceUpdate = request.ResourceUpdate
|
||||||
rsrc.version = r.version
|
|
||||||
rsrc.updated = time.Now()
|
|
||||||
rsrc.data = make([]byte, len(r.data))
|
|
||||||
rsrc.multihash = r.multihash
|
|
||||||
copy(rsrc.data, r.data)
|
|
||||||
rsrc.Reader = bytes.NewReader(rsrc.data)
|
rsrc.Reader = bytes.NewReader(rsrc.data)
|
||||||
log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Address(), "period", rsrc.period, "version", rsrc.version)
|
|
||||||
h.set(chunk.Address(), rsrc)
|
|
||||||
return rsrc, nil
|
return rsrc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -442,23 +248,16 @@ func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, error)
|
|||||||
// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit.
|
// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit.
|
||||||
// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
|
// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
|
||||||
// on the network.
|
// on the network.
|
||||||
func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) {
|
func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {
|
||||||
return h.update(ctx, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create and commit an update
|
|
||||||
func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) {
|
|
||||||
|
|
||||||
// we can't update anything without a store
|
// we can't update anything without a store
|
||||||
if h.chunkStore == nil {
|
if h.chunkStore == nil {
|
||||||
return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
|
return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
|
||||||
}
|
}
|
||||||
|
|
||||||
rsrc := h.get(r.rootAddr)
|
rsrc := h.get(&r.View)
|
||||||
if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure
|
if rsrc != nil && rsrc.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure
|
||||||
rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks
|
return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist")
|
||||||
|
|
||||||
return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
|
chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
|
||||||
@@ -468,49 +267,32 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error)
|
|||||||
|
|
||||||
// send the chunk
|
// send the chunk
|
||||||
h.chunkStore.Put(ctx, chunk)
|
h.chunkStore.Put(ctx, chunk)
|
||||||
log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.Data(), "multihash", r.multihash)
|
log.Trace("resource update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data())
|
||||||
|
// update our resources map cache entry if the new update is newer than the one we have, if we have it.
|
||||||
// update our resources map entry if the new update is older than the one we have, if we have it.
|
if rsrc != nil && r.Epoch.After(rsrc.Epoch) {
|
||||||
if rsrc != nil && (r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version)) {
|
rsrc.Epoch = r.Epoch
|
||||||
rsrc.period = r.period
|
|
||||||
rsrc.version = r.version
|
|
||||||
rsrc.data = make([]byte, len(r.data))
|
rsrc.data = make([]byte, len(r.data))
|
||||||
rsrc.updated = time.Now()
|
rsrc.lastKey = r.idAddr
|
||||||
rsrc.lastKey = r.updateAddr
|
|
||||||
rsrc.multihash = r.multihash
|
|
||||||
copy(rsrc.data, r.data)
|
copy(rsrc.data, r.data)
|
||||||
rsrc.Reader = bytes.NewReader(rsrc.data)
|
rsrc.Reader = bytes.NewReader(rsrc.data)
|
||||||
}
|
}
|
||||||
return r.updateAddr, nil
|
|
||||||
|
return r.idAddr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Retrieves the resource index value for the given nameHash
|
// Retrieves the resource cache value for the given nameHash
|
||||||
func (h *Handler) get(rootAddr storage.Address) *resource {
|
func (h *Handler) get(view *View) *cacheEntry {
|
||||||
if len(rootAddr) < storage.AddressLength {
|
mapKey := view.mapKey()
|
||||||
log.Warn("Handler.get with invalid rootAddr")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
|
|
||||||
h.resourceLock.RLock()
|
h.resourceLock.RLock()
|
||||||
defer h.resourceLock.RUnlock()
|
defer h.resourceLock.RUnlock()
|
||||||
rsrc := h.resources[hashKey]
|
rsrc := h.resources[mapKey]
|
||||||
return rsrc
|
return rsrc
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets the resource index value for the given nameHash
|
// Sets the resource cache value for the given View
|
||||||
func (h *Handler) set(rootAddr storage.Address, rsrc *resource) {
|
func (h *Handler) set(view *View, rsrc *cacheEntry) {
|
||||||
if len(rootAddr) < storage.AddressLength {
|
mapKey := view.mapKey()
|
||||||
log.Warn("Handler.set with invalid rootAddr")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
|
|
||||||
h.resourceLock.Lock()
|
h.resourceLock.Lock()
|
||||||
defer h.resourceLock.Unlock()
|
defer h.resourceLock.Unlock()
|
||||||
h.resources[hashKey] = rsrc
|
h.resources[mapKey] = rsrc
|
||||||
}
|
|
||||||
|
|
||||||
// Checks if we already have an update on this resource, according to the value in the current state of the resource index
|
|
||||||
func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool {
|
|
||||||
rsrc := h.get(rootAddr)
|
|
||||||
return rsrc != nil && rsrc.period == period
|
|
||||||
}
|
}
|
||||||
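For orientation, here is a minimal usage sketch of the reworked handler API. It is editor-added, not part of the diff, and only uses constructors and methods introduced in this change (NewTopic, View, NewFirstRequest, NewQueryLatest, Handler.Update/Lookup/GetContent). It is written as if from inside the mru package; the ctx, signer and handler variables are assumed to exist and error handling is trimmed.

// publish the first update for a view, then retrieve the latest content
topic, _ := NewTopic("example resource", nil)
view := View{Topic: topic, User: signer.Address()}

req := NewFirstRequest(view.Topic) // timestamps the update at the current time
req.SetData([]byte("hello"))
if err := req.Sign(signer); err != nil {
	return err
}
if _, err := handler.Update(ctx, req); err != nil {
	return err
}

// later: look up the latest update without any hint, then read its content
if _, err := handler.Lookup(ctx, NewQueryLatest(&view, lookup.NoClue)); err != nil {
	return err
}
_, content, err := handler.GetContent(&view)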
|
swarm/storage/mru/handler_test.go (new file, 520 lines)
@@ -0,0 +1,520 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
loglevel = flag.Int("loglevel", 3, "loglevel")
|
||||||
|
startTime = Timestamp{
|
||||||
|
Time: uint64(4200),
|
||||||
|
}
|
||||||
|
cleanF func()
|
||||||
|
resourceName = "føø.bar"
|
||||||
|
hashfunc = storage.MakeHashFunc(storage.DefaultHash)
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
flag.Parse()
|
||||||
|
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
|
||||||
|
}
|
||||||
|
|
||||||
|
// simulated timeProvider
|
||||||
|
type fakeTimeProvider struct {
|
||||||
|
currentTime uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *fakeTimeProvider) Tick() {
|
||||||
|
f.currentTime++
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *fakeTimeProvider) Set(time uint64) {
|
||||||
|
f.currentTime = time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *fakeTimeProvider) FastForward(offset uint64) {
|
||||||
|
f.currentTime += offset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *fakeTimeProvider) Now() Timestamp {
|
||||||
|
return Timestamp{
|
||||||
|
Time: f.currentTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// make updates and retrieve them based on periods and versions
|
||||||
|
func TestResourceHandler(t *testing.T) {
|
||||||
|
|
||||||
|
// make fake timeProvider
|
||||||
|
clock := &fakeTimeProvider{
|
||||||
|
currentTime: startTime.Time, // clock starts at t=4200
|
||||||
|
}
|
||||||
|
|
||||||
|
// signer containing private key
|
||||||
|
signer := newAliceSigner()
|
||||||
|
|
||||||
|
rh, datadir, teardownTest, err := setupTest(clock, signer)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer teardownTest()
|
||||||
|
|
||||||
|
// create a new resource
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
topic, _ := NewTopic("Mess with mru code and see what ghost catches you", nil)
|
||||||
|
view := View{
|
||||||
|
Topic: topic,
|
||||||
|
User: signer.Address(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// data for updates:
|
||||||
|
updates := []string{
|
||||||
|
"blinky", // t=4200
|
||||||
|
"pinky", // t=4242
|
||||||
|
"inky", // t=4284
|
||||||
|
"clyde", // t=4285
|
||||||
|
}
|
||||||
|
|
||||||
|
request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time)
|
||||||
|
resourcekey := make(map[string]storage.Address)
|
||||||
|
data := []byte(updates[0])
|
||||||
|
request.SetData(data)
|
||||||
|
if err := request.Sign(signer); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
resourcekey[updates[0]], err = rh.Update(ctx, request)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// move the clock ahead 21 seconds
|
||||||
|
clock.FastForward(21) // t=4221
|
||||||
|
|
||||||
|
request, err = rh.NewRequest(ctx, &request.View) // this timestamps the update at t = 4221
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 {
|
||||||
|
t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail
|
||||||
|
data = []byte(updates[1])
|
||||||
|
request.SetData(data)
|
||||||
|
if err := request.Sign(signer); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
resourcekey[updates[1]], err = rh.Update(ctx, request)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Expected update to fail since an update in this epoch already exists")
|
||||||
|
}
|
||||||
|
|
||||||
|
// move the clock ahead 21 seconds
|
||||||
|
clock.FastForward(21) // t=4242
|
||||||
|
request, err = rh.NewRequest(ctx, &request.View)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
request.SetData(data)
|
||||||
|
if err := request.Sign(signer); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
resourcekey[updates[1]], err = rh.Update(ctx, request)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// move the clock ahead 42 seconds
|
||||||
|
clock.FastForward(42) // t=4284
|
||||||
|
request, err = rh.NewRequest(ctx, &request.View)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
data = []byte(updates[2])
|
||||||
|
request.SetData(data)
|
||||||
|
if err := request.Sign(signer); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
resourcekey[updates[2]], err = rh.Update(ctx, request)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// move the clock ahead 1 second
|
||||||
|
clock.FastForward(1) // t=4285
|
||||||
|
request, err = rh.NewRequest(ctx, &request.View)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if request.Epoch.Base() != 0 || request.Epoch.Level != 22 {
|
||||||
|
t.Fatalf("Expected epoch base time to be %d, got %d. Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level)
|
||||||
|
}
|
||||||
|
data = []byte(updates[3])
|
||||||
|
request.SetData(data)
|
||||||
|
|
||||||
|
if err := request.Sign(signer); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
resourcekey[updates[3]], err = rh.Update(ctx, request)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
rh.Close()
|
||||||
|
|
||||||
|
// check we can retrieve the updates after close
|
||||||
|
clock.FastForward(2000) // t=6285
|
||||||
|
|
||||||
|
rhparams := &HandlerParams{}
|
||||||
|
|
||||||
|
rh2, err := NewTestHandler(datadir, rhparams)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rsrc2, err := rh2.Lookup(ctx, NewQueryLatest(&request.View, lookup.NoClue))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// last update should be "clyde"
|
||||||
|
if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
|
||||||
|
t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
|
||||||
|
}
|
||||||
|
if rsrc2.Level != 22 {
|
||||||
|
t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level)
|
||||||
|
}
|
||||||
|
if rsrc2.Base() != 0 {
|
||||||
|
t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base())
|
||||||
|
}
|
||||||
|
log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data)
|
||||||
|
|
||||||
|
// specific point in time
|
||||||
|
rsrc, err := rh2.Lookup(ctx, NewQuery(&request.View, 4284, lookup.NoClue))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
// check data
|
||||||
|
if !bytes.Equal(rsrc.data, []byte(updates[2])) {
|
||||||
|
t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2])
|
||||||
|
}
|
||||||
|
log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data)
|
||||||
|
|
||||||
|
// beyond the first should yield an error
|
||||||
|
rsrc, err = rh2.Lookup(ctx, NewQuery(&request.View, startTime.Time-1, lookup.NoClue))
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
const Day = 60 * 60 * 24
|
||||||
|
const Year = Day * 365
|
||||||
|
const Month = Day * 30
|
||||||
|
|
||||||
|
func generateData(x uint64) []byte {
|
||||||
|
return []byte(fmt.Sprintf("%d", x))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSparseUpdates(t *testing.T) {
|
||||||
|
|
||||||
|
// make fake timeProvider
|
||||||
|
timeProvider := &fakeTimeProvider{
|
||||||
|
currentTime: startTime.Time,
|
||||||
|
}
|
||||||
|
|
||||||
|
// signer containing private key
|
||||||
|
signer := newAliceSigner()
|
||||||
|
|
||||||
|
rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer teardownTest()
|
||||||
|
defer os.RemoveAll(datadir)
|
||||||
|
|
||||||
|
// create a new resource
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
topic, _ := NewTopic("Very slow updates", nil)
|
||||||
|
view := View{
|
||||||
|
Topic: topic,
|
||||||
|
User: signer.Address(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// publish one update every 5 years since Unix 0 until today
|
||||||
|
today := uint64(1533799046)
|
||||||
|
var epoch lookup.Epoch
|
||||||
|
var lastUpdateTime uint64
|
||||||
|
for T := uint64(0); T < today; T += 5 * Year {
|
||||||
|
request := NewFirstRequest(view.Topic)
|
||||||
|
request.Epoch = lookup.GetNextEpoch(epoch, T)
|
||||||
|
request.data = generateData(T) // this generates some data that depends on T, so we can check later
|
||||||
|
request.Sign(signer)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := rh.Update(ctx, request); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
epoch = request.Epoch
|
||||||
|
lastUpdateTime = T
|
||||||
|
}
|
||||||
|
|
||||||
|
query := NewQuery(&view, today, lookup.NoClue)
|
||||||
|
|
||||||
|
_, err = rh.Lookup(ctx, query)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, content, err := rh.GetContent(&view)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(generateData(lastUpdateTime), content) {
|
||||||
|
t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content))
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookup the closest update to 35*Year + 6* Month (~ June 2005):
|
||||||
|
// it should find the update we put on 35*Year, since we were updating every 5 years.
|
||||||
|
|
||||||
|
query.TimeLimit = 35*Year + 6*Month
|
||||||
|
|
||||||
|
_, err = rh.Lookup(ctx, query)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, content, err = rh.GetContent(&view)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(generateData(35*Year), content) {
|
||||||
|
t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValidator(t *testing.T) {
|
||||||
|
|
||||||
|
// make fake timeProvider
|
||||||
|
timeProvider := &fakeTimeProvider{
|
||||||
|
currentTime: startTime.Time,
|
||||||
|
}
|
||||||
|
|
||||||
|
// signer containing private key. Alice will be the good girl
|
||||||
|
signer := newAliceSigner()
|
||||||
|
|
||||||
|
// set up sim timeProvider
|
||||||
|
rh, _, teardownTest, err := setupTest(timeProvider, signer)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer teardownTest()
|
||||||
|
|
||||||
|
// create new resource
|
||||||
|
topic, _ := NewTopic(resourceName, nil)
|
||||||
|
view := View{
|
||||||
|
Topic: topic,
|
||||||
|
User: signer.Address(),
|
||||||
|
}
|
||||||
|
mr := NewFirstRequest(view.Topic)
|
||||||
|
|
||||||
|
// chunk with address
|
||||||
|
data := []byte("foo")
|
||||||
|
mr.SetData(data)
|
||||||
|
if err := mr.Sign(signer); err != nil {
|
||||||
|
t.Fatalf("sign fail: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk, err := mr.toChunk()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !rh.Validate(chunk.Address(), chunk.Data()) {
|
||||||
|
t.Fatal("Chunk validator fail on update chunk")
|
||||||
|
}
|
||||||
|
|
||||||
|
address := chunk.Address()
|
||||||
|
// mess with the address
|
||||||
|
address[0] = 11
|
||||||
|
address[15] = 99
|
||||||
|
|
||||||
|
if rh.Validate(address, chunk.Data()) {
|
||||||
|
t.Fatal("Expected Validate to fail with false chunk address")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// tests that the content address validator correctly checks the data
|
||||||
|
// tests that resource update chunks are passed through content address validator
|
||||||
|
// there is some redundancy in this test as it also tests content addressed chunks,
|
||||||
|
// which should be evaluated as invalid chunks by this validator
|
||||||
|
func TestValidatorInStore(t *testing.T) {
|
||||||
|
|
||||||
|
// make fake timeProvider
|
||||||
|
TimestampProvider = &fakeTimeProvider{
|
||||||
|
currentTime: startTime.Time,
|
||||||
|
}
|
||||||
|
|
||||||
|
// signer containing private key
|
||||||
|
signer := newAliceSigner()
|
||||||
|
|
||||||
|
// set up localstore
|
||||||
|
datadir, err := ioutil.TempDir("", "storage-testresourcevalidator")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(datadir)
|
||||||
|
|
||||||
|
handlerParams := storage.NewDefaultLocalStoreParams()
|
||||||
|
handlerParams.Init(datadir)
|
||||||
|
store, err := storage.NewLocalStore(handlerParams, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// set up resource handler and add is as a validator to the localstore
|
||||||
|
rhParams := &HandlerParams{}
|
||||||
|
rh := NewHandler(rhParams)
|
||||||
|
store.Validators = append(store.Validators, rh)
|
||||||
|
|
||||||
|
// create content addressed chunks, one good, one faulty
|
||||||
|
chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
|
||||||
|
goodChunk := chunks[0]
|
||||||
|
badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data())
|
||||||
|
|
||||||
|
topic, _ := NewTopic("xyzzy", nil)
|
||||||
|
view := View{
|
||||||
|
Topic: topic,
|
||||||
|
User: signer.Address(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a resource update chunk with correct publickey
|
||||||
|
id := ID{
|
||||||
|
Epoch: lookup.Epoch{Time: 42,
|
||||||
|
Level: 1,
|
||||||
|
},
|
||||||
|
View: view,
|
||||||
|
}
|
||||||
|
|
||||||
|
updateAddr := id.Addr()
|
||||||
|
data := []byte("bar")
|
||||||
|
|
||||||
|
r := new(Request)
|
||||||
|
r.idAddr = updateAddr
|
||||||
|
r.ResourceUpdate.ID = id
|
||||||
|
r.data = data
|
||||||
|
|
||||||
|
r.Sign(signer)
|
||||||
|
|
||||||
|
uglyChunk, err := r.toChunk()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// put the chunks in the store and check their error status
|
||||||
|
err = store.Put(context.Background(), goodChunk)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
|
||||||
|
}
|
||||||
|
err = store.Put(context.Background(), badChunk)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
|
||||||
|
}
|
||||||
|
err = store.Put(context.Background(), uglyChunk)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// create rpc and resourcehandler
|
||||||
|
func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) {
|
||||||
|
|
||||||
|
var fsClean func()
|
||||||
|
var rpcClean func()
|
||||||
|
cleanF = func() {
|
||||||
|
if fsClean != nil {
|
||||||
|
fsClean()
|
||||||
|
}
|
||||||
|
if rpcClean != nil {
|
||||||
|
rpcClean()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// temp datadir
|
||||||
|
datadir, err = ioutil.TempDir("", "rh")
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", nil, err
|
||||||
|
}
|
||||||
|
fsClean = func() {
|
||||||
|
os.RemoveAll(datadir)
|
||||||
|
}
|
||||||
|
|
||||||
|
TimestampProvider = timeProvider
|
||||||
|
rhparams := &HandlerParams{}
|
||||||
|
rh, err = NewTestHandler(datadir, rhparams)
|
||||||
|
return rh, datadir, cleanF, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func newAliceSigner() *GenericSigner {
|
||||||
|
privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
|
||||||
|
return NewGenericSigner(privKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBobSigner() *GenericSigner {
|
||||||
|
privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca")
|
||||||
|
return NewGenericSigner(privKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCharlieSigner() *GenericSigner {
|
||||||
|
privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca")
|
||||||
|
return NewGenericSigner(privKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
|
||||||
|
chunk, err := rh.chunkStore.Get(context.TODO(), addr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var r Request
|
||||||
|
if err := r.fromChunk(addr, chunk.Data()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return r.data, nil
|
||||||
|
}
|
swarm/storage/mru/id.go (new file, 123 lines)
@@ -0,0 +1,123 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID uniquely identifies an update on the network.
|
||||||
|
type ID struct {
|
||||||
|
View `json:"view"`
|
||||||
|
lookup.Epoch `json:"epoch"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID layout:
|
||||||
|
// View viewLength bytes
|
||||||
|
// Epoch EpochLength
|
||||||
|
const idLength = viewLength + lookup.EpochLength
|
||||||
|
|
||||||
|
// Addr calculates the resource update chunk address corresponding to this ID
|
||||||
|
func (u *ID) Addr() (updateAddr storage.Address) {
|
||||||
|
serializedData := make([]byte, idLength)
|
||||||
|
var cursor int
|
||||||
|
u.View.binaryPut(serializedData[cursor : cursor+viewLength])
|
||||||
|
cursor += viewLength
|
||||||
|
|
||||||
|
eid := u.Epoch.ID()
|
||||||
|
copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:])
|
||||||
|
|
||||||
|
hasher := hashPool.Get().(hash.Hash)
|
||||||
|
defer hashPool.Put(hasher)
|
||||||
|
hasher.Reset()
|
||||||
|
hasher.Write(serializedData)
|
||||||
|
return hasher.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// binaryPut serializes this instance into the provided slice
|
||||||
|
func (u *ID) binaryPut(serializedData []byte) error {
|
||||||
|
if len(serializedData) != idLength {
|
||||||
|
return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData))
|
||||||
|
}
|
||||||
|
var cursor int
|
||||||
|
if err := u.View.binaryPut(serializedData[cursor : cursor+viewLength]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cursor += viewLength
|
||||||
|
|
||||||
|
epochBytes, err := u.Epoch.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:])
|
||||||
|
cursor += lookup.EpochLength
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// binaryLength returns the expected size of this structure when serialized
|
||||||
|
func (u *ID) binaryLength() int {
|
||||||
|
return idLength
|
||||||
|
}
|
||||||
|
|
||||||
|
// binaryGet restores the current instance from the information contained in the passed slice
|
||||||
|
func (u *ID) binaryGet(serializedData []byte) error {
|
||||||
|
if len(serializedData) != idLength {
|
||||||
|
return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. Expected %d, got %d", idLength, len(serializedData))
|
||||||
|
}
|
||||||
|
|
||||||
|
var cursor int
|
||||||
|
if err := u.View.binaryGet(serializedData[cursor : cursor+viewLength]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cursor += viewLength
|
||||||
|
|
||||||
|
if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cursor += lookup.EpochLength
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromValues deserializes this instance from a string key-value store
|
||||||
|
// useful to parse query strings
|
||||||
|
func (u *ID) FromValues(values Values) error {
|
||||||
|
level, _ := strconv.ParseUint(values.Get("level"), 10, 32)
|
||||||
|
u.Epoch.Level = uint8(level)
|
||||||
|
u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64)
|
||||||
|
|
||||||
|
if u.View.User == (common.Address{}) {
|
||||||
|
return u.View.FromValues(values)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendValues serializes this structure into the provided string key-value store
|
||||||
|
// useful to build query strings
|
||||||
|
func (u *ID) AppendValues(values Values) {
|
||||||
|
values.Set("level", fmt.Sprintf("%d", u.Epoch.Level))
|
||||||
|
values.Set("time", fmt.Sprintf("%d", u.Epoch.Time))
|
||||||
|
u.View.AppendValues(values)
|
||||||
|
}
|
swarm/storage/mru/id_test.go (new file, 28 lines)
@@ -0,0 +1,28 @@
|
|||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
|
||||||
|
)
|
||||||
|
|
||||||
|
func getTestID() *ID {
|
||||||
|
return &ID{
|
||||||
|
View: *getTestView(),
|
||||||
|
Epoch: lookup.GetFirstEpoch(1000),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDAddr(t *testing.T) {
|
||||||
|
ul := getTestID()
|
||||||
|
updateAddr := ul.Addr()
|
||||||
|
compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDSerializer(t *testing.T) {
|
||||||
|
testBinarySerializerRecovery(t, getTestID(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIDLengthCheck(t *testing.T) {
|
||||||
|
testBinarySerializerLengthCheck(t, getTestID())
|
||||||
|
}
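As a sanity check on the ID layout described in id.go (View bytes followed by the 8-byte Epoch), the serialized test vector in TestIDSerializer above decodes by hand as follows. The field boundaries are the editor's reading of the hex string, not part of the diff:

// 776f...686f7572 000000                       32-byte Topic: "world news report, every hour", zero-padded
// 876a8936a7cd0b79ef0735ad0896c1afe278781c     20-byte User address of the test view
// e803000000000019                              8-byte Epoch: Time = 0x3e8 = 1000 little-endian in bytes 0-6, Level = 0x19 = 25 in byte 7
// which matches getTestID()'s lookup.GetFirstEpoch(1000), i.e. Epoch{Time: 1000, Level: HighestLevel}.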
|
@@ -1,117 +0,0 @@ (file deleted; the removed LookupParams/UpdateLookup code follows)
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package mru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"hash"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LookupParams is used to specify constraints when performing an update lookup
|
|
||||||
// Limit defines whether or not the lookup should be limited
|
|
||||||
// If Limit is set to true then Max defines the amount of hops that can be performed
|
|
||||||
type LookupParams struct {
|
|
||||||
UpdateLookup
|
|
||||||
Limit uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// RootAddr returns the metadata chunk address
|
|
||||||
func (r *LookupParams) RootAddr() storage.Address {
|
|
||||||
return r.rootAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams {
|
|
||||||
return &LookupParams{
|
|
||||||
UpdateLookup: UpdateLookup{
|
|
||||||
period: period,
|
|
||||||
version: version,
|
|
||||||
rootAddr: rootAddr,
|
|
||||||
},
|
|
||||||
Limit: limit,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupLatest generates lookup parameters that look for the latest version of a resource
|
|
||||||
func LookupLatest(rootAddr storage.Address) *LookupParams {
|
|
||||||
return NewLookupParams(rootAddr, 0, 0, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period
|
|
||||||
func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams {
|
|
||||||
return NewLookupParams(rootAddr, period, 0, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupVersion generates lookup parameters that look for a specific version of a resource
|
|
||||||
func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams {
|
|
||||||
return NewLookupParams(rootAddr, period, version, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateLookup represents the components of a resource update search key
|
|
||||||
type UpdateLookup struct {
|
|
||||||
period uint32
|
|
||||||
version uint32
|
|
||||||
rootAddr storage.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4 bytes period
|
|
||||||
// 4 bytes version
|
|
||||||
// storage.Keylength for rootAddr
|
|
||||||
const updateLookupLength = 4 + 4 + storage.AddressLength
|
|
||||||
|
|
||||||
// UpdateAddr calculates the resource update chunk address corresponding to this lookup key
|
|
||||||
func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) {
|
|
||||||
serializedData := make([]byte, updateLookupLength)
|
|
||||||
u.binaryPut(serializedData)
|
|
||||||
hasher := hashPool.Get().(hash.Hash)
|
|
||||||
defer hashPool.Put(hasher)
|
|
||||||
hasher.Reset()
|
|
||||||
hasher.Write(serializedData)
|
|
||||||
return hasher.Sum(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// binaryPut serializes this UpdateLookup instance into the provided slice
|
|
||||||
func (u *UpdateLookup) binaryPut(serializedData []byte) error {
|
|
||||||
if len(serializedData) != updateLookupLength {
|
|
||||||
return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
|
|
||||||
}
|
|
||||||
if len(u.rootAddr) != storage.AddressLength {
|
|
||||||
return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set")
|
|
||||||
}
|
|
||||||
binary.LittleEndian.PutUint32(serializedData[:4], u.period)
|
|
||||||
binary.LittleEndian.PutUint32(serializedData[4:8], u.version)
|
|
||||||
copy(serializedData[8:], u.rootAddr[:])
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// binaryLength returns the expected size of this structure when serialized
|
|
||||||
func (u *UpdateLookup) binaryLength() int {
|
|
||||||
return updateLookupLength
|
|
||||||
}
|
|
||||||
|
|
||||||
// binaryGet restores the current instance from the information contained in the passed slice
|
|
||||||
func (u *UpdateLookup) binaryGet(serializedData []byte) error {
|
|
||||||
if len(serializedData) != updateLookupLength {
|
|
||||||
return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
|
|
||||||
}
|
|
||||||
u.period = binary.LittleEndian.Uint32(serializedData[:4])
|
|
||||||
u.version = binary.LittleEndian.Uint32(serializedData[4:8])
|
|
||||||
u.rootAddr = storage.Address(make([]byte, storage.AddressLength))
|
|
||||||
copy(u.rootAddr[:], serializedData[8:])
|
|
||||||
return nil
|
|
||||||
}
|
|
swarm/storage/mru/lookup/epoch.go (new file, 91 lines)
@@ -0,0 +1,91 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package lookup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Epoch represents a time slot at a particular frequency level
|
||||||
|
type Epoch struct {
|
||||||
|
Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place
|
||||||
|
Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpochID is a unique identifier for an Epoch, based on its level and base time.
|
||||||
|
type EpochID [8]byte
|
||||||
|
|
||||||
|
// EpochLength stores the serialized binary length of an Epoch
|
||||||
|
const EpochLength = 8
|
||||||
|
|
||||||
|
// MaxTime contains the highest possible time value an Epoch can handle
|
||||||
|
const MaxTime uint64 = (1 << 56) - 1
|
||||||
|
|
||||||
|
// Base returns the base time of the Epoch
|
||||||
|
func (e *Epoch) Base() uint64 {
|
||||||
|
return getBaseTime(e.Time, e.Level)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID Returns the unique identifier of this epoch
|
||||||
|
func (e *Epoch) ID() EpochID {
|
||||||
|
base := e.Base()
|
||||||
|
var id EpochID
|
||||||
|
binary.LittleEndian.PutUint64(id[:], base)
|
||||||
|
id[7] = e.Level
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary implements the encoding.BinaryMarshaller interface
|
||||||
|
func (e *Epoch) MarshalBinary() (data []byte, err error) {
|
||||||
|
b := make([]byte, 8)
|
||||||
|
binary.LittleEndian.PutUint64(b[:], e.Time)
|
||||||
|
b[7] = e.Level
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface
|
||||||
|
func (e *Epoch) UnmarshalBinary(data []byte) error {
|
||||||
|
if len(data) != EpochLength {
|
||||||
|
return errors.New("Invalid data unmarshalling Epoch")
|
||||||
|
}
|
||||||
|
b := make([]byte, 8)
|
||||||
|
copy(b, data)
|
||||||
|
e.Level = b[7]
|
||||||
|
b[7] = 0
|
||||||
|
e.Time = binary.LittleEndian.Uint64(b)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// After returns true if this epoch occurs later or exactly at the other epoch.
|
||||||
|
func (e *Epoch) After(epoch Epoch) bool {
|
||||||
|
if e.Time == epoch.Time {
|
||||||
|
return e.Level < epoch.Level
|
||||||
|
}
|
||||||
|
return e.Time >= epoch.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equals compares two epochs and returns true if they refer to the same time period.
|
||||||
|
func (e *Epoch) Equals(epoch Epoch) bool {
|
||||||
|
return e.Level == epoch.Level && e.Base() == epoch.Base()
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the Stringer interface.
|
||||||
|
func (e *Epoch) String() string {
|
||||||
|
return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level)
|
||||||
|
}
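A short illustration of the base-time masking may help here: Base() clears the low Level bits of Time, so every timestamp inside the same 2^Level-second window maps to one base time and therefore one EpochID. The snippet is editor-added, not part of the diff:

e := lookup.Epoch{Time: 0x12345, Level: 8}
fmt.Printf("%#x\n", e.Base()) // 0x12300: the low 8 bits of 0x12345 are masked off
// Epoch{Time: 0x123ff, Level: 8} has the same Base() and therefore the same ID().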
|
swarm/storage/mru/lookup/epoch_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
|
|||||||
|
package lookup_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMarshallers(t *testing.T) {
|
||||||
|
|
||||||
|
for i := uint64(1); i < lookup.MaxTime; i *= 3 {
|
||||||
|
e := lookup.Epoch{
|
||||||
|
Time: i,
|
||||||
|
Level: uint8(i % 20),
|
||||||
|
}
|
||||||
|
b, err := e.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
var e2 lookup.Epoch
|
||||||
|
if err := e2.UnmarshalBinary(b); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if e != e2 {
|
||||||
|
t.Fatal("Expected unmarshalled epoch to be equal to marshalled onet.Fatal(err)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAfter(t *testing.T) {
|
||||||
|
a := lookup.Epoch{
|
||||||
|
Time: 5,
|
||||||
|
Level: 3,
|
||||||
|
}
|
||||||
|
b := lookup.Epoch{
|
||||||
|
Time: 6,
|
||||||
|
Level: 3,
|
||||||
|
}
|
||||||
|
c := lookup.Epoch{
|
||||||
|
Time: 6,
|
||||||
|
Level: 4,
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.After(a) != true {
|
||||||
|
t.Fatal("Expected 'after' to be true, got false")
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.After(b) != false {
|
||||||
|
t.Fatal("Expected 'after' to be false when both epochs are identical, got true")
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.After(c) != true {
|
||||||
|
t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
swarm/storage/mru/lookup/lookup.go (new file, 180 lines)
@@ -0,0 +1,180 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package lookup defines resource lookup algorithms and provides tools to place updates
|
||||||
|
so they can be found
|
||||||
|
*/
|
||||||
|
package lookup
|
||||||
|
|
||||||
|
const maxuint64 = ^uint64(0)
|
||||||
|
|
||||||
|
// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2.
|
||||||
|
const LowestLevel uint8 = 0 // default is 0 (1 second)
|
||||||
|
|
||||||
|
// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2.
|
||||||
|
// 25 -> 2^25 equals to roughly one year.
|
||||||
|
const HighestLevel = 25 // default is 25 (~1 year)
|
||||||
|
|
||||||
|
// DefaultLevel sets what level will be chosen to search when there is no hint
|
||||||
|
const DefaultLevel = HighestLevel
|
||||||
|
|
||||||
|
//Algorithm is the function signature of a lookup algorithm
|
||||||
|
type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error)
|
||||||
|
|
||||||
|
// Lookup finds the update with the highest timestamp that is smaller than or equal to 'now'
|
||||||
|
// It takes a hint which should be the epoch where the last known update was
|
||||||
|
// If you don't know in what epoch the last update happened, simply submit lookup.NoClue
|
||||||
|
// read() will be called on each lookup attempt
|
||||||
|
// Returns an error only if read() returns an error
|
||||||
|
// Returns nil if an update was not found
|
||||||
|
var Lookup Algorithm = FluzCapacitorAlgorithm
|
||||||
|
|
||||||
|
// ReadFunc is a handler called by Lookup each time it attempts to find a value
|
||||||
|
// It should return <nil> if a value is not found
|
||||||
|
// It should return <nil> if a value is found, but its timestamp is higher than "now"
|
||||||
|
// It should only return an error in case the handler wants to stop the
|
||||||
|
// lookup process entirely.
|
||||||
|
type ReadFunc func(epoch Epoch, now uint64) (interface{}, error)
|
||||||
|
|
||||||
|
// NoClue is a hint that can be provided when the Lookup caller does not have
|
||||||
|
// a clue about where the last update may be
|
||||||
|
var NoClue = Epoch{}
|
||||||
|
|
||||||
|
// getBaseTime returns the epoch base time of the given
|
||||||
|
// time and level
|
||||||
|
func getBaseTime(t uint64, level uint8) uint64 {
|
||||||
|
return t & (maxuint64 << level)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hint creates a hint based only on the last known update time
|
||||||
|
func Hint(last uint64) Epoch {
|
||||||
|
return Epoch{
|
||||||
|
Time: last,
|
||||||
|
Level: DefaultLevel,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNextLevel returns the frequency level a next update should be placed at, provided where
|
||||||
|
// the last update was and what time it is now.
|
||||||
|
// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the most significant bit
|
||||||
|
// but it will never return a level lower than last.Level-1
|
||||||
|
func GetNextLevel(last Epoch, now uint64) uint8 {
|
||||||
|
// First XOR the last epoch base time with the current clock.
|
||||||
|
// This will set all the common most significant bits to zero.
|
||||||
|
mix := (last.Base() ^ now)
|
||||||
|
|
||||||
|
// Then, make sure we stop the below loop before one level below the current, by setting
|
||||||
|
// that level's bit to 1.
|
||||||
|
// If the next level is lower than the current one, it must be exactly level-1 and not lower.
|
||||||
|
mix |= (1 << (last.Level - 1))
|
||||||
|
|
||||||
|
// if the last update was more than 2^highestLevel seconds ago, choose the highest level
|
||||||
|
if mix > (maxuint64 >> (64 - HighestLevel - 1)) {
|
||||||
|
return HighestLevel
|
||||||
|
}
|
||||||
|
|
||||||
|
// set up a mask to scan for nonzero bits, starting at the highest level
|
||||||
|
mask := uint64(1 << (HighestLevel))
|
||||||
|
|
||||||
|
for i := uint8(HighestLevel); i > LowestLevel; i-- {
|
||||||
|
if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at.
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
mask = mask >> 1 // move our bit one position to the right
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNextEpoch returns the epoch where the next update should be located
|
||||||
|
// according to where the previous update was
|
||||||
|
// and what time it is now.
|
||||||
|
func GetNextEpoch(last Epoch, now uint64) Epoch {
|
||||||
|
if last == NoClue {
|
||||||
|
return GetFirstEpoch(now)
|
||||||
|
}
|
||||||
|
level := GetNextLevel(last, now)
|
||||||
|
return Epoch{
|
||||||
|
Level: level,
|
||||||
|
Time: now,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFirstEpoch returns the epoch where the first update should be located
|
||||||
|
// based on what time it is now.
|
||||||
|
func GetFirstEpoch(now uint64) Epoch {
|
||||||
|
return Epoch{Level: HighestLevel, Time: now}
|
||||||
|
}
|
||||||
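A worked example, using the same numbers as TestResourceHandler earlier in this diff (first update at t=4200, second update 21 seconds later), makes the level selection easier to follow. The snippet is an editor sketch, not part of the change:

first := lookup.GetFirstEpoch(4200)      // Epoch{Time: 4200, Level: 25}, base time 0
next := lookup.GetNextEpoch(first, 4221) // the XOR of base time 0 and 4221 alone would suggest level 12,
                                         // but GetNextLevel never drops below first.Level-1, so level 24
fmt.Println(next.Level, next.Base())     // prints: 24 0, matching the assertion in TestResourceHandler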
|
|
||||||
|
var worstHint = Epoch{Time: 0, Level: 63}
|
||||||
|
|
||||||
|
// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found
|
||||||
|
// going back and forth in time
|
||||||
|
// First, it will attempt to find an update where it should be now if the hint was
|
||||||
|
// really the last update. If that lookup fails, then the last update must be either the hint itself
|
||||||
|
// or the epochs right below. If however, that lookup succeeds, then the update must be
|
||||||
|
// that one or within the epochs right below.
|
||||||
|
// see the guide for a more graphical representation
|
||||||
|
func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) {
|
||||||
|
var lastFound interface{}
|
||||||
|
var epoch Epoch
|
||||||
|
if hint == NoClue {
|
||||||
|
hint = worstHint
|
||||||
|
}
|
||||||
|
|
||||||
|
t := now
|
||||||
|
|
||||||
|
for {
|
||||||
|
epoch = GetNextEpoch(hint, t)
|
||||||
|
value, err = read(epoch, now)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if value != nil {
|
||||||
|
lastFound = value
|
||||||
|
if epoch.Level == LowestLevel || epoch.Equals(hint) {
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
hint = epoch
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if epoch.Base() == hint.Base() {
|
||||||
|
if lastFound != nil {
|
||||||
|
return lastFound, nil
|
||||||
|
}
|
||||||
|
// we have reached the hint itself
|
||||||
|
if hint == worstHint {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
// check it out
|
||||||
|
value, err = read(hint, now)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if value != nil {
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
// bad hint.
|
||||||
|
epoch = hint
|
||||||
|
hint = worstHint
|
||||||
|
}
|
||||||
|
base := epoch.Base()
|
||||||
|
if base == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
t = base - 1
|
||||||
|
}
|
||||||
|
}
|
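To make the interplay of GetNextEpoch and FluzCapacitorAlgorithm concrete, here is a small standalone sketch, not part of this change: a publisher stores three updates at the epochs suggested by GetNextEpoch, and a reader with no hint finds the newest one. The toy in-memory store, its key type and the example timestamps are illustrative assumptions; only the lookup package identifiers used above (Epoch, NoClue, GetNextEpoch, FluzCapacitorAlgorithm, Base, Level) come from the code, and the read callback signature is inferred from its use there.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// epochKey is a hypothetical map key built from the two coordinates of an epoch.
type epochKey struct {
	base  uint64
	level uint8
}

func main() {
	store := make(map[epochKey]string) // toy update store, stands in for Swarm chunks

	// Publisher side: each update goes to the epoch suggested by GetNextEpoch.
	last := lookup.NoClue
	for i, now := range []uint64{1200, 1800, 2500} {
		epoch := lookup.GetNextEpoch(last, now)
		store[epochKey{epoch.Base(), epoch.Level}] = fmt.Sprintf("update #%d", i)
		last = epoch
	}

	// Reader side: walk the epoch grid at time 3000 with no hint, returning nil
	// when nothing is stored at the probed epoch so the algorithm keeps narrowing.
	latest, err := lookup.FluzCapacitorAlgorithm(3000, lookup.NoClue, func(epoch lookup.Epoch, now uint64) (interface{}, error) {
		if v, ok := store[epochKey{epoch.Base(), epoch.Level}]; ok {
			return v, nil
		}
		return nil, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(latest) // expected to print the most recent update, "update #2"
}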
swarm/storage/mru/lookup/lookup_test.go (new file, 416 lines)
File diff suppressed because one or more lines are too long
@@ -1,85 +0,0 @@
package mru

import (
	"bytes"
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func getTestUpdateLookup() *UpdateLookup {
	metadata := *getTestMetadata()
	rootAddr, _, _, _ := metadata.serializeAndHash()
	return &UpdateLookup{
		period:   79,
		version:  2010,
		rootAddr: rootAddr,
	}
}

func compareUpdateLookup(a, b *UpdateLookup) bool {
	return a.version == b.version &&
		a.period == b.period &&
		bytes.Equal(a.rootAddr, b.rootAddr)
}

func TestUpdateLookupUpdateAddr(t *testing.T) {
	ul := getTestUpdateLookup()
	updateAddr := ul.UpdateAddr()
	compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0")
}

func TestUpdateLookupSerializer(t *testing.T) {
	serializedUpdateLookup := make([]byte, updateLookupLength)
	ul := getTestUpdateLookup()
	if err := ul.binaryPut(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")

	// set receiving slice to the wrong size
	serializedUpdateLookup = make([]byte, updateLookupLength+7)
	if err := ul.binaryPut(serializedUpdateLookup); err == nil {
		t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength)
	}

	// set rootAddr to an invalid length
	ul.rootAddr = []byte{1, 2, 3, 4}
	serializedUpdateLookup = make([]byte, updateLookupLength)
	if err := ul.binaryPut(serializedUpdateLookup); err == nil {
		t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size")
	}
}

func TestUpdateLookupDeserializer(t *testing.T) {
	serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")
	var recoveredUpdateLookup UpdateLookup
	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	originalUpdateLookup := *getTestUpdateLookup()
	if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) {
		t.Fatalf("Expected recovered UpdateLookup to match")
	}

	// set source slice to the wrong size
	serializedUpdateLookup = make([]byte, updateLookupLength+4)
	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil {
		t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength)
	}
}

func TestUpdateLookupSerializeDeserialize(t *testing.T) {
	serializedUpdateLookup := make([]byte, updateLookupLength)
	originalUpdateLookup := getTestUpdateLookup()
	if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	var recoveredUpdateLookup UpdateLookup
	if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
		t.Fatal(err)
	}
	if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) {
		t.Fatalf("Expected recovered UpdateLookup to match")
	}
}
@@ -1,187 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"encoding/binary"
	"hash"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// ResourceMetadata encapsulates the immutable information about a mutable resource :)
// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr
type ResourceMetadata struct {
	StartTime Timestamp      // time at which the resource starts to be valid
	Frequency uint64         // expected update frequency for the resource
	Name      string         // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr
	Owner     common.Address // public address of the resource owner
}

const frequencyLength = 8 // sizeof(uint64)
const nameLengthLength = 1

// Resource metadata chunk layout:
// 4 prefix bytes (chunkPrefixLength). The first two set to zero. The second two indicate the length
// Timestamp: timestampLength bytes
// frequency: frequencyLength bytes
// name length: nameLengthLength bytes
// name (variable length, can be empty, up to 255 bytes)
// ownerAddr: common.AddressLength
const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength

// binaryGet populates the resource metadata from a byte array
func (r *ResourceMetadata) binaryGet(serializedData []byte) error {
	if len(serializedData) < minimumMetadataLength {
		return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData))
	}

	// first two bytes must be set to zero to indicate metadata chunks, so enforce this.
	if serializedData[0] != 0 || serializedData[1] != 0 {
		return NewError(ErrCorruptData, "Invalid metadata chunk")
	}

	cursor := 2
	metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes
	if metadataLength+chunkPrefixLength != len(serializedData) {
		return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData))
	}

	cursor += 2

	if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil {
		return err
	}
	cursor += timestampLength

	r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength])
	cursor += frequencyLength

	nameLength := int(serializedData[cursor])
	if nameLength+minimumMetadataLength > len(serializedData) {
		return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData))
	}
	cursor++
	r.Name = string(serializedData[cursor : cursor+nameLength])
	cursor += nameLength

	copy(r.Owner[:], serializedData[cursor:])
	cursor += common.AddressLength
	if cursor != len(serializedData) {
		return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. %d left to read", len(serializedData)-cursor)
	}
	return nil
}

// binaryPut encodes the metadata into a byte array
func (r *ResourceMetadata) binaryPut(serializedData []byte) error {
	metadataChunkLength := r.binaryLength()
	if len(serializedData) != metadataChunkLength {
		return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData))
	}

	// root chunk has first two bytes both set to 0, which distinguishes from update bytes
	// therefore, skip the first two bytes of a zero-initialized array.
	cursor := 2
	binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes
	cursor += 2

	r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength])
	cursor += timestampLength

	binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency)
	cursor += frequencyLength

	// Encode the name string as a 1 byte length followed by the encoded string.
	// Longer strings will be truncated.
	nameLength := len(r.Name)
	if nameLength > 255 {
		nameLength = 255
	}
	serializedData[cursor] = uint8(nameLength)
	cursor++
	copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength]))
	cursor += nameLength

	copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:])
	cursor += common.AddressLength

	return nil
}

func (r *ResourceMetadata) binaryLength() int {
	return minimumMetadataLength + len(r.Name)
}

// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource
// returns the serialized metadata as a byproduct of having to hash it.
func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) {

	chunkData = make([]byte, r.binaryLength())
	if err := r.binaryPut(chunkData); err != nil {
		return nil, nil, nil, err
	}
	rootAddr, metaHash = metadataHash(chunkData)
	return rootAddr, metaHash, chunkData, nil

}

// creates a metadata chunk out of a resourceMetadata structure
func (metadata *ResourceMetadata) newChunk() (chunk storage.Chunk, metaHash []byte, err error) {
	// the metadata chunk contains a timestamp of when the resource starts to be valid
	// and also how frequently it is expected to be updated
	// from this we know at what time we should look for updates, and how often
	// it also contains the name of the resource, so we know what resource we are working with

	// the key (rootAddr) of the metadata chunk is content-addressed
	// if it wasn't we couldn't replace it later
	// resolving this relationship is left up to external agents (for example ENS)
	rootAddr, metaHash, chunkData, err := metadata.serializeAndHash()
	if err != nil {
		return nil, nil, err
	}

	// make the chunk and send it to swarm
	chunk = storage.NewChunk(rootAddr, chunkData)

	return chunk, metaHash, nil
}

// metadataHash returns the metadata chunk root address and metadata hash
// that help identify and ascertain ownership of this resource
// We compute it as rootAddr = H(ownerAddr, H(metadata))
// Where H() is SHA3
// metadata are all the metadata fields, except ownerAddr
// ownerAddr is the public address of the resource owner
// Update chunks must carry a rootAddr reference and metaHash in order to be verified
// This way, a node that receives an update can check the signature, recover the public address
// and check the ownership by computing H(ownerAddr, metaHash) and comparing it to the rootAddr
// the resource is claiming to update without having to lookup the metadata chunk.
// see verifyResourceOwnership in signedupdate.go
func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) {
	hasher := hashPool.Get().(hash.Hash)
	defer hashPool.Put(hasher)
	hasher.Reset()
	hasher.Write(chunkData[:len(chunkData)-common.AddressLength])
	metaHash = hasher.Sum(nil)
	hasher.Reset()
	hasher.Write(metaHash)
	hasher.Write(chunkData[len(chunkData)-common.AddressLength:])
	rootAddr = hasher.Sum(nil)
	return
}
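For readers outside the package, the ownership derivation described above can be checked independently. The sketch below, which is not part of this change, recomputes rootAddr = H(ownerAddr, H(metadata)) from a serialized metadata chunk; it assumes the pooled hasher is Keccak-256 (an assumption, the real hasher comes from the swarm storage hasher pool), and the zero-filled chunkData in main only shows the call shape, not a real resource.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"golang.org/x/crypto/sha3"
)

// recomputeRootAddr mirrors metadataHash above for a serialized metadata chunk:
// metaHash = H(all fields except ownerAddr), rootAddr = H(metaHash, ownerAddr).
// Keccak-256 is used here as a stand-in for the pooled hasher (an assumption).
func recomputeRootAddr(chunkData []byte) (rootAddr, metaHash []byte) {
	h := sha3.NewLegacyKeccak256()
	h.Write(chunkData[:len(chunkData)-common.AddressLength]) // everything except the owner address
	metaHash = h.Sum(nil)

	h.Reset()
	h.Write(metaHash)
	h.Write(chunkData[len(chunkData)-common.AddressLength:]) // the 20-byte owner address
	rootAddr = h.Sum(nil)
	return rootAddr, metaHash
}

func main() {
	// chunkData would normally come from ResourceMetadata.serializeAndHash();
	// this zero-filled slice only follows the minimal (empty name) chunk layout.
	chunkData := make([]byte, 4+8+8+1+common.AddressLength)
	rootAddr, metaHash := recomputeRootAddr(chunkData)
	fmt.Printf("rootAddr=%x metaHash=%x\n", rootAddr, metaHash)
}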
@@ -1,126 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package mru

import (
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) {
	if hexutil.Encode(actualValue) != expectedHex {
		t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue))
	}
}

func getTestMetadata() *ResourceMetadata {
	return &ResourceMetadata{
		Name: "world news report, every hour, on the hour",
		StartTime: Timestamp{
			Time: 1528880400,
		},
		Frequency: 3600,
		Owner:     newCharlieSigner().Address(),
	}
}

func TestMetadataSerializerDeserializer(t *testing.T) {
	metadata := *getTestMetadata()

	rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go
	if err != nil {
		t.Fatal(err)
	}
	const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb"
	const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0"
	const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c"

	compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr)
	compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash)
	compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData)

	recoveredMetadata := ResourceMetadata{}
	recoveredMetadata.binaryGet(chunkData)

	if recoveredMetadata != metadata {
		t.Fatalf("Expected that the recovered metadata equals the marshalled metadata")
	}

	// we are going to mess with the data, so create a backup to go back to it for the next test
	backup := make([]byte, len(chunkData))
	copy(backup, chunkData)

	chunkData = []byte{1, 2, 3}
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since chunk is too small")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with the prefix so it is not zero
	chunkData[0] = 7
	chunkData[1] = 9

	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since prefix bytes are not zero")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with the length header to trigger an error
	chunkData[2] = 255
	chunkData[3] = 44
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since header length does not match")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with name length header to trigger a chunk too short error
	chunkData[20] = 255
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since name length is incorrect")
	}

	// restore backup
	chunkData = make([]byte, len(backup))
	copy(chunkData, backup)

	// mess with name length header to trigger a leftover bytes to read error
	chunkData[20] = 3
	if err := recoveredMetadata.binaryGet(chunkData); err == nil {
		t.Fatal("Expected binaryGet to fail since name length is too small")
	}
}

func TestMetadataSerializerLengthCheck(t *testing.T) {
	metadata := *getTestMetadata()

	// make a slice that is too small to contain the metadata
	serializedMetadata := make([]byte, 4)

	if err := metadata.binaryPut(serializedMetadata); err == nil {
		t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small")
	}

}
swarm/storage/mru/query.go (new file, 78 lines)
@@ -0,0 +1,78 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// Query is used to specify constraints when performing an update lookup
// TimeLimit indicates an upper bound for the search. Set to 0 for "now"
type Query struct {
	View
	Hint      lookup.Epoch
	TimeLimit uint64
}

// FromValues deserializes this instance from a string key-value store
// useful to parse query strings
func (q *Query) FromValues(values Values) error {
	time, _ := strconv.ParseUint(values.Get("time"), 10, 64)
	q.TimeLimit = uint64(time)

	level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32)
	q.Hint.Level = uint8(level)
	q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64)
	if q.View.User == (common.Address{}) {
		return q.View.FromValues(values)
	}
	return nil
}

// AppendValues serializes this structure into the provided string key-value store
// useful to build query strings
func (q *Query) AppendValues(values Values) {
	if q.TimeLimit != 0 {
		values.Set("time", fmt.Sprintf("%d", q.TimeLimit))
	}
	if q.Hint.Level != 0 {
		values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level))
	}
	if q.Hint.Time != 0 {
		values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time))
	}
	q.View.AppendValues(values)
}

// NewQuery constructs a Query structure to find updates on or before `time`
// if time == 0, the latest update will be looked up
func NewQuery(view *View, time uint64, hint lookup.Epoch) *Query {
	return &Query{
		TimeLimit: time,
		View:      *view,
		Hint:      hint,
	}
}

// NewQueryLatest generates lookup parameters that look for the latest version of a resource
func NewQueryLatest(view *View, hint lookup.Epoch) *Query {
	return NewQuery(view, 0, hint)
}
swarm/storage/mru/query_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
	"testing"
)

func getTestQuery() *Query {
	ul := getTestID()
	return &Query{
		TimeLimit: 5000,
		View:      ul.View,
		Hint:      ul.Epoch,
	}
}

func TestQueryValues(t *testing.T) {
	var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"}

	query := getTestQuery()
	testValueSerializer(t, query, expected)

}
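For reference, the key/value pairs in the test above correspond to a plain URL query string. The standalone sketch below, not part of this change, simply encodes those same values with net/url to show what a manifest-less resource query looks like on the wire; the parameter names come from Query.FromValues/AppendValues and the View serializer shown earlier, and the values are the ones the test uses.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("topic", "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000") // resource topic
	v.Set("user", "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c")                           // resource owner address
	v.Set("time", "5000")     // upper bound of the lookup; 0 / absent means "now"
	v.Set("hint.level", "25") // lookup hint: epoch level
	v.Set("hint.time", "1000") // lookup hint: epoch time
	fmt.Println(v.Encode())
}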
@@ -19,157 +19,218 @@ package mru

import (
	"bytes"
	"encoding/json"
+	"hash"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

-// updateRequestJSON represents a JSON-serialized UpdateRequest
-type updateRequestJSON struct {
-	Name      string `json:"name,omitempty"`
-	Frequency uint64 `json:"frequency,omitempty"`
-	StartTime uint64 `json:"startTime,omitempty"`
-	Owner     string `json:"ownerAddr,omitempty"`
-	RootAddr  string `json:"rootAddr,omitempty"`
-	MetaHash  string `json:"metaHash,omitempty"`
-	Version   uint32 `json:"version,omitempty"`
-	Period    uint32 `json:"period,omitempty"`
-	Data      string `json:"data,omitempty"`
-	Multihash bool   `json:"multiHash"`
-	Signature string `json:"signature,omitempty"`
-}

// Request represents an update and/or resource create message
type Request struct {
-	SignedResourceUpdate
-	metadata ResourceMetadata
-	isNew    bool
+	ResourceUpdate // actual content that will be put on the chunk, less signature
+	Signature  *Signature
+	idAddr     storage.Address // cached chunk address for the update (not serialized, for internal use)
+	binaryData []byte          // cached serialized data (does not get serialized again!, for efficiency/internal use)
}

-var zeroAddr = common.Address{}
+// updateRequestJSON represents a JSON-serialized UpdateRequest
+type updateRequestJSON struct {
+	ID
+	ProtocolVersion uint8  `json:"protocolVersion"`
+	Data            string `json:"data,omitempty"`
+	Signature       string `json:"signature,omitempty"`
+}

-// NewCreateUpdateRequest returns a ready to sign request to create and initialize a resource with data
-func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) {
+// Request layout
+// resourceUpdate bytes
+// SignatureLength bytes
+const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength

-	request, err := NewCreateRequest(metadata)
-	if err != nil {
-		return nil, err
-	}
+// NewFirstRequest returns a ready to sign request to publish a first update
+func NewFirstRequest(topic Topic) *Request {
+	request := new(Request)

	// get the current time
	now := TimestampProvider.Now().Time
+	request.Epoch = lookup.GetFirstEpoch(now)
+	request.View.Topic = topic
+	request.Header.Version = ProtocolVersion

-	request.version = 1
-	request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency)
-	if err != nil {
-		return nil, err
-	}
-	return request, nil
-}
-
-// NewCreateRequest returns a request to create a new resource
-func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) {
-	if metadata.StartTime.Time == 0 { // get the current time
-		metadata.StartTime = TimestampProvider.Now()
-	}
-
-	if metadata.Owner == zeroAddr {
-		return nil, NewError(ErrInvalidValue, "OwnerAddr is not set")
-	}
-
-	request = &Request{
-		metadata: *metadata,
-	}
-	request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash()
-	request.isNew = true
-	return request, nil
-}
-
-// Frequency returns the resource's expected update frequency
-func (r *Request) Frequency() uint64 {
-	return r.metadata.Frequency
-}
-
-// Name returns the resource human-readable name
-func (r *Request) Name() string {
-	return r.metadata.Name
-}
-
-// Multihash returns true if the resource data should be interpreted as a multihash
-func (r *Request) Multihash() bool {
-	return r.multihash
-}
-
-// Period returns in which period the resource will be published
-func (r *Request) Period() uint32 {
-	return r.period
-}
-
-// Version returns the resource version to publish
-func (r *Request) Version() uint32 {
-	return r.version
-}
-
-// RootAddr returns the metadata chunk address
-func (r *Request) RootAddr() storage.Address {
-	return r.rootAddr
-}
-
-// StartTime returns the time that the resource was/will be created at
-func (r *Request) StartTime() Timestamp {
-	return r.metadata.StartTime
-}
-
-// Owner returns the resource owner's address
-func (r *Request) Owner() common.Address {
-	return r.metadata.Owner
-}
-
-// Sign executes the signature to validate the resource and sets the owner address field
-func (r *Request) Sign(signer Signer) error {
-	if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() {
-		return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource")
-	}
-
-	if err := r.SignedResourceUpdate.Sign(signer); err != nil {
-		return err
-	}
-	r.metadata.Owner = signer.Address()
-	return nil
+	return request
}

// SetData stores the payload data the resource will be updated with
-func (r *Request) SetData(data []byte, multihash bool) {
+func (r *Request) SetData(data []byte) {
	r.data = data
-	r.multihash = multihash
-	r.signature = nil
-	if !r.isNew {
-		r.metadata.Frequency = 0 // mark as update
-	}
-}
-
-func (r *Request) IsNew() bool {
-	return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1)
+	r.Signature = nil
}

+// IsUpdate returns true if this request models a signed update or otherwise it is a signature request
func (r *Request) IsUpdate() bool {
-	return r.signature != nil
+	return r.Signature != nil
+}
+
+// Verify checks that signatures are valid and that the signer owns the resource to be updated
+func (r *Request) Verify() (err error) {
+	if len(r.data) == 0 {
+		return NewError(ErrInvalidValue, "Update does not contain data")
+	}
+	if r.Signature == nil {
+		return NewError(ErrInvalidSignature, "Missing signature field")
+	}
+
+	digest, err := r.GetDigest()
+	if err != nil {
+		return err
+	}
+
+	// get the address of the signer (which also checks that it's a valid signature)
+	r.View.User, err = getUserAddr(digest, *r.Signature)
+	if err != nil {
+		return err
+	}
+
+	// check that the lookup information contained in the chunk matches the updateAddr (chunk search key)
+	// that was used to retrieve this chunk
+	// if this validation fails, someone forged a chunk.
+	if !bytes.Equal(r.idAddr, r.Addr()) {
+		return NewError(ErrInvalidSignature, "Signature address does not match with update user address")
+	}
+
+	return nil
+}
+
+// Sign executes the signature to validate the resource
+func (r *Request) Sign(signer Signer) error {
+	r.View.User = signer.Address()
+	r.binaryData = nil //invalidate serialized data
+	digest, err := r.GetDigest() // computes digest and serializes into .binaryData
+	if err != nil {
+		return err
+	}
+
+	signature, err := signer.Sign(digest)
+	if err != nil {
+		return err
+	}
+
+	// Although the Signer interface returns the public address of the signer,
+	// recover it from the signature to see if they match
+	userAddr, err := getUserAddr(digest, signature)
+	if err != nil {
+		return NewError(ErrInvalidSignature, "Error verifying signature")
+	}
+
+	if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign!
+		return NewError(ErrInvalidSignature, "Signer address does not match update user address")
+	}
+
+	r.Signature = &signature
+	r.idAddr = r.Addr()
+	return nil
+}
+
+// GetDigest creates the resource update digest used in signatures
+// the serialized payload is cached in .binaryData
+func (r *Request) GetDigest() (result common.Hash, err error) {
+	hasher := hashPool.Get().(hash.Hash)
+	defer hashPool.Put(hasher)
+	hasher.Reset()
+	dataLength := r.ResourceUpdate.binaryLength()
+	if r.binaryData == nil {
+		r.binaryData = make([]byte, dataLength+signatureLength)
+		if err := r.ResourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil {
+			return result, err
+		}
+	}
+	hasher.Write(r.binaryData[:dataLength]) //everything except the signature.
+
+	return common.BytesToHash(hasher.Sum(nil)), nil
+}
+
+// create an update chunk.
+func (r *Request) toChunk() (storage.Chunk, error) {
+
+	// Check that the update is signed and serialized
+	// For efficiency, data is serialized during signature and cached in
+	// the binaryData field when computing the signature digest in .getDigest()
+	if r.Signature == nil || r.binaryData == nil {
+		return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.")
+	}
+
+	resourceUpdateLength := r.ResourceUpdate.binaryLength()
+
+	// signature is the last item in the chunk data
+	copy(r.binaryData[resourceUpdateLength:], r.Signature[:])
+
+	chunk := storage.NewChunk(r.idAddr, r.binaryData)
+	return chunk, nil
+}
+
+// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
+func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
+	// for update chunk layout see Request definition
+
+	//deserialize the resource update portion
+	if err := r.ResourceUpdate.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil {
+		return err
+	}
+
+	// Extract the signature
+	var signature *Signature
+	cursor := r.ResourceUpdate.binaryLength()
+	sigdata := chunkdata[cursor : cursor+signatureLength]
+	if len(sigdata) > 0 {
+		signature = &Signature{}
+		copy(signature[:], sigdata)
+	}
+
+	r.Signature = signature
+	r.idAddr = updateAddr
+	r.binaryData = chunkdata
+
+	return nil
+
+}
+
+// FromValues deserializes this instance from a string key-value store
+// useful to parse query strings
+func (r *Request) FromValues(values Values, data []byte) error {
+	signatureBytes, err := hexutil.Decode(values.Get("signature"))
+	if err != nil {
+		r.Signature = nil
+	} else {
+		if len(signatureBytes) != signatureLength {
+			return NewError(ErrInvalidSignature, "Incorrect signature length")
+		}
+		r.Signature = new(Signature)
+		copy(r.Signature[:], signatureBytes)
+	}
+	err = r.ResourceUpdate.FromValues(values, data)
+	if err != nil {
+		return err
+	}
+	r.idAddr = r.Addr()
+	return err
+}
+
+// AppendValues serializes this structure into the provided string key-value store
+// useful to build query strings
+func (r *Request) AppendValues(values Values) []byte {
+	if r.Signature != nil {
+		values.Set("signature", hexutil.Encode(r.Signature[:]))
+	}
+	return r.ResourceUpdate.AppendValues(values)
}

// fromJSON takes an update request JSON and populates an UpdateRequest
func (r *Request) fromJSON(j *updateRequestJSON) error {

-	r.version = j.Version
-	r.period = j.Period
-	r.multihash = j.Multihash
-	r.metadata.Name = j.Name
-	r.metadata.Frequency = j.Frequency
-	r.metadata.StartTime.Time = j.StartTime
-
-	if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil {
-		return err
-	}
+	r.ID = j.ID
+	r.Header.Version = j.ProtocolVersion

	var err error
	if j.Data != "" {
@@ -179,73 +240,18 @@ func (r *Request) fromJSON(j *updateRequestJSON) error {
		}
	}

-	var declaredRootAddr storage.Address
-	var declaredMetaHash []byte
-
-	declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.AddressLength, "rootAddr")
-	if err != nil {
-		return err
-	}
-	declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash")
-	if err != nil {
-		return err
-	}
-
-	if r.IsNew() {
-		// for new resource creation, rootAddr and metaHash are optional because
-		// we can derive them from the content itself.
-		// however, if the user sent them, we check them for consistency.
-
-		r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash()
-		if err != nil {
-			return err
-		}
-		if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) {
-			return NewError(ErrInvalidValue, "rootAddr does not match resource metadata")
-		}
-		if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) {
-			return NewError(ErrInvalidValue, "metaHash does not match resource metadata")
-		}
-
-	} else {
-		//Update message
-		r.rootAddr = declaredRootAddr
-		r.metaHash = declaredMetaHash
-	}
-
	if j.Signature != "" {
		sigBytes, err := hexutil.Decode(j.Signature)
		if err != nil || len(sigBytes) != signatureLength {
			return NewError(ErrInvalidSignature, "Cannot decode signature")
		}
-		r.signature = new(Signature)
-		r.updateAddr = r.UpdateAddr()
-		copy(r.signature[:], sigBytes)
+		r.Signature = new(Signature)
+		r.idAddr = r.Addr()
+		copy(r.Signature[:], sigBytes)
	}
	return nil
}

-func decodeHexArray(dst []byte, src, name string) error {
-	bytes, err := decodeHexSlice(src, len(dst), name)
-	if err != nil {
-		return err
-	}
-	if bytes != nil {
-		copy(dst, bytes)
-	}
-	return nil
-}
-
-func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) {
-	if src != "" {
-		bytes, err = hexutil.Decode(src)
-		if err != nil || len(bytes) != expectedLength {
-			return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name)
-		}
-	}
-	return bytes, nil
-}
-
// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object
// Implements json.Unmarshaler interface
func (r *Request) UnmarshalJSON(rawData []byte) error {
@@ -259,38 +265,19 @@ func (r *Request) UnmarshalJSON(rawData []byte) error {
// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array
// Implements json.Marshaler interface
func (r *Request) MarshalJSON() (rawData []byte, err error) {
-	var signatureString, dataHashString, rootAddrString, metaHashString string
-	if r.signature != nil {
-		signatureString = hexutil.Encode(r.signature[:])
+	var signatureString, dataString string
+	if r.Signature != nil {
+		signatureString = hexutil.Encode(r.Signature[:])
	}
	if r.data != nil {
-		dataHashString = hexutil.Encode(r.data)
-	}
-	if r.rootAddr != nil {
-		rootAddrString = hexutil.Encode(r.rootAddr)
-	}
-	if r.metaHash != nil {
-		metaHashString = hexutil.Encode(r.metaHash)
-	}
-	var ownerAddrString string
-	if r.metadata.Frequency == 0 {
-		ownerAddrString = ""
-	} else {
-		ownerAddrString = hexutil.Encode(r.metadata.Owner[:])
+		dataString = hexutil.Encode(r.data)
	}

	requestJSON := &updateRequestJSON{
-		Name:      r.metadata.Name,
-		Frequency: r.metadata.Frequency,
-		StartTime: r.metadata.StartTime.Time,
-		Version:   r.version,
-		Period:    r.period,
-		Owner:     ownerAddrString,
-		Data:      dataHashString,
-		Multihash: r.multihash,
-		Signature: signatureString,
-		RootAddr:  rootAddrString,
-		MetaHash:  metaHashString,
+		ID:              r.ID,
+		ProtocolVersion: r.Header.Version,
+		Data:            dataString,
+		Signature:       signatureString,
	}

	return json.Marshal(requestJSON)
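To see how the pieces of the new Request API fit together, here is a hypothetical helper written as if it lived inside package mru; it is not part of this change, and the helper name and the idea of calling Verify immediately after signing are illustrative. It only uses identifiers that appear in the diff above (NewTopic, NewFirstRequest, SetData, Sign, Verify).

package mru

// publishFirstUpdate is a hypothetical helper (not part of this change) showing the
// intended call order of the new Request API: build a first request for a topic,
// attach the payload, sign it, and sanity-check the result with Verify.
func publishFirstUpdate(signer Signer, topicName string, payload []byte) (*Request, error) {
	topic, err := NewTopic(topicName, nil)
	if err != nil {
		return nil, err
	}
	request := NewFirstRequest(topic) // picks the first epoch based on the current time
	request.SetData(payload)          // SetData invalidates any previous signature
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if err := request.Verify(); err != nil { // recovers the signer and checks the chunk address
		return nil, err
	}
	return request, nil
}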
@@ -1,11 +1,32 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package mru

import (
+	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"reflect"
	"testing"

+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

func areEqualJSON(s1, s2 string) (bool, error) {
@@ -29,19 +50,13 @@ func areEqualJSON(s1, s2 string) (bool, error) {
// while also checking cryptographically that only the owner of a resource can update it.
func TestEncodingDecodingUpdateRequests(t *testing.T) {

-	signer := newCharlieSigner()  //Charlie, our good guy
-	falseSigner := newBobSigner() //Bob will play the bad guy again
+	charlie := newCharlieSigner() //Charlie
+	bob := newBobSigner()         //Bob

	// Create a resource to our good guy Charlie's name
-	createRequest, err := NewCreateRequest(&ResourceMetadata{
-		Name:      "a good resource name",
-		Frequency: 300,
-		StartTime: Timestamp{Time: 1528900000},
-		Owner:     signer.Address()})
-
-	if err != nil {
-		t.Fatalf("Error creating resource name: %s", err)
-	}
+	topic, _ := NewTopic("a good resource name", nil)
+	createRequest := NewFirstRequest(topic)
+	createRequest.User = charlie.Address()

	// We now encode the create message to simulate we send it over the wire
	messageRawData, err := createRequest.MarshalJSON()
@@ -64,27 +79,21 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
	// and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct
	// proof of ownership

-	metaHash := createRequest.metaHash
-	rootAddr := createRequest.rootAddr
-	const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101"
-	const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}`
+	const expectedSignature = "0x32c2d2c7224e24e4d3ae6a10595fc6e945f1b3ecdf548a04d8247c240a50c9240076aa7730abad6c8a46dfea00cfb8f43b6211f02db5c4cc5ed8584cb0212a4d00"
+	const expectedJSON = `{"view":{"topic":"0x6120676f6f64207265736f75726365206e616d65000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}`

	//Put together an unsigned update request that we will serialize to send it to the signer.
	data := []byte("This hour's update: Swarm 99.0 has been released!")
	request := &Request{
-		SignedResourceUpdate: SignedResourceUpdate{
-			resourceUpdate: resourceUpdate{
-				updateHeader: updateHeader{
-					UpdateLookup: UpdateLookup{
-						period:   7,
-						version:  1,
-						rootAddr: rootAddr,
-					},
-					multihash: false,
-					metaHash:  metaHash,
-				},
-				data: data,
+		ResourceUpdate: ResourceUpdate{
+			ID: ID{
+				Epoch: lookup.Epoch{
+					Time:  1000,
+					Level: 1,
				},
+				View: createRequest.ResourceUpdate.View,
			},
+			data: data,
		},
	}

@@ -110,11 +119,11 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
	}

	//sign the request and see if it matches our predefined signature above.
-	if err := recoveredRequest.Sign(signer); err != nil {
+	if err := recoveredRequest.Sign(charlie); err != nil {
		t.Fatalf("Error signing request: %s", err)
	}

-	compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature)
+	compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature)

	// mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON
	// to alter the signature field.
@@ -129,9 +138,9 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
		t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
	}

-	// Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource,
+	// Now imagine Bob wants to create an update of his own about the same resource,
	// signing a message with his private key
-	if err := request.Sign(falseSigner); err != nil {
+	if err := request.Sign(bob); err != nil {
		t.Fatalf("Error signing: %s", err)
	}

@@ -147,29 +156,159 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
		t.Fatalf("Error decoding message:%s", err)
	}

-	// Before discovering Bob's misdemeanor, let's see what would happen if we mess
+	// Before checking what happened with Bob's update, let's see what would happen if we mess
	// with the signature big time to see if Verify catches it
-	savedSignature := *recoveredRequest.signature                               // save the signature for later
-	binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature
+	savedSignature := *recoveredRequest.Signature                               // save the signature for later
+	binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature
	if err = recoveredRequest.Verify(); err == nil {
		t.Fatal("Expected Verify to fail on corrupt signature")
	}

-	// restore the Evil Bob's signature from corruption
-	*recoveredRequest.signature = savedSignature
+	// restore the Bob's signature from corruption
+	*recoveredRequest.Signature = savedSignature

-	// Now the signature is not corrupt, however Verify should now fail because Bob doesn't own the resource
-	if err = recoveredRequest.Verify(); err == nil {
-		t.Fatalf("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker:%s", err)
+	// Now the signature is not corrupt
+	if err = recoveredRequest.Verify(); err != nil {
+		t.Fatal(err)
	}

-	// Sign with our friend Charlie's private key
-	if err := recoveredRequest.Sign(signer); err != nil {
+	// Reuse object and sign with our friend Charlie's private key
+	if err := recoveredRequest.Sign(charlie); err != nil {
		t.Fatalf("Error signing with the correct private key: %s", err)
	}

-	// And now, Verify should work since this resource belongs to Charlie
+	// And now, Verify should work since this update now belongs to Charlie
	if err = recoveredRequest.Verify(); err != nil {
-		t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err)
+		t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err)
+	}
+
+	// mess with the lookup key to make sure Verify fails:
+	recoveredRequest.Time = 77999 // this will alter the lookup key
+	if err = recoveredRequest.Verify(); err == nil {
+		t.Fatalf("Expected Verify to fail since the lookup key has been altered")
	}
}

+func getTestRequest() *Request {
+	return &Request{
+		ResourceUpdate: *getTestResourceUpdate(),
+	}
+}
+
+func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
+
+	// Test that parseUpdate fails if the chunk is too small
+	var r Request
+	if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil {
+		t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
+	}
+
+	r = *getTestRequest()
+
+	_, err := r.toChunk()
+	if err == nil {
+		t.Fatal("Expected request.toChunk to fail when there is no data")
+	}
+	r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data
+	_, err = r.toChunk()
+	if err == nil {
+		t.Fatal("expected request.toChunk to fail when there is no signature")
+	}
+
+	charlie := newCharlieSigner()
+	if err := r.Sign(charlie); err != nil {
+		t.Fatalf("error signing:%s", err)
+	}
+
+	chunk, err := r.toChunk()
+	if err != nil {
+		t.Fatalf("error creating update chunk:%s", err)
+	}
+
+	compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00")

+	var recovered Request
+	recovered.fromChunk(chunk.Address(), chunk.Data())
+	if !reflect.DeepEqual(recovered, r) {
+		t.Fatal("Expected recovered SignedResource update to equal the original one")
+	}
+}
+
+// check that signature address matches update signer address
+func TestReverse(t *testing.T) {
+
+	epoch := lookup.Epoch{
+		Time:  7888,
+		Level: 6,
+	}
+
+	// make fake timeProvider
+	timeProvider := &fakeTimeProvider{
+		currentTime: startTime.Time,
+	}
+
+	// signer containing private key
+	signer := newAliceSigner()
+
+	// set up rpc and create resourcehandler
+	_, _, teardownTest, err := setupTest(timeProvider, signer)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer teardownTest()
+
+	topic, _ := NewTopic("Cervantes quotes", nil)
+	view := View{
+		Topic: topic,
+		User:  signer.Address(),
+	}
+
+	data := []byte("Donde una puerta se cierra, otra se abre")
+
+	request := new(Request)
+	request.View = view
+	request.Epoch = epoch
+	request.data = data
+
+	// generate a chunk key for this request
+	key := request.Addr()
+
+	if err = request.Sign(signer); err != nil {
+		t.Fatal(err)
+	}
+
+	chunk, err := request.toChunk()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check that we can recover the owner account from the update chunk's signature
+	var checkUpdate Request
+	if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil {
+		t.Fatal(err)
+	}
+	checkdigest, err := checkUpdate.GetDigest()
+	if err != nil {
+		t.Fatal(err)
+	}
+	recoveredaddress, err := getUserAddr(checkdigest, *checkUpdate.Signature)
+	if err != nil {
+		t.Fatalf("Retrieve address from signature fail: %v", err)
+	}
+	originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
+
+	// check that the metadata retrieved from the chunk matches what we gave it
+	if recoveredaddress != originaladdress {
+		t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress)
+	}
+
+	if !bytes.Equal(key[:], chunk.Address()[:]) {
+		t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address())
+	}
+	if epoch != checkUpdate.Epoch {
+		t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String())
+	}
+	if !bytes.Equal(data, checkUpdate.data) {
+		t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data)
	}
}
@@ -1,76 +0,0 @@
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package mru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultStoreTimeout = 4000 * time.Millisecond
|
|
||||||
hasherCount = 8
|
|
||||||
resourceHashAlgorithm = storage.SHA3Hash
|
|
||||||
defaultRetrieveTimeout = 100 * time.Millisecond
|
|
||||||
)
|
|
||||||
|
|
||||||
// resource caches resource data and the metadata of its root chunk.
|
|
||||||
type resource struct {
|
|
||||||
resourceUpdate
|
|
||||||
ResourceMetadata
|
|
||||||
*bytes.Reader
|
|
||||||
lastKey storage.Address
|
|
||||||
updated time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *resource) Context() context.Context {
|
|
||||||
return context.TODO()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO Expire content after a defined period (to force resync)
|
|
||||||
func (r *resource) isSynced() bool {
|
|
||||||
return !r.updated.IsZero()
|
|
||||||
}
|
|
||||||
|
|
||||||
// implements storage.LazySectionReader
|
|
||||||
func (r *resource) Size(ctx context.Context, _ chan bool) (int64, error) {
|
|
||||||
if !r.isSynced() {
|
|
||||||
return 0, NewError(ErrNotSynced, "Not synced")
|
|
||||||
}
|
|
||||||
return int64(len(r.resourceUpdate.data)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the resource's human-readable name
|
|
||||||
func (r *resource) Name() string {
|
|
||||||
return r.ResourceMetadata.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper function to calculate the next update period number from the current time, start time and frequency
|
|
||||||
func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) {
|
|
||||||
if current < start {
|
|
||||||
return 0, NewErrorf(ErrInvalidValue, "given current time value %d < start time %d", current, start)
|
|
||||||
}
|
|
||||||
if frequency == 0 {
|
|
||||||
return 0, NewError(ErrInvalidValue, "frequency is 0")
|
|
||||||
}
|
|
||||||
timeDiff := current - start
|
|
||||||
period := timeDiff / frequency
|
|
||||||
return uint32(period + 1), nil
|
|
||||||
}
|
|
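For orientation, here is a rough, self-contained sketch of the period arithmetic the getNextPeriod helper above performs; the concrete numbers are made up for illustration and are not taken from the tests.

package main

import "fmt"

// nextPeriod mirrors the getNextPeriod helper above: update periods are
// 1-based intervals of `frequency` seconds counted from `start`, and the
// helper returns the period number following the last completed interval.
func nextPeriod(start, current, frequency uint64) uint32 {
	timeDiff := current - start
	return uint32(timeDiff/frequency + 1)
}

func main() {
	// with start=4200, frequency=42 and current=4263: (4263-4200)/42 = 1, plus 1 = 2
	fmt.Println(nextPeriod(4200, 4263, 42))
}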
@@ -60,7 +60,16 @@ func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error)
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// PublicKey returns the public key of the signer's private key
|
// Address returns the Ethereum address of the signer
|
||||||
func (s *GenericSigner) Address() common.Address {
|
func (s *GenericSigner) Address() common.Address {
|
||||||
return s.address
|
return s.address
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getUserAddr extracts the address of the resource update signer
|
||||||
|
func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) {
|
||||||
|
pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
|
||||||
|
if err != nil {
|
||||||
|
return common.Address{}, err
|
||||||
|
}
|
||||||
|
return crypto.PubkeyToAddress(*pub), nil
|
||||||
|
}
|
||||||
|
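As a hedged illustration of the mechanism getUserAddr relies on, this self-contained sketch signs a digest and recovers the signer's address from the resulting 65-byte signature; the payload is arbitrary and the key hex is reused from the test fixtures only for convenience.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// sign an arbitrary digest with a throwaway key
	key, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	digest := common.BytesToHash(crypto.Keccak256([]byte("some update payload")))

	sig, _ := crypto.Sign(digest.Bytes(), key) // 65-byte [R || S || V] signature

	// recover the signer's address from the signature, as getUserAddr does
	pub, _ := crypto.SigToPub(digest.Bytes(), sig)
	recovered := crypto.PubkeyToAddress(*pub)

	fmt.Println(recovered == crypto.PubkeyToAddress(key.PublicKey)) // true
}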
@@ -1,902 +0,0 @@
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package mru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/binary"
|
|
||||||
"flag"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/contracts/ens"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/multihash"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
loglevel = flag.Int("loglevel", 3, "loglevel")
|
|
||||||
testHasher = storage.MakeHashFunc(resourceHashAlgorithm)()
|
|
||||||
startTime = Timestamp{
|
|
||||||
Time: uint64(4200),
|
|
||||||
}
|
|
||||||
resourceFrequency = uint64(42)
|
|
||||||
cleanF func()
|
|
||||||
resourceName = "føø.bar"
|
|
||||||
hashfunc = storage.MakeHashFunc(storage.DefaultHash)
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
flag.Parse()
|
|
||||||
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
|
|
||||||
}
|
|
||||||
|
|
||||||
// simulated timeProvider
|
|
||||||
type fakeTimeProvider struct {
|
|
||||||
currentTime uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fakeTimeProvider) Tick() {
|
|
||||||
f.currentTime++
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *fakeTimeProvider) Now() Timestamp {
|
|
||||||
return Timestamp{
|
|
||||||
Time: f.currentTime,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
|
|
||||||
|
|
||||||
// Test that parseUpdate fails if the chunk is too small
|
|
||||||
var r SignedResourceUpdate
|
|
||||||
if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil {
|
|
||||||
t.Fatalf("Expected parseUpdate to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
|
|
||||||
}
|
|
||||||
|
|
||||||
r = SignedResourceUpdate{}
|
|
||||||
// Test that parseUpdate fails when the length header does not match the data array length
|
|
||||||
fakeChunk := make([]byte, 150)
|
|
||||||
binary.LittleEndian.PutUint16(fakeChunk, 44)
|
|
||||||
if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil {
|
|
||||||
t.Fatal("Expected parseUpdate to fail when the header length does not match the actual data array passed in")
|
|
||||||
}
|
|
||||||
|
|
||||||
r = SignedResourceUpdate{
|
|
||||||
resourceUpdate: resourceUpdate{
|
|
||||||
updateHeader: updateHeader{
|
|
||||||
UpdateLookup: UpdateLookup{
|
|
||||||
rootAddr: make([]byte, 79), // put the wrong length, should be storage.AddressLength
|
|
||||||
},
|
|
||||||
metaHash: nil,
|
|
||||||
multihash: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_, err := r.toChunk()
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("Expected newUpdateChunk to fail when rootAddr or metaHash have the wrong length")
|
|
||||||
}
|
|
||||||
r.rootAddr = make([]byte, storage.AddressLength)
|
|
||||||
r.metaHash = make([]byte, storage.AddressLength)
|
|
||||||
_, err = r.toChunk()
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("Expected newUpdateChunk to fail when there is no data")
|
|
||||||
}
|
|
||||||
r.data = make([]byte, 79) // put some arbitrary length data
|
|
||||||
_, err = r.toChunk()
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected newUpdateChunk to fail when there is no signature", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
alice := newAliceSigner()
|
|
||||||
if err := r.Sign(alice); err != nil {
|
|
||||||
t.Fatalf("error signing:%s", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
_, err = r.toChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error creating update chunk:%s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.multihash = true
|
|
||||||
r.data[1] = 79 // mess with the multihash, corrupting one byte of it.
|
|
||||||
if err := r.Sign(alice); err == nil {
|
|
||||||
t.Fatal("expected Sign() to fail when an invalid multihash is in data and multihash=true", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that signature address matches update signer address
|
|
||||||
func TestReverse(t *testing.T) {
|
|
||||||
|
|
||||||
period := uint32(4)
|
|
||||||
version := uint32(2)
|
|
||||||
|
|
||||||
// make fake timeProvider
|
|
||||||
timeProvider := &fakeTimeProvider{
|
|
||||||
currentTime: startTime.Time,
|
|
||||||
}
|
|
||||||
|
|
||||||
// signer containing private key
|
|
||||||
signer := newAliceSigner()
|
|
||||||
|
|
||||||
// set up rpc and create resourcehandler
|
|
||||||
_, _, teardownTest, err := setupTest(timeProvider, signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardownTest()
|
|
||||||
|
|
||||||
metadata := ResourceMetadata{
|
|
||||||
Name: resourceName,
|
|
||||||
StartTime: startTime,
|
|
||||||
Frequency: resourceFrequency,
|
|
||||||
Owner: signer.Address(),
|
|
||||||
}
|
|
||||||
|
|
||||||
rootAddr, metaHash, _, err := metadata.serializeAndHash()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate some bogus data for the chunk and sign it
|
|
||||||
data := make([]byte, 8)
|
|
||||||
_, err = rand.Read(data)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
testHasher.Reset()
|
|
||||||
testHasher.Write(data)
|
|
||||||
|
|
||||||
update := &SignedResourceUpdate{
|
|
||||||
resourceUpdate: resourceUpdate{
|
|
||||||
updateHeader: updateHeader{
|
|
||||||
UpdateLookup: UpdateLookup{
|
|
||||||
period: period,
|
|
||||||
version: version,
|
|
||||||
rootAddr: rootAddr,
|
|
||||||
},
|
|
||||||
metaHash: metaHash,
|
|
||||||
},
|
|
||||||
data: data,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
// generate a hash for t=4200 version 1
|
|
||||||
key := update.UpdateAddr()
|
|
||||||
|
|
||||||
if err = update.Sign(signer); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
chunk, err := update.toChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that we can recover the owner account from the update chunk's signature
|
|
||||||
var checkUpdate SignedResourceUpdate
|
|
||||||
if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
checkdigest, err := checkUpdate.GetDigest()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Retrieve address from signature fail: %v", err)
|
|
||||||
}
|
|
||||||
originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
|
|
||||||
|
|
||||||
// check that the metadata retrieved from the chunk matches what we gave it
|
|
||||||
if recoveredaddress != originaladdress {
|
|
||||||
t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(key[:], chunk.Address()[:]) {
|
|
||||||
t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address())
|
|
||||||
}
|
|
||||||
if period != checkUpdate.period {
|
|
||||||
t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period)
|
|
||||||
}
|
|
||||||
if version != checkUpdate.version {
|
|
||||||
t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(data, checkUpdate.data) {
|
|
||||||
t.Fatalf("Expectedn data '%x', was '%x'", data, checkUpdate.data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// make updates and retrieve them based on periods and versions
|
|
||||||
func TestResourceHandler(t *testing.T) {
|
|
||||||
|
|
||||||
// make fake timeProvider
|
|
||||||
timeProvider := &fakeTimeProvider{
|
|
||||||
currentTime: startTime.Time,
|
|
||||||
}
|
|
||||||
|
|
||||||
// signer containing private key
|
|
||||||
signer := newAliceSigner()
|
|
||||||
|
|
||||||
rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardownTest()
|
|
||||||
|
|
||||||
// create a new resource
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
metadata := &ResourceMetadata{
|
|
||||||
Name: resourceName,
|
|
||||||
Frequency: resourceFrequency,
|
|
||||||
StartTime: Timestamp{Time: timeProvider.Now().Time},
|
|
||||||
Owner: signer.Address(),
|
|
||||||
}
|
|
||||||
|
|
||||||
request, err := NewCreateUpdateRequest(metadata)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
request.Sign(signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
err = rh.New(ctx, request)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
chunk, err := rh.chunkStore.Get(ctx, storage.Address(request.rootAddr))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if len(chunk.Data()) < 16 {
|
|
||||||
t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.Data()))
|
|
||||||
}
|
|
||||||
|
|
||||||
var recoveredMetadata ResourceMetadata
|
|
||||||
|
|
||||||
recoveredMetadata.binaryGet(chunk.Data())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if recoveredMetadata.StartTime.Time != timeProvider.currentTime {
|
|
||||||
t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime)
|
|
||||||
}
|
|
||||||
if recoveredMetadata.Frequency != resourceFrequency {
|
|
||||||
t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency)
|
|
||||||
}
|
|
||||||
|
|
||||||
// data for updates:
|
|
||||||
updates := []string{
|
|
||||||
"blinky",
|
|
||||||
"pinky",
|
|
||||||
"inky",
|
|
||||||
"clyde",
|
|
||||||
}
|
|
||||||
|
|
||||||
// update halfway to first period. period=1, version=1
|
|
||||||
resourcekey := make(map[string]storage.Address)
|
|
||||||
fwdClock(int(resourceFrequency/2), timeProvider)
|
|
||||||
data := []byte(updates[0])
|
|
||||||
request.SetData(data, false)
|
|
||||||
if err := request.Sign(signer); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// update on first period with version = 1 to make it fail since there is already one update with version=1
|
|
||||||
request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if request.version != 2 || request.period != 1 {
|
|
||||||
t.Fatal("Suggested period should be 1 and version should be 2")
|
|
||||||
}
|
|
||||||
|
|
||||||
request.version = 1 // force version 1 instead of 2 to make it fail
|
|
||||||
data = []byte(updates[1])
|
|
||||||
request.SetData(data, false)
|
|
||||||
if err := request.Sign(signer); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("Expected update to fail since this version already exists")
|
|
||||||
}
|
|
||||||
|
|
||||||
// update on second period with version = 1, correct. period=2, version=1
|
|
||||||
fwdClock(int(resourceFrequency/2), timeProvider)
|
|
||||||
request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
request.SetData(data, false)
|
|
||||||
if err := request.Sign(signer); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fwdClock(int(resourceFrequency), timeProvider)
|
|
||||||
// Update on third period, with version = 1
|
|
||||||
request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
data = []byte(updates[2])
|
|
||||||
request.SetData(data, false)
|
|
||||||
if err := request.Sign(signer); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// update just after third period
|
|
||||||
fwdClock(1, timeProvider)
|
|
||||||
request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if request.period != 3 || request.version != 2 {
|
|
||||||
t.Fatal("Suggested period should be 3 and version should be 2")
|
|
||||||
}
|
|
||||||
data = []byte(updates[3])
|
|
||||||
request.SetData(data, false)
|
|
||||||
|
|
||||||
if err := request.Sign(signer); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
rh.Close()
|
|
||||||
|
|
||||||
// check we can retrieve the updates after close
|
|
||||||
// it will match on second iteration startTime + (resourceFrequency * 3)
|
|
||||||
fwdClock(int(resourceFrequency*2)-1, timeProvider)
|
|
||||||
|
|
||||||
rhparams := &HandlerParams{}
|
|
||||||
|
|
||||||
rh2, err := NewTestHandler(datadir, rhparams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rsrc2, err := rh2.Load(context.TODO(), request.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// last update should be "clyde", version two, time= startTime + (resourcefrequency * 3)
|
|
||||||
if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
|
|
||||||
t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
|
|
||||||
}
|
|
||||||
if rsrc2.version != 2 {
|
|
||||||
t.Fatalf("resource version was %d, expected 2", rsrc2.version)
|
|
||||||
}
|
|
||||||
if rsrc2.period != 3 {
|
|
||||||
t.Fatalf("resource period was %d, expected 3", rsrc2.period)
|
|
||||||
}
|
|
||||||
log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)
|
|
||||||
|
|
||||||
// specific period, latest version
|
|
||||||
rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// check data
|
|
||||||
if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) {
|
|
||||||
t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
|
|
||||||
}
|
|
||||||
log.Debug("Historical lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)
|
|
||||||
|
|
||||||
// specific period, specific version
|
|
||||||
lookupParams := LookupVersion(request.rootAddr, 3, 1)
|
|
||||||
rsrc, err = rh2.Lookup(ctx, lookupParams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// check data
|
|
||||||
if !bytes.Equal(rsrc.data, []byte(updates[2])) {
|
|
||||||
t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2])
|
|
||||||
}
|
|
||||||
log.Debug("Specific version lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)
|
|
||||||
|
|
||||||
// we are now at third update
|
|
||||||
// check backwards stepping to the first
|
|
||||||
for i := 1; i >= 0; i-- {
|
|
||||||
rsrc, err := rh2.LookupPrevious(ctx, lookupParams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(rsrc.data, []byte(updates[i])) {
|
|
||||||
t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i])
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// beyond the first should yield an error
|
|
||||||
rsrc, err = rh2.LookupPrevious(ctx, lookupParams)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("expected previous to fail, returned period %d version %d data %v", rsrc.period, rsrc.version, rsrc.data)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMultihash(t *testing.T) {
|
|
||||||
|
|
||||||
// make fake timeProvider
|
|
||||||
timeProvider := &fakeTimeProvider{
|
|
||||||
currentTime: startTime.Time,
|
|
||||||
}
|
|
||||||
|
|
||||||
// signer containing private key
|
|
||||||
signer := newAliceSigner()
|
|
||||||
|
|
||||||
// set up rpc and create resourcehandler
|
|
||||||
rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardownTest()
|
|
||||||
|
|
||||||
// create a new resource
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
metadata := &ResourceMetadata{
|
|
||||||
Name: resourceName,
|
|
||||||
Frequency: resourceFrequency,
|
|
||||||
StartTime: Timestamp{Time: timeProvider.Now().Time},
|
|
||||||
Owner: signer.Address(),
|
|
||||||
}
|
|
||||||
|
|
||||||
mr, err := NewCreateRequest(metadata)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
err = rh.New(ctx, mr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// we're naïvely assuming keccak256 for swarm hashes
|
|
||||||
// if it ever changes this test should also change
|
|
||||||
multihashbytes := ens.EnsNode("foo")
|
|
||||||
multihashmulti := multihash.ToMultihash(multihashbytes.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr.SetData(multihashmulti, true)
|
|
||||||
mr.Sign(signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
sha1bytes := make([]byte, multihash.MultihashLength)
|
|
||||||
sha1multi := multihash.ToMultihash(sha1bytes)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr.SetData(sha1multi, true)
|
|
||||||
mr.Sign(signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// invalid multihashes
|
|
||||||
mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr.SetData(multihashmulti[1:], true)
|
|
||||||
mr.Sign(signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = rh.Update(ctx, &mr.SignedResourceUpdate)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("Expected update to fail with first byte skipped")
|
|
||||||
}
|
|
||||||
mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr.SetData(multihashmulti[:len(multihashmulti)-2], true)
|
|
||||||
mr.Sign(signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = rh.Update(ctx, &mr.SignedResourceUpdate)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("Expected update to fail with last byte skipped")
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := getUpdateDirect(rh.Handler, multihashkey)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
multihashdecode, err := multihash.FromMultihash(data)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
|
|
||||||
t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
|
|
||||||
}
|
|
||||||
data, err = getUpdateDirect(rh.Handler, sha1key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
shadecode, err := multihash.FromMultihash(data)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(shadecode, sha1bytes) {
|
|
||||||
t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
|
|
||||||
}
|
|
||||||
rh.Close()
|
|
||||||
|
|
||||||
rhparams := &HandlerParams{}
|
|
||||||
// test with signed data
|
|
||||||
rh2, err := NewTestHandler(datadir, rhparams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr, err = NewCreateRequest(metadata)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
err = rh2.New(ctx, mr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mr.SetData(multihashmulti, true)
|
|
||||||
mr.Sign(signer)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr.SetData(sha1multi, true)
|
|
||||||
mr.Sign(signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err = getUpdateDirect(rh2.Handler, multihashsignedkey)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
multihashdecode, err = multihash.FromMultihash(data)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
|
|
||||||
t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
|
|
||||||
}
|
|
||||||
data, err = getUpdateDirect(rh2.Handler, sha1signedkey)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
shadecode, err = multihash.FromMultihash(data)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(shadecode, sha1bytes) {
|
|
||||||
t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: verify testing of signature validation and enforcement
|
|
||||||
func TestValidator(t *testing.T) {
|
|
||||||
|
|
||||||
// make fake timeProvider
|
|
||||||
timeProvider := &fakeTimeProvider{
|
|
||||||
currentTime: startTime.Time,
|
|
||||||
}
|
|
||||||
|
|
||||||
// signer containing private key. Alice will be the good girl
|
|
||||||
signer := newAliceSigner()
|
|
||||||
|
|
||||||
// fake signer for false results. Bob will play the bad guy today.
|
|
||||||
falseSigner := newBobSigner()
|
|
||||||
|
|
||||||
// set up sim timeProvider
|
|
||||||
rh, _, teardownTest, err := setupTest(timeProvider, signer)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer teardownTest()
|
|
||||||
|
|
||||||
// create new resource
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
metadata := &ResourceMetadata{
|
|
||||||
Name: resourceName,
|
|
||||||
Frequency: resourceFrequency,
|
|
||||||
StartTime: Timestamp{Time: timeProvider.Now().Time},
|
|
||||||
Owner: signer.Address(),
|
|
||||||
}
|
|
||||||
mr, err := NewCreateRequest(metadata)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
mr.Sign(signer)
|
|
||||||
|
|
||||||
err = rh.New(ctx, mr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Create resource fail: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunk with address
|
|
||||||
data := []byte("foo")
|
|
||||||
mr.SetData(data, false)
|
|
||||||
if err := mr.Sign(signer); err != nil {
|
|
||||||
t.Fatalf("sign fail: %v", err)
|
|
||||||
}
|
|
||||||
chunk, err := mr.SignedResourceUpdate.toChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !rh.Validate(chunk.Address(), chunk.Data()) {
|
|
||||||
t.Fatal("Chunk validator fail on update chunk")
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunk with address made from different publickey
|
|
||||||
if err := mr.Sign(falseSigner); err == nil {
|
|
||||||
t.Fatalf("Expected Sign to fail since we are using a different OwnerAddr: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// chunk with address made from different publickey
|
|
||||||
mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check
|
|
||||||
if err := mr.Sign(falseSigner); err != nil {
|
|
||||||
t.Fatalf("sign fail: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
chunk, err = mr.SignedResourceUpdate.toChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rh.Validate(chunk.Address(), chunk.Data()) {
|
|
||||||
t.Fatal("Chunk validator did not fail on update chunk with false address")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
metadata = &ResourceMetadata{
|
|
||||||
Name: resourceName,
|
|
||||||
StartTime: TimestampProvider.Now(),
|
|
||||||
Frequency: resourceFrequency,
|
|
||||||
Owner: signer.Address(),
|
|
||||||
}
|
|
||||||
chunk, _, err = metadata.newChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !rh.Validate(chunk.Address(), chunk.Data()) {
|
|
||||||
t.Fatal("Chunk validator fail on metadata chunk")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// tests that the content address validator correctly checks the data
|
|
||||||
// tests that resource update chunks are passed through content address validator
|
|
||||||
// there is some redundancy in this test as it also tests content addressed chunks,
|
|
||||||
// which should be evaluated as invalid chunks by this validator
|
|
||||||
func TestValidatorInStore(t *testing.T) {
|
|
||||||
|
|
||||||
// make fake timeProvider
|
|
||||||
TimestampProvider = &fakeTimeProvider{
|
|
||||||
currentTime: startTime.Time,
|
|
||||||
}
|
|
||||||
|
|
||||||
// signer containing private key
|
|
||||||
signer := newAliceSigner()
|
|
||||||
|
|
||||||
// set up localstore
|
|
||||||
datadir, err := ioutil.TempDir("", "storage-testresourcevalidator")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(datadir)
|
|
||||||
|
|
||||||
params := storage.NewDefaultLocalStoreParams()
|
|
||||||
params.Init(datadir)
|
|
||||||
store, err := storage.NewLocalStore(params, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set up resource handler and add it as a validator to the localstore
|
|
||||||
rhParams := &HandlerParams{}
|
|
||||||
rh := NewHandler(rhParams)
|
|
||||||
store.Validators = append(store.Validators, rh)
|
|
||||||
|
|
||||||
// create content addressed chunks, one good, one faulty
|
|
||||||
chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
|
|
||||||
goodChunk := chunks[0]
|
|
||||||
badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data())
|
|
||||||
|
|
||||||
metadata := &ResourceMetadata{
|
|
||||||
StartTime: startTime,
|
|
||||||
Name: "xyzzy",
|
|
||||||
Frequency: resourceFrequency,
|
|
||||||
Owner: signer.Address(),
|
|
||||||
}
|
|
||||||
|
|
||||||
rootChunk, metaHash, err := metadata.newChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// create a resource update chunk with correct publickey
|
|
||||||
updateLookup := UpdateLookup{
|
|
||||||
period: 42,
|
|
||||||
version: 1,
|
|
||||||
rootAddr: rootChunk.Address(),
|
|
||||||
}
|
|
||||||
|
|
||||||
updateAddr := updateLookup.UpdateAddr()
|
|
||||||
data := []byte("bar")
|
|
||||||
|
|
||||||
r := SignedResourceUpdate{
|
|
||||||
updateAddr: updateAddr,
|
|
||||||
resourceUpdate: resourceUpdate{
|
|
||||||
updateHeader: updateHeader{
|
|
||||||
UpdateLookup: updateLookup,
|
|
||||||
metaHash: metaHash,
|
|
||||||
},
|
|
||||||
data: data,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Sign(signer)
|
|
||||||
|
|
||||||
uglyChunk, err := r.toChunk()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// put the chunks in the store and check their error status
|
|
||||||
err = store.Put(context.Background(), goodChunk)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
|
|
||||||
}
|
|
||||||
err = store.Put(context.Background(), badChunk)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
|
|
||||||
}
|
|
||||||
err = store.Put(context.Background(), uglyChunk)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fast-forward clock
|
|
||||||
func fwdClock(count int, timeProvider *fakeTimeProvider) {
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
timeProvider.Tick()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// create rpc and resourcehandler
|
|
||||||
func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) {
|
|
||||||
|
|
||||||
var fsClean func()
|
|
||||||
var rpcClean func()
|
|
||||||
cleanF = func() {
|
|
||||||
if fsClean != nil {
|
|
||||||
fsClean()
|
|
||||||
}
|
|
||||||
if rpcClean != nil {
|
|
||||||
rpcClean()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// temp datadir
|
|
||||||
datadir, err = ioutil.TempDir("", "rh")
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", nil, err
|
|
||||||
}
|
|
||||||
fsClean = func() {
|
|
||||||
os.RemoveAll(datadir)
|
|
||||||
}
|
|
||||||
|
|
||||||
TimestampProvider = timeProvider
|
|
||||||
rhparams := &HandlerParams{}
|
|
||||||
rh, err = NewTestHandler(datadir, rhparams)
|
|
||||||
return rh, datadir, cleanF, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAliceSigner() *GenericSigner {
|
|
||||||
privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
|
|
||||||
return NewGenericSigner(privKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBobSigner() *GenericSigner {
|
|
||||||
privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca")
|
|
||||||
return NewGenericSigner(privKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newCharlieSigner() *GenericSigner {
|
|
||||||
privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca")
|
|
||||||
return NewGenericSigner(privKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
|
|
||||||
chunk, err := rh.chunkStore.Get(context.TODO(), addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var r SignedResourceUpdate
|
|
||||||
if err := r.fromChunk(addr, chunk.Data()); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r.data, nil
|
|
||||||
}
|
|
@@ -1,181 +0,0 @@
|
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package mru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"hash"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource
|
|
||||||
type SignedResourceUpdate struct {
|
|
||||||
resourceUpdate // actual content that will be put on the chunk, less signature
|
|
||||||
signature *Signature
|
|
||||||
updateAddr storage.Address // resulting chunk address for the update (not serialized, for internal use)
|
|
||||||
binaryData []byte // resulting serialized data (not serialized, for efficiency/internal use)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify checks that signatures are valid and that the signer owns the resource to be updated
|
|
||||||
func (r *SignedResourceUpdate) Verify() (err error) {
|
|
||||||
if len(r.data) == 0 {
|
|
||||||
return NewError(ErrInvalidValue, "Update does not contain data")
|
|
||||||
}
|
|
||||||
if r.signature == nil {
|
|
||||||
return NewError(ErrInvalidSignature, "Missing signature field")
|
|
||||||
}
|
|
||||||
|
|
||||||
digest, err := r.GetDigest()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// get the address of the signer (which also checks that it's a valid signature)
|
|
||||||
ownerAddr, err := getOwner(digest, *r.signature)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(r.updateAddr, r.UpdateAddr()) {
|
|
||||||
return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if who signed the resource update really owns the resource
|
|
||||||
if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) {
|
|
||||||
return NewErrorf(ErrUnauthorized, "signature is valid but signer does not own the resource: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign computes the digest of the resource update, signs it and attaches the resulting signature
|
|
||||||
func (r *SignedResourceUpdate) Sign(signer Signer) error {
|
|
||||||
|
|
||||||
r.binaryData = nil //invalidate serialized data
|
|
||||||
digest, err := r.GetDigest() // computes digest and serializes into .binaryData
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
signature, err := signer.Sign(digest)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Although the Signer interface returns the public address of the signer,
|
|
||||||
// recover it from the signature to see if they match
|
|
||||||
ownerAddress, err := getOwner(digest, signature)
|
|
||||||
if err != nil {
|
|
||||||
return NewError(ErrInvalidSignature, "Error verifying signature")
|
|
||||||
}
|
|
||||||
|
|
||||||
if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign!
|
|
||||||
return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr")
|
|
||||||
}
|
|
||||||
|
|
||||||
r.signature = &signature
|
|
||||||
r.updateAddr = r.UpdateAddr()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// create an update chunk.
|
|
||||||
func (r *SignedResourceUpdate) toChunk() (storage.Chunk, error) {
|
|
||||||
|
|
||||||
// Check that the update is signed and serialized
|
|
||||||
// For efficiency, data is serialized during signing and cached in
|
|
||||||
// the binaryData field when computing the signature digest in .getDigest()
|
|
||||||
if r.signature == nil || r.binaryData == nil {
|
|
||||||
return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.")
|
|
||||||
}
|
|
||||||
|
|
||||||
resourceUpdateLength := r.resourceUpdate.binaryLength()
|
|
||||||
// signature is the last item in the chunk data
|
|
||||||
copy(r.binaryData[resourceUpdateLength:], r.signature[:])
|
|
||||||
|
|
||||||
chunk := storage.NewChunk(r.updateAddr, r.binaryData)
|
|
||||||
return chunk, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
|
|
||||||
func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
|
|
||||||
// for update chunk layout see SignedResourceUpdate definition
|
|
||||||
|
|
||||||
//deserialize the resource update portion
|
|
||||||
if err := r.resourceUpdate.binaryGet(chunkdata); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract the signature
|
|
||||||
var signature *Signature
|
|
||||||
cursor := r.resourceUpdate.binaryLength()
|
|
||||||
sigdata := chunkdata[cursor : cursor+signatureLength]
|
|
||||||
if len(sigdata) > 0 {
|
|
||||||
signature = &Signature{}
|
|
||||||
copy(signature[:], sigdata)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.signature = signature
|
|
||||||
r.updateAddr = updateAddr
|
|
||||||
r.binaryData = chunkdata
|
|
||||||
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash)
|
|
||||||
// the serialized payload is cached in .binaryData
|
|
||||||
func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) {
|
|
||||||
hasher := hashPool.Get().(hash.Hash)
|
|
||||||
defer hashPool.Put(hasher)
|
|
||||||
hasher.Reset()
|
|
||||||
dataLength := r.resourceUpdate.binaryLength()
|
|
||||||
if r.binaryData == nil {
|
|
||||||
r.binaryData = make([]byte, dataLength+signatureLength)
|
|
||||||
if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hasher.Write(r.binaryData[:dataLength]) //everything except the signature.
|
|
||||||
|
|
||||||
return common.BytesToHash(hasher.Sum(nil)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getOwner extracts the address of the resource update signer
|
|
||||||
func getOwner(digest common.Hash, signature Signature) (common.Address, error) {
|
|
||||||
pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
|
|
||||||
if err != nil {
|
|
||||||
return common.Address{}, err
|
|
||||||
}
|
|
||||||
return crypto.PubkeyToAddress(*pub), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyOwner checks that the signer of the update actually owns the resource
|
|
||||||
// H(ownerAddr, metaHash) is computed. If it matches the rootAddr the update chunk is claiming
|
|
||||||
// to update, it is proven that signer of the resource update owns the resource.
|
|
||||||
// See metadataHash in metadata.go for a more detailed explanation
|
|
||||||
func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool {
|
|
||||||
hasher := hashPool.Get().(hash.Hash)
|
|
||||||
defer hashPool.Put(hasher)
|
|
||||||
hasher.Reset()
|
|
||||||
hasher.Write(metaHash)
|
|
||||||
hasher.Write(ownerAddr.Bytes())
|
|
||||||
rootAddr2 := hasher.Sum(nil)
|
|
||||||
return bytes.Equal(rootAddr2, rootAddr)
|
|
||||||
}
|
|
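A minimal sketch of the ownership proof verifyOwner implements, assuming Keccak256 as the hasher and arbitrary example inputs: the resource's rootAddr is the hash of the metadata hash concatenated with the owner address, so recomputing that hash from the update's claimed owner must reproduce rootAddr.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	owner := common.HexToAddress("0x0000000000000000000000000000000000000001")
	metaHash := crypto.Keccak256([]byte("serialized resource metadata"))

	// rootAddr as the resource creator would have derived it at creation time
	rootAddr := crypto.Keccak256(metaHash, owner.Bytes())

	// the check an update chunk must pass: recompute H(metaHash || ownerAddr)
	// from the claimed owner and compare it against the resource's rootAddr
	fmt.Println(bytes.Equal(crypto.Keccak256(metaHash, owner.Bytes()), rootAddr)) // true
}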
@@ -18,15 +18,16 @@ package mru
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TimestampProvider sets the time source of the mru package
|
// TimestampProvider sets the time source of the mru package
|
||||||
var TimestampProvider timestampProvider = NewDefaultTimestampProvider()
|
var TimestampProvider timestampProvider = NewDefaultTimestampProvider()
|
||||||
|
|
||||||
// Encodes a point in time as a Unix epoch
|
// Timestamp encodes a point in time as a Unix epoch
|
||||||
type Timestamp struct {
|
type Timestamp struct {
|
||||||
Time uint64 // Unix epoch timestamp, in seconds
|
Time uint64 `json:"time"` // Unix epoch timestamp, in seconds
|
||||||
}
|
}
|
||||||
|
|
||||||
// 8 bytes uint64 Time
|
// 8 bytes uint64 Time
|
||||||
@@ -55,6 +56,18 @@ func (t *Timestamp) binaryPut(data []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaller interface
|
||||||
|
func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||||
|
return json.Unmarshal(data, &t.Time)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON implements the json.Marshaller interface
|
||||||
|
func (t *Timestamp) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(t.Time)
|
||||||
|
}
|
||||||
|
|
||||||
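A minimal usage sketch of the JSON marshallers added above: with these methods a Timestamp encodes to and from a bare number rather than an object. The struct is re-declared here only to keep the example self-contained.

package main

import (
	"encoding/json"
	"fmt"
)

// Timestamp mirrors the mru type above for the purpose of this sketch.
type Timestamp struct {
	Time uint64 `json:"time"`
}

func (t *Timestamp) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &t.Time) }

func (t *Timestamp) MarshalJSON() ([]byte, error) { return json.Marshal(t.Time) }

func main() {
	b, _ := json.Marshal(&Timestamp{Time: 4200})
	fmt.Println(string(b)) // prints: 4200

	var ts Timestamp
	_ = json.Unmarshal(b, &ts)
	fmt.Println(ts.Time) // prints: 4200
}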
|
// DefaultTimestampProvider is a TimestampProvider that uses system time
|
||||||
|
// as time source
|
||||||
type DefaultTimestampProvider struct {
|
type DefaultTimestampProvider struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
swarm/storage/mru/topic.go (new file, 105 lines)
@@ -0,0 +1,105 @@
|
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TopicLength establishes the max length of a topic string
|
||||||
|
const TopicLength = storage.AddressLength
|
||||||
|
|
||||||
|
// Topic represents what a resource talks about
|
||||||
|
type Topic [TopicLength]byte
|
||||||
|
|
||||||
|
// ErrTopicTooLong is returned when creating a topic with a name/related content too long
|
||||||
|
var ErrTopicTooLong = fmt.Errorf("Topic is too long. Max length is %d", TopicLength)
|
||||||
|
|
||||||
|
// NewTopic creates a new topic from a provided name and "related content" byte array,
|
||||||
|
// merging the two together.
|
||||||
|
// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned
|
||||||
|
// name can be an empty string
|
||||||
|
// relatedContent can be nil
|
||||||
|
func NewTopic(name string, relatedContent []byte) (topic Topic, err error) {
|
||||||
|
if relatedContent != nil {
|
||||||
|
contentLength := len(relatedContent)
|
||||||
|
if contentLength > TopicLength {
|
||||||
|
contentLength = TopicLength
|
||||||
|
err = ErrTopicTooLong
|
||||||
|
}
|
||||||
|
copy(topic[:], relatedContent[:contentLength])
|
||||||
|
}
|
||||||
|
nameBytes := []byte(name)
|
||||||
|
nameLength := len(nameBytes)
|
||||||
|
if nameLength > TopicLength {
|
||||||
|
nameLength = TopicLength
|
||||||
|
err = ErrTopicTooLong
|
||||||
|
}
|
||||||
|
bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength])
|
||||||
|
return topic, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hex will return the topic encoded as a hex string
|
||||||
|
func (t *Topic) Hex() string {
|
||||||
|
return hexutil.Encode(t[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromHex will parse a hex string into this Topic instance
|
||||||
|
func (t *Topic) FromHex(hex string) error {
|
||||||
|
bytes, err := hexutil.Decode(hex)
|
||||||
|
if err != nil || len(bytes) != len(t) {
|
||||||
|
return NewErrorf(ErrInvalidValue, "Cannot decode topic")
|
||||||
|
}
|
||||||
|
copy(t[:], bytes)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name will try to extract the resource name out of the topic
|
||||||
|
func (t *Topic) Name(relatedContent []byte) string {
|
||||||
|
nameBytes := *t
|
||||||
|
if relatedContent != nil {
|
||||||
|
contentLength := len(relatedContent)
|
||||||
|
if contentLength > TopicLength {
|
||||||
|
contentLength = TopicLength
|
||||||
|
}
|
||||||
|
bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength])
|
||||||
|
}
|
||||||
|
z := bytes.IndexByte(nameBytes[:], 0)
|
||||||
|
if z < 0 {
|
||||||
|
z = TopicLength
|
||||||
|
}
|
||||||
|
return string(nameBytes[:z])
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaller interface
|
||||||
|
func (t *Topic) UnmarshalJSON(data []byte) error {
|
||||||
|
var hex string
|
||||||
|
json.Unmarshal(data, &hex)
|
||||||
|
return t.FromHex(hex)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON implements the json.Marshaller interface
|
||||||
|
func (t *Topic) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(t.Hex())
|
||||||
|
}
|
swarm/storage/mru/topic_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
|
|||||||
|
package mru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTopic(t *testing.T) {
|
||||||
|
related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
|
||||||
|
topicName := "test-topic"
|
||||||
|
topic, _ := NewTopic(topicName, related)
|
||||||
|
hex := topic.Hex()
|
||||||
|
expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"
|
||||||
|
if hex != expectedHex {
|
||||||
|
t.Fatalf("Expected %s, got %s", expectedHex, hex)
|
||||||
|
}
|
||||||
|
|
||||||
|
var topic2 Topic
|
||||||
|
topic2.FromHex(hex)
|
||||||
|
if topic2 != topic {
|
||||||
|
t.Fatal("Expected recovered topic to be equal to original one")
|
||||||
|
}
|
||||||
|
|
||||||
|
if topic2.Name(related) != topicName {
|
||||||
|
t.Fatal("Retrieved name does not match")
|
||||||
|
}
|
||||||
|
|
||||||
|
bytes, err := topic2.MarshalJSON()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"`
|
||||||
|
equal, err := areEqualJSON(expectedJSON, string(bytes))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !equal {
|
||||||
|
t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
err = topic2.UnmarshalJSON(bytes)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if topic2 != topic {
|
||||||
|
t.Fatal("Expected recovered topic to be equal to original one")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@@ -17,36 +17,35 @@
|
|||||||
package mru
|
package mru
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"fmt"
|
||||||
"errors"
|
"strconv"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||||
"github.com/ethereum/go-ethereum/swarm/log"
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/multihash"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// resourceUpdate encapsulates the information sent as part of a resource update
|
// ProtocolVersion defines the current version of the protocol that will be included in each update message
|
||||||
type resourceUpdate struct {
|
const ProtocolVersion uint8 = 0
|
||||||
updateHeader // metainformation about this resource update
|
|
||||||
data []byte // actual data payload
|
const headerLength = 8
|
||||||
|
|
||||||
|
// Header defines a update message header including a protocol version byte
|
||||||
|
type Header struct {
|
||||||
|
Version uint8 // Protocol version
|
||||||
|
Padding [headerLength - 1]uint8 // reserved for future use
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update chunk layout
|
// ResourceUpdate encapsulates the information sent as part of a resource update
|
||||||
// Prefix:
|
type ResourceUpdate struct {
|
||||||
// 2 bytes updateHeaderLength
|
Header Header //
|
||||||
// 2 bytes data length
|
ID // Resource update identifying information
|
||||||
const chunkPrefixLength = 2 + 2
|
data []byte // actual data payload
|
||||||
|
}
|
||||||
|
|
||||||
// Header: (see updateHeader)
|
const minimumUpdateDataLength = idLength + headerLength + 1
|
||||||
// Data:
|
const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength
|
||||||
// data (datalength bytes)
|
|
||||||
//
|
|
||||||
// Minimum size is Header + 1 (minimum data length, enforced)
|
|
||||||
const minimumUpdateDataLength = updateHeaderLength + 1
|
|
||||||
const maxUpdateDataLength = chunk.DefaultSize - signatureLength - updateHeaderLength - chunkPrefixLength
|
|
||||||
|
|
||||||
// binaryPut serializes the resource update information into the given slice
|
// binaryPut serializes the resource update information into the given slice
|
||||||
func (r *resourceUpdate) binaryPut(serializedData []byte) error {
|
func (r *ResourceUpdate) binaryPut(serializedData []byte) error {
|
||||||
datalength := len(r.data)
|
datalength := len(r.data)
|
||||||
if datalength == 0 {
|
if datalength == 0 {
|
||||||
return NewError(ErrInvalidValue, "cannot update a resource with no data")
|
return NewError(ErrInvalidValue, "cannot update a resource with no data")
|
||||||
@@ -60,26 +59,17 @@ func (r *resourceUpdate) binaryPut(serializedData []byte) error {
         return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. Expected %d bytes", r.binaryLength())
     }
 
-    if r.multihash {
-        if _, _, err := multihash.GetMultihashLength(r.data); err != nil {
-            return NewError(ErrInvalidValue, "Invalid multihash")
-        }
-    }
-
-    // Add prefix: updateHeaderLength and actual data length
-    cursor := 0
-    binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength))
-    cursor += 2
-
-    // data length
-    binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength))
-    cursor += 2
-
-    // serialize header (see updateHeader)
-    if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
+    var cursor int
+    // serialize Header
+    serializedData[cursor] = r.Header.Version
+    copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1])
+    cursor += headerLength
+
+    // serialize ID
+    if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil {
         return err
     }
-    cursor += updateHeaderLength
+    cursor += idLength
 
     // add the data
     copy(serializedData[cursor:], r.data)
@@ -89,60 +79,54 @@ func (r *resourceUpdate) binaryPut(serializedData []byte) error {
 }
 
 // binaryLength returns the expected number of bytes this structure will take to encode
-func (r *resourceUpdate) binaryLength() int {
-    return chunkPrefixLength + updateHeaderLength + len(r.data)
+func (r *ResourceUpdate) binaryLength() int {
+    return idLength + headerLength + len(r.data)
 }
 
 // binaryGet populates this instance from the information contained in the passed byte slice
-func (r *resourceUpdate) binaryGet(serializedData []byte) error {
+func (r *ResourceUpdate) binaryGet(serializedData []byte) error {
     if len(serializedData) < minimumUpdateDataLength {
         return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength)
     }
-    cursor := 0
-    declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])
-    if declaredHeaderlength != updateHeaderLength {
-        return NewErrorf(ErrCorruptData, "Invalid header length. Expected %d, got %d", updateHeaderLength, declaredHeaderlength)
-    }
-
-    cursor += 2
-    datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]))
-    cursor += 2
-
-    if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) {
-        return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size")
-    }
+    dataLength := len(serializedData) - idLength - headerLength
 
     // at this point we can be satisfied that we have the correct data length to read
-    if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
+
+    var cursor int
+
+    // deserialize Header
+    r.Header.Version = serializedData[cursor]                                      // extract the protocol version
+    copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding
+    cursor += headerLength
+
+    if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil {
         return err
     }
-    cursor += updateHeaderLength
+    cursor += idLength
 
-    data := serializedData[cursor : cursor+datalength]
-    cursor += datalength
-
-    // if multihash content is indicated we check the validity of the multihash
-    if r.updateHeader.multihash {
-        mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data)
-        if err != nil {
-            log.Error("multihash parse error", "err", err)
-            return err
-        }
-        if datalength != mhLength+mhHeaderLength {
-            log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength)
-            return errors.New("Corrupt multihash data")
-        }
-    }
+    data := serializedData[cursor : cursor+dataLength]
+    cursor += dataLength
 
     // now that all checks have passed, copy data into structure
-    r.data = make([]byte, datalength)
+    r.data = make([]byte, dataLength)
     copy(r.data, data)
 
     return nil
 }
 
-// Multihash specifies whether the resource data should be interpreted as multihash
-func (r *resourceUpdate) Multihash() bool {
-    return r.multihash
+// FromValues deserializes this instance from a string key-value store
+// useful to parse query strings
+func (r *ResourceUpdate) FromValues(values Values, data []byte) error {
+    r.data = data
+    version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32)
+    r.Header.Version = uint8(version)
+    return r.ID.FromValues(values)
+}
+
+// AppendValues serializes this structure into the provided string key-value store
+// useful to build query strings
+func (r *ResourceUpdate) AppendValues(values Values) []byte {
+    r.ID.AppendValues(values)
+    values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version))
+    return r.data
 }
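The serialized form implied by the new constants is simply the 8-byte Header (version plus padding), then the ID, then the payload; the old 4-byte length prefix and the metadata hash are gone. As an illustration, the test vector used further below can be sliced along those boundaries; the 32/20/8 split of the ID into topic, user address and lookup epoch is inferred from the view and topic tests in this same change:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    // Serialized test update from update_test.go below:
    // 8-byte Header | 32-byte topic | 20-byte user address | 8-byte epoch | data
    blob, _ := hexutil.Decode("0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f")

    header, rest := blob[:8], blob[8:]
    topic, rest := rest[:32], rest[32:]
    user, rest := rest[:20], rest[20:]
    epoch, data := rest[:8], rest[8:]

    fmt.Printf("header (version+padding): %x\n", header) // all zeroes: ProtocolVersion 0
    fmt.Printf("topic:                    %q\n", topic)  // "world news report, every hour" padded
    fmt.Printf("user:                     0x%x\n", user)
    fmt.Printf("epoch:                    %x\n", epoch)
    fmt.Printf("data:                     %q\n", data)
}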
@@ -1,72 +1,50 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package mru
 
 import (
-    "bytes"
     "testing"
 )
 
-const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f"
-const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20"
-
-func getTestResourceUpdate() *resourceUpdate {
-    return &resourceUpdate{
-        updateHeader: *getTestUpdateHeader(false),
-        data:         []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"),
-    }
-}
-
-func getTestResourceUpdateMultihash() *resourceUpdate {
-    return &resourceUpdate{
-        updateHeader: *getTestUpdateHeader(true),
-        data:         []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32},
-    }
-}
-
-func compareResourceUpdate(a, b *resourceUpdate) bool {
-    return compareUpdateHeader(&a.updateHeader, &b.updateHeader) &&
-        bytes.Equal(a.data, b.data)
-}
-
-func TestResourceUpdateSerializer(t *testing.T) {
-    var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation
-    update := getTestResourceUpdate()
-    serializedUpdate := make([]byte, serializedUpdateLength)
-    if err := update.binaryPut(serializedUpdate); err != nil {
-        t.Fatal(err)
-    }
-    compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex)
-
-    // Test fail if update does not contain data
-    update.data = nil
-    if err := update.binaryPut(serializedUpdate); err == nil {
-        t.Fatal("Expected resourceUpdate.binaryPut to fail since update does not contain data")
-    }
+func getTestResourceUpdate() *ResourceUpdate {
+    return &ResourceUpdate{
+        ID:   *getTestID(),
+        data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"),
+    }
+}
+
+func TestResourceUpdateSerializer(t *testing.T) {
+    testBinarySerializerRecovery(t, getTestResourceUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f")
+}
+
+func TestResourceUpdateLengthCheck(t *testing.T) {
+    testBinarySerializerLengthCheck(t, getTestResourceUpdate())
 
     // Test fail if update is too big
-    update.data = make([]byte, 10000)
-    if err := update.binaryPut(serializedUpdate); err == nil {
+    update := getTestResourceUpdate()
+    update.data = make([]byte, maxUpdateDataLength+100)
+    serialized := make([]byte, update.binaryLength())
+    if err := update.binaryPut(serialized); err == nil {
         t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big")
     }
 
-    // Test fail if passed slice is not of the exact size required for this update
-    update.data = make([]byte, 1)
-    if err := update.binaryPut(serializedUpdate); err == nil {
-        t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size")
+    // test fail if data is empty or nil
+    update.data = nil
+    serialized = make([]byte, update.binaryLength())
+    if err := update.binaryPut(serialized); err == nil {
+        t.Fatal("Expected resourceUpdate.binaryPut to fail since data is empty")
     }
-
-    // Test serializing a multihash update
-    var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation
-    update = getTestResourceUpdateMultihash()
-    serializedUpdate = make([]byte, serializedUpdateMultihashLength)
-    if err := update.binaryPut(serializedUpdate); err != nil {
-        t.Fatal(err)
-    }
-    compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex)
-
-    // mess with the multihash to test it fails with a wrong multihash error
-    update.data[1] = 79
-    if err := update.binaryPut(serializedUpdate); err == nil {
-        t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash")
-    }
-
 }
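testBinarySerializerRecovery and testBinarySerializerLengthCheck are shared helpers introduced elsewhere in this change (the commit notes mention a binarySerializer interface and test tools); their bodies are not shown in this hunk. Purely as a hedged sketch of what such helpers could look like, assuming an internal interface over binaryPut/binaryGet/binaryLength:

package mru

import (
    "testing"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

// binarySerializer is assumed here; the real definition lives elsewhere in the package.
type binarySerializer interface {
    binaryPut(serializedData []byte) error
    binaryLength() int
    binaryGet(serializedData []byte) error
}

// testBinarySerializerRecovery (sketch): serialize the value and compare it
// against a known-good hex string.
func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) {
    t.Helper()
    serialized := make([]byte, bin.binaryLength())
    if err := bin.binaryPut(serialized); err != nil {
        t.Fatal(err)
    }
    if got := hexutil.Encode(serialized); got != expectedHex {
        t.Fatalf("expected %s, got %s", expectedHex, got)
    }
    // The real helper presumably also round-trips the bytes through binaryGet.
}

// testBinarySerializerLengthCheck (sketch): binaryPut must reject a slice
// that is not exactly binaryLength() bytes long.
func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) {
    t.Helper()
    if err := bin.binaryPut(make([]byte, bin.binaryLength()+1)); err == nil {
        t.Fatal("expected binaryPut to fail on a slice of the wrong size")
    }
}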
@@ -1,88 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
    "github.com/ethereum/go-ethereum/swarm/storage"
)

// updateHeader models the non-payload components of a Resource Update
type updateHeader struct {
    UpdateLookup        // UpdateLookup contains the information required to locate this resource (components of the search key used to find it)
    multihash    bool   // Whether the data in this Resource Update should be interpreted as multihash
    metaHash     []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownerhsip of the resource.
}

const metaHashLength = storage.AddressLength

// updateLookupLength bytes
// 1 byte flags (multihash bool for now)
// 32 bytes metaHash
const updateHeaderLength = updateLookupLength + 1 + metaHashLength

// binaryPut serializes the resource header information into the given slice
func (h *updateHeader) binaryPut(serializedData []byte) error {
    if len(serializedData) != updateHeaderLength {
        return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
    }
    if len(h.metaHash) != metaHashLength {
        return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set")
    }
    if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil {
        return err
    }
    cursor := updateLookupLength
    copy(serializedData[cursor:], h.metaHash[:metaHashLength])
    cursor += metaHashLength

    var flags byte
    if h.multihash {
        flags |= 0x01
    }

    serializedData[cursor] = flags
    cursor++

    return nil
}

// binaryLength returns the expected size of this structure when serialized
func (h *updateHeader) binaryLength() int {
    return updateHeaderLength
}

// binaryGet restores the current updateHeader instance from the information contained in the passed slice
func (h *updateHeader) binaryGet(serializedData []byte) error {
    if len(serializedData) != updateHeaderLength {
        return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
    }

    if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil {
        return err
    }
    cursor := updateLookupLength
    h.metaHash = make([]byte, metaHashLength)
    copy(h.metaHash[:storage.AddressLength], serializedData[cursor:cursor+storage.AddressLength])
    cursor += metaHashLength

    flags := serializedData[cursor]
    cursor++

    h.multihash = flags&0x01 != 0

    return nil
}
@@ -1,64 +0,0 @@
package mru

import (
    "bytes"
    "testing"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001"

func getTestUpdateHeader(multihash bool) (header *updateHeader) {
    _, metaHash, _, _ := getTestMetadata().serializeAndHash()
    return &updateHeader{
        UpdateLookup: *getTestUpdateLookup(),
        multihash:    multihash,
        metaHash:     metaHash,
    }
}

func compareUpdateHeader(a, b *updateHeader) bool {
    return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) &&
        a.multihash == b.multihash &&
        bytes.Equal(a.metaHash, b.metaHash)
}

func TestUpdateHeaderSerializer(t *testing.T) {
    header := getTestUpdateHeader(true)
    serializedHeader := make([]byte, updateHeaderLength)
    if err := header.binaryPut(serializedHeader); err != nil {
        t.Fatal(err)
    }
    compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex)

    // trigger incorrect slice length error passing a slice that is 1 byte too big
    if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil {
        t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length")
    }

    // trigger invalid metaHash error
    header.metaHash = nil
    if err := header.binaryPut(serializedHeader); err == nil {
        t.Fatal("Expected updateHeader.binaryPut to fail metaHash is of incorrect length")
    }
}

func TestUpdateHeaderDeserializer(t *testing.T) {
    originalUpdate := getTestUpdateHeader(true)
    serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex)
    var retrievedUpdate updateHeader
    if err := retrievedUpdate.binaryGet(serializedData); err != nil {
        t.Fatal(err)
    }
    if !compareUpdateHeader(originalUpdate, &retrievedUpdate) {
        t.Fatalf("Expected deserialized structure to equal the original")
    }

    // mess with source slice to test length checks
    serializedData = []byte{1, 2, 3}
    if err := retrievedUpdate.binaryGet(serializedData); err == nil {
        t.Fatal("Expected retrievedUpdate.binaryGet, since passed slice is too small")
    }

}
swarm/storage/mru/view.go (new file, 125 lines)
@@ -0,0 +1,125 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
    "hash"
    "unsafe"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/swarm/storage"
)

// View represents a particular user's view of a resource
type View struct {
    Topic Topic          `json:"topic"`
    User  common.Address `json:"user"`
}

// View layout:
// TopicLength bytes
// userAddr common.AddressLength bytes
const viewLength = TopicLength + common.AddressLength

// mapKey calculates a unique id for this view for the cache map in `Handler`
func (u *View) mapKey() uint64 {
    serializedData := make([]byte, viewLength)
    u.binaryPut(serializedData)
    hasher := hashPool.Get().(hash.Hash)
    defer hashPool.Put(hasher)
    hasher.Reset()
    hasher.Write(serializedData)
    hash := hasher.Sum(nil)
    return *(*uint64)(unsafe.Pointer(&hash[0]))
}

// binaryPut serializes this View instance into the provided slice
func (u *View) binaryPut(serializedData []byte) error {
    if len(serializedData) != viewLength {
        return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", viewLength, len(serializedData))
    }
    var cursor int
    copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength])
    cursor += TopicLength

    copy(serializedData[cursor:cursor+common.AddressLength], u.User[:])
    cursor += common.AddressLength

    return nil
}

// binaryLength returns the expected size of this structure when serialized
func (u *View) binaryLength() int {
    return viewLength
}

// binaryGet restores the current instance from the information contained in the passed slice
func (u *View) binaryGet(serializedData []byte) error {
    if len(serializedData) != viewLength {
        return NewErrorf(ErrInvalidValue, "Incorrect slice size to read View. Expected %d, got %d", viewLength, len(serializedData))
    }

    var cursor int
    copy(u.Topic[:], serializedData[cursor:cursor+TopicLength])
    cursor += TopicLength

    copy(u.User[:], serializedData[cursor:cursor+common.AddressLength])
    cursor += common.AddressLength

    return nil
}

// Hex serializes the View to a hex string
func (u *View) Hex() string {
    serializedData := make([]byte, viewLength)
    u.binaryPut(serializedData)
    return hexutil.Encode(serializedData)
}

// FromValues deserializes this instance from a string key-value store
// useful to parse query strings
func (u *View) FromValues(values Values) (err error) {
    topic := values.Get("topic")
    if topic != "" {
        if err := u.Topic.FromHex(values.Get("topic")); err != nil {
            return err
        }
    } else { // see if the user set name and relatedcontent
        name := values.Get("name")
        relatedContent, _ := hexutil.Decode(values.Get("relatedcontent"))
        if len(relatedContent) > 0 {
            if len(relatedContent) < storage.AddressLength {
                return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength)
            }
            relatedContent = relatedContent[:storage.AddressLength]
        }
        u.Topic, err = NewTopic(name, relatedContent)
        if err != nil {
            return err
        }
    }
    u.User = common.HexToAddress(values.Get("user"))
    return nil
}

// AppendValues serializes this structure into the provided string key-value store
// useful to build query strings
func (u *View) AppendValues(values Values) {
    values.Set("topic", u.Topic.Hex())
    values.Set("user", u.User.Hex())
}
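A View is identified either by its 52-byte binary form (32-byte topic followed by the 20-byte user address, as binaryPut lays it out) or, in query strings, by the topic/user (or name/relatedcontent/user) keys that FromValues and AppendValues read and write. A sketch of both, reusing the values from the tests in this change; the import path and the url.Values stand-in are assumptions made only for this illustration:

package main

import (
    "fmt"
    "net/url"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
    topic, _ := mru.NewTopic("world news report, every hour", nil)
    view := mru.View{
        Topic: topic,
        User:  common.HexToAddress("0x876a8936a7cd0b79ef0735ad0896c1afe278781c"),
    }

    // Binary/hex form: 32-byte topic followed by the 20-byte address.
    fmt.Println(view.Hex())
    // 0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c

    // Query-string form: the same view expressed with the keys FromValues parses.
    // (net/url.Values is used here only to illustrate the Get/Set shape.)
    q := url.Values{}
    q.Set("topic", topic.Hex())
    q.Set("user", view.User.Hex())
    fmt.Println(q.Encode())
}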
swarm/storage/mru/view_test.go (new file, 36 lines)
@@ -0,0 +1,36 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package mru

import (
    "testing"
)

func getTestView() *View {
    topic, _ := NewTopic("world news report, every hour", nil)
    return &View{
        Topic: topic,
        User:  newCharlieSigner().Address(),
    }
}

func TestViewSerializerDeserializer(t *testing.T) {
    testBinarySerializerRecovery(t, getTestView(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c")
}

func TestMetadataSerializerLengthCheck(t *testing.T) {
    testBinarySerializerLengthCheck(t, getTestView())
}
@@ -32,19 +32,6 @@ type TestServer interface {
 	ServeHTTP(http.ResponseWriter, *http.Request)
 }
 
-// simulated timeProvider
-type fakeTimeProvider struct {
-    currentTime uint64
-}
-
-func (f *fakeTimeProvider) Tick() {
-    f.currentTime++
-}
-
-func (f *fakeTimeProvider) Now() mru.Timestamp {
-    return mru.Timestamp{Time: f.currentTime}
-}
-
 func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer {
     dir, err := ioutil.TempDir("", "swarm-storage-test")
     if err != nil {
@@ -67,10 +54,6 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer {
         t.Fatal(err)
     }
 
-    fakeTimeProvider := &fakeTimeProvider{
-        currentTime: 42,
-    }
-    mru.TimestampProvider = fakeTimeProvider
     rhparams := &mru.HandlerParams{}
     rh, err := mru.NewTestHandler(resourceDir, rhparams)
     if err != nil {
@@ -79,34 +62,36 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer {
 
     a := api.NewAPI(fileStore, resolver, rh.Handler, nil)
     srv := httptest.NewServer(serverFunc(a))
-    return &TestSwarmServer{
+    tss := &TestSwarmServer{
         Server:    srv,
         FileStore: fileStore,
         dir:       dir,
         Hasher:    storage.MakeHashFunc(storage.DefaultHash)(),
-        timestampProvider: fakeTimeProvider,
         cleanup: func() {
             srv.Close()
             rh.Close()
             os.RemoveAll(dir)
             os.RemoveAll(resourceDir)
         },
+        CurrentTime: 42,
     }
+    mru.TimestampProvider = tss
+    return tss
 }
 
 type TestSwarmServer struct {
     *httptest.Server
     Hasher    storage.SwarmHash
     FileStore *storage.FileStore
     dir       string
     cleanup   func()
-    timestampProvider *fakeTimeProvider
+    CurrentTime uint64
 }
 
 func (t *TestSwarmServer) Close() {
     t.cleanup()
 }
 
-func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp {
-    return t.timestampProvider.Now()
+func (t *TestSwarmServer) Now() mru.Timestamp {
+    return mru.Timestamp{Time: t.CurrentTime}
 }
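With fakeTimeProvider removed, the test server itself acts as the mru timestamp provider: CurrentTime is a public field and Now() simply wraps it, so tests can advance resource time by assigning to the field. A minimal standalone sketch of the pattern; the names below are illustrative stand-ins, not the real package's types:

package main

import "fmt"

// Timestamp mirrors the shape of mru.Timestamp for this sketch.
type Timestamp struct {
    Time uint64
}

// testServer stands in for TestSwarmServer: the test controls time by writing
// CurrentTime directly, and the code under test reads it through Now().
type testServer struct {
    CurrentTime uint64
}

func (s *testServer) Now() Timestamp {
    return Timestamp{Time: s.CurrentTime}
}

func main() {
    srv := &testServer{CurrentTime: 42} // same starting value as in the diff above
    fmt.Println(srv.Now().Time)         // 42

    srv.CurrentTime = 100 // simulate time passing between resource updates
    fmt.Println(srv.Now().Time) // 100
}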