swarm/storage/mru: Renamed all comments to Feeds
parent f1e86ad9cf
commit b35622cf3c
@@ -30,7 +30,7 @@ const (
 	defaultRetrieveTimeout = 100 * time.Millisecond
 )

-// cacheEntry caches resource data and the metadata of its root chunk.
+// cacheEntry caches the last known update of a specific Feed.
 type cacheEntry struct {
 	Update
 	*bytes.Reader
@@ -42,7 +42,7 @@ func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) {
 	return int64(len(r.Update.data)), nil
 }

-//returns the resource's topic
+//returns the Feed's topic
 func (r *cacheEntry) Topic() Topic {
 	return r.Feed.Topic
 }
@@ -1,43 +1,42 @@
 /*
 Package feeds defines Swarm Feeds.

-A Mutable Resource is an entity which allows updates to a resource
+Swarm Feeds allows a user to build an update feed about a particular topic
 without resorting to ENS on each update.
 The update scheme is built on swarm chunks with chunk keys following
 a predictable, versionable pattern.

-A Resource is tied to a unique identifier that is deterministically generated out of
+A Feed is tied to a unique identifier that is deterministically generated out of
 the chosen topic.

-A Resource View is defined as a specific user's point of view about a particular resource.
-Thus, a View is a Topic + the user's address (userAddr)
+A Feed is defined as the series of updates of a specific user about a particular topic

 Actual data updates are also made in the form of swarm chunks. The keys
 of the updates are the hash of a concatenation of properties as follows:

-updateAddr = H(View, Epoch ID)
+updateAddr = H(Feed, Epoch ID)
 where H is the SHA3 hash function
-View is the combination of Topic and the user address
+Feed is the combination of Topic and the user address
 Epoch ID is a time slot. See the lookup package for more information.

-A user looking up a resource would only need to know the View in order to
-another user's updates
+A user looking up the latest update in a Feed only needs to know the Topic
+and the other user's address.

-The resource update data is:
-resourcedata = View|Epoch|data
+The Feed Update data is:
+updatedata = Feed|Epoch|data

-the full update data that goes in the chunk payload is:
+The full update data that goes in the chunk payload is:
 resourcedata|sign(resourcedata)

 Structure Summary:

-Request: Resource update with signature
-ResourceUpdate: headers + data
+Request: Feed Update with signature
+Update: headers + data
 Header: Protocol version and reserved for future use placeholders
 ID: Information about how to locate a specific update
-View: Author of the update and what is updating
+Feed: Represents a user's series of publications about a specific Topic
 Topic: Item that the updates are about
-User: User who updates the resource
+User: User who updates the Feed
 Epoch: time slot where the update is stored

 */
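To make the addressing scheme above concrete, here is a small self-contained Go sketch of updateAddr = H(Feed, Epoch ID). It is illustrative only: the real address is computed by ID.Addr() in this package, and the field order, the epoch byte layout and the choice of Keccak-256 below are assumptions made for the example rather than the package's wire format.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// epoch stands in for the "Epoch ID" (time slot) mentioned above; sizes are assumed.
type epoch struct {
	Time  uint64
	Level uint8
}

// updateAddr sketches updateAddr = H(Feed, Epoch ID), where Feed = Topic | User.
func updateAddr(topic [32]byte, user common.Address, e epoch) []byte {
	buf := make([]byte, 0, len(topic)+common.AddressLength+9)
	buf = append(buf, topic[:]...)     // Topic
	buf = append(buf, user.Bytes()...) // User (the Feed is Topic + user address)
	t := make([]byte, 8)
	binary.LittleEndian.PutUint64(t, e.Time) // byte order is an assumption
	buf = append(buf, t...)
	buf = append(buf, e.Level)
	return crypto.Keccak256(buf) // "H is the SHA3 hash function"
}

func main() {
	var topic [32]byte
	copy(topic[:], "a good topic name")
	user := common.HexToAddress("0x876a8936a7cd0b79ef0735ad0896c1afe278781c")
	fmt.Printf("updateAddr: 0x%x\n", updateAddr(topic, user, epoch{Time: 1000, Level: 1}))
}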
@@ -52,7 +52,7 @@ func (e *Error) Code() int {
 	return e.code
 }

-// NewError creates a new Mutable Resource Error object with the specified code and custom error message
+// NewError creates a new Swarm Feeds Error object with the specified code and custom error message
 func NewError(code int, s string) error {
 	if code < 0 || code >= ErrCnt {
 		panic("no such error code!")
@@ -57,7 +57,7 @@ func init() {
 	}
 }

-// NewHandler creates a new Mutable Resource API
+// NewHandler creates a new Swarm Feeds API
 func NewHandler(params *HandlerParams) *Handler {
 	fh := &Handler{
 		cache: make(map[uint64]*cacheEntry),
@@ -74,13 +74,13 @@ func NewHandler(params *HandlerParams) *Handler {
 	return fh
 }

-// SetStore sets the store backend for the Mutable Resource API
+// SetStore sets the store backend for the Swarm Feeds API
 func (h *Handler) SetStore(store *storage.NetStore) {
 	h.chunkStore = store
 }

 // Validate is a chunk validation method
-// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature
+// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
 // It implements the storage.ChunkValidator interface
 func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
 	dataLength := len(data)
@@ -89,7 +89,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
 	}

 	// check if it is a properly formatted update chunk with
-	// valid signature and proof of ownership of the resource it is trying
+	// valid signature and proof of ownership of the feed it is trying
 	// to update

 	// First, deserialize the chunk
@@ -99,9 +99,9 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
 		return false
 	}

-	// Verify signatures and that the signer actually owns the resource
+	// Verify signatures and that the signer actually owns the feed
 	// If it fails, it means either the signature is not valid, data is corrupted
-	// or someone is trying to update someone else's resource.
+	// or someone is trying to update someone else's feed.
 	if err := r.Verify(); err != nil {
 		log.Debug("Invalid feed update signature", "err", err)
 		return false
@@ -110,14 +110,14 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
 	return true
 }

-// GetContent retrieves the data payload of the last synced update of the Mutable Resource
+// GetContent retrieves the data payload of the last synced update of the Feed
 func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) {
 	if feed == nil {
-		return nil, nil, NewError(ErrInvalidValue, "view is nil")
+		return nil, nil, NewError(ErrInvalidValue, "feed is nil")
 	}
 	feedUpdate := h.get(feed)
 	if feedUpdate == nil {
-		return nil, nil, NewError(ErrNotFound, "resource does not exist")
+		return nil, nil, NewError(ErrNotFound, "feed update not cached")
 	}
 	return feedUpdate.lastKey, feedUpdate.data, nil
 }
@@ -142,7 +142,7 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request,
 			return nil, err
 		}
 		// not finding updates means that there is a network error
-		// or that the resource really does not have updates
+		// or that the feed really does not have updates
 	}

 	request.Feed = *feed
@@ -157,13 +157,10 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request,
 	return request, nil
 }

-// Lookup retrieves a specific or latest version of the resource
-// Lookup works differently depending on the configuration of `ID`
-// See the `ID` documentation and helper functions:
-// `LookupLatest` and `LookupBefore`
-// When looking for the latest update, it starts at the next period after the current time.
-// upon failure tries the corresponding keys of each previous period until one is found
-// (or startTime is reached, in which case there are no updates).
+// Lookup retrieves a specific or latest feed update
+// Lookup works differently depending on the configuration of `query`
+// See the `query` documentation and helper functions:
+// `NewQueryLatest` and `NewQuery`
 func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) {

 	timeLimit := query.TimeLimit
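For reference, a hedged usage sketch of the lookup flow documented above: build a Query for the latest update with NewQueryLatest, resolve it with Handler.Lookup, then read the cached payload back with GetContent. Handler construction and NetStore wiring (NewHandler plus SetStore) are assumed to be done by the caller, and the feeds import alias for the mru package is only for readability.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	feeds "github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// latestUpdate fetches the most recent update published by user about topicName.
func latestUpdate(ctx context.Context, h *feeds.Handler, user common.Address, topicName string) ([]byte, error) {
	topic, err := feeds.NewTopic(topicName, nil)
	if err != nil {
		return nil, err
	}
	feed := feeds.Feed{Topic: topic, User: user}

	// lookup.NoClue: we have no hint about which epoch holds the latest update.
	if _, err := h.Lookup(ctx, feeds.NewQueryLatest(&feed, lookup.NoClue)); err != nil {
		return nil, err
	}
	// GetContent returns the data payload of the last synced update, now cached by Lookup.
	_, data, err := h.GetContent(&feed)
	return data, err
}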
@@ -213,17 +210,17 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error)
 		return nil, err
 	}

-	log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount))
+	log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount))

 	request, _ := requestPtr.(*Request)
 	if request == nil {
-		return nil, NewError(ErrNotFound, "no updates found")
+		return nil, NewError(ErrNotFound, "no feed updates found")
 	}
 	return h.updateCache(request)

 }

-// update mutable resource cache map with specified content
+// update feed updates cache with specified content
 func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {

 	updateAddr := request.Addr()
@@ -242,10 +239,10 @@ func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {
 	return feedUpdate, nil
 }

-// Update adds an actual data update
-// Uses the Mutable Resource metadata currently loaded in the resources map entry.
-// It is the caller's responsibility to make sure that this data is not stale.
-// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit.
+// Update publishes a feed update
+// Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature.
+// This results in a max payload of `maxUpdateDataLength` (check update.go for more details)
+// An error will be returned if the total length of the chunk payload will exceed this limit.
 // Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
 // on the network.
 func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {
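A hedged sketch of the publishing flow these comments describe: NewRequest prefills the Feed and Epoch from the latest known state, SetData sets the payload (which must fit into a single chunk together with the update header and the 65-byte signature), Sign signs the update digest, and Update puts it on the network. Handler and store setup and the Signer implementation are assumed to exist; error handling is kept minimal on purpose.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage"
	feeds "github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// publish signs and publishes one update of the given feed.
func publish(ctx context.Context, h *feeds.Handler, signer feeds.Signer, feed *feeds.Feed, payload []byte) (storage.Address, error) {
	req, err := h.NewRequest(ctx, feed) // looks up the latest epoch to build the new update on
	if err != nil {
		return nil, err
	}
	req.SetData(payload) // also invalidates any previous signature
	if err := req.Sign(signer); err != nil {
		return nil, err
	}
	// Update fails if the resulting chunk payload would exceed the 4096-byte limit.
	return h.Update(ctx, req)
}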
@@ -280,18 +277,18 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {
 	return r.idAddr, nil
 }

-// Retrieves the resource cache value for the given nameHash
-func (h *Handler) get(view *Feed) *cacheEntry {
-	mapKey := view.mapKey()
+// Retrieves the feed update cache value for the given nameHash
+func (h *Handler) get(feed *Feed) *cacheEntry {
+	mapKey := feed.mapKey()
 	h.cacheLock.RLock()
 	defer h.cacheLock.RUnlock()
 	feedUpdate := h.cache[mapKey]
 	return feedUpdate
 }

-// Sets the resource cache value for the given View
-func (h *Handler) set(view *Feed, feedUpdate *cacheEntry) {
-	mapKey := view.mapKey()
+// Sets the feed update cache value for the given Feed
+func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) {
+	mapKey := feed.mapKey()
 	h.cacheLock.Lock()
 	defer h.cacheLock.Unlock()
 	h.cache[mapKey] = feedUpdate
@@ -89,12 +89,12 @@ func TestFeedsHandler(t *testing.T) {
 	}
 	defer teardownTest()

-	// create a new resource
+	// create a new Feed
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

 	topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil)
-	view := Feed{
+	feed := Feed{
 		Topic: topic,
 		User: signer.Address(),
 	}
@@ -107,7 +107,7 @@ func TestFeedsHandler(t *testing.T) {
 		"clyde", // t=4285
 	}

-	request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time)
+	request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time)
 	chunkAddress := make(map[string]storage.Address)
 	data := []byte(updates[0])
 	request.SetData(data)
@@ -205,38 +205,38 @@ func TestFeedsHandler(t *testing.T) {
 		t.Fatal(err)
 	}

-	rsrc2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue))
+	update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue))
 	if err != nil {
 		t.Fatal(err)
 	}

 	// last update should be "clyde"
-	if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
-		t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
+	if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) {
+		t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1])
 	}
-	if rsrc2.Level != 22 {
-		t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level)
+	if update2.Level != 22 {
+		t.Fatalf("feed update epoch level was %d, expected 22", update2.Level)
 	}
-	if rsrc2.Base() != 0 {
-		t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base())
+	if update2.Base() != 0 {
+		t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base())
 	}
-	log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data)
+	log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)

 	// specific point in time
-	rsrc, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue))
+	update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue))
 	if err != nil {
 		t.Fatal(err)
 	}
 	// check data
-	if !bytes.Equal(rsrc.data, []byte(updates[2])) {
-		t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2])
+	if !bytes.Equal(update.data, []byte(updates[2])) {
+		t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2])
 	}
-	log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data)
+	log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)

 	// beyond the first should yield an error
-	rsrc, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue))
+	update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue))
 	if err == nil {
-		t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data)
+		t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data)
 	}

 }
@@ -266,11 +266,11 @@ func TestSparseUpdates(t *testing.T) {
 	defer teardownTest()
 	defer os.RemoveAll(datadir)

-	// create a new resource
+	// create a new Feed
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	topic, _ := NewTopic("Very slow updates", nil)
-	view := Feed{
+	feed := Feed{
 		Topic: topic,
 		User: signer.Address(),
 	}
@@ -280,7 +280,7 @@ func TestSparseUpdates(t *testing.T) {
 	var epoch lookup.Epoch
 	var lastUpdateTime uint64
 	for T := uint64(0); T < today; T += 5 * Year {
-		request := NewFirstRequest(view.Topic)
+		request := NewFirstRequest(feed.Topic)
 		request.Epoch = lookup.GetNextEpoch(epoch, T)
 		request.data = generateData(T) // this generates some data that depends on T, so we can check later
 		request.Sign(signer)
@@ -295,14 +295,14 @@ func TestSparseUpdates(t *testing.T) {
 		lastUpdateTime = T
 	}

-	query := NewQuery(&view, today, lookup.NoClue)
+	query := NewQuery(&feed, today, lookup.NoClue)

 	_, err = rh.Lookup(ctx, query)
 	if err != nil {
 		t.Fatal(err)
 	}

-	_, content, err := rh.GetContent(&view)
+	_, content, err := rh.GetContent(&feed)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -321,7 +321,7 @@ func TestSparseUpdates(t *testing.T) {
 		t.Fatal(err)
 	}

-	_, content, err = rh.GetContent(&view)
+	_, content, err = rh.GetContent(&feed)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -348,7 +348,7 @@ func TestValidator(t *testing.T) {
 	}
 	defer teardownTest()

-	// create new resource
+	// create new Feed
 	topic, _ := NewTopic(subtopicName, nil)
 	feed := Feed{
 		Topic: topic,
@@ -382,7 +382,7 @@ func TestValidator(t *testing.T) {
 }

 // tests that the content address validator correctly checks the data
-// tests that resource update chunks are passed through content address validator
+// tests that Feed update chunks are passed through content address validator
 // there is some redundancy in this test as it also tests content addressed chunks,
 // which should be evaluated as invalid chunks by this validator
 func TestValidatorInStore(t *testing.T) {
@@ -409,7 +409,7 @@ func TestValidatorInStore(t *testing.T) {
 		t.Fatal(err)
 	}

-	// set up resource handler and add is as a validator to the localstore
+	// set up Swarm Feeds handler and add it as a validator to the localstore
 	fhParams := &HandlerParams{}
 	fh := NewHandler(fhParams)
 	store.Validators = append(store.Validators, fh)
@@ -425,7 +425,7 @@ func TestValidatorInStore(t *testing.T) {
 		User: signer.Address(),
 	}

-	// create a resource update chunk with correct publickey
+	// create a feed update chunk with correct publickey
 	id := ID{
 		Epoch: lookup.Epoch{Time: 42,
 			Level: 1,
@@ -29,7 +29,7 @@ import (

 // ID uniquely identifies an update on the network.
 type ID struct {
-	Feed `json:"view"`
+	Feed `json:"feed"`
 	lookup.Epoch `json:"epoch"`
 }
@@ -38,7 +38,7 @@ type ID struct {
 // Epoch EpochLength
 const idLength = feedLength + lookup.EpochLength

-// Addr calculates the resource update chunk address corresponding to this ID
+// Addr calculates the feed update chunk address corresponding to this ID
 func (u *ID) Addr() (updateAddr storage.Address) {
 	serializedData := make([]byte, idLength)
 	var cursor int
@@ -15,7 +15,7 @@
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

 /*
-Package lookup defines resource lookup algorithms and provides tools to place updates
+Package lookup defines Feed lookup algorithms and provides tools to place updates
 so they can be found
 */
 package lookup
@@ -72,7 +72,7 @@ func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query {
 	}
 }

-// NewQueryLatest generates lookup parameters that look for the latest version of a resource
+// NewQueryLatest generates lookup parameters that look for the latest update to a feed
 func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query {
 	return NewQuery(feed, 0, hint)
 }
@@ -27,7 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
 )

-// Request represents an update and/or resource create message
+// Request represents a request to sign or a signed Feed Update message
 type Request struct {
 	Update // actual content that will be put on the chunk, less signature
 	Signature *Signature
@@ -62,7 +62,7 @@ func NewFirstRequest(topic Topic) *Request {
 	return request
 }

-// SetData stores the payload data the resource will be updated with
+// SetData stores the payload data the feed will be updated with
 func (r *Request) SetData(data []byte) {
 	r.data = data
 	r.Signature = nil
@@ -73,7 +73,7 @@ func (r *Request) IsUpdate() bool {
 	return r.Signature != nil
 }

-// Verify checks that signatures are valid and that the signer owns the resource to be updated
+// Verify checks that signatures are valid
 func (r *Request) Verify() (err error) {
 	if len(r.data) == 0 {
 		return NewError(ErrInvalidValue, "Update does not contain data")
@@ -103,7 +103,7 @@ func (r *Request) Verify() (err error) {
 	return nil
 }

-// Sign executes the signature to validate the resource
+// Sign executes the signature to validate the update message
 func (r *Request) Sign(signer Signer) error {
 	r.Feed.User = signer.Address()
 	r.binaryData = nil //invalidate serialized data
@@ -133,7 +133,7 @@ func (r *Request) Sign(signer Signer) error {
 	return nil
 }

-// GetDigest creates the resource update digest used in signatures
+// GetDigest creates the feed update digest used in signatures
 // the serialized payload is cached in .binaryData
 func (r *Request) GetDigest() (result common.Hash, err error) {
 	hasher := hashPool.Get().(hash.Hash)
@@ -174,7 +174,7 @@ func (r *Request) toChunk() (storage.Chunk, error) {
 func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
 	// for update chunk layout see Request definition

-	//deserialize the resource update portion
+	//deserialize the feed update portion
 	if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil {
 		return err
 	}
@@ -47,7 +47,7 @@ func areEqualJSON(s1, s2 string) (bool, error) {
 }

 // TestEncodingDecodingUpdateRequests ensures that requests are serialized properly
-// while also checking cryptographically that only the owner of a resource can update it.
+// while also checking cryptographically that only the owner of a Feed can update it.
 func TestEncodingDecodingUpdateRequests(t *testing.T) {

 	charlie := newCharlieSigner() //Charlie
@@ -75,12 +75,10 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
 		t.Fatal("Expected Verify to fail since the message is not signed")
 	}

-	// We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata
-	// and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct
-	// proof of ownership
+	// We now assume that the feed update was created and propagated.

-	const expectedSignature = "0x32c2d2c7224e24e4d3ae6a10595fc6e945f1b3ecdf548a04d8247c240a50c9240076aa7730abad6c8a46dfea00cfb8f43b6211f02db5c4cc5ed8584cb0212a4d00"
-	const expectedJSON = `{"view":{"topic":"0x6120676f6f64207265736f75726365206e616d65000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}`
+	const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601"
+	const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}`

 	//Put together an unsigned update request that we will serialize to send it to the signer.
 	data := []byte("This hour's update: Swarm 99.0 has been released!")
@@ -138,7 +136,7 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) {
 		t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
 	}

-	// Now imagine Bob wants to create an update of his own about the same resource,
+	// Now imagine Bob wants to create an update of his own about the same Feed,
 	// signing a message with his private key
 	if err := request.Sign(bob); err != nil {
 		t.Fatalf("Error signing: %s", err)
@@ -258,7 +256,7 @@ func TestReverse(t *testing.T) {
 	defer teardownTest()

 	topic, _ := NewTopic("Cervantes quotes", nil)
-	view := Feed{
+	feed := Feed{
 		Topic: topic,
 		User: signer.Address(),
 	}
@@ -266,7 +264,7 @@ func TestReverse(t *testing.T) {
 	data := []byte("Donde una puerta se cierra, otra se abre")

 	request := new(Request)
-	request.Feed = view
+	request.Feed = feed
 	request.Epoch = epoch
 	request.data = data
@@ -28,7 +28,7 @@ const signatureLength = 65
 // Signature is an alias for a static byte array with the size of a signature
 type Signature [signatureLength]byte

-// Signer signs Mutable Resource update payloads
+// Signer signs Feed update payloads
 type Signer interface {
 	Sign(common.Hash) (Signature, error)
 	Address() common.Address
@@ -65,7 +65,7 @@ func (s *GenericSigner) Address() common.Address {
 	return s.address
 }

-// getUserAddr extracts the address of the resource update signer
+// getUserAddr extracts the address of the Feed update signer
 func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) {
 	pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
 	if err != nil {
@@ -74,7 +74,7 @@ func (t *Topic) FromHex(hex string) error {
 	return nil
 }

-// Name will try to extract the resource name out of the topic
+// Name will try to extract the topic name out of the Topic
 func (t *Topic) Name(relatedContent []byte) string {
 	nameBytes := *t
 	if relatedContent != nil {
@@ -37,7 +37,7 @@ type Header struct {
 // Update encapsulates the information sent as part of a feed update
 type Update struct {
 	Header Header //
-	ID // Resource update identifying information
+	ID // Feed Update identifying information
 	data []byte // actual data payload
 }
@@ -86,7 +86,7 @@ func (r *Update) binaryLength() int {
 // binaryGet populates this instance from the information contained in the passed byte slice
 func (r *Update) binaryGet(serializedData []byte) error {
 	if len(serializedData) < minimumUpdateDataLength {
-		return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength)
+		return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength)
 	}
 	dataLength := len(serializedData) - idLength - headerLength
 	// at this point we can be satisfied that we have the correct data length to read
@@ -25,13 +25,13 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )

-// Feed represents a particular user's view of a resource
+// Feed represents a particular user's stream of updates on a Topic
 type Feed struct {
 	Topic Topic `json:"topic"`
 	User common.Address `json:"user"`
 }

-// View layout:
+// Feed layout:
 // TopicLength bytes
 // userAddr common.AddressLength bytes
 const feedLength = TopicLength + common.AddressLength
@@ -51,7 +51,7 @@ func (u *Feed) mapKey() uint64 {
 // binaryPut serializes this Feed instance into the provided slice
 func (u *Feed) binaryPut(serializedData []byte) error {
 	if len(serializedData) != feedLength {
-		return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", feedLength, len(serializedData))
+		return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize Feed. Expected %d, got %d", feedLength, len(serializedData))
 	}
 	var cursor int
 	copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength])