swarm, cmd/swarm: address ineffectual assignments (#18048)
* swarm, cmd/swarm: address ineffectual assignments
* swarm/network: remove unused vars from testHandshake
* swarm/storage/feed: revert cursor changes
Parent: 81533deae5
Commit: cf3b187bde
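Most of the hunks below follow two small patterns that a linter such as ineffassign reports as "ineffectual assignments": an error is assigned but never checked before err is reassigned, or a variable is given an initial value that is overwritten before it is ever read. A minimal, self-contained Go sketch of the unchecked-error case and its fix (the functions and names here are illustrative, not taken from the swarm code):

package main

import (
	"fmt"
	"strconv"
)

// parseBoth loses the first error: err is assigned by the first Atoi and
// overwritten by the second before it is ever read.
func parseBoth(a, b string) (int, int, error) {
	x, err := strconv.Atoi(a)
	y, err := strconv.Atoi(b)
	if err != nil {
		return 0, 0, err
	}
	return x, y, nil
}

// parseBothFixed checks every error before err is reassigned, which is the
// shape of the fixes below.
func parseBothFixed(a, b string) (int, int, error) {
	x, err := strconv.Atoi(a)
	if err != nil {
		return 0, 0, err
	}
	y, err := strconv.Atoi(b)
	if err != nil {
		return 0, 0, err
	}
	return x, y, nil
}

func main() {
	if _, _, err := parseBoth("oops", "2"); err == nil {
		fmt.Println("first error was silently dropped")
	}
	if _, _, err := parseBothFixed("oops", "2"); err != nil {
		fmt.Println("fixed version reports it:", err)
	}
}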
@@ -114,6 +114,9 @@ func accessNewPass(ctx *cli.Context) {
 		utils.Fatalf("error getting session key: %v", err)
 	}
 	m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
+	if err != nil {
+		utils.Fatalf("had an error generating the manifest: %v", err)
+	}
 	if dryRun {
 		err = printManifests(m, nil)
 		if err != nil {
@@ -147,6 +150,9 @@ func accessNewPK(ctx *cli.Context) {
 		utils.Fatalf("error getting session key: %v", err)
 	}
 	m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
+	if err != nil {
+		utils.Fatalf("had an error generating the manifest: %v", err)
+	}
 	if dryRun {
 		err = printManifests(m, nil)
 		if err != nil {
@@ -80,6 +80,9 @@ func TestCLISwarmFs(t *testing.T) {
 		t.Fatal(err)
 	}
 	dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	dummyContent := "somerandomtestcontentthatshouldbeasserted"
 	dirs := []string{
@@ -243,8 +243,7 @@ func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDownload)
 	bzzLocator := "bzz:/" + hash
-	flagss := []string{}
-	flagss = []string{
+	flagss := []string{
 		"--bzzapi", cluster.Nodes[0].URL,
 		"down",
 		"--recursive",
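The hunk above is the simplest instance of the other pattern: an empty []string{} literal was assigned and then immediately replaced, so the first assignment never had any effect. A trivial illustration of the before/after shape (two variable names are used only so both forms can sit in one snippet):

package main

import "fmt"

func main() {
	// Before: the empty literal is dead, it is overwritten on the next line.
	flagss := []string{}
	flagss = []string{"--recursive", "down"}

	// After: declare and populate in one step.
	flags := []string{"--recursive", "down"}

	fmt.Println(flagss, flags)
}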
@@ -458,6 +458,9 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees
 			return nil, nil, nil, err
 		}
 		sessionKey, err := NewSessionKeyPK(privateKey, granteePub, salt)
+		if err != nil {
+			return nil, nil, nil, err
+		}
 
 		hasher := sha3.NewKeccak256()
 		hasher.Write(append(sessionKey, 0))
@@ -457,6 +457,9 @@ func TestClientCreateUpdateFeed(t *testing.T) {
 	}
 
 	feedManifestHash, err := client.CreateFeedWithManifest(createRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	correctManifestAddrHex := "0e9b645ebc3da167b1d56399adc3276f7a08229301b72a03336be0e7d4b71882"
 	if feedManifestHash != correctManifestAddrHex {
@@ -122,6 +122,10 @@ func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error
 			var wait func(context.Context) error
 			ctx := context.TODO()
 			hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt)
+			if err != nil {
+				errors[i] = err
+				return
+			}
 			if hash != nil {
 				list[i].Hash = hash.Hex()
 			}
@@ -366,7 +366,7 @@ func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api
 		}
 
 		var size int64
-		var reader io.Reader = part
+		var reader io.Reader
 		if contentLength := part.Header.Get("Content-Length"); contentLength != "" {
 			size, err = strconv.ParseInt(contentLength, 10, 64)
 			if err != nil {
@@ -263,7 +263,7 @@ func TestBzzFeed(t *testing.T) {
 	if resp.StatusCode == http.StatusOK {
 		t.Fatal("Expected error status since feed update does not contain multihash. Received 200 OK")
 	}
-	b, err = ioutil.ReadAll(resp.Body)
+	_, err = ioutil.ReadAll(resp.Body)
 	if err != nil {
 		t.Fatal(err)
 	}
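Here the result of ioutil.ReadAll was assigned to b but never read again, so the value is now discarded with the blank identifier while the body is still drained and the error still checked. A standalone sketch of the same idiom (the reader below is a stand-in, not the test's HTTP response):

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	body := strings.NewReader("content we only need to drain")

	// Discard the bytes, keep the error: the read still happens,
	// but no unused variable is left behind.
	_, err := ioutil.ReadAll(body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("body drained")
}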
@@ -491,6 +491,9 @@ func testBzzGetPath(encrypted bool, t *testing.T) {
 		}
 		defer resp.Body.Close()
 		respbody, err = ioutil.ReadAll(resp.Body)
+		if err != nil {
+			t.Fatalf("Error while reading response body: %v", err)
+		}
 
 		if string(respbody) != testmanifest[v] {
 			isexpectedfailrequest := false
@@ -557,7 +557,6 @@ func (mt *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manif
 			if path != entry.Path {
 				return nil, 0
 			}
-			pos = epl
 		}
 	}
 	return nil, 0
@@ -70,6 +70,9 @@ func TestHiveStatePersistance(t *testing.T) {
 	defer os.RemoveAll(dir)
 
 	store, err := state.NewDBStore(dir) //start the hive with an empty dbstore
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	params := NewHiveParams()
 	s, pp := newHiveTester(t, params, 5, store)
@@ -90,6 +93,9 @@ func TestHiveStatePersistance(t *testing.T) {
 	store.Close()
 
 	persistedStore, err := state.NewDBStore(dir) //start the hive with an empty dbstore
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	s1, pp := newHiveTester(t, params, 1, persistedStore)
 
@@ -153,17 +153,7 @@ func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr, lightNode bool) *
 
 // should test handshakes in one exchange? parallelisation
 func (s *bzzTester) testHandshake(lhs, rhs *HandshakeMsg, disconnects ...*p2ptest.Disconnect) error {
-	var peers []enode.ID
-	id := rhs.Addr.ID()
-	if len(disconnects) > 0 {
-		for _, d := range disconnects {
-			peers = append(peers, d.Peer)
-		}
-	} else {
-		peers = []enode.ID{id}
-	}
-
-	if err := s.TestExchanges(HandshakeMsgExchange(lhs, rhs, id)...); err != nil {
+	if err := s.TestExchanges(HandshakeMsgExchange(lhs, rhs, rhs.Addr.ID())...); err != nil {
 		return err
 	}
 
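This is the "remove unused vars from testHandshake" part of the commit message: the peers slice was built from the disconnect arguments but its final value was never read, so the whole block collapses into the single TestExchanges call. A rough, generic sketch of that kind of clean-up (names and types here are made up, not the swarm network test API):

package main

import "fmt"

func exchange(id string) error {
	fmt.Println("exchanging with", id)
	return nil
}

// handshakeBefore: peers is assembled but its final value is never read,
// so every assignment to it is dead weight.
func handshakeBefore(id string, disconnects ...string) error {
	var peers []string
	if len(disconnects) > 0 {
		peers = append(peers, disconnects...)
	} else {
		peers = []string{id}
	}
	return exchange(id)
}

// handshakeAfter: only the call that matters remains.
func handshakeAfter(id string) error {
	return exchange(id)
}

func main() {
	_ = handshakeBefore("node-1", "node-2")
	_ = handshakeAfter("node-1")
}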
@@ -94,7 +94,7 @@ func TestServiceBucket(t *testing.T) {
 			t.Fatalf("expected %q, got %q", customValue, s)
 		}
 
-		v, ok = sim.NodeItem(id2, customKey)
+		_, ok = sim.NodeItem(id2, customKey)
 		if ok {
 			t.Fatal("bucket item should not be found")
 		}
@@ -119,7 +119,7 @@ func TestServiceBucket(t *testing.T) {
 			t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
 		}
 
-		v, ok = items[id2]
+		_, ok = items[id2]
 		if ok {
 			t.Errorf("node 2 item should not be found")
 		}
@@ -62,6 +62,9 @@ func createMockStore(globalStore *mockdb.GlobalStore, id enode.ID, addr *network
 	params.Init(datadir)
 	params.BaseKey = addr.Over()
 	lstore, err = storage.NewLocalStore(params, mockStore)
+	if err != nil {
+		return nil, "", err
+	}
 	return lstore, datadir, nil
 }
 
@@ -252,7 +252,13 @@ func newServices() adapters.Services {
 			ctxlocal, cancel := context.WithTimeout(context.Background(), time.Second)
 			defer cancel()
 			keys, err := wapi.NewKeyPair(ctxlocal)
+			if err != nil {
+				return nil, err
+			}
 			privkey, err := w.GetPrivateKey(keys)
+			if err != nil {
+				return nil, err
+			}
 			psparams := pss.NewPssParams().WithPrivateKey(privkey)
 			pskad := kademlia(ctx.Config.ID)
 			ps, err := pss.NewPss(pskad, psparams)
@@ -223,7 +223,13 @@ func newServices(allowRaw bool) adapters.Services {
 			ctxlocal, cancel := context.WithTimeout(context.Background(), time.Second)
 			defer cancel()
 			keys, err := wapi.NewKeyPair(ctxlocal)
+			if err != nil {
+				return nil, err
+			}
 			privkey, err := w.GetPrivateKey(keys)
+			if err != nil {
+				return nil, err
+			}
 			pssp := pss.NewPssParams().WithPrivateKey(privkey)
 			pssp.MsgTTL = time.Second * 30
 			pssp.AllowRaw = allowRaw
@@ -93,11 +93,17 @@ func testProtocol(t *testing.T) {
 	lctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()
 	lsub, err := clients[0].Subscribe(lctx, "pss", lmsgC, "receive", topic)
+	if err != nil {
+		t.Fatal(err)
+	}
 	defer lsub.Unsubscribe()
 	rmsgC := make(chan APIMsg)
 	rctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()
 	rsub, err := clients[1].Subscribe(rctx, "pss", rmsgC, "receive", topic)
+	if err != nil {
+		t.Fatal(err)
+	}
 	defer rsub.Unsubscribe()
 
 	// set reciprocal public keys
@@ -69,7 +69,7 @@ func (s *DBStore) Get(key string, i interface{}) (err error) {
 
 // Put stores an object that implements Binary for a specific key.
 func (s *DBStore) Put(key string, i interface{}) (err error) {
-	bytes := []byte{}
+	var bytes []byte
 
 	marshaler, ok := i.(encoding.BinaryMarshaler)
 	if !ok {
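bytes := []byte{} allocates an empty slice that, per the linter, is never read before bytes is reassigned further down, so the declaration becomes var bytes []byte (a nil slice, no dead initial value). A small isolated sketch of that shape, assuming a BinaryMarshaler-or-fallback structure similar to the surrounding code (the JSON fallback here is illustrative, not necessarily what DBStore does):

package main

import (
	"encoding"
	"encoding/json"
	"fmt"
	"time"
)

func encode(i interface{}) ([]byte, error) {
	// "bytes := []byte{}" would allocate a value that is immediately
	// replaced below; a plain declaration avoids the dead store.
	var bytes []byte
	var err error

	if marshaler, ok := i.(encoding.BinaryMarshaler); ok {
		bytes, err = marshaler.MarshalBinary()
	} else {
		bytes, err = json.Marshal(i)
	}
	if err != nil {
		return nil, err
	}
	return bytes, nil
}

func main() {
	b, err := encode(time.Now()) // time.Time implements encoding.BinaryMarshaler
	fmt.Println(len(b) > 0, err)

	b, err = encode(map[string]int{"answer": 42}) // falls back to JSON here
	fmt.Println(string(b), err)
}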
@@ -112,6 +112,9 @@ func testPersistedStore(t *testing.T, store Store) {
 
 	as := []string{}
 	err = store.Get("key2", &as)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	if len(as) != 3 {
 		t.Fatalf("serialized array did not match expectation")
@@ -59,7 +59,7 @@ func (s *InmemoryStore) Get(key string, i interface{}) (err error) {
 func (s *InmemoryStore) Put(key string, i interface{}) (err error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	bytes := []byte{}
+	var bytes []byte
 
 	marshaler, ok := i.(encoding.BinaryMarshaler)
 	if !ok {
@@ -244,11 +244,8 @@ func GenerateRandomChunk(dataSize int64) Chunk {
 }
 
 func GenerateRandomChunks(dataSize int64, count int) (chunks []Chunk) {
-	if dataSize > ch.DefaultSize {
-		dataSize = ch.DefaultSize
-	}
 	for i := 0; i < count; i++ {
-		ch := GenerateRandomChunk(ch.DefaultSize)
+		ch := GenerateRandomChunk(dataSize)
 		chunks = append(chunks, ch)
 	}
 	return chunks
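Besides dropping the dead clamp, this last hunk changes what the helper produces: previously every chunk was generated at ch.DefaultSize and the dataSize argument was effectively ignored; now the requested size is passed through. A rough standalone sketch of the before/after behaviour (GenerateRandomChunk and the default size are stubbed here, not the real storage types):

package main

import (
	"crypto/rand"
	"fmt"
)

const defaultSize = 4096 // stand-in for ch.DefaultSize

// stand-in for GenerateRandomChunk: a random payload of the given size
func generateRandomChunk(dataSize int64) []byte {
	b := make([]byte, dataSize)
	_, _ = rand.Read(b)
	return b
}

// Before: dataSize was clamped and then never used, every chunk came out at defaultSize.
func generateRandomChunksBefore(dataSize int64, count int) (chunks [][]byte) {
	if dataSize > defaultSize {
		dataSize = defaultSize
	}
	for i := 0; i < count; i++ {
		chunks = append(chunks, generateRandomChunk(defaultSize))
	}
	return chunks
}

// After: the requested size is honoured.
func generateRandomChunksAfter(dataSize int64, count int) (chunks [][]byte) {
	for i := 0; i < count; i++ {
		chunks = append(chunks, generateRandomChunk(dataSize))
	}
	return chunks
}

func main() {
	fmt.Println(len(generateRandomChunksBefore(100, 1)[0])) // 4096
	fmt.Println(len(generateRandomChunksAfter(100, 1)[0]))  // 100
}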