swarm/network: bump network id for 0.4 release (#19580)

* swarm/network: bump network id for 0.4 release

* swarm/network: bump bzz protocol version

* swarm/docs: migration document v0.3 to v0.4

* swarm/storage/feed: gofmt lookup_test.go
Authored by Anton Evangelatov on 2019-05-16 17:29:12 +02:00, committed by Péter Szilágyi
parent 1e067202a2
commit 60386b3545
5 changed files with 42 additions and 10 deletions


@@ -0,0 +1,31 @@
Swarm DB migration notes
=========================
Swarm `v0.4` introduces major changes to the existing codebase. Among other things, the storage layer has been rewritten to be more modular and flexible,
in a manner that will accommodate our future needs. Since Swarm at this point does not provide any storage guarantees, we have decided not to impose any migration on our public cluster or on our users. What this essentially means is that local storage will be purged when upgrading to `v0.4`. We have nevertheless provided a procedure below for those of you who run private clusters and would like to migrate your data to the new local storage format.
You are highly encouraged to report to us any bugs or problems caused by running the migration steps below.
**Note**: we highly recommend you run the commands below with the `--verbosity 5` flag and open an issue with the relevant terminal output in case something goes wrong.
**Important**: since you would be creating an export of your local store, disk usage might temporarily peak at `x2-x3` times the normal Swarm data folder size. Please make sure you have enough disk space, backup media or another form of local/network-attached storage _before_ executing the following steps!
**Important**: when run against an old local store format, the Swarm binary will refuse to start and show an error message.
You will need the following information for the migration procedure:
1. Your `datadir` path. This is set with the `--datadir` flag when running Swarm. If you do not specify this flag, the `datadir` defaults to `$HOME/.ethereum`.
2. Your chunk directory location. This is normally located at `datadir/swarm/bzz-<your bzz account>/chunks`. We will refer to it as `chunkDir` below.
3. Your `bzzAddr`. This is _not_ your `--bzzaccount`! You can find your `bzzAddr` when starting Swarm by looking for the following line:
```
INFO [03-21|17:25:04.791] Swarm network started bzzaddr=ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c
```
The migration is done in the following manner:
1. Try to run the updated Swarm binary; it should complain about the local store format and exit. If it does, execute the following steps:
2. `$ swarm --verbosity 5 db export <chunkDir> <exportLocation>/<exportFilename>.tar <bzzAddr>`
3. Move or remove your existing `chunkDir`.
4. Run the new Swarm binary as you would normally start your Swarm node. It should now load without complaining. This step creates a new, empty chunk store. Please shut down the node after it starts correctly.
5. `$ swarm --verbosity 5 db import --legacy <chunkDir> <exportLocation>/<exportFilename>.tar <bzzAddr>`
6. Wait patiently for the `Imported X chunks successfully` message.
7. Start your Swarm node as you normally would.
8. Have a beer

`swarm/network/protocol.go`

```diff
@@ -33,7 +33,7 @@ import (
 )
 
 const (
-	DefaultNetworkID = 3
+	DefaultNetworkID = 4
 	// timeout for waiting
 	bzzHandshakeTimeout = 3000 * time.Millisecond
 )
@@ -43,7 +43,7 @@ var DefaultTestNetworkID = rand.Uint64()
 // BzzSpec is the spec of the generic swarm handshake
 var BzzSpec = &protocols.Spec{
 	Name:       "bzz",
-	Version:    8,
+	Version:    9,
 	MaxMsgSize: 10 * 1024 * 1024,
 	Messages: []interface{}{
 		HandshakeMsg{},
```
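
Both of these values are exchanged when peers connect, so a node still on `v0.3` (network ID 3, bzz version 8) cannot complete a handshake with an upgraded `v0.4` node; bumping them is what cleanly separates the two networks. Below is a minimal sketch of that effect; it is not the actual swarm handshake code, and the type and function names are made up for illustration:

```go
// Minimal sketch (not the actual swarm handshake code) of why bumping the
// network ID and protocol version isolates v0.4 nodes: a peer that still
// advertises the old values fails the compatibility check.
package main

import "fmt"

// handshake holds only the two fields relevant here; the real bzz handshake
// message carries more (overlay address, etc.).
type handshake struct {
	Version   uint64 // bzz protocol version: 8 on v0.3, 9 on v0.4
	NetworkID uint64 // swarm network ID: 3 on v0.3, 4 on v0.4
}

// compatible reports whether a remote handshake matches our own parameters.
func compatible(local, remote handshake) error {
	if remote.NetworkID != local.NetworkID {
		return fmt.Errorf("network id mismatch %d (!= %d)", remote.NetworkID, local.NetworkID)
	}
	if remote.Version != local.Version {
		return fmt.Errorf("version mismatch %d (!= %d)", remote.Version, local.Version)
	}
	return nil
}

func main() {
	v03 := handshake{Version: 8, NetworkID: 3}
	v04 := handshake{Version: 9, NetworkID: 4}

	fmt.Println(compatible(v04, v04)) // <nil>: upgraded peers connect
	fmt.Println(compatible(v04, v03)) // error: old peers are rejected
}
```
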

`swarm/network/protocol_test.go`

```diff
@@ -36,7 +36,7 @@ import (
 )
 
 const (
-	TestProtocolVersion = 8
+	TestProtocolVersion = 9
 )
 
 var TestProtocolNetworkID = DefaultTestNetworkID
```

`swarm/network/stream/syncer.go`

```diff
@@ -18,6 +18,7 @@ package stream
 
 import (
 	"context"
+	"fmt"
 	"strconv"
 	"time"
 
@@ -58,7 +59,7 @@ func RegisterSwarmSyncerServer(streamer *Registry, netStore *storage.NetStore) {
 		if err != nil {
 			return nil, err
 		}
-		return NewSwarmSyncerServer(po, netStore, p.ID().String()+"|"+string(po))
+		return NewSwarmSyncerServer(po, netStore, fmt.Sprintf("%s|%d", p.ID(), po))
 	})
 	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
 	// 	return NewOutgoingProvableSwarmSyncer(po, db)
```
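
Besides adding the `fmt` import, this change fixes a subtle conversion: in Go, converting an integer to `string` yields the UTF-8 encoding of that code point rather than its decimal representation, so `string(po)` put an unprintable control character into the server label instead of the proximity order number. A standalone illustration of the difference (the peer ID value here is made up):

```go
// Standalone illustration of the conversion fixed above, not the swarm code
// itself. Converting an integer to string interprets it as a Unicode code
// point, so proximity order 5 becomes the control character "\x05" rather
// than the digit "5".
package main

import "fmt"

func main() {
	po := uint8(5)       // proximity order bin
	peerID := "8ca1e9f3" // hypothetical, shortened peer ID

	bad := peerID + "|" + string(po)         // "8ca1e9f3|\x05": code point 5
	good := fmt.Sprintf("%s|%d", peerID, po) // "8ca1e9f3|5": decimal formatting

	fmt.Printf("%q\n%q\n", bad, good)
}
```
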
```diff
@@ -146,16 +147,16 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 			if batchSize >= BatchSize {
 				iterate = false
 				metrics.GetOrRegisterCounter("syncer.set-next-batch.full-batch", nil).Inc(1)
-				log.Debug("syncer pull subscription - batch size reached", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
+				log.Trace("syncer pull subscription - batch size reached", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
 			}
 			if timer == nil {
 				timer = time.NewTimer(batchTimeout)
 			} else {
-				log.Debug("syncer pull subscription - stopping timer", "correlateId", s.correlateId)
+				log.Trace("syncer pull subscription - stopping timer", "correlateId", s.correlateId)
 				if !timer.Stop() {
 					<-timer.C
 				}
-				log.Debug("syncer pull subscription - channel drained, resetting timer", "correlateId", s.correlateId)
+				log.Trace("syncer pull subscription - channel drained, resetting timer", "correlateId", s.correlateId)
 				timer.Reset(batchTimeout)
 			}
 			timerC = timer.C
```
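
The `Stop`/drain/`Reset` sequence that these trace logs bracket is the standard `time.Timer` idiom: when `Stop` reports that the timer has already fired, the pending tick must be drained from `timer.C` before calling `Reset`, otherwise a later receive could observe a stale expiry immediately. A minimal standalone sketch of that idiom follows (no swarm types; it drains with a non-blocking receive so it also stays safe if the tick was already consumed elsewhere):

```go
// Minimal sketch of the stop/drain/reset timer idiom, independent of the
// syncer code above.
package main

import (
	"fmt"
	"time"
)

// resetTimer stops t and clears any pending tick before resetting it, so the
// next receive on t.C cannot observe a stale expiry.
func resetTimer(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		// Timer already fired; drain the tick if it is still buffered.
		select {
		case <-t.C:
		default:
		}
	}
	t.Reset(d)
}

func main() {
	timer := time.NewTimer(30 * time.Millisecond)

	// Let the timer fire without reading from timer.C, leaving a stale
	// value buffered in the channel.
	time.Sleep(60 * time.Millisecond)

	resetTimer(timer, 30*time.Millisecond)

	start := time.Now()
	<-timer.C // fires ~30ms after the reset, not immediately
	fmt.Println("fired after", time.Since(start).Round(time.Millisecond))
}
```
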
```diff
@@ -164,10 +165,10 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 				// received after some time
 				iterate = false
 				metrics.GetOrRegisterCounter("syncer.set-next-batch.timer-expire", nil).Inc(1)
-				log.Debug("syncer pull subscription timer expired", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
+				log.Trace("syncer pull subscription timer expired", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
 			case <-s.quit:
 				iterate = false
-				log.Debug("syncer pull subscription - quit received", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
+				log.Trace("syncer pull subscription - quit received", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
 			}
 		}
 		if batchStartID == nil {
```

File diff for `swarm/storage/feed/lookup_test.go` (gofmt) suppressed because one or more lines are too long