swarm: instrument setNextBatch

swarm/storage/localstore: add gc metrics, disable flaky test
This commit is contained in:
Elad 2019-05-09 12:54:06 +04:00 committed by Anton Evangelatov
parent 3e9ba57669
commit 84dfaea246
5 changed files with 37 additions and 12 deletions

View File

@@ -33,7 +33,7 @@ import (
 // It runs a few "create" commands with different flag values and loads generated
 // snapshot files to validate their content.
 func TestSnapshotCreate(t *testing.T) {
-	t.Skip("todo: fix this")
+	t.Skip("test is flaky. disabling until underlying problem is addressed")
 	for _, v := range []struct {
 		name string

View File

@@ -197,7 +197,7 @@ func (m ModeSet) String() string {
 const (
 	// ModeSetAccess: when an update request is received for a chunk or chunk is retrieved for delivery
 	ModeSetAccess ModeSet = iota
-	// ModeSetSync: when push sync receipt is received
+	// ModeSetSync: when a chunk is added to a pull sync batch or when a push sync receipt is received
 	ModeSetSync
 	// ModeSetRemove: when a chunk is removed
 	ModeSetRemove

View File

@@ -156,6 +156,7 @@ func createSimServiceMap(discovery bool) map[string]ServiceFunc {
 // Call WaitTillSnapshotRecreated() function and wait until it returns
 // Iterate the nodes and check if all the connections are successfully recreated
 func TestWaitTillSnapshotRecreated(t *testing.T) {
+	t.Skip("test is flaky. disabling until underlying problem is addressed")
 	var err error
 	sim := New(createSimServiceMap(true))
 	_, err = sim.AddNodesAndConnectRing(16)

View File

@@ -21,7 +21,9 @@ import (
 	"strconv"
 	"time"

+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/swarm/chunk"
+	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )
@@ -34,27 +36,29 @@ const (
 // * live request delivery with or without checkback
 // * (live/non-live historical) chunk syncing per proximity bin
 type SwarmSyncerServer struct {
-	po       uint8
-	netStore *storage.NetStore
-	quit     chan struct{}
+	correlateId string //used for logging
+	po          uint8
+	netStore    *storage.NetStore
+	quit        chan struct{}
 }

 // NewSwarmSyncerServer is constructor for SwarmSyncerServer
-func NewSwarmSyncerServer(po uint8, netStore *storage.NetStore) (*SwarmSyncerServer, error) {
+func NewSwarmSyncerServer(po uint8, netStore *storage.NetStore, correlateId string) (*SwarmSyncerServer, error) {
 	return &SwarmSyncerServer{
-		po:       po,
-		netStore: netStore,
-		quit:     make(chan struct{}),
+		correlateId: correlateId,
+		po:          po,
+		netStore:    netStore,
+		quit:        make(chan struct{}),
 	}, nil
 }

 func RegisterSwarmSyncerServer(streamer *Registry, netStore *storage.NetStore) {
-	streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) {
+	streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, _ bool) (Server, error) {
 		po, err := ParseSyncBinKey(t)
 		if err != nil {
 			return nil, err
 		}
-		return NewSwarmSyncerServer(po, netStore)
+		return NewSwarmSyncerServer(po, netStore, p.ID().String()+"|"+string(po))
 	})
 	// streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
 	// 	return NewOutgoingProvableSwarmSyncer(po, db)
@@ -92,7 +96,7 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 	if from > 0 {
 		from--
 	}
+	batchStart := time.Now()
 	descriptors, stop := s.netStore.SubscribePull(context.Background(), s.po, from, to)
 	defer stop()
@@ -106,7 +110,10 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 		timer  *time.Timer
 		timerC <-chan time.Time
 	)

 	defer func() {
+		metrics.GetOrRegisterResettingTimer("syncer.set-next-batch.total-time", nil).UpdateSince(batchStart)
+		metrics.GetOrRegisterCounter("syncer.set-next-batch.batch-size", nil).Inc(int64(batchSize))
 		if timer != nil {
 			timer.Stop()
 		}
@@ -125,6 +132,8 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 			// validating that the chunk is successfully stored by the peer.
 			err := s.netStore.Set(context.Background(), chunk.ModeSetSync, d.Address)
 			if err != nil {
+				metrics.GetOrRegisterCounter("syncer.set-next-batch.set-sync-err", nil).Inc(1)
+				log.Debug("syncer pull subscription - err setting chunk as synced", "correlateId", s.correlateId, "err", err)
 				return nil, 0, 0, nil, err
 			}
 			batchSize++
@@ -136,13 +145,17 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 			batchEndID = d.BinID
 			if batchSize >= BatchSize {
 				iterate = false
+				metrics.GetOrRegisterCounter("syncer.set-next-batch.full-batch", nil).Inc(1)
+				log.Debug("syncer pull subscription - batch size reached", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
 			}
 			if timer == nil {
 				timer = time.NewTimer(batchTimeout)
 			} else {
+				log.Debug("syncer pull subscription - stopping timer", "correlateId", s.correlateId)
 				if !timer.Stop() {
 					<-timer.C
 				}
+				log.Debug("syncer pull subscription - channel drained, resetting timer", "correlateId", s.correlateId)
 				timer.Reset(batchTimeout)
 			}
 			timerC = timer.C
@@ -150,8 +163,12 @@ func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint6
 			// return batch if new chunks are not
 			// received after some time
 			iterate = false
+			metrics.GetOrRegisterCounter("syncer.set-next-batch.timer-expire", nil).Inc(1)
+			log.Debug("syncer pull subscription timer expired", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
 		case <-s.quit:
 			iterate = false
+			metrics.GetOrRegisterCounter("syncer.set-next-batch.quit-sig", nil).Inc(1)
+			log.Debug("syncer pull subscription - quit received", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
 		}
 	}

 	if batchStartID == nil {

View File

@@ -98,12 +98,17 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
 	if err != nil {
 		return 0, true, err
 	}
+	metrics.GetOrRegisterGauge(metricName+".gcsize", nil).Update(int64(gcSize))

 	done = true
 	err = db.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
 		if gcSize-collectedCount <= target {
 			return true, nil
 		}
+		metrics.GetOrRegisterGauge(metricName+".storets", nil).Update(item.StoreTimestamp)
+		metrics.GetOrRegisterGauge(metricName+".accessts", nil).Update(item.AccessTimestamp)

 		// delete from retrieve, pull, gc
 		db.retrievalDataIndex.DeleteInBatch(batch, item)
 		db.retrievalAccessIndex.DeleteInBatch(batch, item)
@@ -121,11 +126,13 @@ func (db *DB) collectGarbage() (collectedCount uint64, done bool, err error) {
 	if err != nil {
 		return 0, false, err
 	}
+	metrics.GetOrRegisterCounter(metricName+".collected-count", nil).Inc(int64(collectedCount))

 	db.gcSize.PutInBatch(batch, gcSize-collectedCount)
 	err = db.shed.WriteBatch(batch)
 	if err != nil {
+		metrics.GetOrRegisterCounter(metricName+".writebatch.err", nil).Inc(1)
 		return 0, false, err
 	}
 	return collectedCount, done, nil