remove comments

This commit is contained in:
Shrenuj Bansal 2022-11-15 10:34:25 -05:00
parent b95d1a6323
commit 4b11b453be
2 changed files with 0 additions and 140 deletions

View File

@@ -268,14 +268,6 @@ func (cc *Consensus) finishBootstrap() {
// shutdown, along with the libp2p transport.
func (cc *Consensus) Shutdown(ctx context.Context) error {
//cc.shutdownLock.Lock()
//defer cc.shutdownLock.Unlock()
//if cc.shutdown {
// logger.Debug("already shutdown")
// return nil
//}
logger.Info("stopping Consensus component")
// Raft Shutdown
@@ -291,7 +283,6 @@ func (cc *Consensus) Shutdown(ctx context.Context) error {
}
}
//cc.shutdown = true
cc.cancel()
close(cc.rpcReady)
return nil
@@ -511,12 +502,6 @@ func (cc *Consensus) Clean(ctx context.Context) error {
// The list will be sorted alphabetically.
func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
//cc.shutdownLock.RLock() // prevent shutdown while here
//defer cc.shutdownLock.RUnlock()
//
//if cc.shutdown { // things hang a lot in this case
// return nil, errors.New("consensus is shutdown")
//}
peers := []peer.ID{}
raftPeers, err := cc.raft.Peers(ctx)
if err != nil {
@@ -539,28 +524,3 @@ func (cc *Consensus) IsLeader(ctx context.Context) bool {
leader, _ := cc.Leader(ctx)
return leader == cc.host.ID()
}
// OfflineState state returns a cluster state by reading the Raft data and
// writing it to the given datastore which is then wrapped as a state.RaftState.
// Usually an in-memory datastore suffices. The given datastore should be
// thread-safe.
//func OfflineState(cfg *Config, store ds.Datastore) (state.RaftState, error) {
// r, snapExists, err := LastStateRaw(cfg)
// if err != nil {
// return nil, err
// }
//
// st, err := dsstate.New(context.Background(), store, cfg.DatastoreNamespace, dsstate.DefaultHandle())
// if err != nil {
// return nil, err
// }
// if !snapExists {
// return st, nil
// }
//
// err = st.Unmarshal(r)
// if err != nil {
// return nil, err
// }
// return st, nil
//}

View File

@@ -521,106 +521,6 @@ func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) {
return ids, nil
}
// latestSnapshot looks for the most recent raft snapshot stored at the
// provided basedir. It returns the snapshot's metadata, and a reader
// to the snapshot's bytes
//func latestSnapshot(raftDataFolder string) (*hraft.SnapshotMeta, io.ReadCloser, error) {
// store, err := hraft.NewFileSnapshotStore(raftDataFolder, RaftMaxSnapshots, nil)
// if err != nil {
// return nil, nil, err
// }
// snapMetas, err := store.List()
// if err != nil {
// return nil, nil, err
// }
// if len(snapMetas) == 0 { // no error if snapshot isn't found
// return nil, nil, nil
// }
// meta, r, err := store.Open(snapMetas[0].ID)
// if err != nil {
// return nil, nil, err
// }
// return meta, r, nil
//}
// LastStateRaw returns the bytes of the last snapshot stored, its metadata,
// and a flag indicating whether any snapshot was found.
//func LastStateRaw(cfg *Config) (io.Reader, bool, error) {
// // Read most recent snapshot
// dataFolder := cfg.GetDataFolder()
// if _, err := os.Stat(dataFolder); os.IsNotExist(err) {
// // nothing to read
// return nil, false, nil
// }
//
// meta, r, err := latestSnapshot(dataFolder)
// if err != nil {
// return nil, false, err
// }
// if meta == nil { // no snapshots could be read
// return nil, false, nil
// }
// return r, true, nil
//}
// SnapshotSave saves the provided state to a snapshot in the
// raft data path. Old raft data is backed up and replaced
// by the new snapshot. pids contains the config-specified
// peer ids to include in the snapshot metadata if no snapshot exists
// from which to copy the raft metadata
//func SnapshotSave(cfg *Config, newState state.RaftState, pids []peer.ID) error {
// dataFolder := cfg.GetDataFolder()
// err := makeDataFolder(dataFolder)
// if err != nil {
// return err
// }
// meta, _, err := latestSnapshot(dataFolder)
// if err != nil {
// return err
// }
//
// // make a new raft snapshot
// var raftSnapVersion hraft.SnapshotVersion = 1 // As of hraft v1.0.0 this is always 1
// configIndex := uint64(1)
// var raftIndex uint64
// var raftTerm uint64
// var srvCfg hraft.Configuration
// if meta != nil {
// raftIndex = meta.Index
// raftTerm = meta.Term
// srvCfg = meta.Configuration
// CleanupRaft(cfg)
// } else {
// // Begin the log after the index of a fresh start so that
// // the snapshot's state propagate's during bootstrap
// raftIndex = uint64(2)
// raftTerm = uint64(1)
// srvCfg = makeServerConf(pids)
// }
//
// snapshotStore, err := hraft.NewFileSnapshotStoreWithLogger(dataFolder, RaftMaxSnapshots, nil)
// if err != nil {
// return err
// }
// _, dummyTransport := hraft.NewInmemTransport("")
//
// sink, err := snapshotStore.Create(raftSnapVersion, raftIndex, raftTerm, srvCfg, configIndex, dummyTransport)
// if err != nil {
// return err
// }
//
// err = p2praft.EncodeSnapshot(newState, sink)
// if err != nil {
// sink.Cancel()
// return err
// }
// err = sink.Close()
// if err != nil {
// return err
// }
// return nil
//}
// CleanupRaft moves the current data folder to a backup location
//func CleanupRaft(cfg *Config) error {
// dataFolder := cfg.GetDataFolder()