forked from cerc-io/plugeth
		
	cmd, ethdb, vendor: integrate leveldb iostats (#16277)
* cmd, dashboard, ethdb, vendor: send iostats to dashboard
* ethdb: change names
* ethdb: handle parsing errors
* ethdb: handle iostats syntax error
* ethdb: r -> w
This commit is contained in:
		
							parent
							
								
									4871e25f5f
								
							
						
					
					
						commit
						39c16c8a1e
					
				| @ -225,6 +225,13 @@ func importChain(ctx *cli.Context) error { | ||||
| 		utils.Fatalf("Failed to read database stats: %v", err) | ||||
| 	} | ||||
| 	fmt.Println(stats) | ||||
| 
 | ||||
| 	ioStats, err := db.LDB().GetProperty("leveldb.iostats") | ||||
| 	if err != nil { | ||||
| 		utils.Fatalf("Failed to read database iostats: %v", err) | ||||
| 	} | ||||
| 	fmt.Println(ioStats) | ||||
| 
 | ||||
| 	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses()) | ||||
| 	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads()) | ||||
| 
 | ||||
| @ -255,6 +262,12 @@ func importChain(ctx *cli.Context) error { | ||||
| 	} | ||||
| 	fmt.Println(stats) | ||||
| 
 | ||||
| 	ioStats, err = db.LDB().GetProperty("leveldb.iostats") | ||||
| 	if err != nil { | ||||
| 		utils.Fatalf("Failed to read database iostats: %v", err) | ||||
| 	} | ||||
| 	fmt.Println(ioStats) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -270,8 +270,8 @@ func (db *Dashboard) collectData() { | ||||
| 		prevNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count() | ||||
| 		prevProcessCPUTime = getProcessCPUTime() | ||||
| 		prevSystemCPUUsage = systemCPUUsage | ||||
| 		prevDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/input").(metrics.Meter).Count() | ||||
| 		prevDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/output").(metrics.Meter).Count() | ||||
| 		prevDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count() | ||||
| 		prevDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count() | ||||
| 
 | ||||
| 		frequency = float64(db.config.Refresh / time.Second) | ||||
| 		numCPU    = float64(runtime.NumCPU()) | ||||
| @ -289,8 +289,8 @@ func (db *Dashboard) collectData() { | ||||
| 				curNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count() | ||||
| 				curProcessCPUTime = getProcessCPUTime() | ||||
| 				curSystemCPUUsage = systemCPUUsage | ||||
| 				curDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/input").(metrics.Meter).Count() | ||||
| 				curDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/output").(metrics.Meter).Count() | ||||
| 				curDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count() | ||||
| 				curDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count() | ||||
| 
 | ||||
| 				deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress) | ||||
| 				deltaNetworkEgress  = float64(curNetworkEgress - prevNetworkEgress) | ||||
|  | ||||
| @ -37,15 +37,11 @@ type LDBDatabase struct { | ||||
| 	fn string      // filename for reporting
 | ||||
| 	db *leveldb.DB // LevelDB instance
 | ||||
| 
 | ||||
| 	getTimer       metrics.Timer // Timer for measuring the database get request counts and latencies
 | ||||
| 	putTimer       metrics.Timer // Timer for measuring the database put request counts and latencies
 | ||||
| 	delTimer       metrics.Timer // Timer for measuring the database delete request counts and latencies
 | ||||
| 	missMeter      metrics.Meter // Meter for measuring the missed database get requests
 | ||||
| 	readMeter      metrics.Meter // Meter for measuring the database get request data usage
 | ||||
| 	writeMeter     metrics.Meter // Meter for measuring the database put request data usage
 | ||||
| 	compTimeMeter  metrics.Meter // Meter for measuring the total time spent in database compaction
 | ||||
| 	compReadMeter  metrics.Meter // Meter for measuring the data read during compaction
 | ||||
| 	compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
 | ||||
| 	diskReadMeter  metrics.Meter // Meter for measuring the effective amount of data read
 | ||||
| 	diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
 | ||||
| 
 | ||||
| 	quitLock sync.Mutex      // Mutex protecting the quit channel access
 | ||||
| 	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
 | ||||
| @ -94,16 +90,9 @@ func (db *LDBDatabase) Path() string { | ||||
| 
 | ||||
| // Put puts the given key / value to the queue
 | ||||
| func (db *LDBDatabase) Put(key []byte, value []byte) error { | ||||
| 	// Measure the database put latency, if requested
 | ||||
| 	if db.putTimer != nil { | ||||
| 		defer db.putTimer.UpdateSince(time.Now()) | ||||
| 	} | ||||
| 	// Generate the data to write to disk, update the meter and write
 | ||||
| 	//value = rle.Compress(value)
 | ||||
| 
 | ||||
| 	if db.writeMeter != nil { | ||||
| 		db.writeMeter.Mark(int64(len(value))) | ||||
| 	} | ||||
| 	return db.db.Put(key, value, nil) | ||||
| } | ||||
| 
 | ||||
| @ -113,32 +102,17 @@ func (db *LDBDatabase) Has(key []byte) (bool, error) { | ||||
| 
 | ||||
| // Get returns the given key if it's present.
 | ||||
| func (db *LDBDatabase) Get(key []byte) ([]byte, error) { | ||||
| 	// Measure the database get latency, if requested
 | ||||
| 	if db.getTimer != nil { | ||||
| 		defer db.getTimer.UpdateSince(time.Now()) | ||||
| 	} | ||||
| 	// Retrieve the key and increment the miss counter if not found
 | ||||
| 	dat, err := db.db.Get(key, nil) | ||||
| 	if err != nil { | ||||
| 		if db.missMeter != nil { | ||||
| 			db.missMeter.Mark(1) | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// Otherwise update the actually retrieved amount of data
 | ||||
| 	if db.readMeter != nil { | ||||
| 		db.readMeter.Mark(int64(len(dat))) | ||||
| 	} | ||||
| 	return dat, nil | ||||
| 	//return rle.Decompress(dat)
 | ||||
| } | ||||
| 
 | ||||
| // Delete deletes the key from the queue and database
 | ||||
| func (db *LDBDatabase) Delete(key []byte) error { | ||||
| 	// Measure the database delete latency, if requested
 | ||||
| 	if db.delTimer != nil { | ||||
| 		defer db.delTimer.UpdateSince(time.Now()) | ||||
| 	} | ||||
| 	// Execute the actual operation
 | ||||
| 	return db.db.Delete(key, nil) | ||||
| } | ||||
| @ -178,15 +152,11 @@ func (db *LDBDatabase) Meter(prefix string) { | ||||
| 		return | ||||
| 	} | ||||
| 	// Initialize all the metrics collector at the requested prefix
 | ||||
| 	db.getTimer = metrics.NewRegisteredTimer(prefix+"user/gets", nil) | ||||
| 	db.putTimer = metrics.NewRegisteredTimer(prefix+"user/puts", nil) | ||||
| 	db.delTimer = metrics.NewRegisteredTimer(prefix+"user/dels", nil) | ||||
| 	db.missMeter = metrics.NewRegisteredMeter(prefix+"user/misses", nil) | ||||
| 	db.readMeter = metrics.NewRegisteredMeter(prefix+"user/reads", nil) | ||||
| 	db.writeMeter = metrics.NewRegisteredMeter(prefix+"user/writes", nil) | ||||
| 	db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil) | ||||
| 	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil) | ||||
| 	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil) | ||||
| 	db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil) | ||||
| 	db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil) | ||||
| 
 | ||||
| 	// Create a quit channel for the periodic collector and run it
 | ||||
| 	db.quitLock.Lock() | ||||
| @ -207,12 +177,17 @@ func (db *LDBDatabase) Meter(prefix string) { | ||||
| //      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
 | ||||
| //      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
 | ||||
| //      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
 | ||||
| //
 | ||||
| // This is how the iostats look like (currently):
 | ||||
| // Read(MB):3895.04860 Write(MB):3654.64712
 | ||||
| func (db *LDBDatabase) meter(refresh time.Duration) { | ||||
| 	// Create the counters to store current and previous values
 | ||||
| 	counters := make([][]float64, 2) | ||||
| 	// Create the counters to store current and previous compaction values
 | ||||
| 	compactions := make([][]float64, 2) | ||||
| 	for i := 0; i < 2; i++ { | ||||
| 		counters[i] = make([]float64, 3) | ||||
| 		compactions[i] = make([]float64, 3) | ||||
| 	} | ||||
| 	// Create storage for iostats.
 | ||||
| 	var iostats [2]float64 | ||||
| 	// Iterate ad infinitum and collect the stats
 | ||||
| 	for i := 1; ; i++ { | ||||
| 		// Retrieve the database stats
 | ||||
| @ -233,8 +208,8 @@ func (db *LDBDatabase) meter(refresh time.Duration) { | ||||
| 		lines = lines[3:] | ||||
| 
 | ||||
| 		// Iterate over all the table rows, and accumulate the entries
 | ||||
| 		for j := 0; j < len(counters[i%2]); j++ { | ||||
| 			counters[i%2][j] = 0 | ||||
| 		for j := 0; j < len(compactions[i%2]); j++ { | ||||
| 			compactions[i%2][j] = 0 | ||||
| 		} | ||||
| 		for _, line := range lines { | ||||
| 			parts := strings.Split(line, "|") | ||||
| @ -247,19 +222,60 @@ func (db *LDBDatabase) meter(refresh time.Duration) { | ||||
| 					db.log.Error("Compaction entry parsing failed", "err", err) | ||||
| 					return | ||||
| 				} | ||||
| 				counters[i%2][idx] += value | ||||
| 				compactions[i%2][idx] += value | ||||
| 			} | ||||
| 		} | ||||
| 		// Update all the requested meters
 | ||||
| 		if db.compTimeMeter != nil { | ||||
| 			db.compTimeMeter.Mark(int64((counters[i%2][0] - counters[(i-1)%2][0]) * 1000 * 1000 * 1000)) | ||||
| 			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000)) | ||||
| 		} | ||||
| 		if db.compReadMeter != nil { | ||||
| 			db.compReadMeter.Mark(int64((counters[i%2][1] - counters[(i-1)%2][1]) * 1024 * 1024)) | ||||
| 			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024)) | ||||
| 		} | ||||
| 		if db.compWriteMeter != nil { | ||||
| 			db.compWriteMeter.Mark(int64((counters[i%2][2] - counters[(i-1)%2][2]) * 1024 * 1024)) | ||||
| 			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024)) | ||||
| 		} | ||||
| 
 | ||||
| 		// Retrieve the database iostats.
 | ||||
| 		ioStats, err := db.db.GetProperty("leveldb.iostats") | ||||
| 		if err != nil { | ||||
| 			db.log.Error("Failed to read database iostats", "err", err) | ||||
| 			return | ||||
| 		} | ||||
| 		parts := strings.Split(ioStats, " ") | ||||
| 		if len(parts) < 2 { | ||||
| 			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats) | ||||
| 			return | ||||
| 		} | ||||
| 		r := strings.Split(parts[0], ":") | ||||
| 		if len(r) < 2 { | ||||
| 			db.log.Error("Bad syntax of read entry", "entry", parts[0]) | ||||
| 			return | ||||
| 		} | ||||
| 		read, err := strconv.ParseFloat(r[1], 64) | ||||
| 		if err != nil { | ||||
| 			db.log.Error("Read entry parsing failed", "err", err) | ||||
| 			return | ||||
| 		} | ||||
| 		w := strings.Split(parts[1], ":") | ||||
| 		if len(w) < 2 { | ||||
| 			db.log.Error("Bad syntax of write entry", "entry", parts[1]) | ||||
| 			return | ||||
| 		} | ||||
| 		write, err := strconv.ParseFloat(w[1], 64) | ||||
| 		if err != nil { | ||||
| 			db.log.Error("Write entry parsing failed", "err", err) | ||||
| 			return | ||||
| 		} | ||||
| 		if db.diskReadMeter != nil { | ||||
| 			db.diskReadMeter.Mark(int64((read - iostats[0]) * 1024 * 1024)) | ||||
| 		} | ||||
| 		if db.diskWriteMeter != nil { | ||||
| 			db.diskWriteMeter.Mark(int64((write - iostats[1]) * 1024 * 1024)) | ||||
| 		} | ||||
| 		iostats[0] = read | ||||
| 		iostats[1] = write | ||||
| 
 | ||||
| 		// Sleep a bit, then repeat the stats collection
 | ||||
| 		select { | ||||
| 		case errc := <-db.quitChan: | ||||
|  | ||||
							
								
								
									
										6
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/db.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										6
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/db.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -906,6 +906,8 @@ func (db *DB) GetSnapshot() (*Snapshot, error) { | ||||
| //		Returns the number of files at level 'n'.
 | ||||
| //	leveldb.stats
 | ||||
| //		Returns statistics of the underlying DB.
 | ||||
| //	leveldb.iostats
 | ||||
| //		Returns statistics of effective disk read and write.
 | ||||
| //	leveldb.writedelay
 | ||||
| //		Returns cumulative write delay caused by compaction.
 | ||||
| //	leveldb.sstables
 | ||||
| @ -959,6 +961,10 @@ func (db *DB) GetProperty(name string) (value string, err error) { | ||||
| 				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), | ||||
| 				float64(read)/1048576.0, float64(write)/1048576.0) | ||||
| 		} | ||||
| 	case p == "iostats": | ||||
| 		value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", | ||||
| 			float64(db.s.stor.reads())/1048576.0, | ||||
| 			float64(db.s.stor.writes())/1048576.0) | ||||
| 	case p == "writedelay": | ||||
| 		writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) | ||||
| 		value = fmt.Sprintf("DelayN:%d Delay:%s", writeDelayN, writeDelay) | ||||
|  | ||||
							
								
								
									
										2
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -88,7 +88,7 @@ type Iterator interface { | ||||
| 	// its contents may change on the next call to any 'seeks method'.
 | ||||
| 	Key() []byte | ||||
| 
 | ||||
| 	// Value returns the key of the current key/value pair, or nil if done.
 | ||||
| 	// Value returns the value of the current key/value pair, or nil if done.
 | ||||
| 	// The caller should not modify the contents of the returned slice, and
 | ||||
| 	// its contents may change on the next call to any 'seeks method'.
 | ||||
| 	Value() []byte | ||||
|  | ||||
							
								
								
									
										2
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -329,7 +329,7 @@ func (p *DB) Delete(key []byte) error { | ||||
| 
 | ||||
| 	h := p.nodeData[node+nHeight] | ||||
| 	for i, n := range p.prevNode[:h] { | ||||
| 		m := n + 4 + i | ||||
| 		m := n + nNext + i | ||||
| 		p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] | ||||
| 	} | ||||
| 
 | ||||
|  | ||||
							
								
								
									
										4
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/session.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/session.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -42,7 +42,7 @@ type session struct { | ||||
| 	stTempFileNum    int64 | ||||
| 	stSeqNum         uint64 // last mem compacted seq; need external synchronization
 | ||||
| 
 | ||||
| 	stor     storage.Storage | ||||
| 	stor     *iStorage | ||||
| 	storLock storage.Locker | ||||
| 	o        *cachedOptions | ||||
| 	icmp     *iComparer | ||||
| @ -68,7 +68,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { | ||||
| 		return | ||||
| 	} | ||||
| 	s = &session{ | ||||
| 		stor:     stor, | ||||
| 		stor:     newIStorage(stor), | ||||
| 		storLock: storLock, | ||||
| 		fileRef:  make(map[int64]int), | ||||
| 	} | ||||
|  | ||||
							
								
								
									
										63
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/storage.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										63
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/storage.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,63 @@ | ||||
| package leveldb | ||||
| 
 | ||||
| import ( | ||||
| 	"github.com/syndtr/goleveldb/leveldb/storage" | ||||
| 	"sync/atomic" | ||||
| ) | ||||
| 
 | ||||
| type iStorage struct { | ||||
| 	storage.Storage | ||||
| 	read  uint64 | ||||
| 	write uint64 | ||||
| } | ||||
| 
 | ||||
| func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { | ||||
| 	r, err := c.Storage.Open(fd) | ||||
| 	return &iStorageReader{r, c}, err | ||||
| } | ||||
| 
 | ||||
| func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { | ||||
| 	w, err := c.Storage.Create(fd) | ||||
| 	return &iStorageWriter{w, c}, err | ||||
| } | ||||
| 
 | ||||
| func (c *iStorage) reads() uint64 { | ||||
| 	return atomic.LoadUint64(&c.read) | ||||
| } | ||||
| 
 | ||||
| func (c *iStorage) writes() uint64 { | ||||
| 	return atomic.LoadUint64(&c.write) | ||||
| } | ||||
| 
 | ||||
| // newIStorage returns the given storage wrapped by iStorage.
 | ||||
| func newIStorage(s storage.Storage) *iStorage { | ||||
| 	return &iStorage{s, 0, 0} | ||||
| } | ||||
| 
 | ||||
| type iStorageReader struct { | ||||
| 	storage.Reader | ||||
| 	c *iStorage | ||||
| } | ||||
| 
 | ||||
| func (r *iStorageReader) Read(p []byte) (n int, err error) { | ||||
| 	n, err = r.Reader.Read(p) | ||||
| 	atomic.AddUint64(&r.c.read, uint64(n)) | ||||
| 	return n, err | ||||
| } | ||||
| 
 | ||||
| func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { | ||||
| 	n, err = r.Reader.ReadAt(p, off) | ||||
| 	atomic.AddUint64(&r.c.read, uint64(n)) | ||||
| 	return n, err | ||||
| } | ||||
| 
 | ||||
| type iStorageWriter struct { | ||||
| 	storage.Writer | ||||
| 	c *iStorage | ||||
| } | ||||
| 
 | ||||
| func (w *iStorageWriter) Write(p []byte) (n int, err error) { | ||||
| 	n, err = w.Writer.Write(p) | ||||
| 	atomic.AddUint64(&w.c.write, uint64(n)) | ||||
| 	return n, err | ||||
| } | ||||
							
								
								
									
										4
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -8,7 +8,6 @@ package storage | ||||
| 
 | ||||
| import ( | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| ) | ||||
| 
 | ||||
| type plan9FileLock struct { | ||||
| @ -48,8 +47,7 @@ func rename(oldpath, newpath string) error { | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	_, fname := filepath.Split(newpath) | ||||
| 	return os.Rename(oldpath, fname) | ||||
| 	return os.Rename(oldpath, newpath) | ||||
| } | ||||
| 
 | ||||
| func syncDir(name string) error { | ||||
|  | ||||
							
								
								
									
										2
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
									
									
									
										generated
									
									
										vendored
									
									
								
							| @ -19,7 +19,7 @@ var ( | ||||
| // Releaser is the interface that wraps the basic Release method.
 | ||||
| type Releaser interface { | ||||
| 	// Release releases associated resources. Release should always success
 | ||||
| 	// and can be called multipe times without causing error.
 | ||||
| 	// and can be called multiple times without causing error.
 | ||||
| 	Release() | ||||
| } | ||||
| 
 | ||||
|  | ||||
							
								
								
									
										58
									
								
								vendor/vendor.json
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										58
									
								
								vendor/vendor.json
									
									
									
									
										vendored
									
									
								
							| @ -406,76 +406,76 @@ | ||||
| 			"revisionTime": "2017-07-05T02:17:15Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "rpu5ZHjXlV13UKA7L1d5MTOyQwA=", | ||||
| 			"checksumSHA1": "3QsnhPTXGytTbW3uDvQLgSo9s9M=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb", | ||||
| 			"revision": "211f780988068502fe874c44dae530528ebd840f", | ||||
| 			"revisionTime": "2018-01-28T14:04:16Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/cache", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/comparer", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/errors", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/filter", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "8dXuAVIsbtaMiGGuHjzGR6Ny/5c=", | ||||
| 			"checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/iterator", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/journal", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "j+uaQ6DwJ50dkIdfMQu1TXdlQcY=", | ||||
| 			"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/memdb", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/opt", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "tQ2AqXXAEy9icbZI9dLVdZGvWMw=", | ||||
| 			"checksumSHA1": "QCSae2ub87f8awH+PKMpd8ZYOtg=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/storage", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/table", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "4zil8Gwg8VPkDn1YzlgCvtukJFU=", | ||||
| 			"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=", | ||||
| 			"path": "github.com/syndtr/goleveldb/leveldb/util", | ||||
| 			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199", | ||||
| 			"revisionTime": "2017-07-25T06:48:36Z" | ||||
| 			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5", | ||||
| 			"revisionTime": "2018-03-07T11:33:52Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=", | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user