cmd, ethdb, vendor: integrate leveldb iostats (#16277)

* cmd, dashboard, ethdb, vendor: send iostats to dashboard
* ethdb: change names
* ethdb: handle parsing errors
* ethdb: handle iostats syntax error
* ethdb: r -> w

commit 39c16c8a1e (parent 4871e25f5f)
cmd/geth/chaincmd.go

@@ -225,6 +225,13 @@ func importChain(ctx *cli.Context) error {
 		utils.Fatalf("Failed to read database stats: %v", err)
 	}
 	fmt.Println(stats)
+
+	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
+	if err != nil {
+		utils.Fatalf("Failed to read database iostats: %v", err)
+	}
+	fmt.Println(ioStats)
+
 	fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
 	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
 
@@ -255,6 +262,12 @@ func importChain(ctx *cli.Context) error {
 	}
 	fmt.Println(stats)
 
+	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
+	if err != nil {
+		utils.Fatalf("Failed to read database iostats: %v", err)
+	}
+	fmt.Println(ioStats)
+
 	return nil
 }
 
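The two hunks above print the new property right next to the existing compaction stats, both before and after the import. As a rough sketch of the same calls from standalone code (the chaindata path, cache, and handle values are illustrative assumptions; NewLDBDatabase is the ethdb constructor of this era):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	// Hypothetical chaindata location; point this at a real datadir.
	db, err := ethdb.NewLDBDatabase("/tmp/chaindata", 128, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The same two properties the import command now prints.
	for _, prop := range []string{"leveldb.stats", "leveldb.iostats"} {
		value, err := db.LDB().GetProperty(prop)
		if err != nil {
			log.Fatalf("Failed to read %s: %v", prop, err)
		}
		fmt.Println(value)
	}
}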
dashboard/dashboard.go

@@ -270,8 +270,8 @@ func (db *Dashboard) collectData() {
 		prevNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
 		prevProcessCPUTime = getProcessCPUTime()
 		prevSystemCPUUsage = systemCPUUsage
-		prevDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/input").(metrics.Meter).Count()
-		prevDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/output").(metrics.Meter).Count()
+		prevDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count()
+		prevDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count()
 
 		frequency = float64(db.config.Refresh / time.Second)
 		numCPU    = float64(runtime.NumCPU())
@@ -289,8 +289,8 @@ func (db *Dashboard) collectData() {
 		curNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
 		curProcessCPUTime = getProcessCPUTime()
 		curSystemCPUUsage = systemCPUUsage
-		curDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/input").(metrics.Meter).Count()
-		curDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/output").(metrics.Meter).Count()
+		curDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count()
+		curDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count()
 
 		deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
 		deltaNetworkEgress  = float64(curNetworkEgress - prevNetworkEgress)
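Both hunks swap the dashboard's disk gauges from compaction meters to the new disk meters; the surrounding code then differences consecutive Count() samples to get a per-refresh rate. A minimal sketch of that arithmetic (the names are mine, not the dashboard's):

package main

import (
	"fmt"
	"time"
)

// rate converts two cumulative counter samples taken `refresh` apart
// into a per-second figure, mirroring the prev/cur delta pattern above.
func rate(prev, cur int64, refresh time.Duration) float64 {
	return float64(cur-prev) / refresh.Seconds()
}

func main() {
	// E.g. the disk/read meter advanced by 1 MiB between two 5s samples.
	fmt.Printf("%.1f bytes/s\n", rate(0, 1<<20, 5*time.Second))
}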
ethdb/database.go

@@ -37,15 +37,11 @@ type LDBDatabase struct {
 	fn string      // filename for reporting
 	db *leveldb.DB // LevelDB instance
 
-	getTimer       metrics.Timer // Timer for measuring the database get request counts and latencies
-	putTimer       metrics.Timer // Timer for measuring the database put request counts and latencies
-	delTimer       metrics.Timer // Timer for measuring the database delete request counts and latencies
-	missMeter      metrics.Meter // Meter for measuring the missed database get requests
-	readMeter      metrics.Meter // Meter for measuring the database get request data usage
-	writeMeter     metrics.Meter // Meter for measuring the database put request data usage
 	compTimeMeter  metrics.Meter // Meter for measuring the total time spent in database compaction
 	compReadMeter  metrics.Meter // Meter for measuring the data read during compaction
 	compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
+	diskReadMeter  metrics.Meter // Meter for measuring the effective amount of data read
+	diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
 
 	quitLock sync.Mutex      // Mutex protecting the quit channel access
 	quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
@@ -94,16 +90,9 @@ func (db *LDBDatabase) Path() string {
 
 // Put puts the given key / value to the queue
 func (db *LDBDatabase) Put(key []byte, value []byte) error {
-	// Measure the database put latency, if requested
-	if db.putTimer != nil {
-		defer db.putTimer.UpdateSince(time.Now())
-	}
 	// Generate the data to write to disk, update the meter and write
 	//value = rle.Compress(value)
 
-	if db.writeMeter != nil {
-		db.writeMeter.Mark(int64(len(value)))
-	}
 	return db.db.Put(key, value, nil)
 }
 
@@ -113,32 +102,17 @@ func (db *LDBDatabase) Has(key []byte) (bool, error) {
 
 // Get returns the given key if it's present.
 func (db *LDBDatabase) Get(key []byte) ([]byte, error) {
-	// Measure the database get latency, if requested
-	if db.getTimer != nil {
-		defer db.getTimer.UpdateSince(time.Now())
-	}
 	// Retrieve the key and increment the miss counter if not found
 	dat, err := db.db.Get(key, nil)
 	if err != nil {
-		if db.missMeter != nil {
-			db.missMeter.Mark(1)
-		}
 		return nil, err
 	}
-	// Otherwise update the actually retrieved amount of data
-	if db.readMeter != nil {
-		db.readMeter.Mark(int64(len(dat)))
-	}
 	return dat, nil
 	//return rle.Decompress(dat)
 }
 
 // Delete deletes the key from the queue and database
 func (db *LDBDatabase) Delete(key []byte) error {
-	// Measure the database delete latency, if requested
-	if db.delTimer != nil {
-		defer db.delTimer.UpdateSince(time.Now())
-	}
 	// Execute the actual operation
 	return db.db.Delete(key, nil)
 }
@@ -178,15 +152,11 @@ func (db *LDBDatabase) Meter(prefix string) {
 		return
 	}
 	// Initialize all the metrics collector at the requested prefix
-	db.getTimer = metrics.NewRegisteredTimer(prefix+"user/gets", nil)
-	db.putTimer = metrics.NewRegisteredTimer(prefix+"user/puts", nil)
-	db.delTimer = metrics.NewRegisteredTimer(prefix+"user/dels", nil)
-	db.missMeter = metrics.NewRegisteredMeter(prefix+"user/misses", nil)
-	db.readMeter = metrics.NewRegisteredMeter(prefix+"user/reads", nil)
-	db.writeMeter = metrics.NewRegisteredMeter(prefix+"user/writes", nil)
 	db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
 	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
 	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
+	db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
+	db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
 
 	// Create a quit channel for the periodic collector and run it
 	db.quitLock.Lock()
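This registration is what the dashboard hunks above rely on: geth calls Meter("eth/db/chaindata/"), so the full meter names become eth/db/chaindata/disk/read and .../disk/write. A sketch of the producer/consumer round trip, using the rcrowley/go-metrics API that geth's vendored metrics package mirrors (the marked byte count is made up):

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	prefix := "eth/db/chaindata/"

	// Producer side: what LDBDatabase.Meter does for the new disk meters.
	reads := metrics.NewRegisteredMeter(prefix+"disk/read", nil)
	reads.Mark(4096) // pretend 4 KiB were read from disk

	// Consumer side: how the dashboard looks the same meter up by name.
	m := metrics.DefaultRegistry.Get(prefix + "disk/read").(metrics.Meter)
	fmt.Println(m.Count()) // 4096
}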
@@ -207,12 +177,17 @@ func (db *LDBDatabase) Meter(prefix string) {
 //      1   |         85 |     109.27913 |      28.09293 |     213.92493 |     214.26294
 //      2   |        523 |    1000.37159 |       7.26059 |      66.86342 |      66.77884
 //      3   |        570 |    1113.18458 |       0.00000 |       0.00000 |       0.00000
+//
+// This is how the iostats look like (currently):
+// Read(MB):3895.04860 Write(MB):3654.64712
 func (db *LDBDatabase) meter(refresh time.Duration) {
-	// Create the counters to store current and previous values
-	counters := make([][]float64, 2)
+	// Create the counters to store current and previous compaction values
+	compactions := make([][]float64, 2)
 	for i := 0; i < 2; i++ {
-		counters[i] = make([]float64, 3)
+		compactions[i] = make([]float64, 3)
 	}
+	// Create storage for iostats.
+	var iostats [2]float64
 	// Iterate ad infinitum and collect the stats
 	for i := 1; ; i++ {
 		// Retrieve the database stats
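The renamed compactions[i%2] slices implement a two-slot ring: slot i%2 holds the current totals and slot (i-1)%2 the previous ones, so each pass can emit a delta without keeping any history. A stripped-down sketch of the pattern, with invented sample values:

package main

import "fmt"

func main() {
	// Two snapshot slots: i%2 is "current", (i-1)%2 is "previous".
	var snaps [2]float64
	samples := []float64{10, 25, 45} // cumulative totals, e.g. MB compacted

	for i := 1; i <= len(samples); i++ {
		snaps[i%2] = samples[i-1]
		fmt.Printf("pass %d: delta %.0f\n", i, snaps[i%2]-snaps[(i-1)%2])
	}
	// Prints deltas 10, 15, 20.
}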
@@ -233,8 +208,8 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 		lines = lines[3:]
 
 		// Iterate over all the table rows, and accumulate the entries
-		for j := 0; j < len(counters[i%2]); j++ {
-			counters[i%2][j] = 0
+		for j := 0; j < len(compactions[i%2]); j++ {
+			compactions[i%2][j] = 0
 		}
 		for _, line := range lines {
 			parts := strings.Split(line, "|")
@@ -247,19 +222,60 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 					db.log.Error("Compaction entry parsing failed", "err", err)
 					return
 				}
-				counters[i%2][idx] += value
+				compactions[i%2][idx] += value
 			}
 		}
 		// Update all the requested meters
 		if db.compTimeMeter != nil {
-			db.compTimeMeter.Mark(int64((counters[i%2][0] - counters[(i-1)%2][0]) * 1000 * 1000 * 1000))
+			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
 		}
 		if db.compReadMeter != nil {
-			db.compReadMeter.Mark(int64((counters[i%2][1] - counters[(i-1)%2][1]) * 1024 * 1024))
+			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
 		}
 		if db.compWriteMeter != nil {
-			db.compWriteMeter.Mark(int64((counters[i%2][2] - counters[(i-1)%2][2]) * 1024 * 1024))
+			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
 		}
+
+		// Retrieve the database iostats.
+		ioStats, err := db.db.GetProperty("leveldb.iostats")
+		if err != nil {
+			db.log.Error("Failed to read database iostats", "err", err)
+			return
+		}
+		parts := strings.Split(ioStats, " ")
+		if len(parts) < 2 {
+			db.log.Error("Bad syntax of ioStats", "ioStats", ioStats)
+			return
+		}
+		r := strings.Split(parts[0], ":")
+		if len(r) < 2 {
+			db.log.Error("Bad syntax of read entry", "entry", parts[0])
+			return
+		}
+		read, err := strconv.ParseFloat(r[1], 64)
+		if err != nil {
+			db.log.Error("Read entry parsing failed", "err", err)
+			return
+		}
+		w := strings.Split(parts[1], ":")
+		if len(w) < 2 {
+			db.log.Error("Bad syntax of write entry", "entry", parts[1])
+			return
+		}
+		write, err := strconv.ParseFloat(w[1], 64)
+		if err != nil {
+			db.log.Error("Write entry parsing failed", "err", err)
+			return
+		}
+		if db.diskReadMeter != nil {
+			db.diskReadMeter.Mark(int64((read - iostats[0]) * 1024 * 1024))
+		}
+		if db.diskWriteMeter != nil {
+			db.diskWriteMeter.Mark(int64((write - iostats[1]) * 1024 * 1024))
+		}
+		iostats[0] = read
+		iostats[1] = write
+
 		// Sleep a bit, then repeat the stats collection
 		select {
 		case errc := <-db.quitChan:
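The heart of this hunk is the string parsing. For clarity, here is the same logic factored into a standalone helper; the function name and error values are my own, while the "Read(MB):x Write(MB):y" wire format is the one GetProperty emits below:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseIOStats extracts cumulative read/write megabytes from a string
// shaped like "Read(MB):3895.04860 Write(MB):3654.64712".
func parseIOStats(s string) (read, write float64, err error) {
	parts := strings.Split(s, " ")
	if len(parts) < 2 {
		return 0, 0, fmt.Errorf("bad iostats syntax: %q", s)
	}
	for i, dst := range []*float64{&read, &write} {
		kv := strings.Split(parts[i], ":")
		if len(kv) < 2 {
			return 0, 0, fmt.Errorf("bad iostats entry: %q", parts[i])
		}
		if *dst, err = strconv.ParseFloat(kv[1], 64); err != nil {
			return 0, 0, err
		}
	}
	return read, write, nil
}

func main() {
	r, w, err := parseIOStats("Read(MB):3895.04860 Write(MB):3654.64712")
	fmt.Println(r, w, err) // 3895.0486 3654.64712 <nil>
}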
vendor/github.com/syndtr/goleveldb/leveldb/db.go (6 changes; generated, vendored)
@@ -906,6 +906,8 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
 //	Returns the number of files at level 'n'.
 // leveldb.stats
 //	Returns statistics of the underlying DB.
+// leveldb.iostats
+//	Returns statistics of effective disk read and write.
 // leveldb.writedelay
 //	Returns cumulative write delay caused by compaction.
 // leveldb.sstables
@@ -959,6 +961,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
 				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
 				float64(read)/1048576.0, float64(write)/1048576.0)
 		}
+	case p == "iostats":
+		value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f",
+			float64(db.s.stor.reads())/1048576.0,
+			float64(db.s.stor.writes())/1048576.0)
 	case p == "writedelay":
 		writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay))
 		value = fmt.Sprintf("DelayN:%d Delay:%s", writeDelayN, writeDelay)
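At the goleveldb layer the new case makes iostats queryable like any other property. A minimal sketch against the public API (the database path is illustrative, and the printed numbers depend on actual I/O):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/iostats-demo", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value"), nil); err != nil {
		log.Fatal(err)
	}
	stats, err := db.GetProperty("leveldb.iostats")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats) // e.g. Read(MB):0.00000 Write(MB):0.00002
}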
vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go (2 changes; generated, vendored)
@@ -88,7 +88,7 @@ type Iterator interface {
 	// its contents may change on the next call to any 'seeks method'.
 	Key() []byte
 
-	// Value returns the key of the current key/value pair, or nil if done.
+	// Value returns the value of the current key/value pair, or nil if done.
 	// The caller should not modify the contents of the returned slice, and
 	// its contents may change on the next call to any 'seeks method'.
 	Value() []byte
vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go (2 changes; generated, vendored)
@@ -329,7 +329,7 @@ func (p *DB) Delete(key []byte) error {
 
 	h := p.nodeData[node+nHeight]
 	for i, n := range p.prevNode[:h] {
-		m := n + 4 + i
+		m := n + nNext + i
 		p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
 	}
 
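This fix replaces a magic 4 with the nNext constant it was standing in for: memdb lays each skiplist node out as a flat run of ints where, per the package's own constants, offsets 0-3 hold the key/value arena offset, key length, value length, and tower height, and the per-level forward pointers start at offset 4. A toy illustration of that layout (the constant names mirror memdb's; the node contents are invented):

package main

import "fmt"

// Offsets into a memdb skiplist node, as named in goleveldb's memdb.
const (
	nKV     = iota // 0: offset of the key/value data in the arena
	nKey           // 1: key length
	nVal           // 2: value length
	nHeight        // 3: tower height
	nNext          // 4: first forward pointer; one more per level follows
)

func main() {
	// Hypothetical height-2 node: data at arena offset 100, 3-byte key,
	// 5-byte value, forward pointers to nodes 7 and 9.
	node := []int{100, 3, 5, 2, 7, 9}
	for lvl := 0; lvl < node[nHeight]; lvl++ {
		fmt.Printf("level %d next -> node %d\n", lvl, node[nNext+lvl])
	}
}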
vendor/github.com/syndtr/goleveldb/leveldb/session.go (4 changes; generated, vendored)
@@ -42,7 +42,7 @@ type session struct {
 	stTempFileNum int64
 	stSeqNum      uint64 // last mem compacted seq; need external synchronization
 
-	stor     storage.Storage
+	stor     *iStorage
 	storLock storage.Locker
 	o        *cachedOptions
 	icmp     *iComparer
@@ -68,7 +68,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
 		return
 	}
 	s = &session{
-		stor:     stor,
+		stor:     newIStorage(stor),
 		storLock: storLock,
 		fileRef:  make(map[int64]int),
 	}
vendor/github.com/syndtr/goleveldb/leveldb/storage.go (new file, 63 lines; generated, vendored)
@@ -0,0 +1,63 @@
+package leveldb
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"sync/atomic"
+)
+
+type iStorage struct {
+	storage.Storage
+	read  uint64
+	write uint64
+}
+
+func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) {
+	r, err := c.Storage.Open(fd)
+	return &iStorageReader{r, c}, err
+}
+
+func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) {
+	w, err := c.Storage.Create(fd)
+	return &iStorageWriter{w, c}, err
+}
+
+func (c *iStorage) reads() uint64 {
+	return atomic.LoadUint64(&c.read)
+}
+
+func (c *iStorage) writes() uint64 {
+	return atomic.LoadUint64(&c.write)
+}
+
+// newIStorage returns the given storage wrapped by iStorage.
+func newIStorage(s storage.Storage) *iStorage {
+	return &iStorage{s, 0, 0}
+}
+
+type iStorageReader struct {
+	storage.Reader
+	c *iStorage
+}
+
+func (r *iStorageReader) Read(p []byte) (n int, err error) {
+	n, err = r.Reader.Read(p)
+	atomic.AddUint64(&r.c.read, uint64(n))
+	return n, err
+}
+
+func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) {
+	n, err = r.Reader.ReadAt(p, off)
+	atomic.AddUint64(&r.c.read, uint64(n))
+	return n, err
+}
+
+type iStorageWriter struct {
+	storage.Writer
+	c *iStorage
+}
+
+func (w *iStorageWriter) Write(p []byte) (n int, err error) {
+	n, err = w.Writer.Write(p)
+	atomic.AddUint64(&w.c.write, uint64(n))
+	return n, err
+}
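The new file relies on interface embedding: iStorage is a storage.Storage whose Open and Create intercept only enough to hand back byte-counting readers and writers. Since those types are unexported, here is the same pattern demonstrated on a plain io.Reader (everything in this sketch is my own illustration):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"sync/atomic"
)

// countingReader mirrors iStorageReader: delegate the call, then count bytes.
type countingReader struct {
	io.Reader
	n uint64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.Reader.Read(p)
	atomic.AddUint64(&c.n, uint64(n))
	return n, err
}

func main() {
	cr := &countingReader{Reader: strings.NewReader("hello leveldb")}
	if _, err := io.Copy(ioutil.Discard, cr); err != nil {
		fmt.Println(err)
	}
	fmt.Println(atomic.LoadUint64(&cr.n), "bytes read") // 13 bytes read
}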
vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go (4 changes; generated, vendored)
@@ -8,7 +8,6 @@ package storage
 
 import (
 	"os"
-	"path/filepath"
 )
 
 type plan9FileLock struct {
@@ -48,8 +47,7 @@ func rename(oldpath, newpath string) error {
 		}
 	}
 
-	_, fname := filepath.Split(newpath)
-	return os.Rename(oldpath, fname)
+	return os.Rename(oldpath, newpath)
 }
 
 func syncDir(name string) error {
vendor/github.com/syndtr/goleveldb/leveldb/util/util.go (2 changes; generated, vendored)
@@ -19,7 +19,7 @@ var (
 // Releaser is the interface that wraps the basic Release method.
 type Releaser interface {
 	// Release releases associated resources. Release should always success
-	// and can be called multipe times without causing error.
+	// and can be called multiple times without causing error.
 	Release()
 }
 
vendor/vendor.json (58 changes; vendored)
@@ -406,76 +406,76 @@
 			"revisionTime": "2017-07-05T02:17:15Z"
 		},
 		{
-			"checksumSHA1": "rpu5ZHjXlV13UKA7L1d5MTOyQwA=",
+			"checksumSHA1": "3QsnhPTXGytTbW3uDvQLgSo9s9M=",
 			"path": "github.com/syndtr/goleveldb/leveldb",
-			"revision": "211f780988068502fe874c44dae530528ebd840f",
-			"revisionTime": "2018-01-28T14:04:16Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
 			"path": "github.com/syndtr/goleveldb/leveldb/cache",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
 			"path": "github.com/syndtr/goleveldb/leveldb/comparer",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
 			"path": "github.com/syndtr/goleveldb/leveldb/errors",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
 			"path": "github.com/syndtr/goleveldb/leveldb/filter",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
-			"checksumSHA1": "8dXuAVIsbtaMiGGuHjzGR6Ny/5c=",
+			"checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
 			"path": "github.com/syndtr/goleveldb/leveldb/iterator",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
 			"path": "github.com/syndtr/goleveldb/leveldb/journal",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
-			"checksumSHA1": "j+uaQ6DwJ50dkIdfMQu1TXdlQcY=",
+			"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
 			"path": "github.com/syndtr/goleveldb/leveldb/memdb",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
 			"path": "github.com/syndtr/goleveldb/leveldb/opt",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
-			"checksumSHA1": "tQ2AqXXAEy9icbZI9dLVdZGvWMw=",
+			"checksumSHA1": "QCSae2ub87f8awH+PKMpd8ZYOtg=",
 			"path": "github.com/syndtr/goleveldb/leveldb/storage",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
 			"path": "github.com/syndtr/goleveldb/leveldb/table",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
-			"checksumSHA1": "4zil8Gwg8VPkDn1YzlgCvtukJFU=",
+			"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
 			"path": "github.com/syndtr/goleveldb/leveldb/util",
-			"revision": "b89cc31ef7977104127d34c1bd31ebd1a9db2199",
-			"revisionTime": "2017-07-25T06:48:36Z"
+			"revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
+			"revisionTime": "2018-03-07T11:33:52Z"
 		},
 		{
 			"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",