vendor, ethdb: resume write operation asap (#17144)

* vendor: update leveldb

* ethdb: remove useless warning log
gary rong 2018-07-12 16:07:51 +08:00 committed by Péter Szilágyi
parent a9835c1816
commit e8824f6e74
6 changed files with 276 additions and 173 deletions

ethdb/database.go

@@ -34,9 +34,7 @@ import (
 )
 
 const (
-	writeDelayNThreshold       = 200
-	writeDelayThreshold        = 350 * time.Millisecond
-	writeDelayWarningThrottler = 1 * time.Minute
+	writePauseWarningThrottler = 1 * time.Minute
 )
 
 var OpenFileLimit = 64
@@ -206,8 +204,6 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 	// Create storage and warning log tracer for write delay.
 	var (
 		delaystats      [2]int64
-		lastWriteDelay  time.Time
-		lastWriteDelayN time.Time
 		lastWritePaused time.Time
 	)
@@ -293,36 +289,17 @@ func (db *LDBDatabase) meter(refresh time.Duration) {
 		}
 		if db.writeDelayNMeter != nil {
 			db.writeDelayNMeter.Mark(delayN - delaystats[0])
-			// If the write delay number been collected in the last minute exceeds the predefined threshold,
-			// print a warning log here.
-			// If a warning that db performance is laggy has been displayed,
-			// any subsequent warnings will be withhold for 1 minute to don't overwhelm the user.
-			if int(db.writeDelayNMeter.Rate1()) > writeDelayNThreshold &&
-				time.Now().After(lastWriteDelayN.Add(writeDelayWarningThrottler)) {
-				db.log.Warn("Write delay number exceeds the threshold (200 per second) in the last minute")
-				lastWriteDelayN = time.Now()
-			}
 		}
 		if db.writeDelayMeter != nil {
 			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
-			// If the write delay duration been collected in the last minute exceeds the predefined threshold,
-			// print a warning log here.
-			// If a warning that db performance is laggy has been displayed,
-			// any subsequent warnings will be withhold for 1 minute to don't overwhelm the user.
-			if int64(db.writeDelayMeter.Rate1()) > writeDelayThreshold.Nanoseconds() &&
-				time.Now().After(lastWriteDelay.Add(writeDelayWarningThrottler)) {
-				db.log.Warn("Write delay duration exceeds the threshold (35% of the time) in the last minute")
-				lastWriteDelay = time.Now()
-			}
 		}
 		// If a warning that db is performing compaction has been displayed, any subsequent
 		// warnings will be withheld for one minute not to overwhelm the user.
 		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
-			time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) {
+			time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
 			db.log.Warn("Database compacting, degraded performance")
 			lastWritePaused = time.Now()
 		}
 		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
 
 		// Retrieve the database iostats.
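The ethdb change keeps a single warning, "Database compacting, degraded performance", and throttles it to at most one emission per writePauseWarningThrottler (one minute). A minimal standalone sketch of that throttling pattern, with made-up names rather than code from the repository:

package main

import (
	"log"
	"time"
)

// warnThrottled logs msg at most once per interval, tracking the last
// emission in *last, the same way meter() uses lastWritePaused above.
func warnThrottled(last *time.Time, interval time.Duration, msg string) {
	if time.Now().After(last.Add(interval)) {
		log.Println("WARN:", msg)
		*last = time.Now()
	}
}

func main() {
	var lastWritePaused time.Time
	for i := 0; i < 3; i++ {
		// Only the first call inside the one-minute window actually logs.
		warnThrottled(&lastWritePaused, time.Minute, "Database compacting, degraded performance")
		time.Sleep(10 * time.Millisecond)
	}
}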

vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go

@@ -640,6 +640,16 @@ func (db *DB) tableNeedCompaction() bool {
 	return v.needCompaction()
 }
 
+// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted.
+func (db *DB) resumeWrite() bool {
+	v := db.s.version()
+	defer v.release()
+	if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
+		return true
+	}
+	return false
+}
+
 func (db *DB) pauseCompaction(ch chan<- struct{}) {
 	select {
 	case ch <- struct{}{}:
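resumeWrite compares the current number of level-0 tables against the write-pause trigger, so a paused writer can be released as soon as compaction brings the count back under the limit rather than when the whole compaction finishes. A short sketch of where that trigger comes from on the caller side, assuming goleveldb's opt.Options; the path and values are illustrative, not part of this commit:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// WriteL0PauseTrigger is the level-0 table count at which goleveldb pauses
	// writes; resumeWrite() reports true again once tLen(0) falls below it.
	o := &opt.Options{
		WriteL0SlowdownTrigger: 8,  // start throttling writes here
		WriteL0PauseTrigger:    12, // stop writes entirely here
	}
	db, err := leveldb.OpenFile("/tmp/resume-demo-db", o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}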
@@ -653,6 +663,7 @@ type cCmd interface {
 }
 
 type cAuto struct {
+	// Note for table compaction, an empty ackC represents it's a compaction waiting command.
 	ackC chan<- error
 }
 
@@ -765,8 +776,10 @@ func (db *DB) mCompaction() {
 }
 
 func (db *DB) tCompaction() {
-	var x cCmd
-	var ackQ []cCmd
+	var (
+		x           cCmd
+		ackQ, waitQ []cCmd
+	)
 
 	defer func() {
 		if x := recover(); x != nil {
@@ -778,6 +791,10 @@ func (db *DB) tCompaction() {
 			ackQ[i].ack(ErrClosed)
 			ackQ[i] = nil
 		}
+		for i := range waitQ {
+			waitQ[i].ack(ErrClosed)
+			waitQ[i] = nil
+		}
 		if x != nil {
 			x.ack(ErrClosed)
 		}
@@ -795,12 +812,25 @@ func (db *DB) tCompaction() {
 				return
 			default:
 			}
+			// Resume write operation as soon as possible.
+			if len(waitQ) > 0 && db.resumeWrite() {
+				for i := range waitQ {
+					waitQ[i].ack(nil)
+					waitQ[i] = nil
+				}
+				waitQ = waitQ[:0]
+			}
 		} else {
 			for i := range ackQ {
 				ackQ[i].ack(nil)
 				ackQ[i] = nil
 			}
 			ackQ = ackQ[:0]
+			for i := range waitQ {
+				waitQ[i].ack(nil)
+				waitQ[i] = nil
+			}
+			waitQ = waitQ[:0]
 			select {
 			case x = <-db.tcompCmdC:
 			case ch := <-db.tcompPauseC:
@@ -813,7 +843,11 @@ func (db *DB) tCompaction() {
 		if x != nil {
 			switch cmd := x.(type) {
 			case cAuto:
-				ackQ = append(ackQ, x)
+				if cmd.ackC != nil {
+					waitQ = append(waitQ, x)
+				} else {
+					ackQ = append(ackQ, x)
+				}
 			case cRange:
 				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
 			default:
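The net effect: an automatic table compaction triggered on behalf of a paused write (a cAuto carrying an ack channel) is parked on waitQ and acknowledged as soon as resumeWrite() reports that level 0 has drained below the pause trigger, instead of only after the whole compaction round completes. A self-contained toy model of that pattern, not the goleveldb code, with names made up for illustration:

package main

import "fmt"

// waiter is acked once the condition that blocked it clears.
type waiter struct{ ack chan error }

// drainIfReady acks and clears all parked waiters when ready() is true,
// mirroring how tCompaction releases waitQ once resumeWrite() returns true.
func drainIfReady(waitQ []*waiter, ready func() bool) []*waiter {
	if len(waitQ) == 0 || !ready() {
		return waitQ
	}
	for _, w := range waitQ {
		w.ack <- nil
	}
	return waitQ[:0]
}

func main() {
	level0Tables := 14
	pauseTrigger := 12
	w := &waiter{ack: make(chan error, 1)}
	waitQ := []*waiter{w}

	// Simulate compaction steps; the waiter resumes as soon as the level-0
	// count drops below the trigger, not when compaction is fully done.
	for level0Tables > 0 {
		level0Tables--
		waitQ = drainIfReady(waitQ, func() bool { return level0Tables < pauseTrigger })
		if len(waitQ) == 0 {
			fmt.Println("write resumed at level-0 count", level0Tables)
			break
		}
	}
	<-w.ack
}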

vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go

@@ -9,10 +9,12 @@ package storage
 import (
 	"errors"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -42,6 +44,30 @@ func (lock *fileStorageLock) Unlock() {
 	}
 }
 
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func writeFileSynced(filename string, data []byte, perm os.FileMode) error {
+	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err1 := f.Sync(); err == nil {
+		err = err1
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
 const logSizeThreshold = 1024 * 1024 // 1 MiB
 
 // fileStorage is a file-system backed storage.
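int64Slice only exists so the numbers parsed out of pending CURRENT.[0-9]+ files can be sorted; GetMeta below orders them descending to try the newest pending file first. A tiny standalone illustration of that ordering (not the package code; modern Go could use sort.Slice, but goleveldb keeps compatibility with older toolchains):

package main

import (
	"fmt"
	"sort"
)

type int64Slice []int64

func (p int64Slice) Len() int           { return len(p) }
func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func main() {
	// Numbers parsed from names like CURRENT.7, CURRENT.12, CURRENT.9.
	nums := int64Slice{7, 12, 9}
	sort.Sort(sort.Reverse(nums))
	for _, n := range nums {
		fmt.Printf("CURRENT.%d\n", n) // tried in this order: 12, 9, 7
	}
}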
@@ -60,7 +86,7 @@ type fileStorage struct {
 	day  int
 }
 
-// OpenFile returns a new filesytem-backed storage implementation with the given
+// OpenFile returns a new filesystem-backed storage implementation with the given
 // path. This also acquire a file lock, so any subsequent attempt to open the
 // same path will fail.
 //
@@ -189,7 +215,8 @@ func (fs *fileStorage) doLog(t time.Time, str string) {
 	// write
 	fs.buf = append(fs.buf, []byte(str)...)
 	fs.buf = append(fs.buf, '\n')
-	fs.logw.Write(fs.buf)
+	n, _ := fs.logw.Write(fs.buf)
+	fs.logSize += int64(n)
 }
 
 func (fs *fileStorage) Log(str string) {
@@ -210,7 +237,46 @@ func (fs *fileStorage) log(str string) {
 	}
 }
 
-func (fs *fileStorage) SetMeta(fd FileDesc) (err error) {
+func (fs *fileStorage) setMeta(fd FileDesc) error {
+	content := fsGenName(fd) + "\n"
+	// Check and backup old CURRENT file.
+	currentPath := filepath.Join(fs.path, "CURRENT")
+	if _, err := os.Stat(currentPath); err == nil {
+		b, err := ioutil.ReadFile(currentPath)
+		if err != nil {
+			fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+			return err
+		}
+		if string(b) == content {
+			// Content not changed, do nothing.
+			return nil
+		}
+		if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil {
+			fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+			return err
+		}
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+	path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
+	if err := writeFileSynced(path, []byte(content), 0644); err != nil {
+		fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err))
+		return err
+	}
+	// Replace CURRENT file.
+	if err := rename(path, currentPath); err != nil {
+		fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err))
+		return err
+	}
+	// Sync root directory.
+	if err := syncDir(fs.path); err != nil {
+		fs.log(fmt.Sprintf("syncDir: %v", err))
+		return err
+	}
+	return nil
+}
+
+func (fs *fileStorage) SetMeta(fd FileDesc) error {
 	if !FileDescOk(fd) {
 		return ErrInvalidFile
 	}
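setMeta is what makes the new recovery path in GetMeta possible: the previous CURRENT is copied to CURRENT.bak before the freshly synced CURRENT.N is renamed into place and the directory is fsynced. Through the public storage API this is just SetMeta/GetMeta; a hedged usage sketch against goleveldb's storage package, with the temp directory and manifest number being illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	dir, err := ioutil.TempDir("", "setmeta-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// File-system backed storage rooted at dir (read-write).
	stor, err := storage.OpenFile(dir, false)
	if err != nil {
		log.Fatal(err)
	}
	defer stor.Close()

	// Create MANIFEST-000002 so CURRENT has an existing target, then point
	// CURRENT at it. On disk this writes a synced CURRENT.2, backs up any
	// previous CURRENT to CURRENT.bak and renames CURRENT.2 into place.
	fd := storage.FileDesc{Type: storage.TypeManifest, Num: 2}
	w, err := stor.Create(fd)
	if err != nil {
		log.Fatal(err)
	}
	w.Close()
	if err := stor.SetMeta(fd); err != nil {
		log.Fatal(err)
	}

	got, err := stor.GetMeta()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CURRENT points to", got) // MANIFEST-000002
}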
@@ -223,44 +289,10 @@ func (fs *fileStorage) SetMeta(fd FileDesc) (err error) {
 	if fs.open < 0 {
 		return ErrClosed
 	}
-	defer func() {
-		if err != nil {
-			fs.log(fmt.Sprintf("CURRENT: %v", err))
-		}
-	}()
-	path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
-	w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-	if err != nil {
-		return
-	}
-	_, err = fmt.Fprintln(w, fsGenName(fd))
-	if err != nil {
-		fs.log(fmt.Sprintf("write CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	if err = w.Sync(); err != nil {
-		fs.log(fmt.Sprintf("flush CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	if err = w.Close(); err != nil {
-		fs.log(fmt.Sprintf("close CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	if err != nil {
-		return
-	}
-	if err = rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
-		fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err))
-		return
-	}
-	// Sync root directory.
-	if err = syncDir(fs.path); err != nil {
-		fs.log(fmt.Sprintf("syncDir: %v", err))
-	}
-	return
+	return fs.setMeta(fd)
 }
 
-func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
+func (fs *fileStorage) GetMeta() (FileDesc, error) {
 	fs.mu.Lock()
 	defer fs.mu.Unlock()
 	if fs.open < 0 {
@@ -268,7 +300,7 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
 	}
 	dir, err := os.Open(fs.path)
 	if err != nil {
-		return
+		return FileDesc{}, err
 	}
 	names, err := dir.Readdirnames(0)
 	// Close the dir first before checking for Readdirnames error.
@@ -276,94 +308,134 @@ func (fs *fileStorage) GetMeta() (fd FileDesc, err error) {
 		fs.log(fmt.Sprintf("close dir: %v", ce))
 	}
 	if err != nil {
-		return
+		return FileDesc{}, err
 	}
-	// Find latest CURRENT file.
-	var rem []string
-	var pend bool
-	var cerr error
-	for _, name := range names {
-		if strings.HasPrefix(name, "CURRENT") {
-			pend1 := len(name) > 7
-			var pendNum int64
-			// Make sure it is valid name for a CURRENT file, otherwise skip it.
-			if pend1 {
-				if name[7] != '.' || len(name) < 9 {
-					fs.log(fmt.Sprintf("skipping %s: invalid file name", name))
-					continue
-				}
-				var e1 error
-				if pendNum, e1 = strconv.ParseInt(name[8:], 10, 0); e1 != nil {
-					fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", name, e1))
-					continue
-				}
-			}
-			path := filepath.Join(fs.path, name)
-			r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
-			if e1 != nil {
-				return FileDesc{}, e1
-			}
-			b, e1 := ioutil.ReadAll(r)
-			if e1 != nil {
-				r.Close()
-				return FileDesc{}, e1
-			}
-			var fd1 FileDesc
-			if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd1) {
-				fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", name))
-				if pend1 {
-					rem = append(rem, name)
-				}
-				if !pend1 || cerr == nil {
-					metaFd, _ := fsParseName(name)
-					cerr = &ErrCorrupted{
-						Fd:  metaFd,
-						Err: errors.New("leveldb/storage: corrupted or incomplete meta file"),
-					}
-				}
-			} else if pend1 && pendNum != fd1.Num {
-				fs.log(fmt.Sprintf("skipping %s: inconsistent pending-file num: %d vs %d", name, pendNum, fd1.Num))
-				rem = append(rem, name)
-			} else if fd1.Num < fd.Num {
-				fs.log(fmt.Sprintf("skipping %s: obsolete", name))
-				if pend1 {
-					rem = append(rem, name)
-				}
-			} else {
-				fd = fd1
-				pend = pend1
-			}
-			if err := r.Close(); err != nil {
-				fs.log(fmt.Sprintf("close %s: %v", name, err))
-			}
-		}
-	}
-	// Don't remove any files if there is no valid CURRENT file.
-	if fd.Zero() {
-		if cerr != nil {
-			err = cerr
-		} else {
-			err = os.ErrNotExist
-		}
-		return
-	}
-	if !fs.readOnly {
-		// Rename pending CURRENT file to an effective CURRENT.
-		if pend {
-			path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
-			if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
-				fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", fd.Num, err))
-			}
-		}
-		// Remove obsolete or incomplete pending CURRENT files.
-		for _, name := range rem {
-			path := filepath.Join(fs.path, name)
-			if err := os.Remove(path); err != nil {
-				fs.log(fmt.Sprintf("remove %s: %v", name, err))
-			}
-		}
-	}
-	return
+	// Try this in order:
+	// - CURRENT.[0-9]+ ('pending rename' file, descending order)
+	// - CURRENT
+	// - CURRENT.bak
+	//
+	// Skip corrupted file or file that point to a missing target file.
+	type currentFile struct {
+		name string
+		fd   FileDesc
+	}
+	tryCurrent := func(name string) (*currentFile, error) {
+		b, err := ioutil.ReadFile(filepath.Join(fs.path, name))
+		if err != nil {
+			if os.IsNotExist(err) {
+				err = os.ErrNotExist
+			}
+			return nil, err
+		}
+		var fd FileDesc
+		if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) {
+			fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b))
+			err := &ErrCorrupted{
+				Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"),
+			}
+			return nil, err
+		}
+		if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil {
+			if os.IsNotExist(err) {
+				fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd))
+				err = os.ErrNotExist
+			}
+			return nil, err
+		}
+		return &currentFile{name: name, fd: fd}, nil
+	}
+	tryCurrents := func(names []string) (*currentFile, error) {
+		var (
+			cur *currentFile
+			// Last corruption error.
+			lastCerr error
+		)
+		for _, name := range names {
+			var err error
+			cur, err = tryCurrent(name)
+			if err == nil {
+				break
+			} else if err == os.ErrNotExist {
+				// Fallback to the next file.
+			} else if isCorrupted(err) {
+				lastCerr = err
+				// Fallback to the next file.
+			} else {
+				// In case the error is due to permission, etc.
+				return nil, err
+			}
+		}
+		if cur == nil {
+			err := os.ErrNotExist
+			if lastCerr != nil {
+				err = lastCerr
+			}
+			return nil, err
+		}
+		return cur, nil
+	}
+
+	// Try 'pending rename' files.
+	var nums []int64
+	for _, name := range names {
+		if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
+			i, err := strconv.ParseInt(name[8:], 10, 64)
+			if err == nil {
+				nums = append(nums, i)
+			}
+		}
+	}
+	var (
+		pendCur   *currentFile
+		pendErr   = os.ErrNotExist
+		pendNames []string
+	)
+	if len(nums) > 0 {
+		sort.Sort(sort.Reverse(int64Slice(nums)))
+		pendNames = make([]string, len(nums))
+		for i, num := range nums {
+			pendNames[i] = fmt.Sprintf("CURRENT.%d", num)
+		}
+		pendCur, pendErr = tryCurrents(pendNames)
+		if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) {
+			return FileDesc{}, pendErr
+		}
+	}
+
+	// Try CURRENT and CURRENT.bak.
+	curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"})
+	if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) {
+		return FileDesc{}, curErr
+	}
+
+	// pendCur takes precedence, but guards against obsolete pendCur.
+	if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) {
+		curCur = pendCur
+	}
+
+	if curCur != nil {
+		// Restore CURRENT file to proper state.
+		if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) {
+			// Ignore setMeta errors, however don't delete obsolete files if we
+			// catch error.
+			if err := fs.setMeta(curCur.fd); err == nil {
+				// Remove 'pending rename' files.
+				for _, name := range pendNames {
+					if err := os.Remove(filepath.Join(fs.path, name)); err != nil {
+						fs.log(fmt.Sprintf("remove %s: %v", name, err))
+					}
+				}
+			}
+		}
+		return curCur.fd, nil
+	}
+
+	// Nothing found.
+	if isCorrupted(pendErr) {
+		return FileDesc{}, pendErr
+	}
+	return FileDesc{}, curErr
 }
 
 func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
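The rewritten GetMeta gives the storage layer a fixed recovery order: the newest pending CURRENT.N first, then CURRENT, then CURRENT.bak, skipping any candidate that is corrupted or that points to a missing manifest, and rewriting CURRENT from whichever candidate wins. A hedged sketch that exercises the CURRENT.bak fallback by hand; the file layout below is constructed purely for illustration:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	dir, err := ioutil.TempDir("", "getmeta-demo")
	must(err)
	defer os.RemoveAll(dir)

	// Hand-craft a directory: a manifest that exists, a CURRENT that is
	// corrupted (missing the trailing newline) and a healthy CURRENT.bak.
	must(ioutil.WriteFile(filepath.Join(dir, "MANIFEST-000002"), []byte("x"), 0644))
	must(ioutil.WriteFile(filepath.Join(dir, "CURRENT"), []byte("MANIFEST-000002"), 0644))
	must(ioutil.WriteFile(filepath.Join(dir, "CURRENT.bak"), []byte("MANIFEST-000002\n"), 0644))

	stor, err := storage.OpenFile(dir, false)
	must(err)
	defer stor.Close()

	// GetMeta skips the corrupted CURRENT, accepts CURRENT.bak and, because
	// the storage is writable, restores CURRENT via setMeta on the way out.
	fd, err := stor.GetMeta()
	must(err)
	fmt.Println("recovered:", fd) // MANIFEST-000002
}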

vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go

@@ -67,13 +67,25 @@ func isErrInvalid(err error) bool {
 	if err == os.ErrInvalid {
 		return true
 	}
+	// Go < 1.8
 	if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
 		return true
 	}
+	// Go >= 1.8 returns *os.PathError instead
+	if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+		return true
+	}
 	return false
 }
 
 func syncDir(name string) error {
+	// As per fsync manpage, Linux seems to expect fsync on directory, however
+	// some system don't support this, so we will ignore syscall.EINVAL.
+	//
+	// From fsync(2):
+	// Calling fsync() does not necessarily ensure that the entry in the
+	// directory containing the file has also reached disk. For that an
+	// explicit fsync() on a file descriptor for the directory is also needed.
 	f, err := os.Open(name)
 	if err != nil {
 		return err
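The added comments document why EINVAL from a directory fsync is tolerated: some filesystems reject fsync on a directory file descriptor, but without it a rename such as CURRENT.N -> CURRENT may not survive a crash. A standalone sketch of the same pattern (my own helper on a Unix-like system, not the package's code; the error shape checked matches Go >= 1.8):

package main

import (
	"log"
	"os"
	"syscall"
)

// fsyncDir syncs a directory so that a rename performed inside it is durable;
// EINVAL is ignored because some filesystems reject fsync on directories,
// which is the same policy syncDir above documents.
func fsyncDir(dir string) error {
	f, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer f.Close()
	err = f.Sync()
	if pe, ok := err.(*os.PathError); ok && pe.Err == syscall.EINVAL {
		return nil
	}
	return err
}

func main() {
	if err := fsyncDir(os.TempDir()); err != nil {
		log.Fatal(err)
	}
}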

vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go

@@ -55,6 +55,14 @@ type ErrCorrupted struct {
 	Err error
 }
 
+func isCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	}
+	return false
+}
+
 func (e *ErrCorrupted) Error() string {
 	if !e.Fd.Zero() {
 		return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
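isCorrupted lets GetMeta treat a corrupted CURRENT candidate differently from a missing one: fall back to the next candidate rather than abort. At the client level the analogous check is errors.IsCorrupted, the usual cue to attempt recovery; a hedged sketch of that common goleveldb pattern, with the path being illustrative:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
)

func main() {
	path := "/tmp/example-db"
	db, err := leveldb.OpenFile(path, nil)
	if errors.IsCorrupted(err) {
		// The manifest or CURRENT is damaged beyond what GetMeta can repair;
		// rebuild the manifest from the table files instead.
		db, err = leveldb.RecoverFile(path, nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}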

vendor/vendor.json (vendored, 52 lines changed)

@@ -424,76 +424,76 @@
 		"revisionTime": "2017-07-05T02:17:15Z"
 	},
 	{
-		"checksumSHA1": "TJV50D0q8E3vtc90ibC+qOYdjrw=",
+		"checksumSHA1": "k6zbR5hiI10hkWtiK91rIY5s5/E=",
 		"path": "github.com/syndtr/goleveldb/leveldb",
-		"revision": "59047f74db0d042c8d8dd8e30bb030bc774a7d7a",
-		"revisionTime": "2018-05-21T04:45:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
 		"path": "github.com/syndtr/goleveldb/leveldb/cache",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
 		"path": "github.com/syndtr/goleveldb/leveldb/comparer",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
 		"path": "github.com/syndtr/goleveldb/leveldb/errors",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
 		"path": "github.com/syndtr/goleveldb/leveldb/filter",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
 		"path": "github.com/syndtr/goleveldb/leveldb/iterator",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
 		"path": "github.com/syndtr/goleveldb/leveldb/journal",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
 		"path": "github.com/syndtr/goleveldb/leveldb/memdb",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
 		"path": "github.com/syndtr/goleveldb/leveldb/opt",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
-		"checksumSHA1": "7H3fa12T7WoMAeXq1+qG5O7LD0w=",
+		"checksumSHA1": "ZnyuciM+R19NG8L5YS3TIJdo1e8=",
 		"path": "github.com/syndtr/goleveldb/leveldb/storage",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
 		"path": "github.com/syndtr/goleveldb/leveldb/table",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
 		"path": "github.com/syndtr/goleveldb/leveldb/util",
-		"revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
-		"revisionTime": "2018-05-02T07:23:49Z"
+		"revision": "c4c61651e9e37fa117f53c5a906d3b63090d8445",
+		"revisionTime": "2018-07-08T03:05:51Z"
 	},
 	{
 		"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",