forked from cerc-io/plugeth

core, cmd, vendor: fixes and database inspection tool (#15)

* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forcibly
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version

parent 42c746d6f4 · commit 37d280da41
@@ -18,8 +18,12 @@ package main

 import (
 	"encoding/json"
+	"errors"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"os"
+	"path/filepath"
 	"runtime"
 	"strconv"
 	"sync/atomic"
@@ -167,6 +171,37 @@ Remove blockchain and state databases`,
 The arguments are interpreted as block numbers or hashes.
 Use "ethereum dump 0" to dump the genesis block.`,
 	}
+	migrateAncientCommand = cli.Command{
+		Action:    utils.MigrateFlags(migrateAncient),
+		Name:      "migrate-ancient",
+		Usage:     "migrate ancient database forcibly",
+		ArgsUsage: " ",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.AncientFlag,
+			utils.CacheFlag,
+			utils.TestnetFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+		},
+		Category: "BLOCKCHAIN COMMANDS",
+	}
+	inspectCommand = cli.Command{
+		Action:    utils.MigrateFlags(inspect),
+		Name:      "inspect",
+		Usage:     "Inspect the storage size for each type of data in the database",
+		ArgsUsage: " ",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.AncientFlag,
+			utils.CacheFlag,
+			utils.TestnetFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+			utils.SyncModeFlag,
+		},
+		Category: "BLOCKCHAIN COMMANDS",
+	}
 )

 // initGenesis will initialise the given JSON format genesis file and writes it as
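The two subcommands defined above are wired into the CLI in the `init()` hunk further down. As an illustrative invocation (the flag spellings are read off `utils.DataDirFlag`/`utils.AncientFlag` and are an assumption here, not shown in this diff): `geth inspect --datadir <dir>` prints the per-category size table, while `geth migrate-ancient --datadir <dir> --datadir.ancient <new-path>` relocates the freezer and records the new path.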
@@ -423,19 +458,37 @@ func copyDb(ctx *cli.Context) error {
 }

 func removeDB(ctx *cli.Context) error {
-	stack, _ := makeConfigNode(ctx)
+	stack, config := makeConfigNode(ctx)

-	for _, name := range []string{"chaindata", "lightchaindata"} {
+	for i, name := range []string{"chaindata", "lightchaindata"} {
 		// Ensure the database exists in the first place
 		logger := log.New("database", name)

+		var (
+			dbdirs  []string
+			freezer string
+		)
 		dbdir := stack.ResolvePath(name)
 		if !common.FileExist(dbdir) {
 			logger.Info("Database doesn't exist, skipping", "path", dbdir)
 			continue
 		}
+		dbdirs = append(dbdirs, dbdir)
+		if i == 0 {
+			freezer = config.Eth.DatabaseFreezer
+			switch {
+			case freezer == "":
+				freezer = filepath.Join(dbdir, "ancient")
+			case !filepath.IsAbs(freezer):
+				freezer = config.Node.ResolvePath(freezer)
+			}
+			if common.FileExist(freezer) {
+				dbdirs = append(dbdirs, freezer)
+			}
+		}
+		for i := len(dbdirs) - 1; i >= 0; i-- {
 		// Confirm removal and execute
-		fmt.Println(dbdir)
+		fmt.Println(dbdirs[i])
 		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
 		switch {
 		case err != nil:

@@ -444,10 +497,11 @@ func removeDB(ctx *cli.Context) error {
 			logger.Warn("Database deletion aborted")
 		default:
 			start := time.Now()
-			os.RemoveAll(dbdir)
+			os.RemoveAll(dbdirs[i])
 			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
 		}
 		}
+		}
 	return nil
 }

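Note the direction of the inner loop above: `dbdirs[0]` is the key-value store and `dbdirs[1]`, when present, is the freezer, so iterating from the end prompts for and removes the ancient store before the chaindata directory. Since the default freezer location is `<chaindata>/ancient`, deleting chaindata first would silently drag the freezer along with it; the reverse order lets the user confirm each store separately.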
@@ -479,8 +533,140 @@ func dump(ctx *cli.Context) error {
 	return nil
 }

+func migrateAncient(ctx *cli.Context) error {
+	node, config := makeConfigNode(ctx)
+	defer node.Close()
+
+	dbdir := config.Node.ResolvePath("chaindata")
+	kvdb, err := rawdb.NewLevelDBDatabase(dbdir, 128, 1024, "")
+	if err != nil {
+		return err
+	}
+	defer kvdb.Close()
+
+	freezer := config.Eth.DatabaseFreezer
+	switch {
+	case freezer == "":
+		freezer = filepath.Join(dbdir, "ancient")
+	case !filepath.IsAbs(freezer):
+		freezer = config.Node.ResolvePath(freezer)
+	}
+	stored := rawdb.ReadAncientPath(kvdb)
+	if stored != freezer && stored != "" {
+		confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Are you sure to migrate ancient database from %s to %s?", stored, freezer))
+		switch {
+		case err != nil:
+			utils.Fatalf("%v", err)
+		case !confirm:
+			log.Warn("Ancient database migration aborted")
+		default:
+			if err := rename(stored, freezer); err != nil {
+				// Renaming a file can fail if the source and destination
+				// are on different file systems.
+				if err := moveAncient(stored, freezer); err != nil {
+					utils.Fatalf("Migrate ancient database failed, %v", err)
+				}
+			}
+			rawdb.WriteAncientPath(kvdb, freezer)
+			log.Info("Ancient database successfully migrated")
+		}
+	}
+	return nil
+}
+
+func inspect(ctx *cli.Context) error {
+	node, _ := makeConfigNode(ctx)
+	defer node.Close()
+
+	_, chainDb := utils.MakeChain(ctx, node)
+	defer chainDb.Close()
+
+	return rawdb.InspectDatabase(chainDb)
+}
+
 // hashish returns true for strings that look like hashes.
 func hashish(x string) bool {
 	_, err := strconv.Atoi(x)
 	return err != nil
 }
+
+// copyFileSynced copies data from source file to destination
+// and synces the dest file forcibly.
+func copyFileSynced(src string, dest string, info os.FileInfo) error {
+	srcf, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer srcf.Close()
+
+	destf, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode().Perm())
+	if err != nil {
+		return err
+	}
+	// The maximum size of ancient file is 2GB, 4MB buffer is suitable here.
+	buff := make([]byte, 4*1024*1024)
+	for {
+		rn, err := srcf.Read(buff)
+		if err != nil && err != io.EOF {
+			return err
+		}
+		if rn == 0 {
+			break
+		}
+		if wn, err := destf.Write(buff[:rn]); err != nil || wn != rn {
+			return err
+		}
+	}
+	if err1 := destf.Sync(); err == nil {
+		err = err1
+	}
+	if err1 := destf.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+// copyDirSynced recursively copies files under the specified dir
+// to dest and synces the dest dir forcibly.
+func copyDirSynced(src string, dest string, info os.FileInfo) error {
+	if err := os.MkdirAll(dest, os.ModePerm); err != nil {
+		return err
+	}
+	defer os.Chmod(dest, info.Mode())
+
+	objects, err := ioutil.ReadDir(src)
+	if err != nil {
+		return err
+	}
+	for _, obj := range objects {
+		// All files in ancient database should be flatten files.
+		if !obj.Mode().IsRegular() {
+			continue
+		}
+		subsrc, subdest := filepath.Join(src, obj.Name()), filepath.Join(dest, obj.Name())
+		if err := copyFileSynced(subsrc, subdest, obj); err != nil {
+			return err
+		}
+	}
+	return syncDir(dest)
+}
+
+// moveAncient migrates ancient database from source to destination.
+func moveAncient(src string, dest string) error {
+	srcinfo, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcinfo.IsDir() {
+		return errors.New("ancient directory expected")
+	}
+	if destinfo, err := os.Lstat(dest); !os.IsNotExist(err) {
+		if destinfo.Mode()&os.ModeSymlink != 0 {
+			return errors.New("symbolic link datadir is not supported")
+		}
+	}
+	if err := copyDirSynced(src, dest, srcinfo); err != nil {
+		return err
+	}
+	return os.RemoveAll(src)
+}
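The manual read/write loop in `copyFileSynced` above is the commit's streaming copy; for comparison, the same core can be expressed with the standard library. A sketch only, assuming `srcf` and `destf` are the open files from that function (note that `io.CopyBuffer` may delegate to the files' own `ReadFrom`/`WriteTo` implementations and bypass the buffer):

```go
// Sketch: stream srcf into destf through the same fixed 4MB buffer.
// io.CopyBuffer returns on the first read or write error it encounters.
buff := make([]byte, 4*1024*1024)
if _, err := io.CopyBuffer(destf, srcf, buff); err != nil {
	return err
}
```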
@@ -204,6 +204,8 @@ func init() {
 		copydbCommand,
 		removedbCommand,
 		dumpCommand,
+		migrateAncientCommand,
+		inspectCommand,
 		// See accountcmd.go:
 		accountCommand,
 		walletCommand,
cmd/geth/os_unix.go (new file, +51)
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license.
+//
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package main
+
+import (
+	"os"
+	"syscall"
+)
+
+func rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+	if err == os.ErrInvalid {
+		return true
+	}
+	// Go < 1.8
+	if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+		return true
+	}
+	// Go >= 1.8 returns *os.PathError instead
+	if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+		return true
+	}
+	return false
+}
+
+func syncDir(name string) error {
+	// As per fsync manpage, Linux seems to expect fsync on directory, however
+	// some system don't support this, so we will ignore syscall.EINVAL.
+	//
+	// From fsync(2):
+	//   Calling fsync() does not necessarily ensure that the entry in the
+	//   directory containing the file has also reached disk. For that an
+	//   explicit fsync() on a file descriptor for the directory is also needed.
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := f.Sync(); err != nil && !isErrInvalid(err) {
+		return err
+	}
+	return nil
+}
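The fsync(2) excerpt quoted in `syncDir` is the reason `copyDirSynced` ends with a directory sync: writing and syncing a file does not make its directory entry durable. A minimal sketch of the pattern, assuming the `syncDir` helper above and standard imports (`os`, `io/ioutil`, `path/filepath`); the function name is invented for illustration:

```go
// writeDurable writes data and flushes both the file contents and the
// directory entry that names the file.
func writeDurable(dir, name string, data []byte) error {
	path := filepath.Join(dir, name)
	if err := ioutil.WriteFile(path, data, 0644); err != nil {
		return err
	}
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := f.Sync(); err != nil { // flush the file contents
		return err
	}
	return syncDir(dir) // flush the directory entry as well
}
```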
cmd/geth/os_windows.go (new file, +43)
@@ -0,0 +1,43 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license.
+
+package main
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	modkernel32     = syscall.NewLazyDLL("kernel32.dll")
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const _MOVEFILE_REPLACE_EXISTING = 1
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+	if r1 == 0 {
+		if e1 != 0 {
+			return error(e1)
+		}
+		return syscall.EINVAL
+	}
+	return nil
+}
+
+func rename(oldpath, newpath string) error {
+	from, err := syscall.UTF16PtrFromString(oldpath)
+	if err != nil {
+		return err
+	}
+	to, err := syscall.UTF16PtrFromString(newpath)
+	if err != nil {
+		return err
+	}
+	return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
@@ -302,6 +302,8 @@ func ExportPreimages(db ethdb.Database, fn string) error {
 	}
 	// Iterate over the preimages and export them
 	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
+	defer it.Release()
+
 	for it.Next() {
 		if err := rlp.Encode(writer, it.Value()); err != nil {
 			return err
@@ -1573,7 +1573,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
 	if ctx.GlobalString(SyncModeFlag.Name) == "light" {
 		name = "lightchaindata"
 	}
-	chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, "", "")
+	chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "")
 	if err != nil {
 		Fatalf("Could not open database: %v", err)
 	}
@@ -26,7 +26,11 @@ type StorageSize float64

 // String implements the stringer interface.
 func (s StorageSize) String() string {
-	if s > 1048576 {
+	if s > 1099511627776 {
+		return fmt.Sprintf("%.2f TiB", s/1099511627776)
+	} else if s > 1073741824 {
+		return fmt.Sprintf("%.2f GiB", s/1073741824)
+	} else if s > 1048576 {
 		return fmt.Sprintf("%.2f MiB", s/1048576)
 	} else if s > 1024 {
 		return fmt.Sprintf("%.2f KiB", s/1024)
@@ -38,7 +42,11 @@ func (s StorageSize) String() string {
 // TerminalString implements log.TerminalStringer, formatting a string for console
 // output during logging.
 func (s StorageSize) TerminalString() string {
-	if s > 1048576 {
+	if s > 1099511627776 {
+		return fmt.Sprintf("%.2fTiB", s/1099511627776)
+	} else if s > 1073741824 {
+		return fmt.Sprintf("%.2fGiB", s/1073741824)
+	} else if s > 1048576 {
 		return fmt.Sprintf("%.2fMiB", s/1048576)
 	} else if s > 1024 {
 		return fmt.Sprintf("%.2fKiB", s/1024)
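The added thresholds are powers of 1024 (1048576 = 1024^2, 1073741824 = 1024^3, 1099511627776 = 1024^4), extending the formatter to GiB- and TiB-sized stores for the inspector's report. A self-contained sketch of the same ladder; the final `B` fallback is assumed from the unchanged tail of the function, which this hunk does not show:

```go
package main

import "fmt"

// StorageSize mirrors common.StorageSize for illustration.
type StorageSize float64

func (s StorageSize) String() string {
	switch {
	case s > 1099511627776: // 1024^4
		return fmt.Sprintf("%.2f TiB", s/1099511627776)
	case s > 1073741824: // 1024^3
		return fmt.Sprintf("%.2f GiB", s/1073741824)
	case s > 1048576: // 1024^2
		return fmt.Sprintf("%.2f MiB", s/1048576)
	case s > 1024:
		return fmt.Sprintf("%.2f KiB", s/1024)
	default:
		return fmt.Sprintf("%.2f B", s)
	}
}

func main() {
	fmt.Println(StorageSize(2048))    // 2.00 KiB
	fmt.Println(StorageSize(5 << 20)) // 5.00 MiB
	fmt.Println(StorageSize(3 << 30)) // 3.00 GiB
	fmt.Println(StorageSize(2 << 40)) // 2.00 TiB
}
```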
@@ -93,7 +93,10 @@ const (
 	//  - Version 6
 	//  The following incompatible database changes were added:
 	//    * Transaction lookup information stores the corresponding block number instead of block hash
-	BlockChainVersion uint64 = 6
+	//  - Version 7
+	//  The following incompatible database changes were added:
+	//    * Use freezer as the ancient database to maintain all ancient data
+	BlockChainVersion uint64 = 7
 )

 // CacheConfig contains the configuration values for the trie caching/pruning
@@ -215,10 +218,35 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	if bc.genesisBlock == nil {
 		return nil, ErrNoGenesis
 	}
+	// Initialize the chain with ancient data if it isn't empty.
+	if bc.empty() {
+		if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
+			for i := uint64(0); i < frozen; i++ {
+				// Inject hash<->number mapping.
+				hash := rawdb.ReadCanonicalHash(bc.db, i)
+				if hash == (common.Hash{}) {
+					return nil, errors.New("broken ancient database")
+				}
+				rawdb.WriteHeaderNumber(bc.db, hash, i)
+
+				// Inject txlookup indexes.
+				block := rawdb.ReadBlock(bc.db, hash, i)
+				if block == nil {
+					return nil, errors.New("broken ancient database")
+				}
+				rawdb.WriteTxLookupEntries(bc.db, block)
+			}
+			hash := rawdb.ReadCanonicalHash(bc.db, frozen-1)
+			rawdb.WriteHeadHeaderHash(bc.db, hash)
+			rawdb.WriteHeadFastBlockHash(bc.db, hash)
+
+			log.Info("Initialized chain with ancients", "number", frozen-1, "hash", hash)
+		}
+	}
 	if err := bc.loadLastState(); err != nil {
 		return nil, err
 	}
-	if frozen, err := bc.db.Ancients(); err == nil && frozen >= 1 {
+	if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
 		var (
 			needRewind bool
 			low        uint64
@ -278,6 +306,20 @@ func (bc *BlockChain) GetVMConfig() *vm.Config {
|
|||||||
return &bc.vmConfig
|
return &bc.vmConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// empty returns an indicator whether the blockchain is empty.
|
||||||
|
// Note, it's a special case that we connect a non-empty ancient
|
||||||
|
// database with an empty node, so that we can plugin the ancient
|
||||||
|
// into node seamlessly.
|
||||||
|
func (bc *BlockChain) empty() bool {
|
||||||
|
genesis := bc.genesisBlock.Hash()
|
||||||
|
for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
|
||||||
|
if hash != genesis {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// loadLastState loads the last known chain state from the database. This method
|
// loadLastState loads the last known chain state from the database. This method
|
||||||
// assumes that the chain manager mutex is held.
|
// assumes that the chain manager mutex is held.
|
||||||
func (bc *BlockChain) loadLastState() error {
|
func (bc *BlockChain) loadLastState() error {
|
||||||
@@ -383,7 +425,9 @@ func (bc *BlockChain) SetHead(head uint64) error {
 		if num+1 <= frozen {
 			// Truncate all relative data(header, total difficulty, body, receipt
 			// and canonical hash) from ancient store.
-			bc.db.TruncateAncients(num + 1)
+			if err := bc.db.TruncateAncients(num + 1); err != nil {
+				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
+			}

 			// Remove the hash <-> number mapping from the active store.
 			rawdb.DeleteHeaderNumber(db, hash)
@@ -948,6 +992,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			}
 		}
 	}()
+	var deleted types.Blocks
 	for i, block := range blockChain {
 		// Short circuit insertion if shutting down or processing failed
 		if atomic.LoadInt32(&bc.procInterrupt) == 1 {
@@ -961,16 +1006,38 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		if !bc.HasHeader(block.Hash(), block.NumberU64()) {
 			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
 		}
-		// Compute all the non-consensus fields of the receipts
-		if err := receiptChain[i].DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
-			return i, fmt.Errorf("failed to derive receipts data: %v", err)
+		var (
+			start  = time.Now()
+			logged = time.Now()
+			count  int
+		)
+		// Migrate all ancient blocks. This can happen if someone upgrades from Geth
+		// 1.8.x to 1.9.x mid-fast-sync. Perhaps we can get rid of this path in the
+		// long term.
+		for {
+			// We can ignore the error here since light client won't hit this code path.
+			frozen, _ := bc.db.Ancients()
+			if frozen >= block.NumberU64() {
+				break
 		}
-		// Initialize freezer with genesis block first
-		if frozen, err := bc.db.Ancients(); err == nil && frozen == 0 && block.NumberU64() == 1 {
-			genesisBlock := rawdb.ReadBlock(bc.db, rawdb.ReadCanonicalHash(bc.db, 0), 0)
-			size += rawdb.WriteAncientBlock(bc.db, genesisBlock, nil, genesisBlock.Difficulty())
+			h := rawdb.ReadCanonicalHash(bc.db, frozen)
+			b := rawdb.ReadBlock(bc.db, h, frozen)
+			size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, frozen, bc.chainConfig), rawdb.ReadTd(bc.db, h, frozen))
+			count += 1
+
+			// Always keep genesis block in active database.
+			if b.NumberU64() != 0 {
+				deleted = append(deleted, b)
 		}
-		// Flush data into ancient store.
+			if time.Since(logged) > 8*time.Second {
+				log.Info("Migrating ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+				logged = time.Now()
+			}
+		}
+		if count > 0 {
+			log.Info("Migrated ancient blocks", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+		}
+		// Flush data into ancient database.
 		size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
 		rawdb.WriteTxLookupEntries(batch, block)

@ -992,15 +1059,8 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
}
|
}
|
||||||
previous = nil // disable rollback explicitly
|
previous = nil // disable rollback explicitly
|
||||||
|
|
||||||
// Remove the ancient data from the active store
|
|
||||||
cleanGenesis := len(blockChain) > 0 && blockChain[0].NumberU64() == 1
|
|
||||||
if cleanGenesis {
|
|
||||||
// Migrate genesis block to ancient store too.
|
|
||||||
rawdb.DeleteBlockWithoutNumber(batch, rawdb.ReadCanonicalHash(bc.db, 0), 0)
|
|
||||||
rawdb.DeleteCanonicalHash(batch, 0)
|
|
||||||
}
|
|
||||||
// Wipe out canonical block data.
|
// Wipe out canonical block data.
|
||||||
for _, block := range blockChain {
|
for _, block := range append(deleted, blockChain...) {
|
||||||
rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
|
rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
|
||||||
rawdb.DeleteCanonicalHash(batch, block.NumberU64())
|
rawdb.DeleteCanonicalHash(batch, block.NumberU64())
|
||||||
}
|
}
|
||||||
@@ -1008,8 +1068,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			return 0, err
 		}
 		batch.Reset()
+
 		// Wipe out side chain too.
-		for _, block := range blockChain {
+		for _, block := range append(deleted, blockChain...) {
 			for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
 				rawdb.DeleteBlock(batch, hash, block.NumberU64())
 			}
@@ -1035,10 +1096,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			stats.ignored++
 			continue
 		}
-		// Compute all the non-consensus fields of the receipts
-		if err := receiptChain[i].DeriveFields(bc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
-			return i, fmt.Errorf("failed to derive receipts data: %v", err)
-		}
 		// Write all the data out into the database
 		rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
 		rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
@@ -716,6 +716,20 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	height := uint64(1024)
 	blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)

+	// makeDb creates a db instance for testing.
+	makeDb := func() (ethdb.Database, func()) {
+		dir, err := ioutil.TempDir("", "")
+		if err != nil {
+			t.Fatalf("failed to create temp freezer dir: %v", err)
+		}
+		defer os.Remove(dir)
+		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "")
+		if err != nil {
+			t.Fatalf("failed to create temp freezer db: %v", err)
+		}
+		gspec.MustCommit(db)
+		return db, func() { os.RemoveAll(dir) }
+	}
 	// Configure a subchain to roll back
 	remove := []common.Hash{}
 	for _, block := range blocks[height/2:] {
@@ -734,9 +748,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		}
 	}
 	// Import the chain as an archive node and ensure all pointers are updated
-	archiveDb := rawdb.NewMemoryDatabase()
-	gspec.MustCommit(archiveDb)
-
+	archiveDb, delfn := makeDb()
+	defer delfn()
 	archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	if n, err := archive.InsertChain(blocks); err != nil {
 		t.Fatalf("failed to process block %d: %v", n, err)
@@ -748,8 +761,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	assert(t, "archive", archive, height/2, height/2, height/2)

 	// Import the chain as a non-archive node and ensure all pointers are updated
-	fastDb := rawdb.NewMemoryDatabase()
-	gspec.MustCommit(fastDb)
+	fastDb, delfn := makeDb()
+	defer delfn()
 	fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	defer fast.Stop()

@@ -768,16 +781,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	assert(t, "fast", fast, height/2, height/2, 0)

 	// Import the chain as a ancient-first node and ensure all pointers are updated
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
-	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer db: %v", err)
-	}
-	gspec.MustCommit(ancientDb)
+	ancientDb, delfn := makeDb()
+	defer delfn()
 	ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	defer ancient.Stop()

@@ -795,9 +800,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	}

 	// Import the chain as a light node and ensure all pointers are updated
-	lightDb := rawdb.NewMemoryDatabase()
-	gspec.MustCommit(lightDb)
-
+	lightDb, delfn := makeDb()
+	defer delfn()
 	light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
 	if n, err := light.InsertHeaderChain(headers, 1); err != nil {
 		t.Fatalf("failed to insert header %d: %v", n, err)
@@ -1892,10 +1896,18 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 		b.SetCoinbase(common.Address{1})
 		b.OffsetTime(-9) // A higher difficulty
 	})

 	// Import the shared chain and the original canonical one
-	chaindb := rawdb.NewMemoryDatabase()
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("failed to create temp freezer dir: %v", err)
+	}
+	defer os.Remove(dir)
+	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "")
+	if err != nil {
+		t.Fatalf("failed to create temp freezer db: %v", err)
+	}
 	new(Genesis).MustCommit(chaindb)
-
+	defer os.RemoveAll(dir)
 	chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
 	if err != nil {
@@ -1992,7 +2004,6 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 	// The head shouldn't change.
 	asserter(t, blocks3[len(blocks3)-1])

-	if typ != "headers" {
 	// Rollback the heavier chain and re-insert the longer chain again
 	for i := 0; i < len(blocks3); i++ {
 		rollback = append(rollback, blocks3[i].Hash())
@@ -2004,7 +2015,6 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 		}
 		asserter(t, blocks2[len(blocks2)-1])
 	}
-	}

 // getLongAndShortChains returns two chains,
 // A is longer, B is heavier
@@ -170,6 +170,22 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constant
 		return genesis.Config, block.Hash(), err
 	}

+	// We have the genesis block in database(perhaps in ancient database)
+	// but the corresponding state is missing.
+	header := rawdb.ReadHeader(db, stored, 0)
+	if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0)); err != nil {
+		if genesis == nil {
+			genesis = DefaultGenesisBlock()
+		}
+		// Ensure the stored genesis matches with the given one.
+		hash := genesis.ToBlock(nil).Hash()
+		if hash != stored {
+			return genesis.Config, hash, &GenesisMismatchError{stored, hash}
+		}
+		block, err := genesis.Commit(db)
+		return genesis.Config, block.Hash(), err
+	}
+
 	// Check whether the genesis block is already written.
 	if genesis != nil {
 		hash := genesis.ToBlock(nil).Hash()
@@ -277,6 +293,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 	rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
 	rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
 	rawdb.WriteHeadBlockHash(db, block.Hash())
+	rawdb.WriteHeadFastBlockHash(db, block.Hash())
 	rawdb.WriteHeadHeaderHash(db, block.Hash())

 	config := g.Config
@@ -274,10 +274,15 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
 			return i, errors.New("aborted")
 		}
 		// If the header's already known, skip it, otherwise store
-		if hc.HasHeader(header.Hash(), header.Number.Uint64()) {
+		hash := header.Hash()
+		if hc.HasHeader(hash, header.Number.Uint64()) {
+			externTd := hc.GetTd(hash, header.Number.Uint64())
+			localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
+			if externTd == nil || externTd.Cmp(localTd) <= 0 {
 			stats.ignored++
 			continue
 		}
+		}
 		if err := writeHeader(header); err != nil {
 			return i, err
 		}
@@ -89,7 +89,16 @@ func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
 	return &number
 }

-// DeleteHeaderNumber removes hash to number mapping.
+// WriteHeaderNumber stores the hash->number mapping.
+func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+	key := headerNumberKey(hash)
+	enc := encodeBlockNumber(number)
+	if err := db.Put(key, enc); err != nil {
+		log.Crit("Failed to store hash to number mapping", "err", err)
+	}
+}
+
+// DeleteHeaderNumber removes hash->number mapping.
 func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
 	if err := db.Delete(headerNumberKey(hash)); err != nil {
 		log.Crit("Failed to delete hash to number mapping", "err", err)
@@ -206,22 +215,19 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header
 // WriteHeader stores a block header into the database and also stores the hash-
 // to-number mapping.
 func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
-	// Write the hash -> number mapping
 	var (
 		hash    = header.Hash()
 		number  = header.Number.Uint64()
-		encoded = encodeBlockNumber(number)
 	)
-	key := headerNumberKey(hash)
-	if err := db.Put(key, encoded); err != nil {
-		log.Crit("Failed to store hash to number mapping", "err", err)
-	}
+	// Write the hash -> number mapping
+	WriteHeaderNumber(db, hash, number)
+
 	// Write the encoded header
 	data, err := rlp.EncodeToBytes(header)
 	if err != nil {
 		log.Crit("Failed to RLP encode header", "err", err)
 	}
-	key = headerKey(number, hash)
+	key := headerKey(number, hash)
 	if err := db.Put(key, data); err != nil {
 		log.Crit("Failed to store header", "err", err)
 	}
@@ -80,6 +80,20 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
 	}
 }

+// ReadAncientPath retrieves ancient database path which is recorded during the
+// first node setup or forcibly changed by user.
+func ReadAncientPath(db ethdb.KeyValueReader) string {
+	data, _ := db.Get(ancientKey)
+	return string(data)
+}
+
+// WriteAncientPath writes ancient database path into the key-value database.
+func WriteAncientPath(db ethdb.KeyValueWriter, path string) {
+	if err := db.Put(ancientKey, []byte(path)); err != nil {
+		log.Crit("Failed to store ancient path", "err", err)
+	}
+}
+
 // ReadPreimage retrieves a single preimage of the provided hash.
 func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	data, _ := db.Get(preimageKey(hash))
@@ -17,11 +17,17 @@
 package rawdb

 import (
+	"bytes"
 	"fmt"
+	"os"
+	"time"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/ethdb/leveldb"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/olekukonko/tablewriter"
 )

 // freezerdb is a database wrapper that enabled freezer data retrievals.
@@ -66,6 +72,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }

+// AncientSize returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
+	return 0, errNotSupported
+}
+
 // AppendAncient returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
 	return errNotSupported
@@ -140,5 +151,128 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer
 		kvdb.Close()
 		return nil, err
 	}
+	// Make sure we always use the same ancient store.
+	//
+	//                  | stored == nil    | stored != nil
+	//  ----------------+------------------+-----------------------
+	//  freezer == nil  | non-freezer mode | ancient store missing
+	//  freezer != nil  | initialize       | ensure consistency
+	stored := ReadAncientPath(kvdb)
+	if stored == "" && freezer != "" {
+		WriteAncientPath(kvdb, freezer)
+	} else if stored != freezer {
+		log.Warn("Ancient path mismatch", "stored", stored, "given", freezer)
+		log.Crit("Please use a consistent ancient path or migrate it via the command line tool `geth migrate-ancient`")
+	}
 	return frdb, nil
 }
+
+// InspectDatabase traverses the entire database and checks the size
+// of all different categories of data.
+func InspectDatabase(db ethdb.Database) error {
+	it := db.NewIterator()
+	defer it.Release()
+
+	var (
+		count  int64
+		start  = time.Now()
+		logged = time.Now()
+
+		// Key-value store statistics
+		total          common.StorageSize
+		headerSize     common.StorageSize
+		bodySize       common.StorageSize
+		receiptSize    common.StorageSize
+		tdSize         common.StorageSize
+		numHashPairing common.StorageSize
+		hashNumPairing common.StorageSize
+		trieSize       common.StorageSize
+		txlookupSize   common.StorageSize
+		preimageSize   common.StorageSize
+		bloomBitsSize  common.StorageSize
+
+		// Ancient store statistics
+		ancientHeaders  common.StorageSize
+		ancientBodies   common.StorageSize
+		ancientReceipts common.StorageSize
+		ancientHashes   common.StorageSize
+		ancientTds      common.StorageSize
+
+		// Les statistic
+		ChtTrieNodes   common.StorageSize
+		BloomTrieNodes common.StorageSize
+	)
+	// Inspect key-value database first.
+	for it.Next() {
+		var (
+			key  = it.Key()
+			size = common.StorageSize(len(key) + len(it.Value()))
+		)
+		total += size
+		switch {
+		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
+			tdSize += size
+		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
+			numHashPairing += size
+		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
+			headerSize += size
+		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+			hashNumPairing += size
+		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
+			bodySize += size
+		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
+			receiptSize += size
+		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
+			txlookupSize += size
+		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
+			preimageSize += size
+		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
+			bloomBitsSize += size
+		case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength:
+			ChtTrieNodes += size
+		case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength:
+			BloomTrieNodes += size
+		case len(key) == common.HashLength:
+			trieSize += size
+		}
+		count += 1
+		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+	}
+	// Inspect append-only file store then.
+	ancients := []*common.StorageSize{&ancientHeaders, &ancientBodies, &ancientReceipts, &ancientHashes, &ancientTds}
+	for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
+		if size, err := db.AncientSize(category); err == nil {
+			*ancients[i] += common.StorageSize(size)
+			total += common.StorageSize(size)
+		}
+	}
+	// Display the database statistic.
+	stats := [][]string{
+		{"Key-Value store", "Headers", headerSize.String()},
+		{"Key-Value store", "Bodies", bodySize.String()},
+		{"Key-Value store", "Receipts", receiptSize.String()},
+		{"Key-Value store", "Difficulties", tdSize.String()},
+		{"Key-Value store", "Block number->hash", numHashPairing.String()},
+		{"Key-Value store", "Block hash->number", hashNumPairing.String()},
+		{"Key-Value store", "Transaction index", txlookupSize.String()},
+		{"Key-Value store", "Bloombit index", bloomBitsSize.String()},
+		{"Key-Value store", "Trie nodes", trieSize.String()},
+		{"Key-Value store", "Trie preimages", preimageSize.String()},
+		{"Ancient store", "Headers", ancientHeaders.String()},
+		{"Ancient store", "Bodies", ancientBodies.String()},
+		{"Ancient store", "Receipts", ancientReceipts.String()},
+		{"Ancient store", "Difficulties", ancientTds.String()},
+		{"Ancient store", "Block number->hash", ancientHashes.String()},
+		{"Light client", "CHT trie nodes", ChtTrieNodes.String()},
+		{"Light client", "Bloom trie nodes", BloomTrieNodes.String()},
+	}
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetHeader([]string{"Database", "Category", "Size"})
+	table.SetFooter([]string{"", "Total", total.String()})
+	table.AppendBulk(stats)
+	table.Render()
+	return nil
+}
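The two-by-two comment matrix above is the whole start-up policy for the recorded ancient path. A hedged restatement as a standalone function (the name and return shape are invented for illustration; the committed code inlines this and calls `log.Crit` on mismatch):

```go
// checkAncientPath mirrors the matrix: stored is the path remembered in the
// key-value store, freezer is the path resolved from flags and defaults.
func checkAncientPath(stored, freezer string) (record bool, err error) {
	switch {
	case stored == "" && freezer == "":
		return false, nil // non-freezer mode
	case stored == "":
		return true, nil // first freezer run: record the path
	case stored == freezer:
		return false, nil // consistent with the previous run
	default:
		// Covers both a missing ancient store (freezer == "") and a real
		// mismatch; the remedy is `geth migrate-ancient`.
		return false, fmt.Errorf("ancient path mismatch: stored %q, given %q", stored, freezer)
	}
}
```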
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"os"
 	"path/filepath"
 	"sync/atomic"
 	"time"
@@ -39,6 +40,10 @@ var (
 	// errOutOrderInsertion is returned if the user attempts to inject out-of-order
 	// binary blobs into the freezer.
 	errOutOrderInsertion = errors.New("the append operation is out-order")
+
+	// errSymlinkDatadir is returned if the ancient directory specified by user
+	// is a symbolic link.
+	errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
 )

 const (
@@ -78,6 +83,13 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		readMeter  = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
 		writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
 	)
+	// Ensure the datadir is not a symbolic link if it exists.
+	if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
+		if info.Mode()&os.ModeSymlink != 0 {
+			log.Warn("Symbolic link ancient database is not supported", "path", datadir)
+			return nil, errSymlinkDatadir
+		}
+	}
 	// Leveldb uses LOCK as the filelock filename. To prevent the
 	// name collision, we use FLOCK as the lock name.
 	lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK"))
@@ -107,6 +119,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		lock.Release()
 		return nil, err
 	}
+	log.Info("Opened ancient database", "database", datadir)
 	return freezer, nil
 }

@@ -149,6 +162,14 @@ func (f *freezer) Ancients() (uint64, error) {
 	return atomic.LoadUint64(&f.frozen), nil
 }

+// AncientSize returns the ancient size of the specified category.
+func (f *freezer) AncientSize(kind string) (uint64, error) {
+	if table := f.tables[kind]; table != nil {
+		return table.size()
+	}
+	return 0, errUnknownTable
+}
+
 // AppendAncient injects all binary blobs belong to block at the end of the
 // append-only immutable table files.
 //
@@ -515,6 +515,19 @@ func (t *freezerTable) has(number uint64) bool {
 	return atomic.LoadUint64(&t.items) > number
 }

+// size returns the total data size in the freezer table.
+func (t *freezerTable) size() (uint64, error) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	stat, err := t.index.Stat()
+	if err != nil {
+		return 0, err
+	}
+	total := uint64(t.maxFileSize)*uint64(t.headId-t.tailId) + uint64(t.headBytes) + uint64(stat.Size())
+	return total, nil
+}
+
 // Sync pushes any pending data from memory out to disk. This is an expensive
 // operation, so use it with care.
 func (t *freezerTable) Sync() error {
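The `size` formula above counts every rolled-over data file at the full `maxFileSize`, plus the bytes written into the current head file, plus the index file reported by `Stat()`. As a worked example with assumed numbers: with 2GB data files, `headId-tailId = 3` and `headBytes = 512MB`, the total is `3*2GB + 512MB + index ≈ 6.5GB`; only the index size involves an actual syscall.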
@@ -41,6 +41,9 @@ var (
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 	fastTrieProgressKey = []byte("TrieSync")

+	// ancientKey tracks the absolute path of ancient database.
+	ancientKey = []byte("AncientPath")
+
 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 	headerPrefix   = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
 	headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
@@ -68,6 +68,12 @@ func (t *table) Ancients() (uint64, error) {
 	return t.db.Ancients()
 }

+// AncientSize is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) AncientSize(kind string) (uint64, error) {
+	return t.db.AncientSize(kind)
+}
+
 // AppendAncient is a noop passthrough that just forwards the request to the underlying
 // database.
 func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
@@ -478,21 +478,21 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
 	}
 	if d.mode == FastSync {
 		// Set the ancient data limitation.
-		// If we are running fast sync, all block data not greater than ancientLimit will
-		// be written to the ancient store. Otherwise, block data will be written to active
-		// database and then wait freezer to migrate.
+		// If we are running fast sync, all block data older than ancientLimit will be
+		// written to the ancient store. More recent data will be written to the active
+		// database and will wait for the freezer to migrate.
 		//
-		// If there is checkpoint available, then calculate the ancientLimit through
-		// checkpoint. Otherwise calculate the ancient limit through the advertised
-		// height by remote peer.
+		// If there is a checkpoint available, then calculate the ancientLimit through
+		// that. Otherwise calculate the ancient limit through the advertised height
+		// of the remote peer.
 		//
-		// The reason for picking checkpoint first is: there exists an attack vector
-		// for height that: a malicious peer can give us a fake(very high) height,
-		// so that the ancient limit is also very high. And then the peer start to
-		// feed us valid blocks until head. All of these blocks might be written into
-		// the ancient store, the safe region for freezer is not enough.
+		// The reason for picking checkpoint first is that a malicious peer can give us
+		// a fake (very high) height, forcing the ancient limit to also be very high.
+		// The peer would start to feed us valid blocks until head, resulting in all of
+		// the blocks might be written into the ancient store. A following mini-reorg
+		// could cause issues.
 		if d.checkpoint != 0 && d.checkpoint > MaxForkAncestry+1 {
-			d.ancientLimit = height - MaxForkAncestry - 1
+			d.ancientLimit = d.checkpoint
 		} else if height > MaxForkAncestry+1 {
 			d.ancientLimit = height - MaxForkAncestry - 1
 		}
|
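The checkpoint-first rule in the rewritten comment reduces to a small pure function. A minimal sketch of that decision (the `maxForkAncestry` value below is an assumed stand-in for the downloader's real constant, not taken from this diff):

```go
package main

import "fmt"

// maxForkAncestry is an assumed stand-in; the downloader derives the
// real value from protocol parameters.
const maxForkAncestry = 90000

// ancientLimit mirrors the branch above: trust the checkpoint when one
// exists, otherwise fall back to the peer-advertised height, otherwise
// freeze nothing directly.
func ancientLimit(checkpoint, height uint64) uint64 {
	if checkpoint != 0 && checkpoint > maxForkAncestry+1 {
		return checkpoint
	}
	if height > maxForkAncestry+1 {
		return height - maxForkAncestry - 1
	}
	return 0
}

func main() {
	fmt.Println(ancientLimit(0, 100000))      // 9999: derived from the advertised height
	fmt.Println(ancientLimit(95000, 2000000)) // 95000: the checkpoint caps the limit
}
```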
@@ -76,8 +76,11 @@ type AncientReader interface {
 	// Ancient retrieves an ancient binary blob from the append-only immutable files.
 	Ancient(kind string, number uint64) ([]byte, error)
 
-	// Ancients returns the ancient store length
+	// Ancients returns the ancient item numbers in the ancient store.
 	Ancients() (uint64, error)
+
+	// AncientSize returns the ancient size of the specified category.
+	AncientSize(kind string) (uint64, error)
 }
 
 // AncientWriter contains the methods required to write to immutable ancient data.
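To see what the widened read interface demands of implementers, here is a hypothetical in-memory stub covering just the three read methods above; it is illustrative only and satisfies none of the other ethdb interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

// memAncients is a hypothetical stand-in showing the extended read
// surface: random access, item count, and per-category size.
type memAncients struct {
	headers [][]byte
}

func (m *memAncients) Ancient(kind string, number uint64) ([]byte, error) {
	if kind != "headers" || number >= uint64(len(m.headers)) {
		return nil, errors.New("not found")
	}
	return m.headers[number], nil
}

func (m *memAncients) Ancients() (uint64, error) {
	return uint64(len(m.headers)), nil
}

func (m *memAncients) AncientSize(kind string) (uint64, error) {
	var size uint64
	for _, h := range m.headers {
		size += uint64(len(h))
	}
	return size, nil
}

func main() {
	m := &memAncients{headers: [][]byte{[]byte("genesis")}}
	n, _ := m.Ancients()
	s, _ := m.AncientSize("headers")
	fmt.Println(n, s) // 1 7
}
```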
vendor/github.com/olekukonko/tablewriter/README.md (generated, vendored; 121 lines changed)
@@ -1,7 +1,9 @@
 ASCII Table Writer
 =========
 
-[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) [![Total views](https://sourcegraph.com/api/repos/github.com/olekukonko/tablewriter/counters/views.png)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
+[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter)
+[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
+[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter)
 
 Generate ASCII table on the fly ... Installation is simple as
@@ -22,7 +24,8 @@ Generate ASCII table on the fly ... Installation is simple as
 - Enable or disable table border
 - Set custom footer support
 - Optional identical cells merging
+- Set custom caption
+- Optional reflowing of paragraphs in multi-line cells.
 
 #### Example 1 - Basic
 ```go
@@ -75,21 +78,21 @@ table.Render()
 ```
 
     DATE   |       DESCRIPTION        |  CV2  | AMOUNT
-+----------+--------------------------+-------+---------+
+-----------+--------------------------+-------+----------
   1/1/2014 | Domain name              |  2233 | $10.98
   1/1/2014 | January Hosting          |  2233 | $54.95
   1/4/2014 | February Hosting         |  2233 | $51.00
   1/4/2014 | February Extra Bandwidth |  2233 | $30.00
-+----------+--------------------------+-------+---------+
+-----------+--------------------------+-------+----------
     TOTAL  | $146 93
-+-------+---------+
+--------+----------
 
 ```
 
 #### Example 3 - CSV
 ```go
-table, _ := tablewriter.NewCSV(os.Stdout, "test_info.csv", true)
+table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true)
 table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment
 table.Render()
 ```
@@ -107,12 +110,12 @@ table.Render()
 
 #### Example 4 - Custom Separator
 ```go
-table, _ := tablewriter.NewCSV(os.Stdout, "test.csv", true)
+table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true)
 table.SetRowLine(true) // Enable row line
 
 // Change table lines
 table.SetCenterSeparator("*")
-table.SetColumnSeparator("‡")
+table.SetColumnSeparator("╪")
 table.SetRowSeparator("-")
 
 table.SetAlignment(tablewriter.ALIGN_LEFT)
@@ -132,7 +135,7 @@ table.Render()
 *------------*-----------*---------*
 ```
 
-##### Example 5 - Markdown Format
+#### Example 5 - Markdown Format
 ```go
 data := [][]string{
 	[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
@@ -194,11 +197,109 @@ table.Render()
 +----------+--------------------------+-------+---------+
 ```
 
+#### Table with color
+```go
+data := [][]string{
+	[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
+	[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+	[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+	[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
+table.SetBorder(false)                                // Set Border to false
+
+table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor},
+	tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor},
+	tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor},
+	tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor})
+
+table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor})
+
+table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{},
+	tablewriter.Colors{tablewriter.Bold},
+	tablewriter.Colors{tablewriter.FgHiRedColor})
+
+table.AppendBulk(data)
+table.Render()
+```
+
+#### Table with color Output
+![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png)
+
+#### Example 6 - Set table caption
+```go
+data := [][]string{
+	[]string{"A", "The Good", "500"},
+	[]string{"B", "The Very very Bad Man", "288"},
+	[]string{"C", "The Ugly", "120"},
+	[]string{"D", "The Gopher", "800"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Name", "Sign", "Rating"})
+table.SetCaption(true, "Movie ratings.")
+
+for _, v := range data {
+	table.Append(v)
+}
+table.Render() // Send output
+```
+
+Note: Caption text will wrap to the total width of the rendered table.
+
+##### Output 6
+```
++------+-----------------------+--------+
+| NAME |         SIGN          | RATING |
++------+-----------------------+--------+
+| A    | The Good              |    500 |
+| B    | The Very very Bad Man |    288 |
+| C    | The Ugly              |    120 |
+| D    | The Gopher            |    800 |
++------+-----------------------+--------+
+Movie ratings.
+```
+
+#### Render table into a string
+
+Instead of rendering the table to `io.Stdout` you can also render it into a string. Go 1.10 introduced the `strings.Builder` type which implements the `io.Writer` interface and can therefore be used for this task. Example:
+
+```go
+package main
+
+import (
+	"strings"
+	"fmt"
+
+	"github.com/olekukonko/tablewriter"
+)
+
+func main() {
+	tableString := &strings.Builder{}
+	table := tablewriter.NewWriter(tableString)
+
+	/*
+	 * Code to fill the table
+	 */
+	table.Render()
+
+	fmt.Println(tableString.String())
+}
+```
 #### TODO
 - ~~Import Directly from CSV~~ - `done`
 - ~~Support for `SetFooter`~~ - `done`
 - ~~Support for `SetBorder`~~ - `done`
 - ~~Support table with uneven rows~~ - `done`
-- Support custom alignment
+- ~~Support custom alignment~~
 - General Improvement & Optimisation
 - `NewHTML` Parse table from HTML
vendor/github.com/olekukonko/tablewriter/table.go (generated, vendored; 366 lines changed)
@@ -36,8 +36,8 @@ const (
 )
 
 var (
-	decimal = regexp.MustCompile(`^-*\d*\.?\d*$`)
-	percent = regexp.MustCompile(`^-*\d*\.?\d*$%$`)
+	decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`)
+	percent = regexp.MustCompile(`^-?\d+\.?\d*$%$`)
 )
 
 type Border struct {
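The tightened `decimal` pattern is easy to sanity-check in isolation; a small sketch exercising it (the sample inputs are chosen here, not taken from the diff):

```go
package main

import (
	"fmt"
	"regexp"
)

// The new pattern from the hunk above: optional sign, digit groups that
// may use thousands separators, optional fraction. "1,234.56" now
// matches, while degenerate strings like "-." no longer do.
var decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`)

func main() {
	for _, s := range []string{"1,234.56", "-42", "3.14", "-.", "1,23"} {
		fmt.Printf("%-10q %v\n", s, decimal.MatchString(s))
	}
	// Output: true, true, true, false, false
}
```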
@@ -53,10 +53,13 @@ type Table struct {
 	lines   [][][]string
 	cs      map[int]int
 	rs      map[int]int
-	headers []string
-	footers []string
+	headers     [][]string
+	footers     [][]string
+	caption     bool
+	captionText string
 	autoFmt  bool
 	autoWrap bool
+	reflowText bool
 	mW       int
 	pCenter  string
 	pRow     string
@@ -72,6 +75,10 @@ type Table struct {
 	hdrLine bool
 	borders Border
 	colSize int
+	headerParams  []string
+	columnsParams []string
+	footerParams  []string
+	columnsAlign  []int
 }
 
 // Start New Table
@@ -83,10 +90,13 @@ func NewWriter(writer io.Writer) *Table {
 		lines:   [][][]string{},
 		cs:      make(map[int]int),
 		rs:      make(map[int]int),
-		headers: []string{},
-		footers: []string{},
+		headers:     [][]string{},
+		footers:     [][]string{},
+		caption:     false,
+		captionText: "Table caption.",
 		autoFmt:  true,
 		autoWrap: true,
+		reflowText: true,
 		mW:       MAX_ROW_WIDTH,
 		pCenter:  CENTER,
 		pRow:     ROW,
@@ -100,12 +110,16 @@ func NewWriter(writer io.Writer) *Table {
 		rowLine: false,
 		hdrLine: true,
 		borders: Border{Left: true, Right: true, Bottom: true, Top: true},
-		colSize: -1}
+		colSize: -1,
+		headerParams:  []string{},
+		columnsParams: []string{},
+		footerParams:  []string{},
+		columnsAlign:  []int{}}
 	return t
 }
 
 // Render table output
-func (t Table) Render() {
+func (t *Table) Render() {
 	if t.borders.Top {
 		t.printLine(true)
 	}
@@ -115,20 +129,27 @@ func (t Table) Render() {
 	} else {
 		t.printRows()
 	}
 
 	if !t.rowLine && t.borders.Bottom {
 		t.printLine(true)
 	}
 	t.printFooter()
+
+	if t.caption {
+		t.printCaption()
+	}
 }
 
+const (
+	headerRowIdx = -1
+	footerRowIdx = -2
+)
+
 // Set table header
 func (t *Table) SetHeader(keys []string) {
 	t.colSize = len(keys)
 	for i, v := range keys {
-		t.parseDimension(v, i, -1)
-		t.headers = append(t.headers, v)
+		lines := t.parseDimension(v, i, headerRowIdx)
+		t.headers = append(t.headers, lines)
 	}
 }
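With `headers` now `[][]string`, each header cell stores the physical lines produced by `parseDimension`, so long header text can wrap across several rows of the heading. A minimal sketch of the effect, assuming the vendored package is importable under its usual path:

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetColWidth(12) // force the long header below to wrap
	table.SetHeader([]string{"ID", "A fairly long header that will wrap"})
	table.Append([]string{"1", "row one"})
	table.Render() // the second header cell spans multiple heading lines
}
```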
@@ -136,8 +157,16 @@ func (t *Table) SetHeader(keys []string) {
 func (t *Table) SetFooter(keys []string) {
 	//t.colSize = len(keys)
 	for i, v := range keys {
-		t.parseDimension(v, i, -1)
-		t.footers = append(t.footers, v)
+		lines := t.parseDimension(v, i, footerRowIdx)
+		t.footers = append(t.footers, lines)
+	}
+}
+
+// Set table Caption
+func (t *Table) SetCaption(caption bool, captionText ...string) {
+	t.caption = caption
+
+	if len(captionText) == 1 {
+		t.captionText = captionText[0]
 	}
 }
@@ -151,11 +180,21 @@ func (t *Table) SetAutoWrapText(auto bool) {
 	t.autoWrap = auto
 }
 
+// Turn automatic reflowing of multiline text when rewrapping. Default is on (true).
+func (t *Table) SetReflowDuringAutoWrap(auto bool) {
+	t.reflowText = auto
+}
+
 // Set the Default column width
 func (t *Table) SetColWidth(width int) {
 	t.mW = width
}
 
+// Set the minimal width for a column
+func (t *Table) SetColMinWidth(column int, width int) {
+	t.cs[column] = width
+}
+
 // Set the Column Separator
 func (t *Table) SetColumnSeparator(sep string) {
 	t.pColumn = sep
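`SetColMinWidth` only seeds `t.cs[column]`, so it sets a floor that longer content can still exceed. A short usage sketch (the keys and values are illustrative):

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Key", "Value"})
	// Reserve at least 40 characters for column 1, even though the
	// values appended below are much shorter.
	table.SetColMinWidth(1, 40)
	table.Append([]string{"host", "localhost"})
	table.Append([]string{"port", "8545"})
	table.Render()
}
```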
@@ -186,6 +225,22 @@ func (t *Table) SetAlignment(align int) {
 	t.align = align
 }
 
+func (t *Table) SetColumnAlignment(keys []int) {
+	for _, v := range keys {
+		switch v {
+		case ALIGN_CENTER:
+			break
+		case ALIGN_LEFT:
+			break
+		case ALIGN_RIGHT:
+			break
+		default:
+			v = ALIGN_DEFAULT
+		}
+		t.columnsAlign = append(t.columnsAlign, v)
+	}
+}
+
 // Set New Line
 func (t *Table) SetNewLine(nl string) {
 	t.newLine = nl
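`SetColumnAlignment` takes one alignment constant per column, in column order; anything unrecognised falls back to `ALIGN_DEFAULT`. A usage sketch:

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Item", "Qty", "Price"})
	// One alignment constant per column, in column order.
	table.SetColumnAlignment([]int{
		tablewriter.ALIGN_LEFT,
		tablewriter.ALIGN_CENTER,
		tablewriter.ALIGN_RIGHT,
	})
	table.Append([]string{"pencil", "2", "$0.30"})
	table.Append([]string{"notebook", "12", "$4.50"})
	table.Render()
}
```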
@@ -249,16 +304,44 @@ func (t *Table) AppendBulk(rows [][]string) {
 	}
 }
 
+// NumLines to get the number of lines
+func (t *Table) NumLines() int {
+	return len(t.lines)
+}
+
+// Clear rows
+func (t *Table) ClearRows() {
+	t.lines = [][][]string{}
+}
+
+// Clear footer
+func (t *Table) ClearFooter() {
+	t.footers = [][]string{}
+}
+
+// Center based on position and border.
+func (t *Table) center(i int) string {
+	if i == -1 && !t.borders.Left {
+		return t.pRow
+	}
+
+	if i == len(t.cs)-1 && !t.borders.Right {
+		return t.pRow
+	}
+
+	return t.pCenter
+}
+
 // Print line based on row width
-func (t Table) printLine(nl bool) {
-	fmt.Fprint(t.out, t.pCenter)
+func (t *Table) printLine(nl bool) {
+	fmt.Fprint(t.out, t.center(-1))
 	for i := 0; i < len(t.cs); i++ {
 		v := t.cs[i]
 		fmt.Fprintf(t.out, "%s%s%s%s",
 			t.pRow,
 			strings.Repeat(string(t.pRow), v),
 			t.pRow,
-			t.pCenter)
+			t.center(i))
 	}
 	if nl {
 		fmt.Fprint(t.out, t.newLine)
@@ -266,7 +349,7 @@ func (t Table) printLine(nl bool) {
 }
 
 // Print line based on row width with our without cell separator
-func (t Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
+func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
 	fmt.Fprint(t.out, t.pCenter)
 	for i := 0; i < len(t.cs); i++ {
 		v := t.cs[i]
@@ -303,43 +386,64 @@ func pad(align int) func(string, string, int) string {
 }
 
 // Print heading information
-func (t Table) printHeading() {
+func (t *Table) printHeading() {
 	// Check if headers is available
 	if len(t.headers) < 1 {
 		return
 	}
 
-	// Check if border is set
-	// Replace with space if not set
-	fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
-
 	// Identify last column
 	end := len(t.cs) - 1
 
 	// Get pad function
 	padFunc := pad(t.hAlign)
 
-	// Print Heading column
-	for i := 0; i <= end; i++ {
-		v := t.cs[i]
-		h := t.headers[i]
-		if t.autoFmt {
-			h = Title(h)
-		}
-		pad := ConditionString((i == end && !t.borders.Left), SPACE, t.pColumn)
-		fmt.Fprintf(t.out, " %s %s",
-			padFunc(h, SPACE, v),
-			pad)
-	}
-	// Next line
-	fmt.Fprint(t.out, t.newLine)
+	// Checking for ANSI escape sequences for header
+	is_esc_seq := false
+	if len(t.headerParams) > 0 {
+		is_esc_seq = true
+	}
+
+	// Maximum height.
+	max := t.rs[headerRowIdx]
+
+	// Print Heading
+	for x := 0; x < max; x++ {
+		// Check if border is set
+		// Replace with space if not set
+		fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
+
+		for y := 0; y <= end; y++ {
+			v := t.cs[y]
+			h := ""
+			if y < len(t.headers) && x < len(t.headers[y]) {
+				h = t.headers[y][x]
+			}
+			if t.autoFmt {
+				h = Title(h)
+			}
+			pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn)
+
+			if is_esc_seq {
+				fmt.Fprintf(t.out, " %s %s",
+					format(padFunc(h, SPACE, v),
+						t.headerParams[y]), pad)
+			} else {
+				fmt.Fprintf(t.out, " %s %s",
+					padFunc(h, SPACE, v),
+					pad)
+			}
+		}
+		// Next line
+		fmt.Fprint(t.out, t.newLine)
+	}
 	if t.hdrLine {
 		t.printLine(true)
 	}
 }
 
 // Print heading information
-func (t Table) printFooter() {
+func (t *Table) printFooter() {
 	// Check if headers is available
 	if len(t.footers) < 1 {
 		return
@@ -349,9 +453,6 @@ func (t Table) printFooter() {
 	if !t.borders.Bottom {
 		t.printLine(true)
 	}
-	// Check if border is set
-	// Replace with space if not set
-	fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
 
 	// Identify last column
 	end := len(t.cs) - 1
@@ -359,25 +460,56 @@ func (t Table) printFooter() {
 	// Get pad function
 	padFunc := pad(t.fAlign)
 
-	// Print Heading column
-	for i := 0; i <= end; i++ {
-		v := t.cs[i]
-		f := t.footers[i]
+	// Checking for ANSI escape sequences for header
+	is_esc_seq := false
+	if len(t.footerParams) > 0 {
+		is_esc_seq = true
+	}
+
+	// Maximum height.
+	max := t.rs[footerRowIdx]
+
+	// Print Footer
+	erasePad := make([]bool, len(t.footers))
+	for x := 0; x < max; x++ {
+		// Check if border is set
+		// Replace with space if not set
+		fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
+
+		for y := 0; y <= end; y++ {
+			v := t.cs[y]
+			f := ""
+			if y < len(t.footers) && x < len(t.footers[y]) {
+				f = t.footers[y][x]
+			}
 			if t.autoFmt {
 				f = Title(f)
 			}
-		pad := ConditionString((i == end && !t.borders.Top), SPACE, t.pColumn)
+			pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn)
 
-		if len(t.footers[i]) == 0 {
-			pad = SPACE
-		}
-		fmt.Fprintf(t.out, " %s %s",
-			padFunc(f, SPACE, v),
-			pad)
+			if erasePad[y] || (x == 0 && len(f) == 0) {
+				pad = SPACE
+				erasePad[y] = true
+			}
+
+			if is_esc_seq {
+				fmt.Fprintf(t.out, " %s %s",
+					format(padFunc(f, SPACE, v),
+						t.footerParams[y]), pad)
+			} else {
+				fmt.Fprintf(t.out, " %s %s",
+					padFunc(f, SPACE, v),
+					pad)
+			}
+
+			//fmt.Fprintf(t.out, " %s %s",
+			//	padFunc(f, SPACE, v),
+			//	pad)
+		}
 		// Next line
 		fmt.Fprint(t.out, t.newLine)
 		//t.printLine(true)
+	}
 
 	hasPrinted := false
@@ -385,7 +517,7 @@ func (t Table) printFooter() {
 		v := t.cs[i]
 		pad := t.pRow
 		center := t.pCenter
-		length := len(t.footers[i])
+		length := len(t.footers[i][0])
 
 		if length > 0 {
 			hasPrinted = true
@@ -398,6 +530,9 @@ func (t Table) printFooter() {
 
 		// Print first junction
 		if i == 0 {
+			if length > 0 && !t.borders.Left {
+				center = t.pRow
+			}
 			fmt.Fprint(t.out, center)
 		}
 
@@ -405,18 +540,29 @@ func (t Table) printFooter() {
 		if length == 0 {
 			pad = SPACE
 		}
-		// Ignore left space of it has printed before
+		// Ignore left space as it has printed before
 		if hasPrinted || t.borders.Left {
 			pad = t.pRow
 			center = t.pCenter
 		}
 
+		// Change Center end position
+		if center != SPACE {
+			if i == end && !t.borders.Right {
+				center = t.pRow
+			}
+		}
+
 		// Change Center start position
 		if center == SPACE {
-			if i < end && len(t.footers[i+1]) != 0 {
+			if i < end && len(t.footers[i+1][0]) != 0 {
+				if !t.borders.Left {
+					center = t.pRow
+				} else {
 					center = t.pCenter
+				}
 			}
 		}
 
 		// Print the footer
 		fmt.Fprintf(t.out, "%s%s%s%s",
@@ -428,22 +574,53 @@ func (t Table) printFooter() {
 	}
 
 	fmt.Fprint(t.out, t.newLine)
+}
+
+// Print caption text
+func (t Table) printCaption() {
+	width := t.getTableWidth()
+	paragraph, _ := WrapString(t.captionText, width)
+	for linecount := 0; linecount < len(paragraph); linecount++ {
+		fmt.Fprintln(t.out, paragraph[linecount])
+	}
+}
+
+// Calculate the total number of characters in a row
+func (t Table) getTableWidth() int {
+	var chars int
+	for _, v := range t.cs {
+		chars += v
+	}
+
+	// Add chars, spaces, seperators to calculate the total width of the table.
+	// ncols := t.colSize
+	// spaces := ncols * 2
+	// seps := ncols + 1
+
+	return (chars + (3 * t.colSize) + 2)
 }
 
 func (t Table) printRows() {
 	for i, lines := range t.lines {
 		t.printRow(lines, i)
 	}
+}
+
+func (t *Table) fillAlignment(num int) {
+	if len(t.columnsAlign) < num {
+		t.columnsAlign = make([]int, num)
+		for i := range t.columnsAlign {
+			t.columnsAlign[i] = t.align
+		}
+	}
 }
 
 // Print Row Information
 // Adjust column alignment based on type
-func (t Table) printRow(columns [][]string, colKey int) {
+func (t *Table) printRow(columns [][]string, rowIdx int) {
 	// Get Maximum Height
-	max := t.rs[colKey]
+	max := t.rs[rowIdx]
 	total := len(columns)
 
 	// TODO Fix uneven col size
@@ -455,9 +632,15 @@ func (t Table) printRow(columns [][]string, colKey int) {
 	//}
 
 	// Pad Each Height
-	// pads := []int{}
 	pads := []int{}
 
+	// Checking for ANSI escape sequences for columns
+	is_esc_seq := false
+	if len(t.columnsParams) > 0 {
+		is_esc_seq = true
+	}
+	t.fillAlignment(total)
+
 	for i, line := range columns {
 		length := len(line)
 		pad := max - length
@@ -476,9 +659,14 @@ func (t Table) printRow(columns [][]string, colKey int) {
 			fmt.Fprintf(t.out, SPACE)
 			str := columns[y][x]
 
+			// Embedding escape sequence with column value
+			if is_esc_seq {
+				str = format(str, t.columnsParams[y])
+			}
+
 			// This would print alignment
 			// Default alignment would use multiple configuration
-			switch t.align {
+			switch t.columnsAlign[y] {
 			case ALIGN_CENTER: //
 				fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
 			case ALIGN_RIGHT:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Print the rows of the table and merge the cells that are identical
|
// Print the rows of the table and merge the cells that are identical
|
||||||
func (t Table) printRowsMergeCells() {
|
func (t *Table) printRowsMergeCells() {
|
||||||
var previousLine []string
|
var previousLine []string
|
||||||
var displayCellBorder []bool
|
var displayCellBorder []bool
|
||||||
var tmpWriter bytes.Buffer
|
var tmpWriter bytes.Buffer
|
||||||
@ -537,14 +725,19 @@ func (t Table) printRowsMergeCells() {
|
|||||||
// Print Row Information to a writer and merge identical cells.
|
// Print Row Information to a writer and merge identical cells.
|
||||||
// Adjust column alignment based on type
|
// Adjust column alignment based on type
|
||||||
|
|
||||||
func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey int, previousLine []string) ([]string, []bool) {
|
func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) {
|
||||||
// Get Maximum Height
|
// Get Maximum Height
|
||||||
max := t.rs[colKey]
|
max := t.rs[rowIdx]
|
||||||
total := len(columns)
|
total := len(columns)
|
||||||
|
|
||||||
// Pad Each Height
|
// Pad Each Height
|
||||||
pads := []int{}
|
pads := []int{}
|
||||||
|
|
||||||
|
// Checking for ANSI escape sequences for columns
|
||||||
|
is_esc_seq := false
|
||||||
|
if len(t.columnsParams) > 0 {
|
||||||
|
is_esc_seq = true
|
||||||
|
}
|
||||||
for i, line := range columns {
|
for i, line := range columns {
|
||||||
length := len(line)
|
length := len(line)
|
||||||
pad := max - length
|
pad := max - length
|
||||||
@ -555,6 +748,7 @@ func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey i
|
|||||||
}
|
}
|
||||||
|
|
||||||
var displayCellBorder []bool
|
var displayCellBorder []bool
|
||||||
|
t.fillAlignment(total)
|
||||||
for x := 0; x < max; x++ {
|
for x := 0; x < max; x++ {
|
||||||
for y := 0; y < total; y++ {
|
for y := 0; y < total; y++ {
|
||||||
|
|
||||||
@ -565,6 +759,11 @@ func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey i
|
|||||||
|
|
||||||
str := columns[y][x]
|
str := columns[y][x]
|
||||||
|
|
||||||
|
// Embedding escape sequence with column value
|
||||||
|
if is_esc_seq {
|
||||||
|
str = format(str, t.columnsParams[y])
|
||||||
|
}
|
||||||
|
|
||||||
if t.autoMergeCells {
|
if t.autoMergeCells {
|
||||||
//Store the full line to merge mutli-lines cells
|
//Store the full line to merge mutli-lines cells
|
||||||
fullLine := strings.Join(columns[y], " ")
|
fullLine := strings.Join(columns[y], " ")
|
||||||
@ -580,7 +779,7 @@ func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey i
|
|||||||
|
|
||||||
// This would print alignment
|
// This would print alignment
|
||||||
// Default alignment would use multiple configuration
|
// Default alignment would use multiple configuration
|
||||||
switch t.align {
|
switch t.columnsAlign[y] {
|
||||||
case ALIGN_CENTER: //
|
case ALIGN_CENTER: //
|
||||||
fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y]))
|
fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y]))
|
||||||
case ALIGN_RIGHT:
|
case ALIGN_RIGHT:
|
||||||
@ -614,43 +813,58 @@ func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey i
|
|||||||
func (t *Table) parseDimension(str string, colKey, rowKey int) []string {
|
func (t *Table) parseDimension(str string, colKey, rowKey int) []string {
|
||||||
var (
|
var (
|
||||||
raw []string
|
raw []string
|
||||||
max int
|
maxWidth int
|
||||||
)
|
)
|
||||||
w := DisplayWidth(str)
|
|
||||||
// Calculate Width
|
|
||||||
// Check if with is grater than maximum width
|
|
||||||
if w > t.mW {
|
|
||||||
w = t.mW
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if width exists
|
|
||||||
v, ok := t.cs[colKey]
|
|
||||||
if !ok || v < w || v == 0 {
|
|
||||||
t.cs[colKey] = w
|
|
||||||
}
|
|
||||||
|
|
||||||
if rowKey == -1 {
|
|
||||||
return raw
|
|
||||||
}
|
|
||||||
// Calculate Height
|
|
||||||
if t.autoWrap {
|
|
||||||
raw, _ = WrapString(str, t.cs[colKey])
|
|
||||||
} else {
|
|
||||||
raw = getLines(str)
|
raw = getLines(str)
|
||||||
}
|
maxWidth = 0
|
||||||
|
|
||||||
for _, line := range raw {
|
for _, line := range raw {
|
||||||
if w := DisplayWidth(line); w > max {
|
if w := DisplayWidth(line); w > maxWidth {
|
||||||
max = w
|
maxWidth = w
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make sure the with is the same length as maximum word
|
// If wrapping, ensure that all paragraphs in the cell fit in the
|
||||||
// Important for cases where the width is smaller than maxu word
|
// specified width.
|
||||||
if max > t.cs[colKey] {
|
if t.autoWrap {
|
||||||
t.cs[colKey] = max
|
// If there's a maximum allowed width for wrapping, use that.
|
||||||
|
if maxWidth > t.mW {
|
||||||
|
maxWidth = t.mW
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// In the process of doing so, we need to recompute maxWidth. This
|
||||||
|
// is because perhaps a word in the cell is longer than the
|
||||||
|
// allowed maximum width in t.mW.
|
||||||
|
newMaxWidth := maxWidth
|
||||||
|
newRaw := make([]string, 0, len(raw))
|
||||||
|
|
||||||
|
if t.reflowText {
|
||||||
|
// Make a single paragraph of everything.
|
||||||
|
raw = []string{strings.Join(raw, " ")}
|
||||||
|
}
|
||||||
|
for i, para := range raw {
|
||||||
|
paraLines, _ := WrapString(para, maxWidth)
|
||||||
|
for _, line := range paraLines {
|
||||||
|
if w := DisplayWidth(line); w > newMaxWidth {
|
||||||
|
newMaxWidth = w
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if i > 0 {
|
||||||
|
newRaw = append(newRaw, " ")
|
||||||
|
}
|
||||||
|
newRaw = append(newRaw, paraLines...)
|
||||||
|
}
|
||||||
|
raw = newRaw
|
||||||
|
maxWidth = newMaxWidth
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the new known maximum width.
|
||||||
|
v, ok := t.cs[colKey]
|
||||||
|
if !ok || v < maxWidth || v == 0 {
|
||||||
|
t.cs[colKey] = maxWidth
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remember the number of lines for the row printer.
|
||||||
h := len(raw)
|
h := len(raw)
|
||||||
v, ok = t.rs[rowKey]
|
v, ok = t.rs[rowKey]
|
||||||
|
|
||||||
|
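The reflow branch in `parseDimension` is the behavioural heart of this hunk: with `reflowText` on (the default) a cell's lines are joined into one paragraph and rewrapped, while `SetReflowDuringAutoWrap(false)` preserves the caller's original line breaks. A sketch contrasting the two (the input text is illustrative):

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	cell := "first line\nsecond line that is long enough to need rewrapping"

	// With reflow disabled, the embedded newline above survives and only
	// over-long lines are wrapped; with the default (true), both lines
	// would be joined and rewrapped as one paragraph.
	table := tablewriter.NewWriter(os.Stdout)
	table.SetColWidth(16)
	table.SetReflowDuringAutoWrap(false)
	table.SetHeader([]string{"Note"})
	table.Append([]string{cell})
	table.Render()
}
```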
vendor/github.com/olekukonko/tablewriter/table_with_color.go (generated, vendored; new file, 134 lines)
@@ -0,0 +1,134 @@
+package tablewriter
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const ESC = "\033"
+const SEP = ";"
+
+const (
+	BgBlackColor int = iota + 40
+	BgRedColor
+	BgGreenColor
+	BgYellowColor
+	BgBlueColor
+	BgMagentaColor
+	BgCyanColor
+	BgWhiteColor
+)
+
+const (
+	FgBlackColor int = iota + 30
+	FgRedColor
+	FgGreenColor
+	FgYellowColor
+	FgBlueColor
+	FgMagentaColor
+	FgCyanColor
+	FgWhiteColor
+)
+
+const (
+	BgHiBlackColor int = iota + 100
+	BgHiRedColor
+	BgHiGreenColor
+	BgHiYellowColor
+	BgHiBlueColor
+	BgHiMagentaColor
+	BgHiCyanColor
+	BgHiWhiteColor
+)
+
+const (
+	FgHiBlackColor int = iota + 90
+	FgHiRedColor
+	FgHiGreenColor
+	FgHiYellowColor
+	FgHiBlueColor
+	FgHiMagentaColor
+	FgHiCyanColor
+	FgHiWhiteColor
+)
+
+const (
+	Normal          = 0
+	Bold            = 1
+	UnderlineSingle = 4
+	Italic
+)
+
+type Colors []int
+
+func startFormat(seq string) string {
+	return fmt.Sprintf("%s[%sm", ESC, seq)
+}
+
+func stopFormat() string {
+	return fmt.Sprintf("%s[%dm", ESC, Normal)
+}
+
+// Making the SGR (Select Graphic Rendition) sequence.
+func makeSequence(codes []int) string {
+	codesInString := []string{}
+	for _, code := range codes {
+		codesInString = append(codesInString, strconv.Itoa(code))
+	}
+	return strings.Join(codesInString, SEP)
+}
+
+// Adding ANSI escape sequences before and after string
+func format(s string, codes interface{}) string {
+	var seq string
+
+	switch v := codes.(type) {
+
+	case string:
+		seq = v
+	case []int:
+		seq = makeSequence(v)
+	default:
+		return s
+	}
+
+	if len(seq) == 0 {
+		return s
+	}
+	return startFormat(seq) + s + stopFormat()
+}
+
+// Adding header colors (ANSI codes)
+func (t *Table) SetHeaderColor(colors ...Colors) {
+	if t.colSize != len(colors) {
+		panic("Number of header colors must be equal to number of headers.")
+	}
+	for i := 0; i < len(colors); i++ {
+		t.headerParams = append(t.headerParams, makeSequence(colors[i]))
+	}
+}
+
+// Adding column colors (ANSI codes)
+func (t *Table) SetColumnColor(colors ...Colors) {
+	if t.colSize != len(colors) {
+		panic("Number of column colors must be equal to number of headers.")
+	}
+	for i := 0; i < len(colors); i++ {
+		t.columnsParams = append(t.columnsParams, makeSequence(colors[i]))
+	}
+}
+
+// Adding column colors (ANSI codes)
+func (t *Table) SetFooterColor(colors ...Colors) {
+	if len(t.footers) != len(colors) {
+		panic("Number of footer colors must be equal to number of footer.")
+	}
+	for i := 0; i < len(colors); i++ {
+		t.footerParams = append(t.footerParams, makeSequence(colors[i]))
+	}
+}
+
+func Color(colors ...int) []int {
+	return colors
+}
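The SGR plumbing above is compact; this standalone re-statement (not the vendored code itself) shows the escape sequence that `Colors{Bold, FgGreenColor}` ultimately produces:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// A re-statement of makeSequence: Colors{Bold, FgGreenColor} -> "1;32",
// which startFormat/stopFormat wrap as "\033[1;32m" ... "\033[0m".
func makeSequence(codes []int) string {
	parts := make([]string, 0, len(codes))
	for _, c := range codes {
		parts = append(parts, strconv.Itoa(c))
	}
	return strings.Join(parts, ";")
}

func main() {
	seq := makeSequence([]int{1, 32}) // Bold = 1, FgGreenColor = 32
	fmt.Printf("%q\n", "\033["+seq+"m"+"ok"+"\033[0m")
}
```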
vendor/github.com/olekukonko/tablewriter/test.csv (generated, vendored; deleted)
@@ -1,4 +0,0 @@
-first_name,last_name,ssn
-John,Barry,123456
-Kathy,Smith,687987
-Bob,McCornick,3979870
vendor/github.com/olekukonko/tablewriter/test_info.csv (generated, vendored; deleted)
@@ -1,4 +0,0 @@
-Field,Type,Null,Key,Default,Extra
-user_id,smallint(5),NO,PRI,NULL,auto_increment
-username,varchar(10),NO,,NULL,
-password,varchar(100),NO,,NULL,
vendor/github.com/olekukonko/tablewriter/util.go (generated, vendored; 31 lines changed)
@@ -30,17 +30,38 @@ func ConditionString(cond bool, valid, inValid string) string {
 	return inValid
 }
 
+func isNumOrSpace(r rune) bool {
+	return ('0' <= r && r <= '9') || r == ' '
+}
+
 // Format Table Header
 // Replace _ , . and spaces
 func Title(name string) string {
-	name = strings.Replace(name, "_", " ", -1)
-	name = strings.Replace(name, ".", " ", -1)
+	origLen := len(name)
+	rs := []rune(name)
+	for i, r := range rs {
+		switch r {
+		case '_':
+			rs[i] = ' '
+		case '.':
+			// ignore floating number 0.0
+			if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) {
+				rs[i] = ' '
+			}
+		}
+	}
+	name = string(rs)
 	name = strings.TrimSpace(name)
+	if len(name) == 0 && origLen > 0 {
+		// Keep at least one character. This is important to preserve
+		// empty lines in multi-line headers/footers.
+		name = " "
+	}
 	return strings.ToUpper(name)
 }
 
 // Pad String
-// Attempts to play string in the center
+// Attempts to place string in the center
 func Pad(s, pad string, width int) string {
 	gap := width - DisplayWidth(s)
 	if gap > 0 {
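The new rune-by-rune `Title` keeps a dot when both of its neighbours are digits or spaces, so numeric headers survive formatting. A quick check using the exported function:

```go
package main

import (
	"fmt"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Underscores still become spaces, but a dot between digits is now
	// preserved, so headers like "0.5" are no longer mangled.
	fmt.Println(tablewriter.Title("user_name")) // USER NAME
	fmt.Println(tablewriter.Title("0.5"))       // 0.5
	fmt.Println(tablewriter.Title("a.b"))       // A B
}
```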
@@ -52,7 +73,7 @@ func Pad(s, pad string, width int) string {
 }
 
 // Pad String Right position
-// This would pace string at the left side fo the screen
+// This would place string at the left side of the screen
 func PadRight(s, pad string, width int) string {
 	gap := width - DisplayWidth(s)
 	if gap > 0 {
@@ -62,7 +83,7 @@ func PadRight(s, pad string, width int) string {
 }
 
 // Pad String Left position
-// This would pace string at the right side fo the screen
+// This would place string at the right side of the screen
 func PadLeft(s, pad string, width int) string {
 	gap := width - DisplayWidth(s)
 	if gap > 0 {
vendor/github.com/olekukonko/tablewriter/wrap.go (generated, vendored; 16 lines changed)
@@ -10,7 +10,8 @@ package tablewriter
 import (
 	"math"
 	"strings"
-	"unicode/utf8"
+
+	"github.com/mattn/go-runewidth"
 )
 
 var (
@@ -27,7 +28,7 @@ func WrapString(s string, lim int) ([]string, int) {
 	var lines []string
 	max := 0
 	for _, v := range words {
-		max = len(v)
+		max = runewidth.StringWidth(v)
 		if max > lim {
 			lim = max
 		}
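Switching from byte/rune counts to `runewidth.StringWidth` matters for East Asian text, where a single rune occupies two terminal cells. A small demonstration of the difference:

```go
package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "表格" // two CJK runes, four terminal cells wide
	fmt.Println(utf8.RuneCountInString(s)) // 2: what the old code measured
	fmt.Println(runewidth.StringWidth(s))  // 4: what the wrapper now uses
}
```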
@@ -55,9 +56,9 @@ func WrapWords(words []string, spc, lim, pen int) [][]string {
 	length := make([][]int, n)
 	for i := 0; i < n; i++ {
 		length[i] = make([]int, n)
-		length[i][i] = utf8.RuneCountInString(words[i])
+		length[i][i] = runewidth.StringWidth(words[i])
 		for j := i + 1; j < n; j++ {
-			length[i][j] = length[i][j-1] + spc + utf8.RuneCountInString(words[j])
+			length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j])
 		}
 	}
 	nbrk := make([]int, n)
|
|||||||
|
|
||||||
// getLines decomposes a multiline string into a slice of strings.
|
// getLines decomposes a multiline string into a slice of strings.
|
||||||
func getLines(s string) []string {
|
func getLines(s string) []string {
|
||||||
var lines []string
|
return strings.Split(s, nl)
|
||||||
|
|
||||||
for _, line := range strings.Split(s, nl) {
|
|
||||||
lines = append(lines, line)
|
|
||||||
}
|
|
||||||
return lines
|
|
||||||
}
|
}
|
||||||
|
vendor/vendor.json (vendored; 6 lines changed)
@@ -311,10 +311,10 @@
 			"revisionTime": "2017-04-03T15:03:10Z"
 		},
 		{
-			"checksumSHA1": "h+oCMj21PiQfIdBog0eyUtF1djs=",
+			"checksumSHA1": "HZJ2dhzXoMi8n+iY80A9vsnyQUk=",
 			"path": "github.com/olekukonko/tablewriter",
-			"revision": "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb",
-			"revisionTime": "2017-01-28T05:05:32Z"
+			"revision": "7e037d187b0c13d81ccf0dd1c6b990c2759e6597",
+			"revisionTime": "2019-04-09T13:48:02Z"
 		},
 		{
 			"checksumSHA1": "a/DHmc9bdsYlZZcwp6i3xhvV7Pk=",