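// Package stringutil contains a double-array trie used to match strings
// against a fixed set of keys. The trie is built once from the key set and
// then queried with Lookup or LookupByBytes.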
package stringutil

import (
	"fmt"
	"sort"
	"unicode/utf8"
)

const (
	// terminationCharacter is appended to every key to mark its end in the trie.
	terminationCharacter = '#'
)

// mustDoubleArray returns da and panics if err is non-nil.
func mustDoubleArray(da *doubleArray, err error) *doubleArray {
	if err != nil {
		panic(err)
	}
	return da
}

// Build constructs the double-array trie from keys.
func (da *doubleArray) Build(keys []string) error {
	records := makeRecords(keys)
	if err := da.build(records, 1, 0, make(map[int]struct{})); err != nil {
		return err
	}
	return nil
}

// doubleArray is a double-array trie. bc holds the packed BASE/CHECK entries
// and node holds the value stored for each key.
type doubleArray struct {
	bc   []baseCheck
	node []int
}

// newDoubleArray builds a double-array trie from keys.
func newDoubleArray(keys []string) (*doubleArray, error) {
	da := &doubleArray{
		bc:   []baseCheck{0},
		node: []int{-1}, // The start index is 1 because 0 marks a non-existent node.
	}
	if err := da.Build(keys); err != nil {
		return nil, err
	}
	return da, nil
}
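
// A minimal usage sketch (the keys and paths below are illustrative only, not
// taken from the package's callers): build a trie over a fixed key set, then
// query it.
//
//	da := mustDoubleArray(newDoubleArray([]string{"/foo", "/foo/bar"}))
//	n := da.Lookup("/foo/bar/baz") // n == 8: "/foo/bar" matches and its rune count was stored as its value.
//	m := da.Lookup("/nope")        // m == -1: no key matches.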

// baseCheck packs BASE, CHECK, and two extra flag bits into a single uint32.
// From the most significant bit down: 22 bits of BASE, 2 bits of extra flags,
// and 8 bits of CHECK.
//
//	 BASE (22bit)         | Extra flags (2bit) | CHECK (8bit)
//	|----------------------|--|--------|
//	32                    10  8        0
type baseCheck uint32
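
// For illustration (arbitrary example values): packing BASE = 5 and CHECK = 'a'
// (0x61) gives 5<<10 | 0x61 == 0x1461; Base then returns 5 and Check returns 0x61.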

// Base returns the BASE field.
func (bc baseCheck) Base() int {
	return int(bc >> 10)
}

// SetBase ORs base into the BASE field; the slot's BASE bits are expected to
// still be zero.
func (bc *baseCheck) SetBase(base int) {
	*bc |= baseCheck(base) << 10
}

// Check returns the CHECK byte.
func (bc baseCheck) Check() byte {
	return byte(bc)
}

// SetCheck ORs check into the CHECK byte; the slot's CHECK bits are expected
// to still be zero.
func (bc *baseCheck) SetCheck(check byte) {
	*bc |= baseCheck(check)
}

// IsEmpty reports whether both BASE and CHECK are zero; the two extra flag
// bits (bits 8 and 9) are ignored.
func (bc baseCheck) IsEmpty() bool {
	return bc&0xfffffcff == 0
}

// Lookup follows path through the trie as far as it matches. If the matched
// prefix is itself a complete key, Lookup returns the value stored for it
// (the key's length in runes); otherwise it returns -1.
func (da *doubleArray) Lookup(path string) (length int) {
	idx := 1
	tmpIdx := idx
	for i := 0; i < len(path); i++ {
		c := path[i]
		tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c)
		if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c {
			break
		}
		idx = tmpIdx
	}
	if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter {
		return da.node[da.bc[next].Base()]
	}
	return -1
}

// LookupByBytes behaves exactly like Lookup but takes the path as a byte
// slice, avoiding a string conversion.
func (da *doubleArray) LookupByBytes(path []byte) (length int) {
	idx := 1
	tmpIdx := idx
	for i := 0; i < len(path); i++ {
		c := path[i]
		tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c)
		if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c {
			break
		}
		idx = tmpIdx
	}
	if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter {
		return da.node[da.bc[next].Base()]
	}
	return -1
}

// build recursively inserts srcs into the trie. idx is the index of the node
// being built, depth is the current offset into the keys, and usedBase tracks
// BASE values that have already been assigned.
func (da *doubleArray) build(srcs []record, idx, depth int, usedBase map[int]struct{}) error {
	sort.Stable(recordSlice(srcs))
	base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase)
	if err != nil {
		return err
	}
	if leaf != nil {
		da.bc[idx].SetBase(len(da.node))
		da.node = append(da.node, leaf.value)
	}
	for _, sib := range siblings {
		da.setCheck(da.nextIndex(base, sib.c), sib.c)
	}
	for _, sib := range siblings {
		if err := da.build(srcs[sib.start:sib.end], da.nextIndex(base, sib.c), depth+1, usedBase); err != nil {
			return err
		}
	}
	return nil
}

func (da *doubleArray) setBase(i, base int) {
	da.bc[i].SetBase(base)
}

func (da *doubleArray) setCheck(i int, check byte) {
	da.bc[i].SetCheck(check)
}

// findEmptyIndex returns the index of the first empty bc entry at or after
// start, or len(da.bc) if there is none.
func (da *doubleArray) findEmptyIndex(start int) int {
	i := start
	for ; i < len(da.bc); i++ {
		if da.bc[i].IsEmpty() {
			break
		}
	}
	return i
}

// findBase returns a BASE value that is not yet in use and whose child slots
// for all siblings are empty, growing bc as necessary.
func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) {
	for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) {
		base = da.nextIndex(idx, firstChar)
		if _, used := usedBase[base]; used {
			continue
		}
		i := 0
		for ; i < len(siblings); i++ {
			next := da.nextIndex(base, siblings[i].c)
			if len(da.bc) <= next {
				da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...)
			}
			if !da.bc[next].IsEmpty() {
				break
			}
		}
		if i == len(siblings) {
			break
		}
	}
	usedBase[base] = struct{}{}
	return base
}

// arrange groups records by their byte at depth and reserves a BASE for the
// resulting siblings. The record (if any) whose key ends at depth is returned
// as leaf.
func (da *doubleArray) arrange(records []record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) {
	siblings, leaf, err = makeSiblings(records, depth)
	if err != nil {
		return -1, nil, nil, err
	}
	if len(siblings) < 1 {
		return -1, nil, leaf, nil
	}
	base = da.findBase(siblings, idx, usedBase)
	da.setBase(idx, base)
	return base, siblings, leaf, err
}

// sibling describes the records that share byte c at the current depth; they
// occupy records[start:end].
type sibling struct {
	start int
	end   int
	c     byte
}

// nextIndex computes the bc index of the child slot reached from a node with
// the given BASE via byte c; this implementation combines them with XOR
// rather than the conventional addition.
func (da *doubleArray) nextIndex(base int, c byte) int {
	return base ^ int(c)
}
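
// For example (arbitrary values): nextIndex(0x40, 'a') == 0x40 ^ 0x61 == 0x21;
// the transition is valid only if the CHECK byte stored at that slot is 'a'.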

// makeSiblings groups the sorted records by their byte at depth. A record
// whose key ends at or before depth is returned as leaf; an error is returned
// if the records are not sorted.
func makeSiblings(records []record, depth int) (sib []sibling, leaf *record, err error) {
	var (
		pc byte
		n  int
	)
	for i, r := range records {
		if len(r.key) <= depth {
			leaf = &records[i] // address the slice element, not the loop variable
			continue
		}
		c := r.key[depth]
		switch {
		case pc < c:
			sib = append(sib, sibling{start: i, c: c})
		case pc == c:
			continue
		default:
			return nil, nil, fmt.Errorf("stringutil: BUG: records have not been sorted")
		}
		if n > 0 {
			sib[n-1].end = i
		}
		pc = c
		n++
	}
	if n == 0 {
		return nil, leaf, nil
	}
	sib[n-1].end = len(records)
	return sib, leaf, nil
}

// record pairs a key (with the termination character appended) with the value
// stored for it.
type record struct {
	key   string
	value int
}

// makeRecords builds the records for srcs, appending the termination character
// to each key and using the key's rune count as its value.
func makeRecords(srcs []string) (records []record) {
	termChar := string(terminationCharacter)
	for _, s := range srcs {
		records = append(records, record{
			key:   s + termChar,
			value: utf8.RuneCountInString(s),
		})
	}
	return records
}

// recordSlice implements sort.Interface over records, ordering them by key.
type recordSlice []record

func (rs recordSlice) Len() int {
	return len(rs)
}

func (rs recordSlice) Less(i, j int) bool {
	return rs[i].key < rs[j].key
}

func (rs recordSlice) Swap(i, j int) {
	rs[i], rs[j] = rs[j], rs[i]
}