forked from cerc-io/plugeth
core, eth, trie: use common/prque (#17508)
This commit is contained in:
parent 6fc8494620
commit 6a33954731
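Every hunk below makes the same mechanical swap: import github.com/ethereum/go-ethereum/common/prque instead of the vendored gopkg.in/karalabe/cookiejar.v2/collections/prque, pass an argument to the constructor (nil at every call site in this commit), and push int64 priorities instead of float32. Judging by the function names, the touched code is the blockchain and transaction pool in core, the downloader queue and block fetcher in eth, and the trie sync. A minimal sketch of the new usage, assuming common/prque mirrors the queue it replaces (max-priority-first, same Push/Pop/Empty methods), which is what the call sites suggest:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	// New now takes an argument; every call site in this commit passes nil.
	q := prque.New(nil)

	// Priorities are int64 rather than float32.
	q.Push("item-3", 3)
	q.Push("item-7", 7)
	q.Push("item-5", 5)

	// The highest priority pops first, as with the vendored queue.
	for !q.Empty() {
		value, priority := q.Pop()
		fmt.Println(value, priority) // item-7 7, item-5 5, item-3 3
	}
}
```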
@@ -29,6 +29,7 @@ import (
 "github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/mclock"
+"github.com/ethereum/go-ethereum/common/prque"
 "github.com/ethereum/go-ethereum/consensus"
 "github.com/ethereum/go-ethereum/core/rawdb"
 "github.com/ethereum/go-ethereum/core/state"
@@ -43,7 +44,6 @@ import (
 "github.com/ethereum/go-ethereum/rlp"
 "github.com/ethereum/go-ethereum/trie"
 "github.com/hashicorp/golang-lru"
-"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )

 var (
@@ -151,7 +151,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 chainConfig: chainConfig,
 cacheConfig: cacheConfig,
 db: db,
-triegc: prque.New(),
+triegc: prque.New(nil),
 stateCache: state.NewDatabase(db),
 quit: make(chan struct{}),
 bodyCache: bodyCache,
@@ -915,7 +915,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 } else {
 // Full but not archive node, do proper garbage collection
 triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
-bc.triegc.Push(root, -float32(block.NumberU64()))
+bc.triegc.Push(root, -int64(block.NumberU64()))

 if current := block.NumberU64(); current > triesInMemory {
 // If we exceeded our memory allowance, flush matured singleton nodes to disk
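A plausible reason for switching the priority from -float32 to -int64 here: float32 has a 24-bit significand, so block numbers above roughly 16.7 million are no longer exactly representable and distinct blocks can collapse onto the same priority. A small standalone illustration (not part of the commit):

```go
package main

import "fmt"

func main() {
	// float32 carries 24 significand bits, so not every integer
	// above 1<<24 survives the conversion intact.
	a := float32(1 << 24)   // 16777216
	b := float32(1<<24 + 1) // rounds back to 16777216
	fmt.Println(a == b)     // true: two different block numbers, one priority

	// int64 keeps every block number distinct.
	fmt.Println(int64(1<<24) == int64(1<<24 + 1)) // false
}
```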
@@ -26,13 +26,13 @@ import (
 "time"

 "github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/common/prque"
 "github.com/ethereum/go-ethereum/core/state"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/event"
 "github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/metrics"
 "github.com/ethereum/go-ethereum/params"
-"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )

 const (
@@ -987,11 +987,11 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) {
 if pending > pool.config.GlobalSlots {
 pendingBeforeCap := pending
 // Assemble a spam order to penalize large transactors first
-spammers := prque.New()
+spammers := prque.New(nil)
 for addr, list := range pool.pending {
 // Only evict transactions from high rollers
 if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
-spammers.Push(addr, float32(list.Len()))
+spammers.Push(addr, int64(list.Len()))
 }
 }
 // Gradually drop transactions from offenders
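The spammers queue above depends on the queue being max-first: each over-quota sender is pushed with its pending-transaction count, so the heaviest senders pop out first and get trimmed first. A hedged sketch of that ordering with made-up data (the addresses and counts are illustrative, not the pool's real state):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	// Hypothetical pending-transaction counts per sender.
	pending := map[string]int{"addr-a": 12, "addr-b": 87, "addr-c": 40}

	spammers := prque.New(nil)
	for addr, count := range pending {
		spammers.Push(addr, int64(count)) // larger count = higher priority
	}
	for !spammers.Empty() {
		addr, count := spammers.Pop()
		fmt.Println(addr, count) // addr-b 87, then addr-c 40, then addr-a 12
	}
}
```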
@@ -26,10 +26,10 @@ import (
 "time"

 "github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/common/prque"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/log"
 "github.com/ethereum/go-ethereum/metrics"
-"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )

 var (
@@ -105,11 +105,11 @@ func newQueue() *queue {
 headerPendPool: make(map[string]*fetchRequest),
 headerContCh: make(chan bool),
 blockTaskPool: make(map[common.Hash]*types.Header),
-blockTaskQueue: prque.New(),
+blockTaskQueue: prque.New(nil),
 blockPendPool: make(map[string]*fetchRequest),
 blockDonePool: make(map[common.Hash]struct{}),
 receiptTaskPool: make(map[common.Hash]*types.Header),
-receiptTaskQueue: prque.New(),
+receiptTaskQueue: prque.New(nil),
 receiptPendPool: make(map[string]*fetchRequest),
 receiptDonePool: make(map[common.Hash]struct{}),
 resultCache: make([]*fetchResult, blockCacheItems),
@@ -277,7 +277,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 }
 // Schedule all the header retrieval tasks for the skeleton assembly
 q.headerTaskPool = make(map[uint64]*types.Header)
-q.headerTaskQueue = prque.New()
+q.headerTaskQueue = prque.New(nil)
 q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
 q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
 q.headerProced = 0
@@ -288,7 +288,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 index := from + uint64(i*MaxHeaderFetch)

 q.headerTaskPool[index] = header
-q.headerTaskQueue.Push(index, -float32(index))
+q.headerTaskQueue.Push(index, -int64(index))
 }
 }

@@ -334,11 +334,11 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 }
 // Queue the header for content retrieval
 q.blockTaskPool[hash] = header
-q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
+q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))

 if q.mode == FastSync {
 q.receiptTaskPool[hash] = header
-q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
+q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 inserts = append(inserts, header)
 q.headerHead = hash
@@ -436,7 +436,7 @@ func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
 }
 // Merge all the skipped batches back
 for _, from := range skip {
-q.headerTaskQueue.Push(from, -float32(from))
+q.headerTaskQueue.Push(from, -int64(from))
 }
 // Assemble and return the block download request
 if send == 0 {
@@ -542,7 +542,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
 }
 // Merge all the skipped headers back
 for _, header := range skip {
-taskQueue.Push(header, -float32(header.Number.Uint64()))
+taskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 if progress {
 // Wake WaitResults, resultCache was modified
@@ -585,10 +585,10 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m
 defer q.lock.Unlock()

 if request.From > 0 {
-taskQueue.Push(request.From, -float32(request.From))
+taskQueue.Push(request.From, -int64(request.From))
 }
 for _, header := range request.Headers {
-taskQueue.Push(header, -float32(header.Number.Uint64()))
+taskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 delete(pendPool, request.Peer.id)
 }
@@ -602,13 +602,13 @@ func (q *queue) Revoke(peerID string) {

 if request, ok := q.blockPendPool[peerID]; ok {
 for _, header := range request.Headers {
-q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
+q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 delete(q.blockPendPool, peerID)
 }
 if request, ok := q.receiptPendPool[peerID]; ok {
 for _, header := range request.Headers {
-q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
+q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 delete(q.receiptPendPool, peerID)
 }
@@ -657,10 +657,10 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,

 // Return any non satisfied requests to the pool
 if request.From > 0 {
-taskQueue.Push(request.From, -float32(request.From))
+taskQueue.Push(request.From, -int64(request.From))
 }
 for _, header := range request.Headers {
-taskQueue.Push(header, -float32(header.Number.Uint64()))
+taskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 // Add the peer to the expiry report along the number of failed requests
 expiries[id] = len(request.Headers)
@@ -731,7 +731,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 }
 miss[request.From] = struct{}{}

-q.headerTaskQueue.Push(request.From, -float32(request.From))
+q.headerTaskQueue.Push(request.From, -int64(request.From))
 return 0, errors.New("delivery not accepted")
 }
 // Clean up a successful fetch and try to deliver any sub-results
@@ -854,7 +854,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ
 // Return all failed or missing fetches to the queue
 for _, header := range request.Headers {
 if header != nil {
-taskQueue.Push(header, -float32(header.Number.Uint64()))
+taskQueue.Push(header, -int64(header.Number.Uint64()))
 }
 }
 // Wake up WaitResults
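Throughout the downloader queue the priority is the negated block number (-int64(number)). The queue itself pops the largest priority first, so the negation makes the lowest-numbered headers, blocks and receipts come out first; the removed vendored package documented exactly this trick for getting a min-queue. A brief standalone illustration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	q := prque.New(nil)
	for _, number := range []uint64{9001, 42, 512} {
		// Negate so the max-first queue hands back the smallest numbers first.
		q.Push(number, -int64(number))
	}
	for !q.Empty() {
		value, _ := q.Pop()
		fmt.Println(value) // 42, then 512, then 9001
	}
}
```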
@@ -23,10 +23,10 @@ import (
 "time"

 "github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/common/prque"
 "github.com/ethereum/go-ethereum/consensus"
 "github.com/ethereum/go-ethereum/core/types"
 "github.com/ethereum/go-ethereum/log"
-"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )

 const (
@@ -160,7 +160,7 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBloc
 fetching: make(map[common.Hash]*announce),
 fetched: make(map[common.Hash][]*announce),
 completing: make(map[common.Hash]*announce),
-queue: prque.New(),
+queue: prque.New(nil),
 queues: make(map[string]int),
 queued: make(map[common.Hash]*inject),
 getBlock: getBlock,
@@ -299,7 +299,7 @@ func (f *Fetcher) loop() {
 // If too high up the chain or phase, continue later
 number := op.block.NumberU64()
 if number > height+1 {
-f.queue.Push(op, -float32(number))
+f.queue.Push(op, -int64(number))
 if f.queueChangeHook != nil {
 f.queueChangeHook(hash, true)
 }
@@ -624,7 +624,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
 }
 f.queues[peer] = count
 f.queued[hash] = op
-f.queue.Push(op, -float32(block.NumberU64()))
+f.queue.Push(op, -int64(block.NumberU64()))
 if f.queueChangeHook != nil {
 f.queueChangeHook(op.block.Hash(), true)
 }
@@ -21,8 +21,8 @@ import (
 "fmt"

 "github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/common/prque"
 "github.com/ethereum/go-ethereum/ethdb"
-"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )

 // ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -84,7 +84,7 @@ func NewSync(root common.Hash, database DatabaseReader, callback LeafCallback) *
 database: database,
 membatch: newSyncMemBatch(),
 requests: make(map[common.Hash]*request),
-queue: prque.New(),
+queue: prque.New(nil),
 }
 ts.AddSubTrie(root, 0, common.Hash{}, callback)
 return ts
@@ -242,7 +242,7 @@ func (s *Sync) schedule(req *request) {
 return
 }
 // Schedule the request for future retrieval
-s.queue.Push(req.hash, float32(req.depth))
+s.queue.Push(req.hash, int64(req.depth))
 s.requests[req.hash] = req
 }

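The trie sync is the one call site that pushes a positive priority (the request depth), so with the same max-first queue the deepest pending trie nodes are handed out before shallower ones. A tiny hedged sketch of that ordering (the node names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	q := prque.New(nil)
	// Depth is the priority and is not negated here.
	q.Push("node-at-depth-1", 1)
	q.Push("node-at-depth-4", 4)
	q.Push("node-at-depth-2", 2)

	value, _ := q.Pop()
	fmt.Println(value) // node-at-depth-4: the deepest node comes out first
}
```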
vendor/gopkg.in/karalabe/cookiejar.v2/LICENSE (generated, vendored, 25 lines deleted)
@@ -1,25 +0,0 @@
-Copyright (c) 2014 Péter Szilágyi. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Alternatively, the CookieJar toolbox may be used in accordance with the terms
-and conditions contained in a signed written agreement between you and the
-author(s).
vendor/gopkg.in/karalabe/cookiejar.v2/README.md (generated, vendored, 109 lines deleted)
@@ -1,109 +0,0 @@
-CookieJar - A contestant's toolbox
-======================================
-
-CookieJar is a small collection of common algorithms, data structures and library extensions that were deemed handy for computing competitions at one point or another.
-
-This toolbox is a work in progress for the time being. It may be lacking, and it may change drastically between commits (although every effort is made not to). You're welcome to use it, but it's your head on the line :)
-
-Installation
-----------------
-
-To get the package, execute:
-
-go get gopkg.in/karalabe/cookiejar.v2
-
-To import this package, add the following line to your code:
-
-import "gopkg.in/karalabe/cookiejar.v2"
-
-For more details, see the [package documentation](http://godoc.org/gopkg.in/karalabe/cookiejar.v2).
-
-Contents
-------------
-
-Algorithms:
-- Graph
-  - [Breadth First Search](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/graph/bfs)
-  - [Depth First Search](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/graph/dfs)
-
-Data structures:
-- [Bag](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/collections/bag)
-- [Deque](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/collections/deque)
-- [Graph](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/graph)
-- [Priority Queue](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/collections/prque)
-- [Queue](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/collections/queue)
-- [Set](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/collections/set)
-- [Stack](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/collections/stack)
-
-Extensions:
-- [fmt](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/exts/fmtext)
-  - `Scan` and `Fscan` for `int`, `float64`, `string` and lines
-- [math](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/exts/mathext)
-  - `Abs` for `int`
-  - `Min` and `Max` for `int`, `big.Int` and `big.Rat`
-  - `Sign` for `int` and `float64`
-- [os](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/exts/osext)
-  - `Open` and `Create` without error codes
-- [sort](http://godoc.org/gopkg.in/karalabe/cookiejar.v2/exts/sortext)
-  - `Sort` and `Search` for `big.Int` and `big.Rat`
-  - `Unique` for any `sort.Interface`
-
-Below are the performance results for the data structures and the complexity analysis for the algorithms.
-
-Performance
----------------
-
-Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz:
-```
-- bag
-  - BenchmarkInsert    309 ns/op
-  - BenchmarkRemove    197 ns/op
-  - BenchmarkDo        28.1 ns/op
-- deque
-  - BenchmarkPush      25.4 ns/op
-  - BenchmarkPop       6.72 ns/op
-- prque
-  - BenchmarkPush      171 ns/op
-  - BenchmarkPop       947 ns/op
-- queue
-  - BenchmarkPush      23.0 ns/op
-  - BenchmarkPop       5.92 ns/op
-- set
-  - BenchmarkInsert    259 ns/op
-  - BenchmarkRemove    115 ns/op
-  - BenchmarkDo        20.9 ns/op
-- stack
-  - BenchmarkPush      16.4 ns/op
-  - BenchmarkPop       6.45 ns/op
-```
-
-Complexity
---------------
-
-| Algorithm | Time complexity | Space complexity |
-|:---------:|:---------------:|:----------------:|
-| graph/bfs | O(E) | O(V) |
-| graph/dfs | O(E) | O(E) |
-
-Here be dragons :)
-----------------------
-
-```
-. _///_,
-. / ` ' '>
-) o' __/_'>
-( / _/ )_\'>
-' "__/ /_/\_>
-____/_/_/_/
-/,---, _/ /
-"" /_/_/_/
-/_(_(_(_ \
-( \_\_\\_ )\
-\'__\_\_\_\__ ).\
-//____|___\__) )_/
-| _ \'___'_( /'
-\_ (-'\'___'_\ __,'_'
-__) \ \\___(_ __/.__,'
-,((,-,__\ '", __\_/. __,'
-'"./_._._-'
-```
vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go (generated, vendored, 66 lines deleted)
@@ -1,66 +0,0 @@
-// CookieJar - A contestant's algorithm toolbox
-// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
-//
-// CookieJar is dual licensed: use of this source code is governed by a BSD
-// license that can be found in the LICENSE file. Alternatively, the CookieJar
-// toolbox may be used in accordance with the terms and conditions contained
-// in a signed written agreement between you and the author(s).
-
-// Package prque implements a priority queue data structure supporting arbitrary
-// value types and float priorities.
-//
-// The reasoning behind using floats for the priorities vs. ints or interfaces
-// was larger flexibility without sacrificing too much performance or code
-// complexity.
-//
-// If you would like to use a min-priority queue, simply negate the priorities.
-//
-// Internally the queue is based on the standard heap package working on a
-// sortable version of the block based stack.
-package prque
-
-import (
-	"container/heap"
-)
-
-// Priority queue data structure.
-type Prque struct {
-	cont *sstack
-}
-
-// Creates a new priority queue.
-func New() *Prque {
-	return &Prque{newSstack()}
-}
-
-// Pushes a value with a given priority into the queue, expanding if necessary.
-func (p *Prque) Push(data interface{}, priority float32) {
-	heap.Push(p.cont, &item{data, priority})
-}
-
-// Pops the value with the greates priority off the stack and returns it.
-// Currently no shrinking is done.
-func (p *Prque) Pop() (interface{}, float32) {
-	item := heap.Pop(p.cont).(*item)
-	return item.value, item.priority
-}
-
-// Pops only the item from the queue, dropping the associated priority value.
-func (p *Prque) PopItem() interface{} {
-	return heap.Pop(p.cont).(*item).value
-}
-
-// Checks whether the priority queue is empty.
-func (p *Prque) Empty() bool {
-	return p.cont.Len() == 0
-}
-
-// Returns the number of element in the priority queue.
-func (p *Prque) Size() int {
-	return p.cont.Len()
-}
-
-// Clears the contents of the priority queue.
-func (p *Prque) Reset() {
-	*p = *New()
-}
vendor/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go (generated, vendored, 91 lines deleted)
@@ -1,91 +0,0 @@
-// CookieJar - A contestant's algorithm toolbox
-// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
-//
-// CookieJar is dual licensed: use of this source code is governed by a BSD
-// license that can be found in the LICENSE file. Alternatively, the CookieJar
-// toolbox may be used in accordance with the terms and conditions contained
-// in a signed written agreement between you and the author(s).
-
-package prque
-
-// The size of a block of data
-const blockSize = 4096
-
-// A prioritized item in the sorted stack.
-type item struct {
-	value    interface{}
-	priority float32
-}
-
-// Internal sortable stack data structure. Implements the Push and Pop ops for
-// the stack (heap) functionality and the Len, Less and Swap methods for the
-// sortability requirements of the heaps.
-type sstack struct {
-	size     int
-	capacity int
-	offset   int
-
-	blocks [][]*item
-	active []*item
-}
-
-// Creates a new, empty stack.
-func newSstack() *sstack {
-	result := new(sstack)
-	result.active = make([]*item, blockSize)
-	result.blocks = [][]*item{result.active}
-	result.capacity = blockSize
-	return result
-}
-
-// Pushes a value onto the stack, expanding it if necessary. Required by
-// heap.Interface.
-func (s *sstack) Push(data interface{}) {
-	if s.size == s.capacity {
-		s.active = make([]*item, blockSize)
-		s.blocks = append(s.blocks, s.active)
-		s.capacity += blockSize
-		s.offset = 0
-	} else if s.offset == blockSize {
-		s.active = s.blocks[s.size/blockSize]
-		s.offset = 0
-	}
-	s.active[s.offset] = data.(*item)
-	s.offset++
-	s.size++
-}
-
-// Pops a value off the stack and returns it. Currently no shrinking is done.
-// Required by heap.Interface.
-func (s *sstack) Pop() (res interface{}) {
-	s.size--
-	s.offset--
-	if s.offset < 0 {
-		s.offset = blockSize - 1
-		s.active = s.blocks[s.size/blockSize]
-	}
-	res, s.active[s.offset] = s.active[s.offset], nil
-	return
-}
-
-// Returns the length of the stack. Required by sort.Interface.
-func (s *sstack) Len() int {
-	return s.size
-}
-
-// Compares the priority of two elements of the stack (higher is first).
-// Required by sort.Interface.
-func (s *sstack) Less(i, j int) bool {
-	return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority
-}
-
-// Swaps two elements in the stack. Required by sort.Interface.
-func (s *sstack) Swap(i, j int) {
-	ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
-	s.blocks[ib][io], s.blocks[jb][jo] = s.blocks[jb][jo], s.blocks[ib][io]
-}
-
-// Resets the stack, effectively clearing its contents.
-func (s *sstack) Reset() {
-	*s = *newSstack()
-}
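The deleted sstack above is just a block-allocated backing store whose Less compares priorities with >, which is what makes container/heap yield the highest priority first. A self-contained sketch of that same max-first idea using only the standard library (an illustration of the technique, not the vendored code itself):

```go
package main

import (
	"container/heap"
	"fmt"
)

type entry struct {
	value    string
	priority int64
}

// maxHeap mirrors sstack's Less: a ">" comparison makes heap.Pop
// return the entry with the highest priority.
type maxHeap []entry

func (h maxHeap) Len() int            { return len(h) }
func (h maxHeap) Less(i, j int) bool  { return h[i].priority > h[j].priority }
func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(entry)) }
func (h *maxHeap) Pop() interface{} {
	old := *h
	n := len(old)
	e := old[n-1]
	*h = old[:n-1]
	return e
}

func main() {
	h := &maxHeap{}
	heap.Push(h, entry{"low", 1})
	heap.Push(h, entry{"high", 9})
	heap.Push(h, entry{"mid", 5})

	for h.Len() > 0 {
		e := heap.Pop(h).(entry)
		fmt.Println(e.value, e.priority) // high 9, mid 5, low 1
	}
}
```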
vendor/vendor.json (vendored, 6 lines removed)
@@ -891,12 +891,6 @@
 "revision": "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec",
 "revisionTime": "2016-12-08T18:13:25Z"
 },
-{
-"checksumSHA1": "DQXNV0EivoHm4q+bkdahYXrjjfE=",
-"path": "gopkg.in/karalabe/cookiejar.v2/collections/prque",
-"revision": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57",
-"revisionTime": "2015-07-24T13:16:13Z"
-},
 {
 "checksumSHA1": "0xgs8lwcWLUffemlj+SsgKlxvDU=",
 "path": "gopkg.in/natefinch/npipe.v2",