Add cold import script

This commit is contained in:
Rob Mulholand 2018-05-02 11:17:02 -05:00
parent 462f94d84a
commit 5f6bf32ec1
411 changed files with 81507 additions and 1566 deletions

25
Gopkg.lock generated
View File

@ -22,8 +22,16 @@
"common/hexutil", "common/hexutil",
"common/math", "common/math",
"common/mclock", "common/mclock",
"consensus",
"consensus/misc",
"core",
"core/state",
"core/types", "core/types",
"core/vm",
"crypto", "crypto",
"crypto/bn256",
"crypto/bn256/cloudflare",
"crypto/bn256/google",
"crypto/ecies", "crypto/ecies",
"crypto/secp256k1", "crypto/secp256k1",
"crypto/sha3", "crypto/sha3",
@ -69,6 +77,15 @@
packages = ["."] packages = ["."]
revision = "553a641470496b2327abcac10b36396bd98e45c9" revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
branch = "master"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru"
]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/hcl" name = "github.com/hashicorp/hcl"
@ -262,6 +279,12 @@
] ]
revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b" revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ripemd160"]
revision = "613d6eafa307c6881a737a3c35c0e312e8d3a8c5"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/net" name = "golang.org/x/net"
@ -340,6 +363,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "4dcddfb7fa2db8ba7e3d8e70ec8ba23a7e9b6a1d99c62933ed63624412967aeb" inputs-digest = "8e609f758d10041b1746db11078eca9cad29a7dbf9939d14e49d70ee172dfd2e"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1

View File

@ -23,19 +23,27 @@
## Configuration ## Configuration
- To use a local Ethereum node, copy `environments/public.toml.example` to - To use a local Ethereum node, copy `environments/public.toml.example` to
`environments/public.toml` and update the `ipcPath` to the local node's IPC filepath: `environments/public.toml` and update the `ipcPath` and `levelDbPath`.
- when using geth: - `ipcPath` should match the local node's IPC filepath:
- The IPC file is called `geth.ipc`. - when using geth:
- The geth IPC file path is printed to the console when you start geth. - The IPC file is called `geth.ipc`.
- The default location is: - The geth IPC file path is printed to the console when you start geth.
- Mac: `$HOME/Library/Ethereum` - The default location is:
- Linux: `$HOME/.ethereum` - Mac: `$HOME/Library/Ethereum`
- Linux: `$HOME/.ethereum`
- when using parity: - when using parity:
- The IPC file is called `jsonrpc.ipc`. - The IPC file is called `jsonrpc.ipc`.
- The default location is: - The default location is:
- Mac: `$HOME/Library/Application\ Support/io.parity.ethereum/` - Mac: `$HOME/Library/Application\ Support/io.parity.ethereum/`
- Linux: `$HOME/.local/share/io.parity.ethereum/` - Linux: `$HOME/.local/share/io.parity.ethereum/`
- `levelDbPath` should match Geth's chaindata directory path.
- The geth LevelDB chaindata path is printed to the console when you start geth.
- The default location is:
- Mac: `$HOME/Library/Ethereum/geth/chaindata`
- Linux: `$HOME/.ethereum/geth/chaindata`
- `levelDbPath` is irrelevant (and `coldImport` is currently unavailable) if only running parity.
- See `environments/infura.toml` to configure commands to run against infura, if a local node is unavailable - See `environments/infura.toml` to configure commands to run against infura, if a local node is unavailable
@ -43,7 +51,13 @@
Syncs VulcanizeDB with the configured Ethereum node. Syncs VulcanizeDB with the configured Ethereum node.
1. Start node (**if fast syncing wait for initial sync to finish**) 1. Start node (**if fast syncing wait for initial sync to finish**)
1. In a separate terminal start vulcanize_db 1. In a separate terminal start vulcanize_db
- `vulcanizedb sync --config <config.toml> --starting-block-number <block-number>` - `./vulcanizedb sync --config <config.toml> --starting-block-number <block-number>`
## Alternatively, sync from Geth's underlying LevelDB
Sync VulcanizeDB from the LevelDB underlying a Geth node.
1. Assure node is not running, and that it has synced to the desired block height.
1. Start vulcanize_db
- `./vulcanizedb coldImport --config <config.toml> --starting-block-number <block-number> --ending-block-number <block-number>`
## Running the Tests ## Running the Tests

85
cmd/coldImport.go Normal file
View File

@ -0,0 +1,85 @@
// Copyright © 2018 Rob Mulholand <rmulholand@8thlight.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"log"
"github.com/ethereum/go-ethereum/common"
"github.com/spf13/cobra"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/geth"
"github.com/vulcanize/vulcanizedb/pkg/geth/converters/cold_db"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
"github.com/vulcanize/vulcanizedb/utils"
)
// coldImportCmd defines the `coldImport` CLI command, which populates the
// vulcanize postgres database directly from a cold (not running) Geth
// LevelDB instance instead of issuing RPC calls to a live node.
var coldImportCmd = &cobra.Command{
	Use:   "coldImport",
	Short: "Sync vulcanize from a cold instance of LevelDB.",
	Long: `Populate core vulcanize db data directly out of LevelDB, rather than over rpc calls. For example:
./vulcanizedb coldImport -s 0 -e 5000000
Geth must be synced over all of the desired blocks and must not be running in order to execute this command.`,
	Run: func(cmd *cobra.Command, args []string) {
		coldImport()
	},
}
// init registers the coldImport command and its block-range flags on the
// root command.
func init() {
	rootCmd.AddCommand(coldImportCmd)
	// -s and -e bound the import range; the ending default of 5500000
	// presumably approximated mainnet height when this was written — TODO confirm.
	coldImportCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "Number for first block to cold import")
	coldImportCmd.Flags().Int64VarP(&endingBlockNumber, "ending-block-number", "e", 5500000, "Number for last block to cold import")
}
// coldImport reads blocks out of Geth's LevelDB between the configured
// starting and ending block numbers and writes them to the vulcanize
// postgres database. Any setup or import failure is fatal.
func coldImport() {
	// Note: an ending number equal to the starting number is accepted; only a
	// strictly smaller ending number is rejected.
	if endingBlockNumber < startingBlockNumber {
		log.Fatal("Ending block number must be greater than starting block number for cold import.")
	}
	// init eth db
	ethDBConfig := ethereum.CreateDatabaseConfig(ethereum.Level, levelDbPath)
	ethDB, err := ethereum.CreateDatabase(ethDBConfig)
	if err != nil {
		log.Fatal("Error connecting to ethereum db: ", err)
	}
	// init pg db: with no live client to query for node metadata, identify
	// the node by its genesis block hash read straight from LevelDB.
	genesisBlockHash := common.BytesToHash(ethDB.GetBlockHash(0)).String()
	coldNode := core.Node{
		GenesisBlock: genesisBlockHash,
		NetworkID:    1,
		ID:           "LevelDbColdImport",
		ClientName:   "LevelDbColdImport",
	}
	pgDB := utils.LoadPostgres(databaseConfig, coldNode)
	// init cold importer deps
	blockRepository := repositories.BlockRepository{DB: &pgDB}
	receiptRepository := repositories.ReceiptRepository{DB: &pgDB}
	// MixedCaps per Go convention (was `transactionconverter`).
	transactionConverter := cold_db.NewColdDbTransactionConverter()
	blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
	// init and execute cold importer
	coldImporter := geth.NewColdImporter(ethDB, blockRepository, receiptRepository, blockConverter)
	err = coldImporter.Execute(startingBlockNumber, endingBlockNumber)
	if err != nil {
		log.Fatal("Error executing cold import: ", err)
	}
}

View File

@ -11,9 +11,12 @@ import (
) )
var ( var (
cfgFile string cfgFile string
databaseConfig config.Database databaseConfig config.Database
ipc string ipc string
levelDbPath string
startingBlockNumber int64
endingBlockNumber int64
) )
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
@ -30,6 +33,7 @@ func Execute() {
func database(cmd *cobra.Command, args []string) { func database(cmd *cobra.Command, args []string) {
ipc = viper.GetString("client.ipcpath") ipc = viper.GetString("client.ipcpath")
levelDbPath = viper.GetString("client.leveldbpath")
databaseConfig = config.Database{ databaseConfig = config.Database{
Name: viper.GetString("database.name"), Name: viper.GetString("database.name"),
Hostname: viper.GetString("database.hostname"), Hostname: viper.GetString("database.hostname"),
@ -46,12 +50,13 @@ func init() {
rootCmd.PersistentFlags().Int("database-port", 5432, "database port") rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname") rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file") rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file")
rootCmd.PersistentFlags().String("client-levelDbPath", "", "location of levelDb chaindata")
viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name")) viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port")) viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname")) viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath")) viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath"))
viper.BindPFlag("client.levelDbPath", rootCmd.PersistentFlags().Lookup("client-levelDbPath"))
} }
func initConfig() { func initConfig() {

View File

@ -42,12 +42,10 @@ const (
pollingInterval = 7 * time.Second pollingInterval = 7 * time.Second
) )
var startingBlockNumber int
func init() { func init() {
rootCmd.AddCommand(syncCmd) rootCmd.AddCommand(syncCmd)
syncCmd.Flags().IntVarP(&startingBlockNumber, "starting-block-number", "s", 0, "Block number to start syncing from") syncCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "Block number to start syncing from")
} }
func backFillAllBlocks(blockchain core.Blockchain, blockRepository datastore.BlockRepository, missingBlocksPopulated chan int, startingBlockNumber int64) { func backFillAllBlocks(blockchain core.Blockchain, blockRepository datastore.BlockRepository, missingBlocksPopulated chan int, startingBlockNumber int64) {
@ -65,8 +63,7 @@ func sync() {
if lastBlock == 0 { if lastBlock == 0 {
log.Fatal("geth initial: state sync not finished") log.Fatal("geth initial: state sync not finished")
} }
_startingBlockNumber := int64(startingBlockNumber) if startingBlockNumber > lastBlock {
if _startingBlockNumber > lastBlock {
log.Fatal("starting block number > current block number") log.Fatal("starting block number > current block number")
} }
@ -74,7 +71,7 @@ func sync() {
blockRepository := repositories.BlockRepository{DB: &db} blockRepository := repositories.BlockRepository{DB: &db}
validator := history.NewBlockValidator(blockchain, blockRepository, 15) validator := history.NewBlockValidator(blockchain, blockRepository, 15)
missingBlocksPopulated := make(chan int) missingBlocksPopulated := make(chan int)
go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, _startingBlockNumber) go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, startingBlockNumber)
for { for {
select { select {
@ -82,7 +79,7 @@ func sync() {
window := validator.ValidateBlocks() window := validator.ValidateBlocks()
validator.Log(os.Stdout, window) validator.Log(os.Stdout, window)
case <-missingBlocksPopulated: case <-missingBlocksPopulated:
go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, _startingBlockNumber) go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, startingBlockNumber)
} }
} }
} }

View File

@ -5,3 +5,4 @@ port = 5432
[client] [client]
ipcPath = <local node's IPC filepath> ipcPath = <local node's IPC filepath>
levelDbPath = <local node's LevelDB chaindata filepath>

View File

@ -0,0 +1,19 @@
package ethereum
// DatabaseType enumerates the supported ethereum chain-database backends.
type DatabaseType int

const (
	// Level identifies a Geth LevelDB chaindata store.
	Level DatabaseType = iota
)

// DatabaseConfig describes how to reach an ethereum chain database: which
// backend it uses and where it lives on disk.
type DatabaseConfig struct {
	Type DatabaseType
	Path string
}

// CreateDatabaseConfig builds a DatabaseConfig for the given backend type
// and filesystem path.
func CreateDatabaseConfig(dbType DatabaseType, path string) DatabaseConfig {
	return DatabaseConfig{Type: dbType, Path: path}
}

View File

@ -0,0 +1,31 @@
package ethereum
import (
"fmt"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum/level"
)
// Database reads blocks, canonical block hashes, and receipts out of an
// ethereum chain database. Implementations are selected by
// DatabaseConfig.Type in CreateDatabase.
type Database interface {
	// GetBlock returns the block with the given hash and number.
	GetBlock(hash []byte, blockNumber int64) *types.Block
	// GetBlockHash returns the canonical hash bytes for the given block number.
	GetBlockHash(blockNumber int64) []byte
	// GetBlockReceipts returns the receipts stored for the given block.
	GetBlockReceipts(blockHash []byte, blockNumber int64) types.Receipts
}
// CreateDatabase opens the ethereum chain database described by config and
// wraps it in a Database implementation. It returns an error if the backend
// cannot be opened or the configured type is unknown.
func CreateDatabase(config DatabaseConfig) (Database, error) {
	switch config.Type {
	case Level:
		// 128 and 1024 are the LevelDB cache size and max open file handles
		// passed through to go-ethereum — presumably its recommended
		// defaults; confirm against ethdb.NewLDBDatabase before changing.
		levelDBConnection, err := ethdb.NewLDBDatabase(config.Path, 128, 1024)
		if err != nil {
			return nil, err
		}
		levelDBReader := level.NewLevelDatabaseReader(levelDBConnection)
		levelDB := level.NewLevelDatabase(levelDBReader)
		return levelDB, nil
	default:
		// Include the unknown type itself, not just the path: the type is
		// what failed to match. Lowercase per Go error-string convention.
		return nil, fmt.Errorf("unknown ethereum database type %d for path %s", config.Type, config.Path)
	}
}

View File

@ -0,0 +1,34 @@
package level
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// LevelDatabase adapts a level.Reader to the []byte/int64-based Database
// interface used by the rest of the datastore package.
type LevelDatabase struct {
	reader Reader // underlying typed LevelDB accessor
}

// NewLevelDatabase returns a LevelDatabase backed by the given reader.
func NewLevelDatabase(ldbReader Reader) *LevelDatabase {
	return &LevelDatabase{
		reader: ldbReader,
	}
}
// GetBlock fetches the block with the given hash and number, converting the
// raw arguments into the types the underlying reader expects.
func (l LevelDatabase) GetBlock(blockHash []byte, blockNumber int64) *types.Block {
	return l.reader.GetBlock(common.BytesToHash(blockHash), uint64(blockNumber))
}
// GetBlockHash returns the canonical hash bytes for the given block number.
func (l LevelDatabase) GetBlockHash(blockNumber int64) []byte {
	return l.reader.GetCanonicalHash(uint64(blockNumber)).Bytes()
}
// GetBlockReceipts fetches the receipts stored for the block with the given
// hash and number.
func (l LevelDatabase) GetBlockReceipts(blockHash []byte, blockNumber int64) types.Receipts {
	return l.reader.GetBlockReceipts(common.BytesToHash(blockHash), uint64(blockNumber))
}

View File

@ -0,0 +1,33 @@
package level
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
)
// Reader is the subset of chain-database reads LevelDatabase needs, expressed
// in go-ethereum's native types. Defined here (at the consumer) so tests can
// substitute a fake.
type Reader interface {
	// GetBlock returns the block with the given hash and number.
	GetBlock(hash common.Hash, number uint64) *types.Block
	// GetBlockReceipts returns the receipts for the block with the given hash and number.
	GetBlockReceipts(hash common.Hash, number uint64) types.Receipts
	// GetCanonicalHash returns the canonical block hash for the given number.
	GetCanonicalHash(number uint64) common.Hash
}
// LevelDatabaseReader implements Reader on top of go-ethereum's
// core.DatabaseReader by delegating to the core package's accessor functions.
type LevelDatabaseReader struct {
	core.DatabaseReader // raw key/value reader over the LevelDB store
}

// NewLevelDatabaseReader wraps the given raw reader in a LevelDatabaseReader.
func NewLevelDatabaseReader(reader core.DatabaseReader) *LevelDatabaseReader {
	return &LevelDatabaseReader{DatabaseReader: reader}
}
// GetBlock delegates to go-ethereum's core block accessor.
func (r *LevelDatabaseReader) GetBlock(hash common.Hash, number uint64) *types.Block {
	return core.GetBlock(r.DatabaseReader, hash, number)
}

// GetBlockReceipts delegates to go-ethereum's core receipt accessor.
func (r *LevelDatabaseReader) GetBlockReceipts(hash common.Hash, number uint64) types.Receipts {
	return core.GetBlockReceipts(r.DatabaseReader, hash, number)
}

// GetCanonicalHash delegates to go-ethereum's core canonical-hash accessor.
func (r *LevelDatabaseReader) GetCanonicalHash(number uint64) common.Hash {
	return core.GetCanonicalHash(r.DatabaseReader, number)
}

View File

@ -0,0 +1,53 @@
package level_test
import (
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum/level"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
)
// Specs verifying that LevelDatabase converts its raw []byte/int64 arguments
// into the common.Hash/uint64 values the underlying reader expects.
var _ = Describe("Level database", func() {
	Describe("Getting a block hash", func() {
		It("converts block number to uint64 to fetch hash from reader", func() {
			reader := fakes.NewMockLevelDatabaseReader()
			db := level.NewLevelDatabase(reader)
			number := int64(12345)

			db.GetBlockHash(number)

			reader.AssertGetCanonicalHashCalledWith(uint64(number))
		})
	})

	Describe("Getting a block", func() {
		It("converts block number to uint64 and hash to common.Hash to fetch block from reader", func() {
			reader := fakes.NewMockLevelDatabaseReader()
			db := level.NewLevelDatabase(reader)
			hash := []byte{5, 4, 3, 2, 1}
			number := int64(12345)

			db.GetBlock(hash, number)

			reader.AssertGetBlockCalledWith(common.BytesToHash(hash), uint64(number))
		})
	})

	Describe("Getting a block's receipts", func() {
		It("converts block number to uint64 and hash to common.Hash to fetch receipts from reader", func() {
			reader := fakes.NewMockLevelDatabaseReader()
			db := level.NewLevelDatabase(reader)
			hash := []byte{5, 4, 3, 2, 1}
			number := int64(12345)

			db.GetBlockReceipts(hash, number)

			reader.AssertGetBlockReceiptsCalledWith(common.BytesToHash(hash), uint64(number))
		})
	})
})

View File

@ -0,0 +1,13 @@
package level_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestLevel hooks the Ginkgo specs in this package into the standard
// `go test` runner.
func TestLevel(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Level Suite")
}

View File

@ -9,14 +9,14 @@ type BlockRepository struct {
*InMemory *InMemory
} }
func (blockRepository *BlockRepository) CreateOrUpdateBlock(block core.Block) error { func (blockRepository *BlockRepository) CreateOrUpdateBlock(block core.Block) (int64, error) {
blockRepository.CreateOrUpdateBlockCallCount++ blockRepository.CreateOrUpdateBlockCallCount++
blockRepository.blocks[block.Number] = block blockRepository.blocks[block.Number] = block
for _, transaction := range block.Transactions { for _, transaction := range block.Transactions {
blockRepository.receipts[transaction.Hash] = transaction.Receipt blockRepository.receipts[transaction.Hash] = transaction.Receipt
blockRepository.logs[transaction.TxHash] = transaction.Logs blockRepository.logs[transaction.TxHash] = transaction.Logs
} }
return nil return 0, nil
} }
func (blockRepository *BlockRepository) GetBlock(blockNumber int64) (core.Block, error) { func (blockRepository *BlockRepository) GetBlock(blockNumber int64) (core.Block, error) {

View File

@ -23,7 +23,9 @@ var _ = Describe("Postgres DB", func() {
It("connects to the database", func() { It("connects to the database", func() {
var err error var err error
pgConfig := config.DbConnectionString(test_config.DBConfig) pgConfig := config.DbConnectionString(test_config.DBConfig)
sqlxdb, err = sqlx.Connect("postgres", pgConfig) sqlxdb, err = sqlx.Connect("postgres", pgConfig)
Expect(err).Should(BeNil()) Expect(err).Should(BeNil())
Expect(sqlxdb).ShouldNot(BeNil()) Expect(sqlxdb).ShouldNot(BeNil())
}) })
@ -76,10 +78,10 @@ var _ = Describe("Postgres DB", func() {
db := test_config.NewTestDB(node) db := test_config.NewTestDB(node)
blocksRepository := repositories.BlockRepository{DB: db} blocksRepository := repositories.BlockRepository{DB: db}
err1 := blocksRepository.CreateOrUpdateBlock(badBlock) _, err1 := blocksRepository.CreateOrUpdateBlock(badBlock)
savedBlock, err2 := blocksRepository.GetBlock(123)
Expect(err1).To(HaveOccurred()) Expect(err1).To(HaveOccurred())
savedBlock, err2 := blocksRepository.GetBlock(123)
Expect(err2).To(HaveOccurred()) Expect(err2).To(HaveOccurred())
Expect(savedBlock).To(BeZero()) Expect(savedBlock).To(BeZero())
}) })
@ -87,7 +89,9 @@ var _ = Describe("Postgres DB", func() {
It("throws error when can't connect to the database", func() { It("throws error when can't connect to the database", func() {
invalidDatabase := config.Database{} invalidDatabase := config.Database{}
node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"} node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"}
_, err := postgres.NewDB(invalidDatabase, node) _, err := postgres.NewDB(invalidDatabase, node)
Expect(err).To(Equal(postgres.ErrDBConnectionFailed)) Expect(err).To(Equal(postgres.ErrDBConnectionFailed))
}) })
@ -110,10 +114,10 @@ var _ = Describe("Postgres DB", func() {
db, _ := postgres.NewDB(test_config.DBConfig, node) db, _ := postgres.NewDB(test_config.DBConfig, node)
logRepository := repositories.LogRepository{DB: db} logRepository := repositories.LogRepository{DB: db}
err := logRepository.CreateLogs([]core.Log{badLog}) err := logRepository.CreateLogs([]core.Log{badLog}, 123)
savedBlock := logRepository.GetLogs("x123", 1)
Expect(err).ToNot(BeNil()) Expect(err).ToNot(BeNil())
savedBlock := logRepository.GetLogs("x123", 1)
Expect(savedBlock).To(BeNil()) Expect(savedBlock).To(BeNil())
}) })
@ -129,12 +133,11 @@ var _ = Describe("Postgres DB", func() {
db, _ := postgres.NewDB(test_config.DBConfig, node) db, _ := postgres.NewDB(test_config.DBConfig, node)
blockRepository := repositories.BlockRepository{DB: db} blockRepository := repositories.BlockRepository{DB: db}
err1 := blockRepository.CreateOrUpdateBlock(block) _, err1 := blockRepository.CreateOrUpdateBlock(block)
savedBlock, err2 := blockRepository.GetBlock(123)
Expect(err1).To(HaveOccurred()) Expect(err1).To(HaveOccurred())
savedBlock, err2 := blockRepository.GetBlock(123)
Expect(err2).To(HaveOccurred()) Expect(err2).To(HaveOccurred())
Expect(savedBlock).To(BeZero()) Expect(savedBlock).To(BeZero())
}) })
}) })

View File

@ -3,10 +3,11 @@ package repositories
import ( import (
"context" "context"
"database/sql" "database/sql"
"errors"
"log" "log"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/datastore" "github.com/vulcanize/vulcanizedb/pkg/datastore"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres" "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@ -16,6 +17,8 @@ const (
blocksFromHeadBeforeFinal = 20 blocksFromHeadBeforeFinal = 20
) )
var ErrBlockExists = errors.New("Won't add block that already exists.")
type BlockRepository struct { type BlockRepository struct {
*postgres.DB *postgres.DB
} }
@ -28,22 +31,21 @@ func (blockRepository BlockRepository) SetBlocksStatus(chainHead int64) {
cutoff) cutoff)
} }
func (blockRepository BlockRepository) CreateOrUpdateBlock(block core.Block) error { func (blockRepository BlockRepository) CreateOrUpdateBlock(block core.Block) (int64, error) {
var err error var err error
var blockId int64
retrievedBlockHash, ok := blockRepository.getBlockHash(block) retrievedBlockHash, ok := blockRepository.getBlockHash(block)
if !ok { if !ok {
err = blockRepository.insertBlock(block) return blockRepository.insertBlock(block)
return err
} }
if ok && retrievedBlockHash != block.Hash { if ok && retrievedBlockHash != block.Hash {
err = blockRepository.removeBlock(block.Number) err = blockRepository.removeBlock(block.Number)
if err != nil { if err != nil {
return err return 0, err
} }
err = blockRepository.insertBlock(block) return blockRepository.insertBlock(block)
return err
} }
return nil return blockId, ErrBlockExists
} }
func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber int64, highestBlockNumber int64) []int64 { func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber int64, highestBlockNumber int64) []int64 {
@ -92,7 +94,7 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
return savedBlock, nil return savedBlock, nil
} }
func (blockRepository BlockRepository) insertBlock(block core.Block) error { func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, error) {
var blockId int64 var blockId int64
tx, _ := blockRepository.DB.BeginTx(context.Background(), nil) tx, _ := blockRepository.DB.BeginTx(context.Background(), nil)
err := tx.QueryRow( err := tx.QueryRow(
@ -104,15 +106,17 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) error {
Scan(&blockId) Scan(&blockId)
if err != nil { if err != nil {
tx.Rollback() tx.Rollback()
return postgres.ErrDBInsertFailed return 0, postgres.ErrDBInsertFailed
} }
err = blockRepository.createTransactions(tx, blockId, block.Transactions) if len(block.Transactions) > 0 {
if err != nil { err = blockRepository.createTransactions(tx, blockId, block.Transactions)
tx.Rollback() if err != nil {
return postgres.ErrDBInsertFailed tx.Rollback()
return 0, postgres.ErrDBInsertFailed
}
} }
tx.Commit() tx.Commit()
return nil return blockId, nil
} }
func (blockRepository BlockRepository) createTransactions(tx *sql.Tx, blockId int64, transactions []core.Transaction) error { func (blockRepository BlockRepository) createTransactions(tx *sql.Tx, blockId int64, transactions []core.Transaction) error {

View File

@ -177,6 +177,22 @@ var _ = Describe("Saving blocks", func() {
Expect(retrievedBlockTwo.Transactions[0].Hash).To(Equal("x678")) Expect(retrievedBlockTwo.Transactions[0].Hash).To(Equal("x678"))
}) })
It("returns 'block exists' error if attempting to add duplicate block", func() {
block := core.Block{
Number: 12345,
Hash: "0x12345",
}
_, err := blockRepository.CreateOrUpdateBlock(block)
Expect(err).NotTo(HaveOccurred())
_, err = blockRepository.CreateOrUpdateBlock(block)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(repositories.ErrBlockExists))
})
It("saves the attributes associated to a transaction", func() { It("saves the attributes associated to a transaction", func() {
gasLimit := uint64(5000) gasLimit := uint64(5000)
gasPrice := int64(3) gasPrice := int64(3)

View File

@ -13,14 +13,14 @@ type LogRepository struct {
*postgres.DB *postgres.DB
} }
func (logRepository LogRepository) CreateLogs(lgs []core.Log) error { func (logRepository LogRepository) CreateLogs(lgs []core.Log, receiptId int64) error {
tx, _ := logRepository.DB.BeginTx(context.Background(), nil) tx, _ := logRepository.DB.BeginTx(context.Background(), nil)
for _, tlog := range lgs { for _, tlog := range lgs {
_, err := tx.Exec( _, err := tx.Exec(
`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data) `INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
`, `,
tlog.BlockNumber, tlog.Address, tlog.TxHash, tlog.Index, tlog.Topics[0], tlog.Topics[1], tlog.Topics[2], tlog.Topics[3], tlog.Data, tlog.BlockNumber, tlog.Address, tlog.TxHash, tlog.Index, tlog.Topics[0], tlog.Topics[1], tlog.Topics[2], tlog.Topics[3], tlog.Data, receiptId,
) )
if err != nil { if err != nil {
tx.Rollback() tx.Rollback()

View File

@ -13,36 +13,45 @@ import (
) )
var _ = Describe("Logs Repository", func() { var _ = Describe("Logs Repository", func() {
var db *postgres.DB
var logsRepository datastore.LogRepository
var node core.Node
BeforeEach(func() {
node = core.Node{
GenesisBlock: "GENESIS",
NetworkID: 1,
ID: "b6f90c0fdd8ec9607aed8ee45c69322e47b7063f0bfb7a29c8ecafab24d0a22d24dd2329b5ee6ed4125a03cb14e57fd584e67f9e53e6c631055cbbd82f080845",
ClientName: "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9",
}
db = test_config.NewTestDB(node)
logsRepository = repositories.LogRepository{DB: db}
})
Describe("Saving logs", func() { Describe("Saving logs", func() {
var db *postgres.DB
var blockRepository datastore.BlockRepository
var logsRepository datastore.LogRepository
var receiptRepository datastore.ReceiptRepository
var node core.Node
BeforeEach(func() {
node = core.Node{
GenesisBlock: "GENESIS",
NetworkID: 1,
ID: "b6f90c0fdd8ec9607aed8ee45c69322e47b7063f0bfb7a29c8ecafab24d0a22d24dd2329b5ee6ed4125a03cb14e57fd584e67f9e53e6c631055cbbd82f080845",
ClientName: "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9",
}
db = test_config.NewTestDB(node)
blockRepository = repositories.BlockRepository{DB: db}
logsRepository = repositories.LogRepository{DB: db}
receiptRepository = repositories.ReceiptRepository{DB: db}
})
It("returns the log when it exists", func() { It("returns the log when it exists", func() {
blockNumber := int64(12345)
blockId, err := blockRepository.CreateOrUpdateBlock(core.Block{Number: blockNumber})
Expect(err).NotTo(HaveOccurred())
receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{})
Expect(err).NotTo(HaveOccurred())
logsRepository.CreateLogs([]core.Log{{ logsRepository.CreateLogs([]core.Log{{
BlockNumber: 1, BlockNumber: blockNumber,
Index: 0, Index: 0,
Address: "x123", Address: "x123",
TxHash: "x456", TxHash: "x456",
Topics: core.Topics{0: "x777", 1: "x888", 2: "x999"}, Topics: core.Topics{0: "x777", 1: "x888", 2: "x999"},
Data: "xabc", Data: "xabc",
}}, }}, receiptId)
)
log := logsRepository.GetLogs("x123", 1) log := logsRepository.GetLogs("x123", blockNumber)
Expect(log).NotTo(BeNil()) Expect(log).NotTo(BeNil())
Expect(log[0].BlockNumber).To(Equal(int64(1))) Expect(log[0].BlockNumber).To(Equal(blockNumber))
Expect(log[0].Address).To(Equal("x123")) Expect(log[0].Address).To(Equal("x123"))
Expect(log[0].Index).To(Equal(int64(0))) Expect(log[0].Index).To(Equal(int64(0)))
Expect(log[0].TxHash).To(Equal("x456")) Expect(log[0].TxHash).To(Equal("x456"))
@ -58,24 +67,27 @@ var _ = Describe("Logs Repository", func() {
}) })
It("filters to the correct block number and address", func() { It("filters to the correct block number and address", func() {
blockNumber := int64(12345)
blockId, err := blockRepository.CreateOrUpdateBlock(core.Block{Number: blockNumber})
Expect(err).NotTo(HaveOccurred())
receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{})
Expect(err).NotTo(HaveOccurred())
logsRepository.CreateLogs([]core.Log{{ logsRepository.CreateLogs([]core.Log{{
BlockNumber: 1, BlockNumber: blockNumber,
Index: 0, Index: 0,
Address: "x123", Address: "x123",
TxHash: "x456", TxHash: "x456",
Topics: core.Topics{0: "x777", 1: "x888", 2: "x999"}, Topics: core.Topics{0: "x777", 1: "x888", 2: "x999"},
Data: "xabc", Data: "xabc",
}}, }}, receiptId)
)
logsRepository.CreateLogs([]core.Log{{ logsRepository.CreateLogs([]core.Log{{
BlockNumber: 1, BlockNumber: blockNumber,
Index: 1, Index: 1,
Address: "x123", Address: "x123",
TxHash: "x789", TxHash: "x789",
Topics: core.Topics{0: "x111", 1: "x222", 2: "x333"}, Topics: core.Topics{0: "x111", 1: "x222", 2: "x333"},
Data: "xdef", Data: "xdef",
}}, }}, receiptId)
)
logsRepository.CreateLogs([]core.Log{{ logsRepository.CreateLogs([]core.Log{{
BlockNumber: 2, BlockNumber: 2,
Index: 0, Index: 0,
@ -83,10 +95,9 @@ var _ = Describe("Logs Repository", func() {
TxHash: "x456", TxHash: "x456",
Topics: core.Topics{0: "x777", 1: "x888", 2: "x999"}, Topics: core.Topics{0: "x777", 1: "x888", 2: "x999"},
Data: "xabc", Data: "xabc",
}}, }}, receiptId)
)
log := logsRepository.GetLogs("x123", 1) log := logsRepository.GetLogs("x123", blockNumber)
type logIndex struct { type logIndex struct {
blockNumber int64 blockNumber int64
@ -111,14 +122,12 @@ var _ = Describe("Logs Repository", func() {
Expect(len(log)).To(Equal(2)) Expect(len(log)).To(Equal(2))
Expect(uniqueBlockNumbers).To(Equal( Expect(uniqueBlockNumbers).To(Equal(
[]logIndex{ []logIndex{
{blockNumber: 1, Index: 0}, {blockNumber: blockNumber, Index: 0},
{blockNumber: 1, Index: 1}}, {blockNumber: blockNumber, Index: 1}},
)) ))
}) })
It("saves the logs attached to a receipt", func() { It("saves the logs attached to a receipt", func() {
var blockRepository datastore.BlockRepository
blockRepository = repositories.BlockRepository{DB: db}
logs := []core.Log{{ logs := []core.Log{{
Address: "0x8a4774fe82c63484afef97ca8d89a6ea5e21f973", Address: "0x8a4774fe82c63484afef97ca8d89a6ea5e21f973",
@ -171,7 +180,7 @@ var _ = Describe("Logs Repository", func() {
} }
block := core.Block{Transactions: []core.Transaction{transaction}} block := core.Block{Transactions: []core.Transaction{transaction}}
err := blockRepository.CreateOrUpdateBlock(block) _, err := blockRepository.CreateOrUpdateBlock(block)
Expect(err).To(Not(HaveOccurred())) Expect(err).To(Not(HaveOccurred()))
retrievedLogs := logsRepository.GetLogs("0x99041f808d598b782d5a3e498681c2452a31da08", 4745407) retrievedLogs := logsRepository.GetLogs("0x99041f808d598b782d5a3e498681c2452a31da08", 4745407)

View File

@ -1,6 +1,7 @@
package repositories package repositories
import ( import (
"context"
"database/sql" "database/sql"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
@ -12,6 +13,73 @@ type ReceiptRepository struct {
*postgres.DB *postgres.DB
} }
// CreateReceiptsAndLogs persists the given receipts and their logs for the
// block identified by blockId inside a single database transaction. If any
// insert fails, the transaction is rolled back and the error is returned.
func (receiptRepository ReceiptRepository) CreateReceiptsAndLogs(blockId int64, receipts []core.Receipt) error {
	tx, err := receiptRepository.DB.BeginTx(context.Background(), nil)
	if err != nil {
		return err
	}
	for _, receipt := range receipts {
		receiptId, err := createReceipt(receipt, blockId, tx)
		if err != nil {
			tx.Rollback()
			return err
		}
		if len(receipt.Logs) > 0 {
			err = createLogs(receipt.Logs, receiptId, tx)
			if err != nil {
				tx.Rollback()
				return err
			}
		}
	}
	// Commit can itself fail (e.g. connection loss, serialization failure);
	// previously this error was silently dropped.
	return tx.Commit()
}
// createReceipt inserts a single receipt row tied to blockId within tx and
// returns the id generated for it by postgres.
func createReceipt(receipt core.Receipt, blockId int64, tx *sql.Tx) (int64, error) {
	var id int64
	row := tx.QueryRow(
		`INSERT INTO receipts
               (contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
               VALUES ($1, $2, $3, $4, $5, $6, $7)
               RETURNING id`,
		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId,
	)
	scanErr := row.Scan(&id)
	return id, scanErr
}
// createLogs inserts one row per log within tx, each tied to receiptId.
// It stops at the first failing insert and returns that error.
func createLogs(logs []core.Log, receiptId int64, tx *sql.Tx) error {
	for _, entry := range logs {
		if _, err := tx.Exec(
			`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
                VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
                `,
			entry.BlockNumber, entry.Address, entry.TxHash, entry.Index, entry.Topics[0], entry.Topics[1], entry.Topics[2], entry.Topics[3], entry.Data, receiptId,
		); err != nil {
			return err
		}
	}
	return nil
}
// CreateReceipt persists a single receipt for the block identified by blockId
// inside its own transaction and returns the generated receipt id.
func (receiptRepository ReceiptRepository) CreateReceipt(blockId int64, receipt core.Receipt) (int64, error) {
	tx, err := receiptRepository.DB.BeginTx(context.Background(), nil)
	if err != nil {
		// Previously this error was discarded with `tx, _ :=`, which would
		// cause a nil pointer dereference below whenever BeginTx failed.
		return 0, err
	}
	var receiptId int64
	err = tx.QueryRow(
		`INSERT INTO receipts
               (contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
               VALUES ($1, $2, $3, $4, $5, $6, $7)
               RETURNING id`,
		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId).Scan(&receiptId)
	if err != nil {
		tx.Rollback()
		return receiptId, err
	}
	// Surface commit errors instead of ignoring them.
	return receiptId, tx.Commit()
}
func (receiptRepository ReceiptRepository) GetReceipt(txHash string) (core.Receipt, error) { func (receiptRepository ReceiptRepository) GetReceipt(txHash string) (core.Receipt, error) {
row := receiptRepository.DB.QueryRow( row := receiptRepository.DB.QueryRow(
`SELECT contract_address, `SELECT contract_address,

View File

@ -10,7 +10,9 @@ import (
"github.com/vulcanize/vulcanizedb/test_config" "github.com/vulcanize/vulcanizedb/test_config"
) )
var _ bool = Describe("Logs Repository", func() { var _ = Describe("Logs Repository", func() {
var blockRepository datastore.BlockRepository
var logRepository datastore.LogRepository
var receiptRepository datastore.ReceiptRepository var receiptRepository datastore.ReceiptRepository
var db *postgres.DB var db *postgres.DB
var node core.Node var node core.Node
@ -22,14 +24,67 @@ var _ bool = Describe("Logs Repository", func() {
ClientName: "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9", ClientName: "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9",
} }
db = test_config.NewTestDB(node) db = test_config.NewTestDB(node)
blockRepository = repositories.BlockRepository{DB: db}
logRepository = repositories.LogRepository{DB: db}
receiptRepository = repositories.ReceiptRepository{DB: db} receiptRepository = repositories.ReceiptRepository{DB: db}
}) })
Describe("Saving receipts", func() { Describe("Saving multiple receipts", func() {
It("persists each receipt and its logs", func() {
blockNumber := int64(1234567)
blockId, err := blockRepository.CreateOrUpdateBlock(core.Block{Number: blockNumber})
Expect(err).NotTo(HaveOccurred())
txHashOne := "0xTxHashOne"
txHashTwo := "0xTxHashTwo"
addressOne := "0xAddressOne"
addressTwo := "0xAddressTwo"
logsOne := []core.Log{{
Address: addressOne,
BlockNumber: blockNumber,
TxHash: txHashOne,
}, {
Address: addressOne,
BlockNumber: blockNumber,
TxHash: txHashOne,
}}
logsTwo := []core.Log{{
BlockNumber: blockNumber,
TxHash: txHashTwo,
Address: addressTwo,
}}
receiptOne := core.Receipt{
Logs: logsOne,
TxHash: txHashOne,
}
receiptTwo := core.Receipt{
Logs: logsTwo,
TxHash: txHashTwo,
}
receipts := []core.Receipt{receiptOne, receiptTwo}
err = receiptRepository.CreateReceiptsAndLogs(blockId, receipts)
Expect(err).NotTo(HaveOccurred())
persistedReceiptOne, err := receiptRepository.GetReceipt(txHashOne)
Expect(err).NotTo(HaveOccurred())
Expect(persistedReceiptOne).NotTo(BeNil())
Expect(persistedReceiptOne.TxHash).To(Equal(txHashOne))
persistedReceiptTwo, err := receiptRepository.GetReceipt(txHashTwo)
Expect(err).NotTo(HaveOccurred())
Expect(persistedReceiptTwo).NotTo(BeNil())
Expect(persistedReceiptTwo.TxHash).To(Equal(txHashTwo))
persistedAddressOneLogs := logRepository.GetLogs(addressOne, blockNumber)
Expect(persistedAddressOneLogs).NotTo(BeNil())
Expect(len(persistedAddressOneLogs)).To(Equal(2))
persistedAddressTwoLogs := logRepository.GetLogs(addressTwo, blockNumber)
Expect(persistedAddressTwoLogs).NotTo(BeNil())
Expect(len(persistedAddressTwoLogs)).To(Equal(1))
})
})
Describe("Saving receipts on a block's transactions", func() {
It("returns the receipt when it exists", func() { It("returns the receipt when it exists", func() {
var blockRepository datastore.BlockRepository
db := test_config.NewTestDB(node)
blockRepository = repositories.BlockRepository{DB: db}
expected := core.Receipt{ expected := core.Receipt{
ContractAddress: "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae", ContractAddress: "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
CumulativeGasUsed: 7996119, CumulativeGasUsed: 7996119,
@ -66,9 +121,6 @@ var _ bool = Describe("Logs Repository", func() {
}) })
It("still saves receipts without logs", func() { It("still saves receipts without logs", func() {
var blockRepository datastore.BlockRepository
db := test_config.NewTestDB(node)
blockRepository = repositories.BlockRepository{DB: db}
receipt := core.Receipt{ receipt := core.Receipt{
TxHash: "0x002c4799161d809b23f67884eb6598c9df5894929fe1a9ead97ca175d360f547", TxHash: "0x002c4799161d809b23f67884eb6598c9df5894929fe1a9ead97ca175d360f547",
} }

View File

@ -13,14 +13,18 @@ import (
var _ = Describe("Watched Events Repository", func() { var _ = Describe("Watched Events Repository", func() {
var db *postgres.DB var db *postgres.DB
var logRepository datastore.LogRepository var blocksRepository datastore.BlockRepository
var filterRepository datastore.FilterRepository var filterRepository datastore.FilterRepository
var logRepository datastore.LogRepository
var receiptRepository datastore.ReceiptRepository
var watchedEventRepository datastore.WatchedEventRepository var watchedEventRepository datastore.WatchedEventRepository
BeforeEach(func() { BeforeEach(func() {
db = test_config.NewTestDB(core.Node{}) db = test_config.NewTestDB(core.Node{})
logRepository = repositories.LogRepository{DB: db} blocksRepository = repositories.BlockRepository{DB: db}
filterRepository = repositories.FilterRepository{DB: db} filterRepository = repositories.FilterRepository{DB: db}
logRepository = repositories.LogRepository{DB: db}
receiptRepository = repositories.ReceiptRepository{DB: db}
watchedEventRepository = repositories.WatchedEventRepository{DB: db} watchedEventRepository = repositories.WatchedEventRepository{DB: db}
}) })
@ -56,7 +60,11 @@ var _ = Describe("Watched Events Repository", func() {
} }
err := filterRepository.CreateFilter(filter) err := filterRepository.CreateFilter(filter)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = logRepository.CreateLogs(logs) blockId, err := blocksRepository.CreateOrUpdateBlock(core.Block{})
Expect(err).NotTo(HaveOccurred())
receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{})
Expect(err).NotTo(HaveOccurred())
err = logRepository.CreateLogs(logs, receiptId)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
matchingLogs, err := watchedEventRepository.GetWatchedEvents("Filter1") matchingLogs, err := watchedEventRepository.GetWatchedEvents("Filter1")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -69,7 +77,6 @@ var _ = Describe("Watched Events Repository", func() {
Expect(matchingLogs[0].Topic1).To(Equal(expectedWatchedEventLog[0].Topic1)) Expect(matchingLogs[0].Topic1).To(Equal(expectedWatchedEventLog[0].Topic1))
Expect(matchingLogs[0].Topic2).To(Equal(expectedWatchedEventLog[0].Topic2)) Expect(matchingLogs[0].Topic2).To(Equal(expectedWatchedEventLog[0].Topic2))
Expect(matchingLogs[0].Data).To(Equal(expectedWatchedEventLog[0].Data)) Expect(matchingLogs[0].Data).To(Equal(expectedWatchedEventLog[0].Data))
}) })
It("retrieves a watched event log by name", func() { It("retrieves a watched event log by name", func() {
@ -110,7 +117,11 @@ var _ = Describe("Watched Events Repository", func() {
}} }}
err := filterRepository.CreateFilter(filter) err := filterRepository.CreateFilter(filter)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = logRepository.CreateLogs(logs) blockId, err := blocksRepository.CreateOrUpdateBlock(core.Block{Hash: "Ox123"})
Expect(err).NotTo(HaveOccurred())
receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{TxHash: "0x123"})
Expect(err).NotTo(HaveOccurred())
err = logRepository.CreateLogs(logs, receiptId)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
matchingLogs, err := watchedEventRepository.GetWatchedEvents("Filter1") matchingLogs, err := watchedEventRepository.GetWatchedEvents("Filter1")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())

View File

@ -12,7 +12,7 @@ var ErrBlockDoesNotExist = func(blockNumber int64) error {
} }
type BlockRepository interface { type BlockRepository interface {
CreateOrUpdateBlock(block core.Block) error CreateOrUpdateBlock(block core.Block) (int64, error)
GetBlock(blockNumber int64) (core.Block, error) GetBlock(blockNumber int64) (core.Block, error)
MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64) []int64 MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64) []int64
SetBlocksStatus(chainHead int64) SetBlocksStatus(chainHead int64)
@ -38,7 +38,7 @@ type FilterRepository interface {
} }
type LogRepository interface { type LogRepository interface {
CreateLogs(logs []core.Log) error CreateLogs(logs []core.Log, receiptId int64) error
GetLogs(address string, blockNumber int64) []core.Log GetLogs(address string, blockNumber int64) []core.Log
} }
@ -47,6 +47,8 @@ var ErrReceiptDoesNotExist = func(txHash string) error {
} }
type ReceiptRepository interface { type ReceiptRepository interface {
CreateReceiptsAndLogs(blockId int64, receipts []core.Receipt) error
CreateReceipt(blockId int64, receipt core.Receipt) (int64, error)
GetReceipt(txHash string) (core.Receipt, error) GetReceipt(txHash string) (core.Receipt, error)
} }

View File

@ -0,0 +1,51 @@
package fakes
import (
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// MockBlockRepository is a test double for datastore.BlockRepository. It
// records the arguments of CreateOrUpdateBlock and returns configured values;
// the remaining interface methods are intentionally unimplemented.
type MockBlockRepository struct {
	createOrUpdateBlockCalled      bool
	createOrUpdateBlockPassedBlock core.Block
	createOrUpdateBlockReturnInt   int64
	createOrUpdateBlockReturnErr   error
}

// NewMockBlockRepository returns a mock with no recorded calls. Every field's
// zero value is exactly the initial state the mock needs.
func NewMockBlockRepository() *MockBlockRepository {
	return &MockBlockRepository{}
}

// SetCreateOrUpdateBlockReturnVals configures what CreateOrUpdateBlock returns.
func (mbr *MockBlockRepository) SetCreateOrUpdateBlockReturnVals(i int64, err error) {
	mbr.createOrUpdateBlockReturnInt = i
	mbr.createOrUpdateBlockReturnErr = err
}

// CreateOrUpdateBlock records the call and its argument, then returns the
// configured values.
func (mbr *MockBlockRepository) CreateOrUpdateBlock(block core.Block) (int64, error) {
	mbr.createOrUpdateBlockCalled = true
	mbr.createOrUpdateBlockPassedBlock = block
	return mbr.createOrUpdateBlockReturnInt, mbr.createOrUpdateBlockReturnErr
}

// GetBlock is not needed by the specs using this mock.
func (mbr *MockBlockRepository) GetBlock(blockNumber int64) (core.Block, error) {
	panic("implement me")
}

// MissingBlockNumbers is not needed by the specs using this mock.
func (mbr *MockBlockRepository) MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64) []int64 {
	panic("implement me")
}

// SetBlocksStatus is not needed by the specs using this mock.
func (mbr *MockBlockRepository) SetBlocksStatus(chainHead int64) {
	panic("implement me")
}

// AssertCreateOrUpdateBlockCalledWith fails the spec unless CreateOrUpdateBlock
// was called with exactly the given block.
func (mbr *MockBlockRepository) AssertCreateOrUpdateBlockCalledWith(block core.Block) {
	Expect(mbr.createOrUpdateBlockCalled).To(BeTrue())
	Expect(mbr.createOrUpdateBlockPassedBlock).To(Equal(block))
}

View File

@ -0,0 +1,86 @@
package fakes
import (
. "github.com/onsi/gomega"
"github.com/ethereum/go-ethereum/core/types"
)
// MockEthereumDatabase is a test double for the cold-store ethereum database.
// It records the arguments of every call and hands back configured values.
type MockEthereumDatabase struct {
	getBlockCalled                 bool
	getBlockPassedHash             []byte
	getBlockPassedNumber           int64
	getBlockReturnBlock            *types.Block
	getBlockHashCalled             bool
	getBlockHashPassedNumber       int64
	getBlockHashReturnHash         []byte
	getBlockReceiptsCalled         bool
	getBlockReceiptsPassedHash     []byte
	getBlockReceiptsPassedNumber   int64
	getBlockReceiptsReturnReceipts types.Receipts
}

// NewMockEthereumDatabase returns a mock with no recorded calls and nil
// return values; every field's zero value is the desired initial state.
func NewMockEthereumDatabase() *MockEthereumDatabase {
	return &MockEthereumDatabase{}
}

// SetReturnBlock configures the block handed back by GetBlock.
func (med *MockEthereumDatabase) SetReturnBlock(block *types.Block) {
	med.getBlockReturnBlock = block
}

// SetReturnHash configures the hash handed back by GetBlockHash.
func (med *MockEthereumDatabase) SetReturnHash(hash []byte) {
	med.getBlockHashReturnHash = hash
}

// SetReturnReceipts configures the receipts handed back by GetBlockReceipts.
func (med *MockEthereumDatabase) SetReturnReceipts(receipts types.Receipts) {
	med.getBlockReceiptsReturnReceipts = receipts
}

// GetBlock records its arguments and returns the configured block.
func (med *MockEthereumDatabase) GetBlock(hash []byte, blockNumber int64) *types.Block {
	med.getBlockCalled = true
	med.getBlockPassedHash = hash
	med.getBlockPassedNumber = blockNumber
	return med.getBlockReturnBlock
}

// GetBlockHash records its argument and returns the configured hash.
func (med *MockEthereumDatabase) GetBlockHash(blockNumber int64) []byte {
	med.getBlockHashCalled = true
	med.getBlockHashPassedNumber = blockNumber
	return med.getBlockHashReturnHash
}

// GetBlockReceipts records its arguments and returns the configured receipts.
func (med *MockEthereumDatabase) GetBlockReceipts(blockHash []byte, blockNumber int64) types.Receipts {
	med.getBlockReceiptsCalled = true
	med.getBlockReceiptsPassedHash = blockHash
	med.getBlockReceiptsPassedNumber = blockNumber
	return med.getBlockReceiptsReturnReceipts
}

// AssertGetBlockCalledWith fails the spec unless GetBlock was called with the
// given hash and number.
func (med *MockEthereumDatabase) AssertGetBlockCalledWith(hash []byte, blockNumber int64) {
	Expect(med.getBlockCalled).To(BeTrue())
	Expect(med.getBlockPassedHash).To(Equal(hash))
	Expect(med.getBlockPassedNumber).To(Equal(blockNumber))
}

// AssertGetBlockHashCalledWith fails the spec unless GetBlockHash was called
// with the given number.
func (med *MockEthereumDatabase) AssertGetBlockHashCalledWith(blockNumber int64) {
	Expect(med.getBlockHashCalled).To(BeTrue())
	Expect(med.getBlockHashPassedNumber).To(Equal(blockNumber))
}

// AssertGetBlockReceiptsCalledWith fails the spec unless GetBlockReceipts was
// called with the given hash and number.
func (med *MockEthereumDatabase) AssertGetBlockReceiptsCalledWith(blockHash []byte, blockNumber int64) {
	Expect(med.getBlockReceiptsCalled).To(BeTrue())
	Expect(med.getBlockReceiptsPassedHash).To(Equal(blockHash))
	Expect(med.getBlockReceiptsPassedNumber).To(Equal(blockNumber))
}

View File

@ -0,0 +1,88 @@
package fakes
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/gomega"
)
// MockLevelDatabaseReader is a test double for the level-db reader. It records
// the arguments of every call and hands back configured values.
type MockLevelDatabaseReader struct {
	getBlockCalled               bool
	getBlockReceiptsCalled       bool
	getCanonicalHashCalled       bool
	passedHash                   common.Hash
	getCanonicalHashPassedNumber uint64
	getBlockPassedHash           common.Hash
	getBlockPassedNumber         uint64
	getBlockReceiptsPassedHash   common.Hash
	getBlockReceiptsPassedNumber uint64
	returnBlock                  *types.Block
	returnHash                   common.Hash
	returnReceipts               types.Receipts
}

// NewMockLevelDatabaseReader returns a mock with no recorded calls; every
// field's zero value is the desired initial state.
func NewMockLevelDatabaseReader() *MockLevelDatabaseReader {
	return &MockLevelDatabaseReader{}
}

// SetReturnBlock configures the block handed back by GetBlock.
func (mldr *MockLevelDatabaseReader) SetReturnBlock(block *types.Block) {
	mldr.returnBlock = block
}

// SetReturnHash configures the hash handed back by GetCanonicalHash.
func (mldr *MockLevelDatabaseReader) SetReturnHash(hash common.Hash) {
	mldr.returnHash = hash
}

// SetReturnReceipts configures the receipts handed back by GetBlockReceipts.
func (mldr *MockLevelDatabaseReader) SetReturnReceipts(receipts types.Receipts) {
	mldr.returnReceipts = receipts
}

// GetCanonicalHash records its argument and returns the configured hash.
func (mldr *MockLevelDatabaseReader) GetCanonicalHash(number uint64) common.Hash {
	mldr.getCanonicalHashCalled = true
	mldr.getCanonicalHashPassedNumber = number
	return mldr.returnHash
}

// GetBlock records its arguments and returns the configured block.
func (mldr *MockLevelDatabaseReader) GetBlock(hash common.Hash, number uint64) *types.Block {
	mldr.getBlockCalled = true
	mldr.getBlockPassedHash = hash
	mldr.getBlockPassedNumber = number
	return mldr.returnBlock
}

// GetBlockReceipts records its arguments and returns the configured receipts.
func (mldr *MockLevelDatabaseReader) GetBlockReceipts(hash common.Hash, number uint64) types.Receipts {
	mldr.getBlockReceiptsCalled = true
	mldr.getBlockReceiptsPassedHash = hash
	mldr.getBlockReceiptsPassedNumber = number
	return mldr.returnReceipts
}

// AssertGetCanonicalHashCalledWith fails the spec unless GetCanonicalHash was
// called with the given number.
func (mldr *MockLevelDatabaseReader) AssertGetCanonicalHashCalledWith(number uint64) {
	Expect(mldr.getCanonicalHashCalled).To(BeTrue())
	Expect(mldr.getCanonicalHashPassedNumber).To(Equal(number))
}

// AssertGetBlockCalledWith fails the spec unless GetBlock was called with the
// given hash and number.
func (mldr *MockLevelDatabaseReader) AssertGetBlockCalledWith(hash common.Hash, number uint64) {
	Expect(mldr.getBlockCalled).To(BeTrue())
	Expect(mldr.getBlockPassedHash).To(Equal(hash))
	Expect(mldr.getBlockPassedNumber).To(Equal(number))
}

// AssertGetBlockReceiptsCalledWith fails the spec unless GetBlockReceipts was
// called with the given hash and number.
func (mldr *MockLevelDatabaseReader) AssertGetBlockReceiptsCalledWith(hash common.Hash, number uint64) {
	Expect(mldr.getBlockReceiptsCalled).To(BeTrue())
	Expect(mldr.getBlockReceiptsPassedHash).To(Equal(hash))
	Expect(mldr.getBlockReceiptsPassedNumber).To(Equal(number))
}

View File

@ -0,0 +1,52 @@
package fakes
import (
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// MockReceiptRepository is a test double for datastore.ReceiptRepository. It
// records calls to CreateReceiptsAndLogs and returns a configured error; the
// remaining interface methods are intentionally unimplemented.
type MockReceiptRepository struct {
	createReceiptsAndLogsCalled         bool
	createReceiptsAndLogsPassedBlockId  int64
	createReceiptsAndLogsPassedReceipts []core.Receipt
	createReceiptsAndLogsReturnErr      error
}

// NewMockReceiptRepository returns a mock with no recorded calls; every
// field's zero value is the desired initial state.
func NewMockReceiptRepository() *MockReceiptRepository {
	return &MockReceiptRepository{}
}

// SetCreateReceiptsAndLogsReturnErr configures the error CreateReceiptsAndLogs returns.
func (mrr *MockReceiptRepository) SetCreateReceiptsAndLogsReturnErr(err error) {
	mrr.createReceiptsAndLogsReturnErr = err
}

// CreateReceiptsAndLogs records the call and its arguments, then returns the
// configured error.
func (mrr *MockReceiptRepository) CreateReceiptsAndLogs(blockId int64, receipts []core.Receipt) error {
	mrr.createReceiptsAndLogsCalled = true
	mrr.createReceiptsAndLogsPassedBlockId = blockId
	mrr.createReceiptsAndLogsPassedReceipts = receipts
	return mrr.createReceiptsAndLogsReturnErr
}

// CreateReceipt is not needed by the specs using this mock.
func (mrr *MockReceiptRepository) CreateReceipt(blockId int64, receipt core.Receipt) (int64, error) {
	panic("implement me")
}

// GetReceipt is not needed by the specs using this mock.
func (mrr *MockReceiptRepository) GetReceipt(txHash string) (core.Receipt, error) {
	panic("implement me")
}

// AssertCreateReceiptsAndLogsCalledWith fails the spec unless
// CreateReceiptsAndLogs was called with exactly the given arguments.
func (mrr *MockReceiptRepository) AssertCreateReceiptsAndLogsCalledWith(blockId int64, receipts []core.Receipt) {
	Expect(mrr.createReceiptsAndLogsCalled).To(BeTrue())
	Expect(mrr.createReceiptsAndLogsPassedBlockId).To(Equal(blockId))
	Expect(mrr.createReceiptsAndLogsPassedReceipts).To(Equal(receipts))
}

// AssertCreateReceiptsAndLogsNotCalled fails the spec if CreateReceiptsAndLogs
// was ever invoked.
func (mrr *MockReceiptRepository) AssertCreateReceiptsAndLogsNotCalled() {
	Expect(mrr.createReceiptsAndLogsCalled).To(BeFalse())
}

View File

@ -0,0 +1,40 @@
package fakes
import (
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// MockTransactionConverter is a test double for the transaction converter. It
// records the block passed to ConvertTransactionsToCore and hands back
// configured transactions and error.
type MockTransactionConverter struct {
	convertTransactionsToCoreCalled             bool
	convertTransactionsToCorePassedBlock        *types.Block
	convertTransactionsToCoreReturnTransactions []core.Transaction
	convertTransactionsToCoreReturnError        error
}

// NewMockTransactionConverter returns a mock with no recorded calls; every
// field's zero value is the desired initial state.
func NewMockTransactionConverter() *MockTransactionConverter {
	return &MockTransactionConverter{}
}

// SetConvertTransactionsToCoreReturnVals configures what
// ConvertTransactionsToCore returns.
func (mtc *MockTransactionConverter) SetConvertTransactionsToCoreReturnVals(transactions []core.Transaction, err error) {
	mtc.convertTransactionsToCoreReturnTransactions = transactions
	mtc.convertTransactionsToCoreReturnError = err
}

// ConvertTransactionsToCore records the call and its argument, then returns
// the configured values.
func (mtc *MockTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
	mtc.convertTransactionsToCoreCalled = true
	mtc.convertTransactionsToCorePassedBlock = gethBlock
	return mtc.convertTransactionsToCoreReturnTransactions, mtc.convertTransactionsToCoreReturnError
}

// AssertConvertTransactionsToCoreCalledWith fails the spec unless
// ConvertTransactionsToCore was called with exactly the given block.
func (mtc *MockTransactionConverter) AssertConvertTransactionsToCoreCalledWith(gethBlock *types.Block) {
	Expect(mtc.convertTransactionsToCoreCalled).To(BeTrue())
	Expect(mtc.convertTransactionsToCorePassedBlock).To(Equal(gethBlock))
}

View File

@ -10,13 +10,17 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/geth/node"
"golang.org/x/net/context" "golang.org/x/net/context"
"github.com/vulcanize/vulcanizedb/pkg/core"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
vulcRpc "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/geth/node"
) )
type Blockchain struct { type Blockchain struct {
client *ethclient.Client client *ethclient.Client
blockConverter vulcCommon.BlockConverter
readGethHeaders chan *types.Header readGethHeaders chan *types.Header
outputBlocks chan core.Block outputBlocks chan core.Block
newHeadSubscription ethereum.Subscription newHeadSubscription ethereum.Subscription
@ -33,6 +37,8 @@ func NewBlockchain(ipcPath string) *Blockchain {
clientWrapper := node.ClientWrapper{ContextCaller: rpcClient, IPCPath: ipcPath} clientWrapper := node.ClientWrapper{ContextCaller: rpcClient, IPCPath: ipcPath}
blockchain.node = node.MakeNode(clientWrapper) blockchain.node = node.MakeNode(clientWrapper)
blockchain.client = client blockchain.client = client
transactionConverter := vulcRpc.NewRpcTransactionConverter(client)
blockchain.blockConverter = vulcCommon.NewBlockConverter(transactionConverter)
return &blockchain return &blockchain
} }
@ -50,7 +56,7 @@ func (blockchain *Blockchain) GetLogs(contract core.Contract, startingBlockNumbe
if err != nil { if err != nil {
return []core.Log{}, err return []core.Log{}, err
} }
logs := ToCoreLogs(gethLogs) logs := vulcCommon.ToCoreLogs(gethLogs)
return logs, nil return logs, nil
} }
@ -63,7 +69,7 @@ func (blockchain *Blockchain) GetBlockByNumber(blockNumber int64) (core.Block, e
if err != nil { if err != nil {
return core.Block{}, err return core.Block{}, err
} }
block, err := ToCoreBlock(gethBlock, blockchain.client) block, err := blockchain.blockConverter.ToCoreBlock(gethBlock)
if err != nil { if err != nil {
return core.Block{}, err return core.Block{}, err
} }

58
pkg/geth/cold_importer.go Normal file
View File

@ -0,0 +1,58 @@
package geth
import (
"github.com/vulcanize/vulcanizedb/pkg/datastore"
"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)
// ColdImporter copies chain data out of a cold (offline) geth level-db store
// and into the postgres repositories.
type ColdImporter struct {
	blockRepository   datastore.BlockRepository
	converter         common.BlockConverter
	ethDB             ethereum.Database
	receiptRepository datastore.ReceiptRepository
}

// NewColdImporter wires an importer from its data source (ethDB) and its
// sinks (the block and receipt repositories), using converter to translate
// raw geth blocks into core blocks.
func NewColdImporter(ethDB ethereum.Database, blockRepository datastore.BlockRepository, receiptRepository datastore.ReceiptRepository, converter common.BlockConverter) *ColdImporter {
	importer := ColdImporter{
		ethDB:             ethDB,
		blockRepository:   blockRepository,
		receiptRepository: receiptRepository,
		converter:         converter,
	}
	return &importer
}
// Execute imports every block in [startingBlockNumber, endingBlockNumber],
// along with its transactions, receipts, and logs. Blocks that already exist
// in postgres are skipped; the first unexpected error aborts the run.
func (ci *ColdImporter) Execute(startingBlockNumber int64, endingBlockNumber int64) error {
	for number := startingBlockNumber; number <= endingBlockNumber; number++ {
		hash := ci.ethDB.GetBlockHash(number)
		blockId, err := ci.createBlocksAndTransactions(hash, number)
		switch {
		case err == repositories.ErrBlockExists:
			// Already imported; nothing more to do for this block.
			continue
		case err != nil:
			return err
		}
		if err := ci.createReceiptsAndLogs(hash, number, blockId); err != nil {
			return err
		}
	}
	return nil
}
// createBlocksAndTransactions reads the raw block at (hash, i) from the cold
// store, converts it, and upserts it, returning the postgres block id.
func (ci *ColdImporter) createBlocksAndTransactions(hash []byte, i int64) (int64, error) {
	gethBlock := ci.ethDB.GetBlock(hash, i)
	coreBlock, convertErr := ci.converter.ToCoreBlock(gethBlock)
	if convertErr != nil {
		return 0, convertErr
	}
	return ci.blockRepository.CreateOrUpdateBlock(coreBlock)
}
// createReceiptsAndLogs reads the block's receipts from the cold store,
// converts them, and persists them (with their logs) against blockId.
func (ci *ColdImporter) createReceiptsAndLogs(hash []byte, number int64, blockId int64) error {
	gethReceipts := ci.ethDB.GetBlockReceipts(hash, number)
	coreReceipts := common.ToCoreReceipts(gethReceipts)
	return ci.receiptRepository.CreateReceiptsAndLogs(blockId, coreReceipts)
}

View File

@ -0,0 +1,85 @@
package geth_test
import (
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/pkg/geth"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)
// Specs for ColdImporter: blocks and receipts are fetched from the cold
// (level db) store via mocks and persisted through the mocked repositories.
var _ = Describe("Geth cold importer", func() {
	var fakeGethBlock *types.Block

	BeforeEach(func() {
		// Minimal geth block with no transactions, uncles, or receipts.
		header := &types.Header{}
		transactions := []*types.Transaction{}
		uncles := []*types.Header{}
		receipts := []*types.Receipt{}
		fakeGethBlock = types.NewBlock(header, transactions, uncles, receipts)
	})

	It("fetches blocks from level db and persists them to pg", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		blockNumber := int64(123)
		fakeHash := []byte{1, 2, 3, 4, 5}
		mockEthereumDatabase.SetReturnHash(fakeHash)
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		importer := geth.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)

		// Assert on Execute's error; it was previously discarded, which
		// could mask import failures in this spec.
		err := importer.Execute(blockNumber, blockNumber)
		Expect(err).NotTo(HaveOccurred())

		mockEthereumDatabase.AssertGetBlockHashCalledWith(blockNumber)
		mockEthereumDatabase.AssertGetBlockCalledWith(fakeHash, blockNumber)
		mockTransactionConverter.AssertConvertTransactionsToCoreCalledWith(fakeGethBlock)
		convertedBlock, err := blockConverter.ToCoreBlock(fakeGethBlock)
		Expect(err).NotTo(HaveOccurred())
		mockBlockRepository.AssertCreateOrUpdateBlockCalledWith(convertedBlock)
	})

	It("fetches receipts from level db and persists them to pg", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		blockNumber := int64(123)
		blockId := int64(999)
		mockBlockRepository.SetCreateOrUpdateBlockReturnVals(blockId, nil)
		fakeReceipts := types.Receipts{{}}
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		mockEthereumDatabase.SetReturnReceipts(fakeReceipts)
		importer := geth.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)

		// Assert on Execute's error; it was previously discarded.
		err := importer.Execute(blockNumber, blockNumber)
		Expect(err).NotTo(HaveOccurred())

		expectedReceipts := vulcCommon.ToCoreReceipts(fakeReceipts)
		mockReceiptRepository.AssertCreateReceiptsAndLogsCalledWith(blockId, expectedReceipts)
	})

	It("does not fetch receipts if block already exists", func() {
		mockEthereumDatabase := fakes.NewMockEthereumDatabase()
		mockBlockRepository := fakes.NewMockBlockRepository()
		mockReceiptRepository := fakes.NewMockReceiptRepository()
		mockTransactionConverter := fakes.NewMockTransactionConverter()
		blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
		mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
		mockBlockRepository.SetCreateOrUpdateBlockReturnVals(0, repositories.ErrBlockExists)
		importer := geth.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)

		err := importer.Execute(1, 1)

		// ErrBlockExists means "skip", not "fail".
		Expect(err).NotTo(HaveOccurred())
		mockReceiptRepository.AssertCreateReceiptsAndLogsNotCalled()
	})
})

View File

@ -0,0 +1,69 @@
package cold_db
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core"
"golang.org/x/sync/errgroup"
"strings"
)
// ColdDbTransactionConverter recovers transaction senders locally from their
// signatures, so it requires no RPC connection to a running node.
type ColdDbTransactionConverter struct{}

// NewColdDbTransactionConverter returns a stateless converter.
func NewColdDbTransactionConverter() *ColdDbTransactionConverter {
	return new(ColdDbTransactionConverter)
}
// ConvertTransactionsToCore converts every transaction in gethBlock to a core
// transaction, deriving each sender from the transaction's signature. The
// conversions run concurrently via an errgroup; the result preserves the
// block's transaction order. On error, nil is returned rather than a
// partially populated slice.
func (cdtc *ColdDbTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
	var g errgroup.Group
	coreTransactions := make([]core.Transaction, len(gethBlock.Transactions()))
	for gethTransactionIndex, gethTransaction := range gethBlock.Transactions() {
		// Capture loop variables so each goroutine sees its own values.
		transaction := gethTransaction
		transactionIndex := uint(gethTransactionIndex)
		g.Go(func() error {
			signer := getSigner(transaction)
			sender, err := signer.Sender(transaction)
			if err != nil {
				return err
			}
			coreTransactions[transactionIndex] = transToCoreTrans(transaction, &sender)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		// Don't hand callers a half-filled slice alongside a non-nil error.
		return nil, err
	}
	return coreTransactions, nil
}
// getSigner picks the signer matching the transaction: EIP-155 for protected
// transactions with a non-zero V, Homestead otherwise.
func getSigner(tx *types.Transaction) types.Signer {
	v, _, _ := tx.RawSignatureValues()
	if v.Sign() == 0 || !tx.Protected() {
		return types.HomesteadSigner{}
	}
	return types.NewEIP155Signer(tx.ChainId())
}
// transToCoreTrans maps a geth transaction plus its recovered sender to the
// core representation. To/From addresses are lower-cased hex strings.
func transToCoreTrans(transaction *types.Transaction, from *common.Address) core.Transaction {
	return core.Transaction{
		Hash:     transaction.Hash().Hex(),
		Nonce:    transaction.Nonce(),
		To:       strings.ToLower(addressToHex(transaction.To())),
		From:     strings.ToLower(addressToHex(from)),
		GasLimit: transaction.Gas(),
		GasPrice: transaction.GasPrice().Int64(),
		Value:    transaction.Value().String(),
		Data:     hexutil.Encode(transaction.Data()),
	}
}
// addressToHex renders an address as hex, mapping a nil address (e.g. a
// contract-creation transaction's To field) to the empty string.
func addressToHex(to *common.Address) string {
	if to != nil {
		return to.Hex()
	}
	return ""
}

View File

@ -0,0 +1,41 @@
package common
import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core"
"strings"
)
// BlockConverter translates raw geth blocks into core blocks, delegating
// transaction conversion to the injected TransactionConverter.
type BlockConverter struct {
	transactionConverter TransactionConverter
}

// NewBlockConverter builds a converter around the given transaction converter.
func NewBlockConverter(transactionConverter TransactionConverter) BlockConverter {
	var bc BlockConverter
	bc.transactionConverter = transactionConverter
	return bc
}
// ToCoreBlock maps a raw geth block to a core.Block. Transactions are
// converted via the injected transaction converter; block and uncle rewards
// are computed from the converted block. Returns an error only if transaction
// conversion fails.
func (bc BlockConverter) ToCoreBlock(gethBlock *types.Block) (core.Block, error) {
	transactions, err := bc.transactionConverter.ConvertTransactionsToCore(gethBlock)
	if err != nil {
		return core.Block{}, err
	}
	coreBlock := core.Block{
		Difficulty: gethBlock.Difficulty().Int64(),
		ExtraData:  hexutil.Encode(gethBlock.Extra()),
		GasLimit:   gethBlock.GasLimit(),
		GasUsed:    gethBlock.GasUsed(),
		Hash:       gethBlock.Hash().Hex(),
		// Miner address is stored lower-cased for consistent lookups.
		Miner:      strings.ToLower(gethBlock.Coinbase().Hex()),
		Nonce:      hexutil.Encode(gethBlock.Header().Nonce[:]),
		Number:     gethBlock.Number().Int64(),
		ParentHash: gethBlock.ParentHash().Hex(),
		Size:       gethBlock.Size().String(),
		Time:       gethBlock.Time().Int64(),
		Transactions: transactions,
		UncleHash:    gethBlock.UncleHash().Hex(),
	}
	// Rewards depend on the converted block's number/uncles, so they are
	// filled in after the struct literal is built.
	coreBlock.Reward = CalcBlockReward(coreBlock, gethBlock.Uncles())
	coreBlock.UnclesReward = CalcUnclesReward(coreBlock, gethBlock.Uncles())
	return coreBlock, nil
}

View File

@ -1,15 +1,11 @@
package geth_test package common_test
import ( import (
"math/big"
"context" "context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"math/big"
"os" "os"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -17,7 +13,9 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/geth"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
"github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
) )
type FakeGethClient struct { type FakeGethClient struct {
@ -93,27 +91,29 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
} }
block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{}) block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{})
client := &FakeGethClient{} client := &FakeGethClient{}
gethBlock, err := geth.ToCoreBlock(block, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(gethBlock.Difficulty).To(Equal(difficulty.Int64())) Expect(coreBlock.Difficulty).To(Equal(difficulty.Int64()))
Expect(gethBlock.GasLimit).To(Equal(gasLimit)) Expect(coreBlock.GasLimit).To(Equal(gasLimit))
Expect(gethBlock.Miner).To(Equal(miner.Hex())) Expect(coreBlock.Miner).To(Equal(miner.Hex()))
Expect(gethBlock.GasUsed).To(Equal(gasUsed)) Expect(coreBlock.GasUsed).To(Equal(gasUsed))
Expect(gethBlock.Hash).To(Equal(block.Hash().Hex())) Expect(coreBlock.Hash).To(Equal(block.Hash().Hex()))
Expect(gethBlock.Nonce).To(Equal(hexutil.Encode(header.Nonce[:]))) Expect(coreBlock.Nonce).To(Equal(hexutil.Encode(header.Nonce[:])))
Expect(gethBlock.Number).To(Equal(number)) Expect(coreBlock.Number).To(Equal(number))
Expect(gethBlock.ParentHash).To(Equal(block.ParentHash().Hex())) Expect(coreBlock.ParentHash).To(Equal(block.ParentHash().Hex()))
Expect(gethBlock.ExtraData).To(Equal(hexutil.Encode(block.Extra()))) Expect(coreBlock.ExtraData).To(Equal(hexutil.Encode(block.Extra())))
Expect(gethBlock.Size).To(Equal(block.Size().String())) Expect(coreBlock.Size).To(Equal(block.Size().String()))
Expect(gethBlock.Time).To(Equal(time)) Expect(coreBlock.Time).To(Equal(time))
Expect(gethBlock.UncleHash).To(Equal(block.UncleHash().Hex())) Expect(coreBlock.UncleHash).To(Equal(block.UncleHash().Hex()))
Expect(gethBlock.IsFinal).To(BeFalse()) Expect(coreBlock.IsFinal).To(BeFalse())
}) })
Describe("The block and uncle rewards calculations", func() { Describe("The block and uncle rewards calculations", func() {
It("calculates block rewards for a block", func() { It("calculates block rewards for a block", func() {
transaction := types.NewTransaction( transaction := types.NewTransaction(
uint64(226823), uint64(226823),
common.HexToAddress("0x108fedb097c1dcfed441480170144d8e19bb217f"), common.HexToAddress("0x108fedb097c1dcfed441480170144d8e19bb217f"),
@ -141,10 +141,13 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
} }
uncles := []*types.Header{{Number: big.NewInt(1071817)}, {Number: big.NewInt(1071818)}} uncles := []*types.Header{{Number: big.NewInt(1071817)}, {Number: big.NewInt(1071818)}}
block := types.NewBlock(&header, transactions, uncles, []*types.Receipt{&receipt}) block := types.NewBlock(&header, transactions, uncles, []*types.Receipt{&receipt})
coreBlock, err := geth.ToCoreBlock(block, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(geth.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(5.31355)) Expect(vulcCommon.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(5.31355))
}) })
It("calculates the uncles reward for a block", func() { It("calculates the uncles reward for a block", func() {
@ -175,11 +178,13 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
client := NewFakeClient(nil) client := NewFakeClient(nil)
client.AddReceipts(receipts) client.AddReceipts(receipts)
transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := geth.ToCoreBlock(block, client) coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(geth.CalcUnclesReward(coreBlock, block.Uncles())).To(Equal(6.875)) Expect(vulcCommon.CalcUnclesReward(coreBlock, block.Uncles())).To(Equal(6.875))
}) })
It("decreases the static block reward from 5 to 3 for blocks after block 4,269,999", func() { It("decreases the static block reward from 5 to 3 for blocks after block 4,269,999", func() {
@ -222,10 +227,13 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
client := NewFakeClient(nil) client := NewFakeClient(nil)
client.AddReceipts(receipts) client.AddReceipts(receipts)
coreBlock, err := geth.ToCoreBlock(block, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(geth.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(3.024990672)) Expect(vulcCommon.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(3.024990672))
}) })
}) })
@ -234,7 +242,10 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
header := types.Header{} header := types.Header{}
block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{}) block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{})
client := &FakeGethClient{} client := &FakeGethClient{}
coreBlock, err := geth.ToCoreBlock(block, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(coreBlock.Transactions)).To(Equal(0)) Expect(len(coreBlock.Transactions)).To(Equal(0))
@ -263,13 +274,16 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
client.AddReceipts([]*types.Receipt{gethReceipt}) client.AddReceipts([]*types.Receipt{gethReceipt})
header := types.Header{} header := types.Header{}
gethBlock := types.NewBlock( block := types.NewBlock(
&header, &header,
[]*types.Transaction{gethTransaction}, []*types.Transaction{gethTransaction},
[]*types.Header{}, []*types.Header{},
[]*types.Receipt{gethReceipt}, []*types.Receipt{gethReceipt},
) )
coreBlock, err := geth.ToCoreBlock(gethBlock, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(coreBlock.Transactions)).To(Equal(1)) Expect(len(coreBlock.Transactions)).To(Equal(1))
@ -283,9 +297,8 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
Expect(coreTransaction.Nonce).To(Equal(gethTransaction.Nonce())) Expect(coreTransaction.Nonce).To(Equal(gethTransaction.Nonce()))
coreReceipt := coreTransaction.Receipt coreReceipt := coreTransaction.Receipt
expectedReceipt := geth.ReceiptToCoreReceipt(gethReceipt) expectedReceipt := vulcCommon.ToCoreReceipt(gethReceipt)
Expect(coreReceipt).To(Equal(expectedReceipt)) Expect(coreReceipt).To(Equal(expectedReceipt))
}) })
It("has an empty 'To' field when transaction creates a new contract", func() { It("has an empty 'To' field when transaction creates a new contract", func() {
@ -307,21 +320,23 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
client := NewFakeClient(nil) client := NewFakeClient(nil)
client.AddReceipts([]*types.Receipt{gethReceipt}) client.AddReceipts([]*types.Receipt{gethReceipt})
gethBlock := types.NewBlock( block := types.NewBlock(
&types.Header{}, &types.Header{},
[]*types.Transaction{gethTransaction}, []*types.Transaction{gethTransaction},
[]*types.Header{}, []*types.Header{},
[]*types.Receipt{gethReceipt}, []*types.Receipt{gethReceipt},
) )
transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
coreBlock, err := geth.ToCoreBlock(gethBlock, client) coreBlock, err := blockConverter.ToCoreBlock(block)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
coreTransaction := coreBlock.Transactions[0] coreTransaction := coreBlock.Transactions[0]
Expect(coreTransaction.To).To(Equal("")) Expect(coreTransaction.To).To(Equal(""))
coreReceipt := coreTransaction.Receipt coreReceipt := coreTransaction.Receipt
expectedReceipt := geth.ReceiptToCoreReceipt(gethReceipt) expectedReceipt := vulcCommon.ToCoreReceipt(gethReceipt)
Expect(coreReceipt).To(Equal(expectedReceipt)) Expect(coreReceipt).To(Equal(expectedReceipt))
}) })
}) })
@ -330,7 +345,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
var gethTransaction *types.Transaction var gethTransaction *types.Transaction
var gethReceipt *types.Receipt var gethReceipt *types.Receipt
var header *types.Header var header *types.Header
var gethBlock *types.Block var block *types.Block
BeforeEach(func() { BeforeEach(func() {
log.SetOutput(ioutil.Discard) log.SetOutput(ioutil.Discard)
@ -344,7 +359,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
) )
gethReceipt = &types.Receipt{} gethReceipt = &types.Receipt{}
header = &types.Header{} header = &types.Header{}
gethBlock = types.NewBlock( block = types.NewBlock(
header, header,
[]*types.Transaction{gethTransaction}, []*types.Transaction{gethTransaction},
[]*types.Header{}, []*types.Header{},
@ -360,14 +375,22 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
It("returns an error when transaction sender call fails", func() { It("returns an error when transaction sender call fails", func() {
client := NewFakeClient(TransactionSenderError{}) client := NewFakeClient(TransactionSenderError{})
client.AddReceipts([]*types.Receipt{}) client.AddReceipts([]*types.Receipt{})
_, err := geth.ToCoreBlock(gethBlock, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
_, err := blockConverter.ToCoreBlock(block)
Expect(err).To(Equal(TransactionSenderError{})) Expect(err).To(Equal(TransactionSenderError{}))
}) })
It("returns an error when transaction receipt call fails", func() { It("returns an error when transaction receipt call fails", func() {
client := NewFakeClient(TransActionReceiptError{}) client := NewFakeClient(TransActionReceiptError{})
client.AddReceipts([]*types.Receipt{}) client.AddReceipts([]*types.Receipt{})
_, err := geth.ToCoreBlock(gethBlock, client) transactionConverter := rpc.NewRpcTransactionConverter(client)
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
_, err := blockConverter.ToCoreBlock(block)
Expect(err).To(Equal(TransActionReceiptError{})) Expect(err).To(Equal(TransActionReceiptError{}))
}) })
}) })

View File

@ -1,4 +1,4 @@
package geth package common
import ( import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"

View File

@ -0,0 +1,13 @@
package common_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestCommon wires the Ginkgo BDD test suite for this package into the
// standard `go test` runner.
func TestCommon(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Common Suite")
}

View File

@ -1,4 +1,4 @@
package geth package common
import ( import (
"strings" "strings"
@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
) )
@ -30,8 +31,7 @@ func ToCoreLog(gethLog types.Log) core.Log {
topics := gethLog.Topics topics := gethLog.Topics
hexTopics := makeTopics(topics) hexTopics := makeTopics(topics)
return core.Log{ return core.Log{
Address: strings.ToLower(gethLog.Address.Hex()), Address: strings.ToLower(gethLog.Address.Hex()),
BlockNumber: int64(gethLog.BlockNumber), BlockNumber: int64(gethLog.BlockNumber),
Topics: hexTopics, Topics: hexTopics,
TxHash: gethLog.TxHash.Hex(), TxHash: gethLog.TxHash.Hex(),

View File

@ -1,4 +1,4 @@
package geth_test package common_test
import ( import (
"strings" "strings"
@ -8,8 +8,9 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/geth" vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
) )
var _ = Describe("Conversion of GethLog to core.Log", func() { var _ = Describe("Conversion of GethLog to core.Log", func() {
@ -41,7 +42,7 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
}, },
} }
coreLog := geth.ToCoreLog(gethLog) coreLog := vulcCommon.ToCoreLog(gethLog)
Expect(coreLog.Address).To(Equal(expected.Address)) Expect(coreLog.Address).To(Equal(expected.Address))
Expect(coreLog.BlockNumber).To(Equal(expected.BlockNumber)) Expect(coreLog.BlockNumber).To(Equal(expected.BlockNumber))
@ -81,15 +82,13 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
}, },
} }
expectedOne := geth.ToCoreLog(gethLogOne) expectedOne := vulcCommon.ToCoreLog(gethLogOne)
expectedTwo := geth.ToCoreLog(gethLogTwo) expectedTwo := vulcCommon.ToCoreLog(gethLogTwo)
coreLogs := geth.ToCoreLogs([]types.Log{gethLogOne, gethLogTwo}) coreLogs := vulcCommon.ToCoreLogs([]types.Log{gethLogOne, gethLogTwo})
Expect(len(coreLogs)).To(Equal(2)) Expect(len(coreLogs)).To(Equal(2))
Expect(coreLogs[0]).To(Equal(expectedOne)) Expect(coreLogs[0]).To(Equal(expectedOne))
Expect(coreLogs[1]).To(Equal(expectedTwo)) Expect(coreLogs[1]).To(Equal(expectedTwo))
}) })
}) })

View File

@ -1,24 +1,25 @@
package geth package common
import ( import (
"math/big"
"bytes" "bytes"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
) )
func BigTo64(n *big.Int) int64 { func ToCoreReceipts(gethReceipts types.Receipts) []core.Receipt {
if n != nil { var coreReceipts []core.Receipt
return n.Int64() for _, receipt := range gethReceipts {
coreReceipt := ToCoreReceipt(receipt)
coreReceipts = append(coreReceipts, coreReceipt)
} }
return 0 return coreReceipts
} }
func ReceiptToCoreReceipt(gethReceipt *types.Receipt) core.Receipt { func ToCoreReceipt(gethReceipt *types.Receipt) core.Receipt {
bloom := hexutil.Encode(gethReceipt.Bloom.Bytes()) bloom := hexutil.Encode(gethReceipt.Bloom.Bytes())
var postState string var postState string
var status int var status int

View File

@ -1,4 +1,4 @@
package geth_test package common_test
import ( import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -6,8 +6,9 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/core" "github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/geth" vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
) )
var _ = Describe("Conversion of GethReceipt to core.Receipt", func() { var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
@ -34,7 +35,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
TxHash: receipt.TxHash.Hex(), TxHash: receipt.TxHash.Hex(),
} }
coreReceipt := geth.ReceiptToCoreReceipt(&receipt) coreReceipt := vulcCommon.ToCoreReceipt(&receipt)
Expect(coreReceipt.Bloom).To(Equal(expected.Bloom)) Expect(coreReceipt.Bloom).To(Equal(expected.Bloom))
Expect(coreReceipt.ContractAddress).To(Equal(expected.ContractAddress)) Expect(coreReceipt.ContractAddress).To(Equal(expected.ContractAddress))
Expect(coreReceipt.CumulativeGasUsed).To(Equal(expected.CumulativeGasUsed)) Expect(coreReceipt.CumulativeGasUsed).To(Equal(expected.CumulativeGasUsed))
@ -68,7 +69,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
TxHash: receipt.TxHash.Hex(), TxHash: receipt.TxHash.Hex(),
} }
coreReceipt := geth.ReceiptToCoreReceipt(&receipt) coreReceipt := vulcCommon.ToCoreReceipt(&receipt)
Expect(coreReceipt.Bloom).To(Equal(expected.Bloom)) Expect(coreReceipt.Bloom).To(Equal(expected.Bloom))
Expect(coreReceipt.ContractAddress).To(Equal("")) Expect(coreReceipt.ContractAddress).To(Equal(""))
Expect(coreReceipt.CumulativeGasUsed).To(Equal(expected.CumulativeGasUsed)) Expect(coreReceipt.CumulativeGasUsed).To(Equal(expected.CumulativeGasUsed))
@ -77,7 +78,5 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
Expect(coreReceipt.StateRoot).To(Equal(expected.StateRoot)) Expect(coreReceipt.StateRoot).To(Equal(expected.StateRoot))
Expect(coreReceipt.Status).To(Equal(expected.Status)) Expect(coreReceipt.Status).To(Equal(expected.Status))
Expect(coreReceipt.TxHash).To(Equal(expected.TxHash)) Expect(coreReceipt.TxHash).To(Equal(expected.TxHash))
}) })
}) })

View File

@ -0,0 +1,10 @@
package common
import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// TransactionConverter converts all transactions of a go-ethereum block
// into their core.Transaction representation. Implementations may fail
// (e.g. while resolving per-transaction data), in which case an error
// is returned.
type TransactionConverter interface {
	ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error)
}

View File

@ -1,16 +1,17 @@
package geth package rpc
import ( import (
"strings" "context"
"log" "log"
"strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/core"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"github.com/vulcanize/vulcanizedb/pkg/core"
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
) )
type Client interface { type Client interface {
@ -18,32 +19,15 @@ type Client interface {
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
} }
func ToCoreBlock(gethBlock *types.Block, client Client) (core.Block, error) { type RpcTransactionConverter struct {
transactions, err := convertTransactionsToCore(gethBlock, client) client Client
if err != nil {
return core.Block{}, err
}
coreBlock := core.Block{
Difficulty: gethBlock.Difficulty().Int64(),
ExtraData: hexutil.Encode(gethBlock.Extra()),
GasLimit: gethBlock.GasLimit(),
GasUsed: gethBlock.GasUsed(),
Hash: gethBlock.Hash().Hex(),
Miner: strings.ToLower(gethBlock.Coinbase().Hex()),
Nonce: hexutil.Encode(gethBlock.Header().Nonce[:]),
Number: gethBlock.Number().Int64(),
ParentHash: gethBlock.ParentHash().Hex(),
Size: gethBlock.Size().String(),
Time: gethBlock.Time().Int64(),
Transactions: transactions,
UncleHash: gethBlock.UncleHash().Hex(),
}
coreBlock.Reward = CalcBlockReward(coreBlock, gethBlock.Uncles())
coreBlock.UnclesReward = CalcUnclesReward(coreBlock, gethBlock.Uncles())
return coreBlock, nil
} }
func convertTransactionsToCore(gethBlock *types.Block, client Client) ([]core.Transaction, error) { func NewRpcTransactionConverter(client Client) *RpcTransactionConverter {
return &RpcTransactionConverter{client: client}
}
func (rtc *RpcTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
var g errgroup.Group var g errgroup.Group
coreTransactions := make([]core.Transaction, len(gethBlock.Transactions())) coreTransactions := make([]core.Transaction, len(gethBlock.Transactions()))
@ -52,13 +36,13 @@ func convertTransactionsToCore(gethBlock *types.Block, client Client) ([]core.Tr
transaction := gethTransaction transaction := gethTransaction
transactionIndex := uint(gethTransactionIndex) transactionIndex := uint(gethTransactionIndex)
g.Go(func() error { g.Go(func() error {
from, err := client.TransactionSender(context.Background(), transaction, gethBlock.Hash(), transactionIndex) from, err := rtc.client.TransactionSender(context.Background(), transaction, gethBlock.Hash(), transactionIndex)
if err != nil { if err != nil {
log.Println("transaction sender: ", err) log.Println("transaction sender: ", err)
return err return err
} }
coreTransaction := transToCoreTrans(transaction, &from) coreTransaction := transToCoreTrans(transaction, &from)
coreTransaction, err = appendReceiptToTransaction(client, coreTransaction) coreTransaction, err = appendReceiptToTransaction(rtc.client, coreTransaction)
if err != nil { if err != nil {
log.Println("receipt: ", err) log.Println("receipt: ", err)
return err return err
@ -79,7 +63,7 @@ func appendReceiptToTransaction(client Client, transaction core.Transaction) (co
if err != nil { if err != nil {
return transaction, err return transaction, err
} }
receipt := ReceiptToCoreReceipt(gethReceipt) receipt := vulcCommon.ToCoreReceipt(gethReceipt)
transaction.Receipt = receipt transaction.Receipt = receipt
return transaction, nil return transaction, nil
} }

View File

@ -23,6 +23,7 @@ func RetrieveAndUpdateBlocks(blockchain core.Blockchain, blockRepository datasto
log.Printf("failed to retrieve block number: %d\n", blockNumber) log.Printf("failed to retrieve block number: %d\n", blockNumber)
return 0 return 0
} }
// TODO: handle possible error here
blockRepository.CreateOrUpdateBlock(block) blockRepository.CreateOrUpdateBlock(block)
} }
return len(blockNumbers) return len(blockNumbers)

View File

@ -1,299 +1,299 @@
### Table of Contents ### Table of Contents
1. [About](#About) 1. [About](#About)
2. [Getting Started](#GettingStarted) 2. [Getting Started](#GettingStarted)
1. [Installation](#Installation) 1. [Installation](#Installation)
1. [Windows](#WindowsInstallation) 1. [Windows](#WindowsInstallation)
2. [Linux/BSD/MacOSX/POSIX](#PosixInstallation) 2. [Linux/BSD/MacOSX/POSIX](#PosixInstallation)
1. [Gentoo Linux](#GentooInstallation) 1. [Gentoo Linux](#GentooInstallation)
2. [Configuration](#Configuration) 2. [Configuration](#Configuration)
3. [Controlling and Querying btcd via btcctl](#BtcctlConfig) 3. [Controlling and Querying btcd via btcctl](#BtcctlConfig)
4. [Mining](#Mining) 4. [Mining](#Mining)
3. [Help](#Help) 3. [Help](#Help)
1. [Startup](#Startup) 1. [Startup](#Startup)
1. [Using bootstrap.dat](#BootstrapDat) 1. [Using bootstrap.dat](#BootstrapDat)
2. [Network Configuration](#NetworkConfig) 2. [Network Configuration](#NetworkConfig)
3. [Wallet](#Wallet) 3. [Wallet](#Wallet)
4. [Contact](#Contact) 4. [Contact](#Contact)
1. [IRC](#ContactIRC) 1. [IRC](#ContactIRC)
2. [Mailing Lists](#MailingLists) 2. [Mailing Lists](#MailingLists)
5. [Developer Resources](#DeveloperResources) 5. [Developer Resources](#DeveloperResources)
1. [Code Contribution Guidelines](#ContributionGuidelines) 1. [Code Contribution Guidelines](#ContributionGuidelines)
2. [JSON-RPC Reference](#JSONRPCReference) 2. [JSON-RPC Reference](#JSONRPCReference)
3. [The btcsuite Bitcoin-related Go Packages](#GoPackages) 3. [The btcsuite Bitcoin-related Go Packages](#GoPackages)
<a name="About" /> <a name="About" />
### 1. About ### 1. About
btcd is a full node bitcoin implementation written in [Go](http://golang.org), btcd is a full node bitcoin implementation written in [Go](http://golang.org),
licensed under the [copyfree](http://www.copyfree.org) ISC License. licensed under the [copyfree](http://www.copyfree.org) ISC License.
This project is currently under active development and is in a Beta state. It This project is currently under active development and is in a Beta state. It
is extremely stable and has been in production use since October 2013. is extremely stable and has been in production use since October 2013.
It properly downloads, validates, and serves the block chain using the exact It properly downloads, validates, and serves the block chain using the exact
rules (including consensus bugs) for block acceptance as Bitcoin Core. We have rules (including consensus bugs) for block acceptance as Bitcoin Core. We have
taken great care to avoid btcd causing a fork to the block chain. It includes a taken great care to avoid btcd causing a fork to the block chain. It includes a
full block validation testing framework which contains all of the 'official' full block validation testing framework which contains all of the 'official'
block acceptance tests (and some additional ones) that is run on every pull block acceptance tests (and some additional ones) that is run on every pull
request to help ensure it properly follows consensus. Also, it passes all of request to help ensure it properly follows consensus. Also, it passes all of
the JSON test data in the Bitcoin Core code. the JSON test data in the Bitcoin Core code.
It also properly relays newly mined blocks, maintains a transaction pool, and It also properly relays newly mined blocks, maintains a transaction pool, and
relays individual transactions that have not yet made it into a block. It relays individual transactions that have not yet made it into a block. It
ensures all individual transactions admitted to the pool follow the rules ensures all individual transactions admitted to the pool follow the rules
required by the block chain and also includes more strict checks which filter required by the block chain and also includes more strict checks which filter
transactions based on miner requirements ("standard" transactions). transactions based on miner requirements ("standard" transactions).
One key difference between btcd and Bitcoin Core is that btcd does *NOT* include One key difference between btcd and Bitcoin Core is that btcd does *NOT* include
wallet functionality and this was a very intentional design decision. See the wallet functionality and this was a very intentional design decision. See the
blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon) blog entry [here](https://blog.conformal.com/btcd-not-your-moms-bitcoin-daemon)
for more details. This means you can't actually make or receive payments for more details. This means you can't actually make or receive payments
directly with btcd. That functionality is provided by the directly with btcd. That functionality is provided by the
[btcwallet](https://github.com/btcsuite/btcwallet) and [btcwallet](https://github.com/btcsuite/btcwallet) and
[Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects [Paymetheus](https://github.com/btcsuite/Paymetheus) (Windows-only) projects
which are both under active development. which are both under active development.
<a name="GettingStarted" /> <a name="GettingStarted" />
### 2. Getting Started ### 2. Getting Started
<a name="Installation" /> <a name="Installation" />
**2.1 Installation** **2.1 Installation**
The first step is to install btcd. See one of the following sections for The first step is to install btcd. See one of the following sections for
details on how to install on the supported operating systems. details on how to install on the supported operating systems.
<a name="WindowsInstallation" /> <a name="WindowsInstallation" />
**2.1.1 Windows Installation**<br /> **2.1.1 Windows Installation**<br />
* Install the MSI available at: https://github.com/btcsuite/btcd/releases * Install the MSI available at: https://github.com/btcsuite/btcd/releases
* Launch btcd from the Start Menu * Launch btcd from the Start Menu
<a name="PosixInstallation" /> <a name="PosixInstallation" />
**2.1.2 Linux/BSD/MacOSX/POSIX Installation** **2.1.2 Linux/BSD/MacOSX/POSIX Installation**
- Install Go according to the installation instructions here: - Install Go according to the installation instructions here:
http://golang.org/doc/install http://golang.org/doc/install
- Ensure Go was installed properly and is a supported version: - Ensure Go was installed properly and is a supported version:
```bash ```bash
$ go version $ go version
$ go env GOROOT GOPATH $ go env GOROOT GOPATH
``` ```
NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is NOTE: The `GOROOT` and `GOPATH` above must not be the same path. It is
recommended that `GOPATH` is set to a directory in your home directory such as recommended that `GOPATH` is set to a directory in your home directory such as
`~/goprojects` to avoid write permission issues. It is also recommended to add `~/goprojects` to avoid write permission issues. It is also recommended to add
`$GOPATH/bin` to your `PATH` at this point. `$GOPATH/bin` to your `PATH` at this point.
- Run the following commands to obtain btcd, all dependencies, and install it: - Run the following commands to obtain btcd, all dependencies, and install it:
```bash ```bash
$ go get -u github.com/Masterminds/glide $ go get -u github.com/Masterminds/glide
$ git clone https://github.com/btcsuite/btcd $GOPATH/src/github.com/btcsuite/btcd $ git clone https://github.com/btcsuite/btcd $GOPATH/src/github.com/btcsuite/btcd
$ cd $GOPATH/src/github.com/btcsuite/btcd $ cd $GOPATH/src/github.com/btcsuite/btcd
$ glide install $ glide install
$ go install . ./cmd/... $ go install . ./cmd/...
``` ```
- btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did - btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did
not already add the bin directory to your system path during Go installation, not already add the bin directory to your system path during Go installation,
we recommend you do so now. we recommend you do so now.
**Updating** **Updating**
- Run the following commands to update btcd, all dependencies, and install it: - Run the following commands to update btcd, all dependencies, and install it:
```bash ```bash
$ cd $GOPATH/src/github.com/btcsuite/btcd $ cd $GOPATH/src/github.com/btcsuite/btcd
$ git pull && glide install $ git pull && glide install
$ go install . ./cmd/... $ go install . ./cmd/...
``` ```
<a name="GentooInstallation" /> <a name="GentooInstallation" />
**2.1.2.1 Gentoo Linux Installation** **2.1.2.1 Gentoo Linux Installation**
* Install Layman and enable the Bitcoin overlay. * Install Layman and enable the Bitcoin overlay.
* https://gitlab.com/bitcoin/gentoo * https://gitlab.com/bitcoin/gentoo
* Copy or symlink `/var/lib/layman/bitcoin/Documentation/package.keywords/btcd-live` to `/etc/portage/package.keywords/` * Copy or symlink `/var/lib/layman/bitcoin/Documentation/package.keywords/btcd-live` to `/etc/portage/package.keywords/`
* Install btcd: `$ emerge net-p2p/btcd` * Install btcd: `$ emerge net-p2p/btcd`
<a name="Configuration" /> <a name="Configuration" />
**2.2 Configuration** **2.2 Configuration**
btcd has a number of [configuration](http://godoc.org/github.com/btcsuite/btcd) btcd has a number of [configuration](http://godoc.org/github.com/btcsuite/btcd)
options, which can be viewed by running: `$ btcd --help`. options, which can be viewed by running: `$ btcd --help`.
<a name="BtcctlConfig" /> <a name="BtcctlConfig" />
**2.3 Controlling and Querying btcd via btcctl** **2.3 Controlling and Querying btcd via btcctl**
btcctl is a command line utility that can be used to both control and query btcd btcctl is a command line utility that can be used to both control and query btcd
via [RPC](http://www.wikipedia.org/wiki/Remote_procedure_call). btcd does via [RPC](http://www.wikipedia.org/wiki/Remote_procedure_call). btcd does
**not** enable its RPC server by default; You must configure at minimum both an **not** enable its RPC server by default; You must configure at minimum both an
RPC username and password or both an RPC limited username and password: RPC username and password or both an RPC limited username and password:
* btcd.conf configuration file * btcd.conf configuration file
``` ```
[Application Options] [Application Options]
rpcuser=myuser rpcuser=myuser
rpcpass=SomeDecentp4ssw0rd rpcpass=SomeDecentp4ssw0rd
rpclimituser=mylimituser rpclimituser=mylimituser
rpclimitpass=Limitedp4ssw0rd rpclimitpass=Limitedp4ssw0rd
``` ```
* btcctl.conf configuration file * btcctl.conf configuration file
``` ```
[Application Options] [Application Options]
rpcuser=myuser rpcuser=myuser
rpcpass=SomeDecentp4ssw0rd rpcpass=SomeDecentp4ssw0rd
``` ```
OR OR
``` ```
[Application Options] [Application Options]
rpclimituser=mylimituser rpclimituser=mylimituser
rpclimitpass=Limitedp4ssw0rd rpclimitpass=Limitedp4ssw0rd
``` ```
For a list of available options, run: `$ btcctl --help` For a list of available options, run: `$ btcctl --help`
<a name="Mining" /> <a name="Mining" />
**2.4 Mining** **2.4 Mining**
btcd supports the `getblocktemplate` RPC. btcd supports the `getblocktemplate` RPC.
The limited user cannot access this RPC. The limited user cannot access this RPC.
**1. Add the payment addresses with the `miningaddr` option.** **1. Add the payment addresses with the `miningaddr` option.**
``` ```
[Application Options] [Application Options]
rpcuser=myuser rpcuser=myuser
rpcpass=SomeDecentp4ssw0rd rpcpass=SomeDecentp4ssw0rd
miningaddr=12c6DSiU4Rq3P4ZxziKxzrL5LmMBrzjrJX miningaddr=12c6DSiU4Rq3P4ZxziKxzrL5LmMBrzjrJX
miningaddr=1M83ju3EChKYyysmM2FXtLNftbacagd8FR miningaddr=1M83ju3EChKYyysmM2FXtLNftbacagd8FR
``` ```
**2. Add btcd's RPC TLS certificate to system Certificate Authority list.** **2. Add btcd's RPC TLS certificate to system Certificate Authority list.**
`cgminer` uses [curl](http://curl.haxx.se/) to fetch data from the RPC server. `cgminer` uses [curl](http://curl.haxx.se/) to fetch data from the RPC server.
Since curl validates the certificate by default, we must install the `btcd` RPC Since curl validates the certificate by default, we must install the `btcd` RPC
certificate into the default system Certificate Authority list. certificate into the default system Certificate Authority list.
**Ubuntu** **Ubuntu**
1. Copy rpc.cert to /usr/share/ca-certificates: `# cp /home/user/.btcd/rpc.cert /usr/share/ca-certificates/btcd.crt` 1. Copy rpc.cert to /usr/share/ca-certificates: `# cp /home/user/.btcd/rpc.cert /usr/share/ca-certificates/btcd.crt`
2. Add btcd.crt to /etc/ca-certificates.conf: `# echo btcd.crt >> /etc/ca-certificates.conf` 2. Add btcd.crt to /etc/ca-certificates.conf: `# echo btcd.crt >> /etc/ca-certificates.conf`
3. Update the CA certificate list: `# update-ca-certificates` 3. Update the CA certificate list: `# update-ca-certificates`
**3. Set your mining software url to use https.** **3. Set your mining software url to use https.**
`$ cgminer -o https://127.0.0.1:8334 -u rpcuser -p rpcpassword` `$ cgminer -o https://127.0.0.1:8334 -u rpcuser -p rpcpassword`
<a name="Help" /> <a name="Help" />
### 3. Help ### 3. Help
<a name="Startup" /> <a name="Startup" />
**3.1 Startup** **3.1 Startup**
Typically btcd will run and start downloading the block chain with no extra Typically btcd will run and start downloading the block chain with no extra
configuration necessary, however, there is an optional method to use a configuration necessary, however, there is an optional method to use a
`bootstrap.dat` file that may speed up the initial block chain download process. `bootstrap.dat` file that may speed up the initial block chain download process.
<a name="BootstrapDat" /> <a name="BootstrapDat" />
**3.1.1 bootstrap.dat** **3.1.1 bootstrap.dat**
* [Using bootstrap.dat](https://github.com/btcsuite/btcd/tree/master/docs/using_bootstrap_dat.md) * [Using bootstrap.dat](https://github.com/btcsuite/btcd/tree/master/docs/using_bootstrap_dat.md)
<a name="NetworkConfig" /> <a name="NetworkConfig" />
**3.1.2 Network Configuration** **3.1.2 Network Configuration**
* [What Ports Are Used by Default?](https://github.com/btcsuite/btcd/tree/master/docs/default_ports.md) * [What Ports Are Used by Default?](https://github.com/btcsuite/btcd/tree/master/docs/default_ports.md)
* [How To Listen on Specific Interfaces](https://github.com/btcsuite/btcd/tree/master/docs/configure_peer_server_listen_interfaces.md) * [How To Listen on Specific Interfaces](https://github.com/btcsuite/btcd/tree/master/docs/configure_peer_server_listen_interfaces.md)
* [How To Configure RPC Server to Listen on Specific Interfaces](https://github.com/btcsuite/btcd/tree/master/docs/configure_rpc_server_listen_interfaces.md) * [How To Configure RPC Server to Listen on Specific Interfaces](https://github.com/btcsuite/btcd/tree/master/docs/configure_rpc_server_listen_interfaces.md)
* [Configuring btcd with Tor](https://github.com/btcsuite/btcd/tree/master/docs/configuring_tor.md) * [Configuring btcd with Tor](https://github.com/btcsuite/btcd/tree/master/docs/configuring_tor.md)
<a name="Wallet" /> <a name="Wallet" />
**3.1 Wallet** **3.1 Wallet**
btcd was intentionally developed without an integrated wallet for security btcd was intentionally developed without an integrated wallet for security
reasons. Please see [btcwallet](https://github.com/btcsuite/btcwallet) for more reasons. Please see [btcwallet](https://github.com/btcsuite/btcwallet) for more
information. information.
<a name="Contact" /> <a name="Contact" />
### 4. Contact ### 4. Contact
<a name="ContactIRC" /> <a name="ContactIRC" />
**4.1 IRC** **4.1 IRC**
* [irc.freenode.net](irc://irc.freenode.net), channel `#btcd` * [irc.freenode.net](irc://irc.freenode.net), channel `#btcd`
<a name="MailingLists" /> <a name="MailingLists" />
**4.2 Mailing Lists** **4.2 Mailing Lists**
* <a href="mailto:btcd+subscribe@opensource.conformal.com">btcd</a>: discussion * <a href="mailto:btcd+subscribe@opensource.conformal.com">btcd</a>: discussion
of btcd and its packages. of btcd and its packages.
* <a href="mailto:btcd-commits+subscribe@opensource.conformal.com">btcd-commits</a>: * <a href="mailto:btcd-commits+subscribe@opensource.conformal.com">btcd-commits</a>:
readonly mail-out of source code changes. read-only mail-out of source code changes.
<a name="DeveloperResources" /> <a name="DeveloperResources" />
### 5. Developer Resources ### 5. Developer Resources
<a name="ContributionGuidelines" /> <a name="ContributionGuidelines" />
* [Code Contribution Guidelines](https://github.com/btcsuite/btcd/tree/master/docs/code_contribution_guidelines.md) * [Code Contribution Guidelines](https://github.com/btcsuite/btcd/tree/master/docs/code_contribution_guidelines.md)
<a name="JSONRPCReference" /> <a name="JSONRPCReference" />
* [JSON-RPC Reference](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md) * [JSON-RPC Reference](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md)
* [RPC Examples](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md#ExampleCode) * [RPC Examples](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md#ExampleCode)
<a name="GoPackages" /> <a name="GoPackages" />
* The btcsuite Bitcoin-related Go Packages: * The btcsuite Bitcoin-related Go Packages:
* [btcrpcclient](https://github.com/btcsuite/btcrpcclient) - Implements a * [btcrpcclient](https://github.com/btcsuite/btcrpcclient) - Implements a
robust and easy to use Websocket-enabled Bitcoin JSON-RPC client robust and easy to use Websocket-enabled Bitcoin JSON-RPC client
* [btcjson](https://github.com/btcsuite/btcjson) - Provides an extensive API * [btcjson](https://github.com/btcsuite/btcjson) - Provides an extensive API
for the underlying JSON-RPC command and return values for the underlying JSON-RPC command and return values
* [wire](https://github.com/btcsuite/btcd/tree/master/wire) - Implements the * [wire](https://github.com/btcsuite/btcd/tree/master/wire) - Implements the
Bitcoin wire protocol Bitcoin wire protocol
* [peer](https://github.com/btcsuite/btcd/tree/master/peer) - * [peer](https://github.com/btcsuite/btcd/tree/master/peer) -
Provides a common base for creating and managing Bitcoin network peers. Provides a common base for creating and managing Bitcoin network peers.
* [blockchain](https://github.com/btcsuite/btcd/tree/master/blockchain) - * [blockchain](https://github.com/btcsuite/btcd/tree/master/blockchain) -
Implements Bitcoin block handling and chain selection rules Implements Bitcoin block handling and chain selection rules
* [blockchain/fullblocktests](https://github.com/btcsuite/btcd/tree/master/blockchain/fullblocktests) - * [blockchain/fullblocktests](https://github.com/btcsuite/btcd/tree/master/blockchain/fullblocktests) -
Provides a set of block tests for testing the consensus validation rules Provides a set of block tests for testing the consensus validation rules
* [txscript](https://github.com/btcsuite/btcd/tree/master/txscript) - * [txscript](https://github.com/btcsuite/btcd/tree/master/txscript) -
Implements the Bitcoin transaction scripting language Implements the Bitcoin transaction scripting language
* [btcec](https://github.com/btcsuite/btcd/tree/master/btcec) - Implements * [btcec](https://github.com/btcsuite/btcd/tree/master/btcec) - Implements
support for the elliptic curve cryptographic functions needed for the support for the elliptic curve cryptographic functions needed for the
Bitcoin scripts Bitcoin scripts
* [database](https://github.com/btcsuite/btcd/tree/master/database) - * [database](https://github.com/btcsuite/btcd/tree/master/database) -
Provides a database interface for the Bitcoin block chain Provides a database interface for the Bitcoin block chain
* [mempool](https://github.com/btcsuite/btcd/tree/master/mempool) - * [mempool](https://github.com/btcsuite/btcd/tree/master/mempool) -
Package mempool provides a policy-enforced pool of unmined bitcoin Package mempool provides a policy-enforced pool of unmined bitcoin
transactions. transactions.
* [btcutil](https://github.com/btcsuite/btcutil) - Provides Bitcoin-specific * [btcutil](https://github.com/btcsuite/btcutil) - Provides Bitcoin-specific
convenience functions and types convenience functions and types
* [chainhash](https://github.com/btcsuite/btcd/tree/master/chaincfg/chainhash) - * [chainhash](https://github.com/btcsuite/btcd/tree/master/chaincfg/chainhash) -
Provides a generic hash type and associated functions that allows the Provides a generic hash type and associated functions that allows the
specific hash algorithm to be abstracted. specific hash algorithm to be abstracted.
* [connmgr](https://github.com/btcsuite/btcd/tree/master/connmgr) - * [connmgr](https://github.com/btcsuite/btcd/tree/master/connmgr) -
Package connmgr implements a generic Bitcoin network connection manager. Package connmgr implements a generic Bitcoin network connection manager.

View File

@ -1,355 +1,355 @@
### Table of Contents ### Table of Contents
1. [Overview](#Overview)<br /> 1. [Overview](#Overview)<br />
2. [Minimum Recommended Skillset](#MinSkillset)<br /> 2. [Minimum Recommended Skillset](#MinSkillset)<br />
3. [Required Reading](#ReqReading)<br /> 3. [Required Reading](#ReqReading)<br />
4. [Development Practices](#DevelopmentPractices)<br /> 4. [Development Practices](#DevelopmentPractices)<br />
4.1. [Share Early, Share Often](#ShareEarly)<br /> 4.1. [Share Early, Share Often](#ShareEarly)<br />
4.2. [Testing](#Testing)<br /> 4.2. [Testing](#Testing)<br />
4.3. [Code Documentation and Commenting](#CodeDocumentation)<br /> 4.3. [Code Documentation and Commenting](#CodeDocumentation)<br />
4.4. [Model Git Commit Messages](#ModelGitCommitMessages)<br /> 4.4. [Model Git Commit Messages](#ModelGitCommitMessages)<br />
5. [Code Approval Process](#CodeApproval)<br /> 5. [Code Approval Process](#CodeApproval)<br />
5.1 [Code Review](#CodeReview)<br /> 5.1 [Code Review](#CodeReview)<br />
5.2 [Rework Code (if needed)](#CodeRework)<br /> 5.2 [Rework Code (if needed)](#CodeRework)<br />
5.3 [Acceptance](#CodeAcceptance)<br /> 5.3 [Acceptance](#CodeAcceptance)<br />
6. [Contribution Standards](#Standards)<br /> 6. [Contribution Standards](#Standards)<br />
6.1. [Contribution Checklist](#Checklist)<br /> 6.1. [Contribution Checklist](#Checklist)<br />
6.2. [Licensing of Contributions](#Licensing)<br /> 6.2. [Licensing of Contributions](#Licensing)<br />
<a name="Overview" /> <a name="Overview" />
### 1. Overview ### 1. Overview
Developing cryptocurrencies is an exciting endeavor that touches a wide variety Developing cryptocurrencies is an exciting endeavor that touches a wide variety
of areas such as wire protocols, peer-to-peer networking, databases, of areas such as wire protocols, peer-to-peer networking, databases,
cryptography, language interpretation (transaction scripts), RPC, and cryptography, language interpretation (transaction scripts), RPC, and
websockets. They also represent a radical shift to the current fiscal system websockets. They also represent a radical shift to the current fiscal system
and as a result provide an opportunity to help reshape the entire financial and as a result provide an opportunity to help reshape the entire financial
system. There are few projects that offer this level of diversity and impact system. There are few projects that offer this level of diversity and impact
all in one code base. all in one code base.
However, as exciting as it is, one must keep in mind that cryptocurrencies However, as exciting as it is, one must keep in mind that cryptocurrencies
represent real money and introducing bugs and security vulnerabilities can have represent real money and introducing bugs and security vulnerabilities can have
far more dire consequences than in typical projects where having a small bug is far more dire consequences than in typical projects where having a small bug is
minimal by comparison. In the world of cryptocurrencies, even the smallest bug minimal by comparison. In the world of cryptocurrencies, even the smallest bug
in the wrong area can cost people a significant amount of money. For this in the wrong area can cost people a significant amount of money. For this
reason, the btcd suite has a formalized and rigorous development process which reason, the btcd suite has a formalized and rigorous development process which
is outlined on this page. is outlined on this page.
We highly encourage code contributions, however it is imperative that you adhere We highly encourage code contributions, however it is imperative that you adhere
to the guidelines established on this page. to the guidelines established on this page.
<a name="MinSkillset" /> <a name="MinSkillset" />
### 2. Minimum Recommended Skillset ### 2. Minimum Recommended Skillset
The following list is a set of core competencies that we recommend you possess The following list is a set of core competencies that we recommend you possess
before you really start attempting to contribute code to the project. These are before you really start attempting to contribute code to the project. These are
not hard requirements as we will gladly accept code contributions as long as not hard requirements as we will gladly accept code contributions as long as
they follow the guidelines set forth on this page. That said, if you don't have they follow the guidelines set forth on this page. That said, if you don't have
the following basic qualifications you will likely find it quite difficult to the following basic qualifications you will likely find it quite difficult to
contribute. contribute.
- A reasonable understanding of bitcoin at a high level (see the - A reasonable understanding of bitcoin at a high level (see the
[Required Reading](#ReqReading) section for the original white paper) [Required Reading](#ReqReading) section for the original white paper)
- Experience in some type of C-like language - Experience in some type of C-like language
- An understanding of data structures and their performance implications - An understanding of data structures and their performance implications
- Familiarity with unit testing - Familiarity with unit testing
- Debugging experience - Debugging experience
- Ability to understand not only the area you are making a change in, but also - Ability to understand not only the area you are making a change in, but also
the code your change relies on, and the code which relies on your changed code the code your change relies on, and the code which relies on your changed code
Building on top of those core competencies, the recommended skill set largely Building on top of those core competencies, the recommended skill set largely
depends on the specific areas you are looking to contribute to. For example, depends on the specific areas you are looking to contribute to. For example,
if you wish to contribute to the cryptography code, you should have a good if you wish to contribute to the cryptography code, you should have a good
understanding of the various aspects involved with cryptography such as the understanding of the various aspects involved with cryptography such as the
security and performance implications. security and performance implications.
<a name="ReqReading" /> <a name="ReqReading" />
### 3. Required Reading ### 3. Required Reading
- [Effective Go](http://golang.org/doc/effective_go.html) - The entire btcd - [Effective Go](http://golang.org/doc/effective_go.html) - The entire btcd
suite follows the guidelines in this document. For your code to be accepted, suite follows the guidelines in this document. For your code to be accepted,
it must follow the guidelines therein. it must follow the guidelines therein.
- [Original Satoshi Whitepaper](http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCkQFjAA&url=http%3A%2F%2Fbitcoin.org%2Fbitcoin.pdf&ei=os3VUuH8G4SlsASV74GoAg&usg=AFQjCNEipPLigou_1MfB7DQjXCNdlylrBg&sig2=FaHDuT5z36GMWDEnybDJLg&bvm=bv.59378465,d.b2I) - This is the white paper that started it all. Having a solid - [Original Satoshi Whitepaper](http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCkQFjAA&url=http%3A%2F%2Fbitcoin.org%2Fbitcoin.pdf&ei=os3VUuH8G4SlsASV74GoAg&usg=AFQjCNEipPLigou_1MfB7DQjXCNdlylrBg&sig2=FaHDuT5z36GMWDEnybDJLg&bvm=bv.59378465,d.b2I) - This is the white paper that started it all. Having a solid
foundation to build on will make the code much more comprehensible. foundation to build on will make the code much more comprehensible.
<a name="DevelopmentPractices" /> <a name="DevelopmentPractices" />
### 4. Development Practices ### 4. Development Practices
Developers are expected to work in their own trees and submit pull requests when Developers are expected to work in their own trees and submit pull requests when
they feel their feature or bug fix is ready for integration into the master they feel their feature or bug fix is ready for integration into the master
branch. branch.
<a name="ShareEarly" /> <a name="ShareEarly" />
### 4.1 Share Early, Share Often ### 4.1 Share Early, Share Often
We firmly believe in the share early, share often approach. The basic premise We firmly believe in the share early, share often approach. The basic premise
of the approach is to announce your plans **before** you start work, and once of the approach is to announce your plans **before** you start work, and once
you have started working, craft your changes into a stream of small and easily you have started working, craft your changes into a stream of small and easily
reviewable commits. reviewable commits.
This approach has several benefits: This approach has several benefits:
- Announcing your plans to work on a feature **before** you begin work avoids - Announcing your plans to work on a feature **before** you begin work avoids
duplicate work duplicate work
- It permits discussions which can help you achieve your goals in a way that is - It permits discussions which can help you achieve your goals in a way that is
consistent with the existing architecture consistent with the existing architecture
- It minimizes the chances of you spending time and energy on a change that - It minimizes the chances of you spending time and energy on a change that
might not fit with the consensus of the community or existing architecture and might not fit with the consensus of the community or existing architecture and
potentially be rejected as a result potentially be rejected as a result
- Incremental development helps ensure you are on the right track with regards - Incremental development helps ensure you are on the right track with regards
to the rest of the community to the rest of the community
- The quicker your changes are merged to master, the less time you will need to - The quicker your changes are merged to master, the less time you will need to
spend rebasing and otherwise trying to keep up with the main code base spend rebasing and otherwise trying to keep up with the main code base
<a name="Testing" /> <a name="Testing" />
### 4.2 Testing ### 4.2 Testing
One of the major design goals of all core btcd packages is to aim for complete One of the major design goals of all core btcd packages is to aim for complete
test coverage. This is financial software so bugs and regressions can cost test coverage. This is financial software so bugs and regressions can cost
people real money. For this reason every effort must be taken to ensure the people real money. For this reason every effort must be taken to ensure the
code is as accurate and bug-free as possible. Thorough testing is a good way to code is as accurate and bug-free as possible. Thorough testing is a good way to
help achieve that goal. help achieve that goal.
Unless a new feature you submit is completely trivial, it will probably be Unless a new feature you submit is completely trivial, it will probably be
rejected unless it is also accompanied by adequate test coverage for both rejected unless it is also accompanied by adequate test coverage for both
positive and negative conditions. That is to say, the tests must ensure your positive and negative conditions. That is to say, the tests must ensure your
code works correctly when it is fed correct data as well as incorrect data code works correctly when it is fed correct data as well as incorrect data
(error paths). (error paths).
Go provides an excellent test framework that makes writing test code and Go provides an excellent test framework that makes writing test code and
checking coverage statistics straight forward. For more information about the checking coverage statistics straightforward. For more information about the
test coverage tools, see the [golang cover blog post](http://blog.golang.org/cover). test coverage tools, see the [golang cover blog post](http://blog.golang.org/cover).
A quick summary of test practices follows: A quick summary of test practices follows:
- All new code should be accompanied by tests that ensure the code behaves - All new code should be accompanied by tests that ensure the code behaves
correctly when given expected values, and, perhaps even more importantly, that correctly when given expected values, and, perhaps even more importantly, that
it handles errors gracefully it handles errors gracefully
- When you fix a bug, it should be accompanied by tests which exercise the bug - When you fix a bug, it should be accompanied by tests which exercise the bug
to both prove it has been resolved and to prevent future regressions to both prove it has been resolved and to prevent future regressions
<a name="CodeDocumentation" /> <a name="CodeDocumentation" />
### 4.3 Code Documentation and Commenting ### 4.3 Code Documentation and Commenting
- At a minimum every function must be commented with its intended purpose and - At a minimum every function must be commented with its intended purpose and
any assumptions that it makes any assumptions that it makes
- Function comments must always begin with the name of the function per - Function comments must always begin with the name of the function per
[Effective Go](http://golang.org/doc/effective_go.html) [Effective Go](http://golang.org/doc/effective_go.html)
- Function comments should be complete sentences since they allow a wide - Function comments should be complete sentences since they allow a wide
variety of automated presentations such as [godoc.org](https://godoc.org) variety of automated presentations such as [godoc.org](https://godoc.org)
- The general rule of thumb is to look at it as if you were completely - The general rule of thumb is to look at it as if you were completely
unfamiliar with the code and ask yourself, would this give me enough unfamiliar with the code and ask yourself, would this give me enough
information to understand what this function does and how I'd probably want information to understand what this function does and how I'd probably want
to use it? to use it?
- Exported functions should also include detailed information the caller of the - Exported functions should also include detailed information the caller of the
function will likely need to know and/or understand:<br /><br /> function will likely need to know and/or understand:<br /><br />
**WRONG** **WRONG**
```Go ```Go
// convert a compact uint32 to big.Int // convert a compact uint32 to big.Int
func CompactToBig(compact uint32) *big.Int { func CompactToBig(compact uint32) *big.Int {
``` ```
**RIGHT** **RIGHT**
```Go ```Go
// CompactToBig converts a compact representation of a whole number N to a // CompactToBig converts a compact representation of a whole number N to a
// big integer. The representation is similar to IEEE754 floating point // big integer. The representation is similar to IEEE754 floating point
// numbers. // numbers.
// //
// Like IEEE754 floating point, there are three basic components: the sign, // Like IEEE754 floating point, there are three basic components: the sign,
// the exponent, and the mantissa. They are broken out as follows: // the exponent, and the mantissa. They are broken out as follows:
// //
// * the most significant 8 bits represent the unsigned base 256 exponent // * the most significant 8 bits represent the unsigned base 256 exponent
// * bit 23 (the 24th bit) represents the sign bit // * bit 23 (the 24th bit) represents the sign bit
// * the least significant 23 bits represent the mantissa // * the least significant 23 bits represent the mantissa
// //
// ------------------------------------------------- // -------------------------------------------------
// | Exponent | Sign | Mantissa | // | Exponent | Sign | Mantissa |
// ------------------------------------------------- // -------------------------------------------------
// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] | // | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] |
// ------------------------------------------------- // -------------------------------------------------
// //
// The formula to calculate N is: // The formula to calculate N is:
// N = (-1^sign) * mantissa * 256^(exponent-3) // N = (-1^sign) * mantissa * 256^(exponent-3)
// //
// This compact form is only used in bitcoin to encode unsigned 256-bit numbers // This compact form is only used in bitcoin to encode unsigned 256-bit numbers
// which represent difficulty targets, thus there really is not a need for a // which represent difficulty targets, thus there really is not a need for a
// sign bit, but it is implemented here to stay consistent with bitcoind. // sign bit, but it is implemented here to stay consistent with bitcoind.
func CompactToBig(compact uint32) *big.Int { func CompactToBig(compact uint32) *big.Int {
``` ```
- Comments in the body of the code are highly encouraged, but they should - Comments in the body of the code are highly encouraged, but they should
explain the intention of the code as opposed to just calling out the explain the intention of the code as opposed to just calling out the
obvious<br /><br /> obvious<br /><br />
**WRONG** **WRONG**
```Go ```Go
// return err if amt is less than 5460 // return err if amt is less than 5460
if amt < 5460 { if amt < 5460 {
return err return err
} }
``` ```
**RIGHT** **RIGHT**
```Go ```Go
// Treat transactions with amounts less than the amount which is considered dust // Treat transactions with amounts less than the amount which is considered dust
// as non-standard. // as non-standard.
if amt < 5460 { if amt < 5460 {
return err return err
} }
``` ```
**NOTE:** The above should really use a constant as opposed to a magic number, **NOTE:** The above should really use a constant as opposed to a magic number,
but it was left as a magic number to show how much of a difference a good but it was left as a magic number to show how much of a difference a good
comment can make. comment can make.
<a name="ModelGitCommitMessages" /> <a name="ModelGitCommitMessages" />
### 4.4 Model Git Commit Messages ### 4.4 Model Git Commit Messages
This project prefers to keep a clean commit history with well-formed commit This project prefers to keep a clean commit history with well-formed commit
messages. This section illustrates a model commit message and provides a bit messages. This section illustrates a model commit message and provides a bit
of background for it. This content was originally created by Tim Pope and made of background for it. This content was originally created by Tim Pope and made
available on his website, however that website is no longer active, so it is available on his website, however that website is no longer active, so it is
being provided here. being provided here.
Here's a model Git commit message: Here's a model Git commit message:
``` ```
Short (50 chars or less) summary of changes Short (50 chars or less) summary of changes
More detailed explanatory text, if necessary. Wrap it to about 72 More detailed explanatory text, if necessary. Wrap it to about 72
characters or so. In some contexts, the first line is treated as the characters or so. In some contexts, the first line is treated as the
subject of an email and the rest of the text as the body. The blank subject of an email and the rest of the text as the body. The blank
line separating the summary from the body is critical (unless you omit line separating the summary from the body is critical (unless you omit
the body entirely); tools like rebase can get confused if you run the the body entirely); tools like rebase can get confused if you run the
two together. two together.
Write your commit message in the present tense: "Fix bug" and not "Fixed Write your commit message in the present tense: "Fix bug" and not "Fixed
bug." This convention matches up with commit messages generated by bug." This convention matches up with commit messages generated by
commands like git merge and git revert. commands like git merge and git revert.
Further paragraphs come after blank lines. Further paragraphs come after blank lines.
- Bullet points are okay, too - Bullet points are okay, too
- Typically a hyphen or asterisk is used for the bullet, preceded by a - Typically a hyphen or asterisk is used for the bullet, preceded by a
single space, with blank lines in between, but conventions vary here single space, with blank lines in between, but conventions vary here
- Use a hanging indent - Use a hanging indent
``` ```
Prefix the summary with the subsystem/package when possible. Many other Prefix the summary with the subsystem/package when possible. Many other
projects make use of the code and this makes it easier for them to tell when projects make use of the code and this makes it easier for them to tell when
something they're using has changed. Have a look at [past something they're using has changed. Have a look at [past
commits](https://github.com/btcsuite/btcd/commits/master) for examples of commits](https://github.com/btcsuite/btcd/commits/master) for examples of
commit messages. commit messages.
Here are some of the reasons why wrapping your commit messages to 72 columns is Here are some of the reasons why wrapping your commit messages to 72 columns is
a good thing. a good thing.
- git log doesn't do any special wrapping of the commit messages. With - git log doesn't do any special wrapping of the commit messages. With
the default pager of less -S, this means your paragraphs flow far off the edge the default pager of less -S, this means your paragraphs flow far off the edge
of the screen, making them difficult to read. On an 80 column terminal, if we of the screen, making them difficult to read. On an 80 column terminal, if we
subtract 4 columns for the indent on the left and 4 more for symmetry on the subtract 4 columns for the indent on the left and 4 more for symmetry on the
right, we're left with 72 columns. right, we're left with 72 columns.
- git format-patch --stdout converts a series of commits to a series of emails, - git format-patch --stdout converts a series of commits to a series of emails,
using the messages for the message body. Good email netiquette dictates we using the messages for the message body. Good email netiquette dictates we
wrap our plain text emails such that there's room for a few levels of nested wrap our plain text emails such that there's room for a few levels of nested
reply indicators without overflow in an 80 column terminal. reply indicators without overflow in an 80 column terminal.
<a name="CodeApproval" /> <a name="CodeApproval" />
### 5. Code Approval Process ### 5. Code Approval Process
This section describes the code approval process that is used for code This section describes the code approval process that is used for code
contributions. This is how to get your changes into btcd. contributions. This is how to get your changes into btcd.
<a name="CodeReview" /> <a name="CodeReview" />
### 5.1 Code Review ### 5.1 Code Review
All code which is submitted will need to be reviewed before inclusion into the All code which is submitted will need to be reviewed before inclusion into the
master branch. This process is performed by the project maintainers and usually master branch. This process is performed by the project maintainers and usually
other committers who are interested in the area you are working in as well. other committers who are interested in the area you are working in as well.
##### Code Review Timeframe ##### Code Review Timeframe
The timeframe for a code review will vary greatly depending on factors such as The timeframe for a code review will vary greatly depending on factors such as
the number of other pull requests which need to be reviewed, the size and the number of other pull requests which need to be reviewed, the size and
complexity of the contribution, how well you followed the guidelines presented complexity of the contribution, how well you followed the guidelines presented
on this page, and how easy it is for the reviewers to digest your commits. For on this page, and how easy it is for the reviewers to digest your commits. For
example, if you make one monolithic commit that makes sweeping changes to things example, if you make one monolithic commit that makes sweeping changes to things
in multiple subsystems, it will obviously take much longer to review. You will in multiple subsystems, it will obviously take much longer to review. You will
also likely be asked to split the commit into several smaller, and hence more also likely be asked to split the commit into several smaller, and hence more
manageable, commits. manageable, commits.
Keeping the above in mind, most small changes will be reviewed within a few Keeping the above in mind, most small changes will be reviewed within a few
days, while large or far reaching changes may take weeks. This is a good reason days, while large or far reaching changes may take weeks. This is a good reason
to stick with the [Share Early, Share Often](#ShareOften) development practice to stick with the [Share Early, Share Often](#ShareOften) development practice
outlined above. outlined above.
##### What is the review looking for? ##### What is the review looking for?
The review is mainly ensuring the code follows the [Development Practices](#DevelopmentPractices) The review is mainly ensuring the code follows the [Development Practices](#DevelopmentPractices)
and [Code Contribution Standards](#Standards). However, there are a few other and [Code Contribution Standards](#Standards). However, there are a few other
checks which are generally performed as follows: checks which are generally performed as follows:
- The code is stable and has no stability or security concerns - The code is stable and has no stability or security concerns
- The code is properly using existing APIs and generally fits well into the - The code is properly using existing APIs and generally fits well into the
overall architecture overall architecture
- The change is not something which is deemed inappropriate by community - The change is not something which is deemed inappropriate by community
consensus consensus
<a name="CodeRework" /> <a name="CodeRework" />
### 5.2 Rework Code (if needed) ### 5.2 Rework Code (if needed)
After the code review, the change will be accepted immediately if no issues are After the code review, the change will be accepted immediately if no issues are
found. If there are any concerns or questions, you will be provided with found. If there are any concerns or questions, you will be provided with
feedback along with the next steps needed to get your contribution merged with feedback along with the next steps needed to get your contribution merged with
master. In certain cases the code reviewer(s) or interested committers may help master. In certain cases the code reviewer(s) or interested committers may help
you rework the code, but generally you will simply be given feedback for you to you rework the code, but generally you will simply be given feedback for you to
make the necessary changes. make the necessary changes.
This process will continue until the code is finally accepted. This process will continue until the code is finally accepted.
<a name="CodeAcceptance" /> <a name="CodeAcceptance" />
### 5.3 Acceptance ### 5.3 Acceptance
Once your code is accepted, it will be integrated with the master branch. Once your code is accepted, it will be integrated with the master branch.
Typically it will be rebased and fast-forward merged to master as we prefer to Typically it will be rebased and fast-forward merged to master as we prefer to
keep a clean commit history over a tangled weave of merge commits. However, keep a clean commit history over a tangled weave of merge commits. However,
regardless of the specific merge method used, the code will be integrated with regardless of the specific merge method used, the code will be integrated with
the master branch and the pull request will be closed. the master branch and the pull request will be closed.
Rejoice as you will now be listed as a [contributor](https://github.com/btcsuite/btcd/graphs/contributors)! Rejoice as you will now be listed as a [contributor](https://github.com/btcsuite/btcd/graphs/contributors)!
<a name="Standards" /> <a name="Standards" />
### 6. Contribution Standards ### 6. Contribution Standards
<a name="Checklist" /> <a name="Checklist" />
### 6.1. Contribution Checklist ### 6.1. Contribution Checklist
- [&nbsp;&nbsp;] All changes are Go version 1.3 compliant - [&nbsp;&nbsp;] All changes are Go version 1.3 compliant
- [&nbsp;&nbsp;] The code being submitted is commented according to the - [&nbsp;&nbsp;] The code being submitted is commented according to the
[Code Documentation and Commenting](#CodeDocumentation) section [Code Documentation and Commenting](#CodeDocumentation) section
- [&nbsp;&nbsp;] For new code: Code is accompanied by tests which exercise both - [&nbsp;&nbsp;] For new code: Code is accompanied by tests which exercise both
the positive and negative (error paths) conditions (if applicable) the positive and negative (error paths) conditions (if applicable)
- [&nbsp;&nbsp;] For bug fixes: Code is accompanied by new tests which trigger - [&nbsp;&nbsp;] For bug fixes: Code is accompanied by new tests which trigger
the bug being fixed to prevent regressions the bug being fixed to prevent regressions
- [&nbsp;&nbsp;] Any new logging statements use an appropriate subsystem and - [&nbsp;&nbsp;] Any new logging statements use an appropriate subsystem and
logging level logging level
- [&nbsp;&nbsp;] Code has been formatted with `go fmt` - [&nbsp;&nbsp;] Code has been formatted with `go fmt`
- [&nbsp;&nbsp;] Running `go test` does not fail any tests - [&nbsp;&nbsp;] Running `go test` does not fail any tests
- [&nbsp;&nbsp;] Running `go vet` does not report any issues - [&nbsp;&nbsp;] Running `go vet` does not report any issues
- [&nbsp;&nbsp;] Running [golint](https://github.com/golang/lint) does not - [&nbsp;&nbsp;] Running [golint](https://github.com/golang/lint) does not
report any **new** issues that did not already exist report any **new** issues that did not already exist
<a name="Licensing" /> <a name="Licensing" />
### 6.2. Licensing of Contributions ### 6.2. Licensing of Contributions
All contributions must be licensed with the All contributions must be licensed with the
[ISC license](https://github.com/btcsuite/btcd/blob/master/LICENSE). This is [ISC license](https://github.com/btcsuite/btcd/blob/master/LICENSE). This is
the same license as all of the code in the btcd suite. the same license as all of the code in the btcd suite.

View File

@ -1,35 +1,35 @@
btcd allows you to bind to specific interfaces which enables you to setup btcd allows you to bind to specific interfaces which enables you to setup
configurations with varying levels of complexity. The listen parameter can be configurations with varying levels of complexity. The listen parameter can be
specified on the command line as shown below with the -- prefix or in the specified on the command line as shown below with the -- prefix or in the
configuration file without the -- prefix (as can all long command line options). configuration file without the -- prefix (as can all long command line options).
The configuration file takes one entry per line. The configuration file takes one entry per line.
**NOTE:** The listen flag can be specified multiple times to listen on multiple **NOTE:** The listen flag can be specified multiple times to listen on multiple
interfaces as a couple of the examples below illustrate. interfaces as a couple of the examples below illustrate.
Command Line Examples: Command Line Examples:
|Flags|Comment| |Flags|Comment|
|----------|------------| |----------|------------|
|--listen=|all interfaces on default port which is changed by `--testnet` and `--regtest` (**default**)| |--listen=|all interfaces on default port which is changed by `--testnet` and `--regtest` (**default**)|
|--listen=0.0.0.0|all IPv4 interfaces on default port which is changed by `--testnet` and `--regtest`| |--listen=0.0.0.0|all IPv4 interfaces on default port which is changed by `--testnet` and `--regtest`|
|--listen=::|all IPv6 interfaces on default port which is changed by `--testnet` and `--regtest`| |--listen=::|all IPv6 interfaces on default port which is changed by `--testnet` and `--regtest`|
|--listen=:8333|all interfaces on port 8333| |--listen=:8333|all interfaces on port 8333|
|--listen=0.0.0.0:8333|all IPv4 interfaces on port 8333| |--listen=0.0.0.0:8333|all IPv4 interfaces on port 8333|
|--listen=[::]:8333|all IPv6 interfaces on port 8333| |--listen=[::]:8333|all IPv6 interfaces on port 8333|
|--listen=127.0.0.1:8333|only IPv4 localhost on port 8333| |--listen=127.0.0.1:8333|only IPv4 localhost on port 8333|
|--listen=[::1]:8333|only IPv6 localhost on port 8333| |--listen=[::1]:8333|only IPv6 localhost on port 8333|
|--listen=:8336|all interfaces on non-standard port 8336| |--listen=:8336|all interfaces on non-standard port 8336|
|--listen=0.0.0.0:8336|all IPv4 interfaces on non-standard port 8336| |--listen=0.0.0.0:8336|all IPv4 interfaces on non-standard port 8336|
|--listen=[::]:8336|all IPv6 interfaces on non-standard port 8336| |--listen=[::]:8336|all IPv6 interfaces on non-standard port 8336|
|--listen=127.0.0.1:8337 --listen=[::1]:8333|IPv4 localhost on port 8337 and IPv6 localhost on port 8333| |--listen=127.0.0.1:8337 --listen=[::1]:8333|IPv4 localhost on port 8337 and IPv6 localhost on port 8333|
|--listen=:8333 --listen=:8337|all interfaces on ports 8333 and 8337| |--listen=:8333 --listen=:8337|all interfaces on ports 8333 and 8337|
The following config file would configure btcd to only listen on localhost for both IPv4 and IPv6: The following config file would configure btcd to only listen on localhost for both IPv4 and IPv6:
```text ```text
[Application Options] [Application Options]
listen=127.0.0.1:8333 listen=127.0.0.1:8333
listen=[::1]:8333 listen=[::1]:8333
``` ```

View File

@ -1,47 +1,47 @@
btcd allows you to bind the RPC server to specific interfaces which enables you btcd allows you to bind the RPC server to specific interfaces which enables you
to setup configurations with varying levels of complexity. The `rpclisten` to setup configurations with varying levels of complexity. The `rpclisten`
parameter can be specified on the command line as shown below with the -- prefix parameter can be specified on the command line as shown below with the -- prefix
or in the configuration file without the -- prefix (as can all long command line or in the configuration file without the -- prefix (as can all long command line
options). The configuration file takes one entry per line. options). The configuration file takes one entry per line.
A few things to note regarding the RPC server: A few things to note regarding the RPC server:
* The RPC server will **not** be enabled unless the `rpcuser` and `rpcpass` * The RPC server will **not** be enabled unless the `rpcuser` and `rpcpass`
options are specified. options are specified.
* When the `rpcuser` and `rpcpass` and/or `rpclimituser` and `rpclimitpass` * When the `rpcuser` and `rpcpass` and/or `rpclimituser` and `rpclimitpass`
options are specified, the RPC server will only listen on localhost IPv4 and options are specified, the RPC server will only listen on localhost IPv4 and
IPv6 interfaces by default. You will need to override the RPC listen IPv6 interfaces by default. You will need to override the RPC listen
interfaces to include external interfaces if you want to connect from a remote interfaces to include external interfaces if you want to connect from a remote
machine. machine.
* The RPC server has TLS enabled by default, even for localhost. You may use * The RPC server has TLS enabled by default, even for localhost. You may use
the `--notls` option to disable it, but only when all listeners are on the `--notls` option to disable it, but only when all listeners are on
localhost interfaces. localhost interfaces.
* The `--rpclisten` flag can be specified multiple times to listen on multiple * The `--rpclisten` flag can be specified multiple times to listen on multiple
interfaces as a couple of the examples below illustrate. interfaces as a couple of the examples below illustrate.
* The RPC server is disabled by default when using the `--regtest` and * The RPC server is disabled by default when using the `--regtest` and
`--simnet` networks. You can override this by specifying listen interfaces. `--simnet` networks. You can override this by specifying listen interfaces.
Command Line Examples: Command Line Examples:
|Flags|Comment| |Flags|Comment|
|----------|------------| |----------|------------|
|--rpclisten=|all interfaces on default port which is changed by `--testnet`| |--rpclisten=|all interfaces on default port which is changed by `--testnet`|
|--rpclisten=0.0.0.0|all IPv4 interfaces on default port which is changed by `--testnet`| |--rpclisten=0.0.0.0|all IPv4 interfaces on default port which is changed by `--testnet`|
|--rpclisten=::|all IPv6 interfaces on default port which is changed by `--testnet`| |--rpclisten=::|all IPv6 interfaces on default port which is changed by `--testnet`|
|--rpclisten=:8334|all interfaces on port 8334| |--rpclisten=:8334|all interfaces on port 8334|
|--rpclisten=0.0.0.0:8334|all IPv4 interfaces on port 8334| |--rpclisten=0.0.0.0:8334|all IPv4 interfaces on port 8334|
|--rpclisten=[::]:8334|all IPv6 interfaces on port 8334| |--rpclisten=[::]:8334|all IPv6 interfaces on port 8334|
|--rpclisten=127.0.0.1:8334|only IPv4 localhost on port 8334| |--rpclisten=127.0.0.1:8334|only IPv4 localhost on port 8334|
|--rpclisten=[::1]:8334|only IPv6 localhost on port 8334| |--rpclisten=[::1]:8334|only IPv6 localhost on port 8334|
|--rpclisten=:8336|all interfaces on non-standard port 8336| |--rpclisten=:8336|all interfaces on non-standard port 8336|
|--rpclisten=0.0.0.0:8336|all IPv4 interfaces on non-standard port 8336| |--rpclisten=0.0.0.0:8336|all IPv4 interfaces on non-standard port 8336|
|--rpclisten=[::]:8336|all IPv6 interfaces on non-standard port 8336| |--rpclisten=[::]:8336|all IPv6 interfaces on non-standard port 8336|
|--rpclisten=127.0.0.1:8337 --rpclisten=[::1]:8334|IPv4 localhost on port 8337 and IPv6 localhost on port 8334| |--rpclisten=127.0.0.1:8337 --rpclisten=[::1]:8334|IPv4 localhost on port 8337 and IPv6 localhost on port 8334|
|--rpclisten=:8334 --rpclisten=:8337|all interfaces on ports 8334 and 8337| |--rpclisten=:8334 --rpclisten=:8337|all interfaces on ports 8334 and 8337|
The following config file would configure the btcd RPC server to listen to all interfaces on the default port, including external interfaces, for both IPv4 and IPv6: The following config file would configure the btcd RPC server to listen to all interfaces on the default port, including external interfaces, for both IPv4 and IPv6:
```text ```text
[Application Options] [Application Options]
rpclisten= rpclisten=
``` ```

View File

@ -1,15 +1,15 @@
While btcd is highly configurable when it comes to the network configuration, While btcd is highly configurable when it comes to the network configuration,
the following is intended to be a quick reference for the default ports used so the following is intended to be a quick reference for the default ports used so
port forwarding can be configured as required. port forwarding can be configured as required.
btcd provides a `--upnp` flag which can be used to automatically map the bitcoin btcd provides a `--upnp` flag which can be used to automatically map the bitcoin
peer-to-peer listening port if your router supports UPnP. If your router does peer-to-peer listening port if your router supports UPnP. If your router does
not support UPnP, or you don't wish to use it, please note that only the bitcoin not support UPnP, or you don't wish to use it, please note that only the bitcoin
peer-to-peer port should be forwarded unless you specifically want to allow RPC peer-to-peer port should be forwarded unless you specifically want to allow RPC
access to your btcd from external sources such as in more advanced network access to your btcd from external sources such as in more advanced network
configurations. configurations.
|Name|Port| |Name|Port|
|----|----| |----|----|
|Default Bitcoin peer-to-peer port|TCP 8333| |Default Bitcoin peer-to-peer port|TCP 8333|
|Default RPC port|TCP 8334| |Default RPC port|TCP 8334|

View File

@ -1,79 +1,79 @@
### Table of Contents ### Table of Contents
1. [What is bootstrap.dat?](#What)<br /> 1. [What is bootstrap.dat?](#What)<br />
2. [What are the pros and cons of using bootstrap.dat?](#ProsCons) 2. [What are the pros and cons of using bootstrap.dat?](#ProsCons)
3. [Where do I get bootstrap.dat?](#Obtaining) 3. [Where do I get bootstrap.dat?](#Obtaining)
4. [How do I know I can trust the bootstrap.dat I downloaded?](#Trust) 4. [How do I know I can trust the bootstrap.dat I downloaded?](#Trust)
5. [How do I use bootstrap.dat with btcd?](#Importing) 5. [How do I use bootstrap.dat with btcd?](#Importing)
<a name="What" /> <a name="What" />
### 1. What is bootstrap.dat? ### 1. What is bootstrap.dat?
It is a flat, binary file containing bitcoin blockchain data starting from the It is a flat, binary file containing bitcoin blockchain data starting from the
genesis block and continuing through a relatively recent block height depending genesis block and continuing through a relatively recent block height depending
on the last time it was updated. on the last time it was updated.
See [this](https://bitcointalk.org/index.php?topic=145386.0) thread on See [this](https://bitcointalk.org/index.php?topic=145386.0) thread on
bitcointalk for more details. bitcointalk for more details.
**NOTE:** Using bootstrap.dat is entirely optional. Btcd will download the **NOTE:** Using bootstrap.dat is entirely optional. Btcd will download the
block chain from other peers through the Bitcoin protocol with no extra block chain from other peers through the Bitcoin protocol with no extra
configuration needed. configuration needed.
<a name="ProsCons" /> <a name="ProsCons" />
### 2. What are the pros and cons of using bootstrap.dat? ### 2. What are the pros and cons of using bootstrap.dat?
Pros: Pros:
- Typically accelerates the initial process of bringing up a new node as it - Typically accelerates the initial process of bringing up a new node as it
downloads from public P2P nodes and generally is able to achieve faster downloads from public P2P nodes and generally is able to achieve faster
download speeds download speeds
- It is particularly beneficial when bringing up multiple nodes as you only need - It is particularly beneficial when bringing up multiple nodes as you only need
to download the data once to download the data once
Cons: Cons:
- Requires you to setup and configure a torrent client if you don't already have - Requires you to setup and configure a torrent client if you don't already have
one available one available
- Requires roughly twice as much disk space since you'll need the flat file as - Requires roughly twice as much disk space since you'll need the flat file as
well as the imported database well as the imported database
<a name="Obtaining" /> <a name="Obtaining" />
### 3. Where do I get bootstrap.dat? ### 3. Where do I get bootstrap.dat?
The bootstrap.dat file is made available via a torrent. See The bootstrap.dat file is made available via a torrent. See
[this](https://bitcointalk.org/index.php?topic=145386.0) thread on bitcointalk [this](https://bitcointalk.org/index.php?topic=145386.0) thread on bitcointalk
for the torrent download details. for the torrent download details.
<a name="Trust" /> <a name="Trust" />
### 4. How do I know I can trust the bootstrap.dat I downloaded? ### 4. How do I know I can trust the bootstrap.dat I downloaded?
You don't need to trust the file as the `addblock` utility verifies every block You don't need to trust the file as the `addblock` utility verifies every block
using the same rules that are used when downloading the block chain normally using the same rules that are used when downloading the block chain normally
through the Bitcoin protocol. Additionally, the chain rules contain hard-coded through the Bitcoin protocol. Additionally, the chain rules contain hard-coded
checkpoints for the known-good block chain at periodic intervals. This ensures checkpoints for the known-good block chain at periodic intervals. This ensures
that not only is it a valid chain, but it is the same chain that everyone else that not only is it a valid chain, but it is the same chain that everyone else
is using. is using.
<a name="Importing" /> <a name="Importing" />
### 5. How do I use bootstrap.dat with btcd? ### 5. How do I use bootstrap.dat with btcd?
btcd comes with a separate utility named `addblock` which can be used to import btcd comes with a separate utility named `addblock` which can be used to import
`bootstrap.dat`. This approach is used since the import is a one-time operation `bootstrap.dat`. This approach is used since the import is a one-time operation
and we prefer to keep the daemon itself as lightweight as possible. and we prefer to keep the daemon itself as lightweight as possible.
1. Stop btcd if it is already running. This is required since addblock needs to 1. Stop btcd if it is already running. This is required since addblock needs to
access the database used by btcd and it will be locked if btcd is using it. access the database used by btcd and it will be locked if btcd is using it.
2. Note the path to the downloaded bootstrap.dat file. 2. Note the path to the downloaded bootstrap.dat file.
3. Run the addblock utility with the `-i` argument pointing to the location of 3. Run the addblock utility with the `-i` argument pointing to the location of
bootstrap.dat:<br /><br /> bootstrap.dat:<br /><br />
**Windows:** **Windows:**
```bat ```bat
C:\> "%PROGRAMFILES%\Btcd Suite\Btcd\addblock" -i C:\Path\To\bootstrap.dat C:\> "%PROGRAMFILES%\Btcd Suite\Btcd\addblock" -i C:\Path\To\bootstrap.dat
``` ```
**Linux/Unix/BSD/POSIX:** **Linux/Unix/BSD/POSIX:**
```bash ```bash
$ $GOPATH/bin/addblock -i /path/to/bootstrap.dat $ $GOPATH/bin/addblock -i /path/to/bootstrap.dat
``` ```

23
vendor/github.com/hashicorp/golang-lru/.gitignore generated vendored Normal file
View File

@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

223
vendor/github.com/hashicorp/golang-lru/2q.go generated vendored Normal file
View File

@ -0,0 +1,223 @@
package lru
import (
"fmt"
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
// to recently added entries that have only been accessed once.
Default2QRecentRatio = 0.25
// Default2QGhostEntries is the default ratio of ghost
// entries kept to track entries recently evicted
Default2QGhostEntries = 0.50
)
// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata over
// head. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache struct {
	size       int // total capacity across the recent and frequent lists
	recentSize int // target capacity of the recent list (size * recent ratio)

	recent      simplelru.LRUCache // entries seen once, recently
	frequent    simplelru.LRUCache // entries accessed more than once
	recentEvict simplelru.LRUCache // "ghost" list of keys recently evicted from recent
	lock        sync.RWMutex       // guards the three LRUs above
}
// New2Q creates a new TwoQueueCache with the default segment ratios
// (Default2QRecentRatio and Default2QGhostEntries).
func New2Q(size int) (*TwoQueueCache, error) {
	return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
}
// New2QParams creates a new TwoQueueCache whose recent and ghost
// segment sizes are derived from the provided ratios.
//
// size must be positive and both ratios must lie in [0, 1];
// otherwise an error is returned.
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
	switch {
	case size <= 0:
		return nil, fmt.Errorf("invalid size")
	case recentRatio < 0.0 || recentRatio > 1.0:
		return nil, fmt.Errorf("invalid recent ratio")
	case ghostRatio < 0.0 || ghostRatio > 1.0:
		return nil, fmt.Errorf("invalid ghost ratio")
	}

	// Derive the target recent-list size and the ghost-list capacity.
	targetRecent := int(float64(size) * recentRatio)
	ghostSize := int(float64(size) * ghostRatio)

	// Build the three underlying LRUs.
	recentLRU, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	frequentLRU, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	ghostLRU, err := simplelru.NewLRU(ghostSize, nil)
	if err != nil {
		return nil, err
	}

	return &TwoQueueCache{
		size:        size,
		recentSize:  targetRecent,
		recent:      recentLRU,
		frequent:    frequentLRU,
		recentEvict: ghostLRU,
	}, nil
}
// Get looks up a key's value from the cache. A hit on the recent list
// promotes the entry to the frequent list.
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Frequent hit: return directly (Get also refreshes its recency there).
	if v, hit := c.frequent.Get(key); hit {
		return v, hit
	}

	// Recent hit: move the entry into the frequent list.
	if v, hit := c.recent.Peek(key); hit {
		c.recent.Remove(key)
		c.frequent.Add(key, v)
		return v, hit
	}

	// Miss.
	return nil, false
}
// Add adds a value to the cache. Keys already in the frequent list are
// updated in place; keys in the recent list or the ghost (recently
// evicted) list are promoted to the frequent list; everything else is
// inserted into the recent list, evicting as needed.
func (c *TwoQueueCache) Add(key, value interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Already frequently used: just update the value.
	if c.frequent.Contains(key) {
		c.frequent.Add(key, value)
		return
	}

	// Recently used: promote to the frequent list.
	if c.recent.Contains(key) {
		c.recent.Remove(key)
		c.frequent.Add(key, value)
		return
	}

	// Recently evicted (ghost hit): re-admit straight into the
	// frequent list, making room first.
	if c.recentEvict.Contains(key) {
		c.ensureSpace(true)
		c.recentEvict.Remove(key)
		c.frequent.Add(key, value)
		return
	}

	// New key: add to the recently seen list.
	// (The redundant trailing `return` from the original was dropped;
	// staticcheck S1023.)
	c.ensureSpace(false)
	c.recent.Add(key, value)
}
// ensureSpace evicts a single entry if the cache is at capacity.
// recentEvict indicates the insertion is driven by a ghost-list hit,
// which biases eviction toward the frequent list when the recent list
// is exactly at its target size.
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
	recentLen, freqLen := c.recent.Len(), c.frequent.Len()
	if recentLen+freqLen < c.size {
		return // still room; nothing to evict
	}

	// Evict from the recent list when it exceeds (or, unless re-admitting
	// a ghost entry, meets) its target size, remembering the victim's key
	// in the ghost list.
	overTarget := recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)
	if recentLen > 0 && overTarget {
		k, _, _ := c.recent.RemoveOldest()
		c.recentEvict.Add(k, nil)
		return
	}

	// Otherwise evict from the frequent list.
	c.frequent.RemoveOldest()
}
// Len returns the number of resident items (recent + frequent lists).
func (c *TwoQueueCache) Len() int {
	c.lock.RLock()
	n := c.recent.Len() + c.frequent.Len()
	c.lock.RUnlock()
	return n
}
// Keys returns a slice of the keys in the cache, with the frequently
// used keys first.
func (c *TwoQueueCache) Keys() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return append(c.frequent.Keys(), c.recent.Keys()...)
}
// Remove removes the provided key from whichever internal list holds
// it; the first successful removal stops the search.
func (c *TwoQueueCache) Remove(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	switch {
	case c.frequent.Remove(key):
	case c.recent.Remove(key):
	case c.recentEvict.Remove(key):
	}
}
// Purge completely clears the cache, including the ghost list.
func (c *TwoQueueCache) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	for _, l := range []simplelru.LRUCache{c.recent, c.frequent, c.recentEvict} {
		l.Purge()
	}
}
// Contains reports whether the cache holds a key, without updating
// recency or frequency. Ghost-list entries do not count as contained.
func (c *TwoQueueCache) Contains(key interface{}) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if c.frequent.Contains(key) {
		return true
	}
	return c.recent.Contains(key)
}
// Peek inspects the cached value of a key without updating recency or
// frequency, checking the frequent list before the recent list.
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if v, hit := c.frequent.Peek(key); hit {
		return v, hit
	}
	return c.recent.Peek(key)
}

306
vendor/github.com/hashicorp/golang-lru/2q_test.go generated vendored Normal file
View File

@ -0,0 +1,306 @@
package lru
import (
"math/rand"
"testing"
)
// Benchmark2Q_Rand benchmarks a 2Q cache under a uniformly random
// workload: even iterations add, odd iterations look up.
func Benchmark2Q_Rand(b *testing.B) {
	l, err := New2Q(8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}

	// Pre-generate the key trace so key generation stays outside the
	// timed region.
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = rand.Int63() % 32768
	}

	b.ResetTimer()

	var hit, miss int
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			l.Add(trace[i], trace[i])
		} else {
			_, ok := l.Get(trace[i])
			if ok {
				hit++
			} else {
				miss++
			}
		}
	}
	// Informational only; the hit ratio is not asserted.
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// Benchmark2Q_Freq benchmarks a 2Q cache under a skewed workload:
// even trace slots draw from a smaller key space (mod 16384), odd
// slots from the full space (mod 32768), favoring frequent keys.
func Benchmark2Q_Freq(b *testing.B) {
	l, err := New2Q(8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}

	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = rand.Int63() % 16384
		} else {
			trace[i] = rand.Int63() % 32768
		}
	}

	b.ResetTimer()

	// Warm the cache with the first half of the trace...
	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	// ...then measure lookups over those same slots.
	var hit, miss int
	for i := 0; i < b.N; i++ {
		_, ok := l.Get(trace[i])
		if ok {
			hit++
		} else {
			miss++
		}
	}
	// Informational only; the hit ratio is not asserted.
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// Test2Q_RandomOps hammers the cache with random Add/Get/Remove
// operations and checks that the resident size never exceeds capacity.
func Test2Q_RandomOps(t *testing.T) {
	size := 128
	l, err := New2Q(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	n := 200000
	for i := 0; i < n; i++ {
		// Keys come from a space 4x the cache size to force churn.
		key := rand.Int63() % 512
		r := rand.Int63()
		switch r % 3 {
		case 0:
			l.Add(key, key)
		case 1:
			l.Get(key)
		case 2:
			l.Remove(key)
		}

		// Invariant: recent + frequent never exceed the configured size.
		if l.recent.Len()+l.frequent.Len() > size {
			t.Fatalf("bad: recent: %d freq: %d",
				l.recent.Len(), l.frequent.Len())
		}
	}
}
// Test2Q_Get_RecentToFrequent checks that Get promotes entries from
// the recent list to the frequent list, and that they then stay there.
func Test2Q_Get_RecentToFrequent(t *testing.T) {
	l, err := New2Q(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Touch all the entries; they should land in t1 (recent).
	for i := 0; i < 128; i++ {
		l.Add(i, i)
	}
	if n := l.recent.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}

	// Get should upgrade every entry to t2 (frequent).
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("missing: %d", i)
		}
	}
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}

	// Repeated Gets are served from t2 without moving entries back.
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("missing: %d", i)
		}
	}
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
}
// Test2Q_Add_RecentToFrequent checks that re-Adding a key promotes it
// from the recent list to the frequent list, where it then stays.
func Test2Q_Add_RecentToFrequent(t *testing.T) {
	l, err := New2Q(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// First Add goes to the recent list.
	l.Add(1, 1)
	if n := l.recent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}

	// A second Add of the same key upgrades it to frequent.
	l.Add(1, 1)
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}

	// Further Adds leave it in frequent.
	l.Add(1, 1)
	if n := l.recent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
}
// Test2Q_Add_RecentEvict checks the ghost-list path: keys evicted from
// the recent list are remembered, and re-Adding one lands it in the
// frequent list.
func Test2Q_Add_RecentEvict(t *testing.T) {
	l, err := New2Q(4)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Add 1,2,3,4,5 -> evicts 1 into the ghost list (recentEvict).
	l.Add(1, 1)
	l.Add(2, 2)
	l.Add(3, 3)
	l.Add(4, 4)
	l.Add(5, 5)
	if n := l.recent.Len(); n != 4 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.recentEvict.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}

	// Re-Add the ghost entry: it is pulled into frequent, and one more
	// recent entry is evicted to make room.
	l.Add(1, 1)
	if n := l.recent.Len(); n != 3 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.recentEvict.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}

	// Adding a fresh key causes another recent eviction into the ghost list.
	l.Add(6, 6)
	if n := l.recent.Len(); n != 3 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.recentEvict.Len(); n != 2 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.frequent.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
}
// Test2Q exercises the public API end to end: eviction order, Keys,
// Get, Remove, and Purge on a size-128 cache fed 256 entries.
func Test2Q(t *testing.T) {
	l, err := New2Q(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Adding 256 entries to a 128-slot cache evicts the first 128.
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}

	// Keys lists frequent-first; everything is still in recent, in
	// insertion order, so the i-th key (and its value) is i+128.
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	// The first 128 keys were evicted...
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be evicted")
		}
	}
	// ...and the last 128 are still resident.
	for i := 128; i < 256; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("should not be evicted")
		}
	}
	// Removed keys must no longer be returned.
	for i := 128; i < 192; i++ {
		l.Remove(i)
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be deleted")
		}
	}

	// Purge empties the cache entirely.
	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}
// Test2Q_Contains verifies that Contains does not update recent-ness:
// a contained-but-not-Gotten key is still first in line for eviction.
func Test2Q_Contains(t *testing.T) {
	l, err := New2Q(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}

	// Adding a third key must evict 1, since Contains did not refresh it.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}
// Test2Q_Peek verifies that Peek returns the value but does not update
// recent-ness: a peeked key is still first in line for eviction.
func Test2Q_Peek(t *testing.T) {
	l, err := New2Q(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}

	// Adding a third key must evict 1, since Peek did not refresh it.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}

362
vendor/github.com/hashicorp/golang-lru/LICENSE generated vendored Normal file
View File

@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

25
vendor/github.com/hashicorp/golang-lru/README.md generated vendored Normal file
View File

@ -0,0 +1,25 @@
golang-lru
==========
This provides the `lru` package, which implements a fixed-size,
thread-safe LRU cache. It is based on the cache in Groupcache.
Documentation
=============
Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
Example
=======
Using the LRU is very simple:
```go
l, _ := New(128)
for i := 0; i < 256; i++ {
l.Add(i, nil)
}
if l.Len() != 128 {
panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```

257
vendor/github.com/hashicorp/golang-lru/arc.go generated vendored Normal file
View File

@ -0,0 +1,257 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache, computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q) which requires setting parameters.
type ARCCache struct {
	size int // Size is the total capacity of the cache
	p    int // P is the dynamic preference towards T1 or T2

	t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
	b1 simplelru.LRUCache // B1 is the ghost LRU for evictions from t1

	t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
	b2 simplelru.LRUCache // B2 is the ghost LRU for evictions from t2

	lock sync.RWMutex // guards the four LRUs and p
}
// NewARC creates an ARC of the given size, with an initial recency
// preference p of zero.
func NewARC(size int) (*ARCCache, error) {
	// Build the two ghost lists, then the two resident lists.
	ghostRecent, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	ghostFrequent, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	recent, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	frequent, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}

	return &ARCCache{
		size: size,
		p:    0,
		t1:   recent,
		b1:   ghostRecent,
		t2:   frequent,
		b2:   ghostFrequent,
	}, nil
}
// Get looks up a key's value from the cache. A hit in T1 (recent) is
// promoted to T2 (frequent).
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// A hit in T1 moves the entry to T2.
	if v, hit := c.t1.Peek(key); hit {
		c.t1.Remove(key)
		c.t2.Add(key, v)
		return v, hit
	}

	// A hit in T2 just refreshes its recency there.
	if v, hit := c.t2.Get(key); hit {
		return v, hit
	}

	// Miss.
	return nil, false
}
// Add adds a value to the cache. Hits on the ghost lists (b1/b2)
// adaptively adjust p, the target size of the recent side, before
// re-admitting the key into T2; brand-new keys go into T1.
func (c *ARCCache) Add(key, value interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// In T1 (recent): promote to T2 (frequent).
	if c.t1.Contains(key) {
		c.t1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Already in T2: update the value in place.
	if c.t2.Contains(key) {
		c.t2.Add(key, value)
		return
	}

	// Ghost hit in B1: the recent side proved too small, so grow p
	// (by the B2/B1 length ratio, at least 1, capped at size).
	if c.b1.Contains(key) {
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b2Len > b1Len {
			delta = b2Len / b1Len
		}
		if c.p+delta >= c.size {
			c.p = c.size
		} else {
			c.p += delta
		}

		// Make room before re-admitting the key.
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(false)
		}

		c.b1.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Ghost hit in B2: the frequent side proved too small, so shrink p
	// (by the B1/B2 length ratio, at least 1, floored at 0).
	if c.b2.Contains(key) {
		delta := 1
		b1Len := c.b1.Len()
		b2Len := c.b2.Len()
		if b1Len > b2Len {
			delta = b1Len / b2Len
		}
		if delta >= c.p {
			c.p = 0
		} else {
			c.p -= delta
		}

		// Make room before re-admitting the key.
		if c.t1.Len()+c.t2.Len() >= c.size {
			c.replace(true)
		}

		c.b2.Remove(key)
		c.t2.Add(key, value)
		return
	}

	// Complete miss: make room, trim the ghost lists to their targets,
	// and insert into T1 (recent).
	// (The redundant trailing `return` from the original was dropped;
	// staticcheck S1023.)
	if c.t1.Len()+c.t2.Len() >= c.size {
		c.replace(false)
	}
	if c.b1.Len() > c.size-c.p {
		c.b1.RemoveOldest()
	}
	if c.b2.Len() > c.p {
		c.b2.RemoveOldest()
	}
	c.t1.Add(key, value)
}
// replace adaptively evicts from either T1 or T2 based on the current
// learned value of p, remembering the victim's key in the matching
// ghost list.
func (c *ARCCache) replace(b2ContainsKey bool) {
	t1Len := c.t1.Len()
	evictT1 := t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey))
	if evictT1 {
		if k, _, ok := c.t1.RemoveOldest(); ok {
			c.b1.Add(k, nil)
		}
		return
	}
	if k, _, ok := c.t2.RemoveOldest(); ok {
		c.b2.Add(k, nil)
	}
}
// Len returns the number of resident entries (T1 + T2).
func (c *ARCCache) Len() int {
	c.lock.RLock()
	n := c.t1.Len() + c.t2.Len()
	c.lock.RUnlock()
	return n
}
// Keys returns all the cached keys, recently used (T1) first, then
// frequently used (T2).
func (c *ARCCache) Keys() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return append(c.t1.Keys(), c.t2.Keys()...)
}
// Remove purges a key from whichever internal list holds it; the
// first successful removal stops the search.
func (c *ARCCache) Remove(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	switch {
	case c.t1.Remove(key):
	case c.t2.Remove(key):
	case c.b1.Remove(key):
	case c.b2.Remove(key):
	}
}
// Purge clears the cache completely, including both ghost lists.
func (c *ARCCache) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	for _, l := range []simplelru.LRUCache{c.t1, c.t2, c.b1, c.b2} {
		l.Purge()
	}
}
// Contains reports whether a key is resident (T1 or T2) without
// updating recency or frequency. Ghost entries do not count.
func (c *ARCCache) Contains(key interface{}) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if c.t1.Contains(key) {
		return true
	}
	return c.t2.Contains(key)
}
// Peek inspects the cached value of a key without updating recency or
// frequency, checking T1 before T2.
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if v, hit := c.t1.Peek(key); hit {
		return v, hit
	}
	return c.t2.Peek(key)
}

377
vendor/github.com/hashicorp/golang-lru/arc_test.go generated vendored Normal file
View File

@ -0,0 +1,377 @@
package lru
import (
"math/rand"
"testing"
"time"
)
// init seeds math/rand so the random traces in these tests and
// benchmarks differ between runs.
func init() {
	rand.Seed(time.Now().Unix())
}
func BenchmarkARC_Rand(b *testing.B) {
l, err := NewARC(8192)
if err != nil {
b.Fatalf("err: %v", err)
}
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
trace[i] = rand.Int63() % 32768
}
b.ResetTimer()
var hit, miss int
for i := 0; i < 2*b.N; i++ {
if i%2 == 0 {
l.Add(trace[i], trace[i])
} else {
_, ok := l.Get(trace[i])
if ok {
hit++
} else {
miss++
}
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
func BenchmarkARC_Freq(b *testing.B) {
l, err := NewARC(8192)
if err != nil {
b.Fatalf("err: %v", err)
}
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
if i%2 == 0 {
trace[i] = rand.Int63() % 16384
} else {
trace[i] = rand.Int63() % 32768
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.Add(trace[i], trace[i])
}
var hit, miss int
for i := 0; i < b.N; i++ {
_, ok := l.Get(trace[i])
if ok {
hit++
} else {
miss++
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// TestARC_RandomOps hammers the cache with random Add/Get/Remove
// operations and checks the ARC size invariants after every step: the
// live lists (t1+t2) and the ghost lists (b1+b2) never exceed capacity.
func TestARC_RandomOps(t *testing.T) {
	size := 128
	// Use the size variable instead of repeating the literal 128, so the
	// constructor and the invariant checks cannot drift apart.
	l, err := NewARC(size)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	n := 200000
	for i := 0; i < n; i++ {
		key := rand.Int63() % 512
		r := rand.Int63()
		switch r % 3 {
		case 0:
			l.Add(key, key)
		case 1:
			l.Get(key)
		case 2:
			l.Remove(key)
		}
		if l.t1.Len()+l.t2.Len() > size {
			t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
				l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
		}
		if l.b1.Len()+l.b2.Len() > size {
			t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
				l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
		}
	}
}
// TestARC_Get_RecentToFrequent verifies that new entries land in the
// recent list (t1) and that a Get promotes them to the frequent list
// (t2), where they stay on subsequent Gets.
func TestARC_Get_RecentToFrequent(t *testing.T) {
	l, err := NewARC(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Touch all the entries, should be in t1
	for i := 0; i < 128; i++ {
		l.Add(i, i)
	}
	if n := l.t1.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	// Get should upgrade to t2
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("missing: %d", i)
		}
	}
	if n := l.t1.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
	// A second Get should be served from t2 and keep entries there
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("missing: %d", i)
		}
	}
	if n := l.t1.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 128 {
		t.Fatalf("bad: %d", n)
	}
}

// TestARC_Add_RecentToFrequent verifies that re-Adding an existing key
// promotes it from t1 to t2, and that further Adds keep it in t2.
func TestARC_Add_RecentToFrequent(t *testing.T) {
	l, err := NewARC(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Add initially to t1
	l.Add(1, 1)
	if n := l.t1.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	// Add should upgrade to t2
	l.Add(1, 1)
	if n := l.t1.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	// Add should remain in t2
	l.Add(1, 1)
	if n := l.t1.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
}

// TestARC_Adaptive walks the cache through a scripted sequence and
// checks the adaptive target p as ghost-list hits shift the balance
// between the recent (t1/b1) and frequent (t2/b2) sides.
func TestARC_Adaptive(t *testing.T) {
	l, err := NewARC(4)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Fill t1
	for i := 0; i < 4; i++ {
		l.Add(i, i)
	}
	if n := l.t1.Len(); n != 4 {
		t.Fatalf("bad: %d", n)
	}
	// Move to t2
	l.Get(0)
	l.Get(1)
	if n := l.t2.Len(); n != 2 {
		t.Fatalf("bad: %d", n)
	}
	// Evict from t1
	l.Add(4, 4)
	if n := l.b1.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	// Current state
	// t1 : (MRU) [4, 3] (LRU)
	// t2 : (MRU) [1, 0] (LRU)
	// b1 : (MRU) [2] (LRU)
	// b2 : (MRU) [] (LRU)
	// Add 2, should cause hit on b1
	l.Add(2, 2)
	if n := l.b1.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if l.p != 1 {
		t.Fatalf("bad: %d", l.p)
	}
	if n := l.t2.Len(); n != 3 {
		t.Fatalf("bad: %d", n)
	}
	// Current state
	// t1 : (MRU) [4] (LRU)
	// t2 : (MRU) [2, 1, 0] (LRU)
	// b1 : (MRU) [3] (LRU)
	// b2 : (MRU) [] (LRU)
	// Add 4, should migrate to t2
	l.Add(4, 4)
	if n := l.t1.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 4 {
		t.Fatalf("bad: %d", n)
	}
	// Current state
	// t1 : (MRU) [] (LRU)
	// t2 : (MRU) [4, 2, 1, 0] (LRU)
	// b1 : (MRU) [3] (LRU)
	// b2 : (MRU) [] (LRU)
	// Add 5, should evict the t2 LRU (0) to b2
	l.Add(5, 5)
	if n := l.t1.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 3 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.b2.Len(); n != 1 {
		t.Fatalf("bad: %d", n)
	}
	// Current state
	// t1 : (MRU) [5] (LRU)
	// t2 : (MRU) [4, 2, 1] (LRU)
	// b1 : (MRU) [3] (LRU)
	// b2 : (MRU) [0] (LRU)
	// Add 0, should hit b2 and decrease p
	l.Add(0, 0)
	if n := l.t1.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.t2.Len(); n != 4 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.b1.Len(); n != 2 {
		t.Fatalf("bad: %d", n)
	}
	if n := l.b2.Len(); n != 0 {
		t.Fatalf("bad: %d", n)
	}
	if l.p != 0 {
		t.Fatalf("bad: %d", l.p)
	}
	// Current state
	// t1 : (MRU) [] (LRU)
	// t2 : (MRU) [0, 4, 2, 1] (LRU)
	// b1 : (MRU) [5, 3] (LRU)
	// b2 : (MRU) [] (LRU)
}

// TestARC exercises the basic end-to-end behavior: capacity, eviction
// of the oldest half, Keys ordering, Remove, and Purge.
func TestARC(t *testing.T) {
	l, err := NewARC(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}
	// Keys runs oldest to newest, so the survivors must be 128..255 in order.
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("should not be evicted")
		}
	}
	for i := 128; i < 192; i++ {
		l.Remove(i)
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be deleted")
		}
	}
	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}

// Test that Contains doesn't update recent-ness
func TestARC_Contains(t *testing.T) {
	l, err := NewARC(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}
	// If Contains had refreshed key 1, adding 3 would have evicted 2 instead.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}

// Test that Peek doesn't update recent-ness
func TestARC_Peek(t *testing.T) {
	l, err := NewARC(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}
	// If Peek had refreshed key 1, adding 3 would have evicted 2 instead.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}

21
vendor/github.com/hashicorp/golang-lru/doc.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru

110
vendor/github.com/hashicorp/golang-lru/lru.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// Cache is a thread-safe fixed size LRU cache.
// It wraps a non-thread-safe simplelru implementation, guarding every
// operation with an RWMutex.
type Cache struct {
	lru  simplelru.LRUCache // underlying LRU; access only while holding lock
	lock sync.RWMutex       // guards lru
}
// New creates an LRU of the given size.
// It is shorthand for NewWithEvict(size, nil), i.e. no eviction callback.
func New(size int) (*Cache, error) {
	return NewWithEvict(size, nil)
}
// NewWithEvict constructs a fixed size cache that calls onEvicted (if
// non-nil) for every entry removed from the underlying LRU. It returns
// an error when size is not positive.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
	backing, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
	if err != nil {
		return nil, err
	}
	return &Cache{lru: backing}, nil
}
// Purge empties the cache; the underlying LRU fires the eviction
// callback for each removed entry.
func (c *Cache) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.lru.Purge()
}
// Add inserts or updates a value under key. The returned flag is true
// when the insertion pushed the oldest entry out of the cache.
func (c *Cache) Add(key, value interface{}) (evicted bool) {
	c.lock.Lock()
	evicted = c.lru.Add(key, value)
	c.lock.Unlock()
	return evicted
}
// Get returns the value stored under key and marks the entry most
// recently used. A write lock is taken because Get mutates recency.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	value, ok = c.lru.Get(key)
	c.lock.Unlock()
	return value, ok
}
// Contains reports whether key is cached without updating its
// recent-ness, so it can be polled freely without disturbing eviction
// order.
func (c *Cache) Contains(key interface{}) bool {
	c.lock.RLock()
	found := c.lru.Contains(key)
	c.lock.RUnlock()
	return found
}
// Peek reads the value stored under key without refreshing the entry's
// "recently used"-ness.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	value, ok = c.lru.Peek(key)
	c.lock.RUnlock()
	return value, ok
}
// ContainsOrAdd checks for key without updating recent-ness and, on a
// miss, inserts the value. It reports whether the key was found and
// whether the insertion evicted an entry.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.lru.Contains(key) {
		return true, false
	}
	return false, c.lru.Add(key, value)
}
// Remove deletes key from the cache if it is present.
func (c *Cache) Remove(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.lru.Remove(key)
}
// RemoveOldest evicts the least recently used entry, if any.
func (c *Cache) RemoveOldest() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.lru.RemoveOldest()
}
// Keys lists the cached keys ordered from oldest to newest.
func (c *Cache) Keys() []interface{} {
	c.lock.RLock()
	keys := c.lru.Keys()
	c.lock.RUnlock()
	return keys
}
// Len reports the number of entries currently in the cache.
func (c *Cache) Len() int {
	c.lock.RLock()
	n := c.lru.Len()
	c.lock.RUnlock()
	return n
}

221
vendor/github.com/hashicorp/golang-lru/lru_test.go generated vendored Normal file
View File

@ -0,0 +1,221 @@
package lru
import (
"math/rand"
"testing"
)
// BenchmarkLRU_Rand measures the cache under a uniformly random trace:
// even iterations Add, odd iterations Get, keys drawn from [0, 32768).
func BenchmarkLRU_Rand(b *testing.B) {
	l, err := New(8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = rand.Int63() % 32768
	}
	b.ResetTimer()
	var hit, miss int
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			l.Add(trace[i], trace[i])
		} else {
			_, ok := l.Get(trace[i])
			if ok {
				hit++
			} else {
				miss++
			}
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}

// BenchmarkLRU_Freq measures the cache with a skewed trace: Adds draw
// keys from a smaller space (16384) than Gets (32768).
func BenchmarkLRU_Freq(b *testing.B) {
	l, err := New(8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = rand.Int63() % 16384
		} else {
			trace[i] = rand.Int63() % 32768
		}
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		_, ok := l.Get(trace[i])
		if ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}

// TestLRU exercises the thread-safe wrapper end to end: capacity,
// eviction callback count, Keys ordering, Remove, and Purge.
func TestLRU(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k interface{}, v interface{}) {
		if k != v {
			t.Fatalf("Evict values not equal (%v!=%v)", k, v)
		}
		evictCounter++
	}
	l, err := NewWithEvict(128, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if evictCounter != 128 {
		t.Fatalf("bad evict count: %v", evictCounter)
	}
	// Keys runs oldest to newest, so survivors must be 128..255 in order.
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("should not be evicted")
		}
	}
	for i := 128; i < 192; i++ {
		l.Remove(i)
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be deleted")
		}
	}
	l.Get(192) // expect 192 to be last key in l.Keys()
	for i, k := range l.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}
	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}

// test that Add returns true/false if an eviction occurred
func TestLRUAdd(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k interface{}, v interface{}) {
		evictCounter++
	}
	l, err := NewWithEvict(1, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if l.Add(1, 1) == true || evictCounter != 0 {
		t.Errorf("should not have an eviction")
	}
	if l.Add(2, 2) == false || evictCounter != 1 {
		t.Errorf("should have an eviction")
	}
}

// test that Contains doesn't update recent-ness
func TestLRUContains(t *testing.T) {
	l, err := New(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}
	// If Contains had refreshed key 1, adding 3 would have evicted 2 instead.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}

// test that ContainsOrAdd doesn't update recent-ness on a hit, and adds on a miss
func TestLRUContainsOrAdd(t *testing.T) {
	l, err := New(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	contains, evict := l.ContainsOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evict {
		t.Errorf("nothing should be evicted here")
	}
	l.Add(3, 3)
	contains, evict = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evict {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}

// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
	l, err := New(2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}
	// If Peek had refreshed key 1, adding 3 would have evicted 2 instead.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}

161
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go generated vendored Normal file
View File

@ -0,0 +1,161 @@
package simplelru
import (
"container/list"
"errors"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
	size      int                           // maximum number of entries
	evictList *list.List                    // front = most recent, back = oldest
	items     map[interface{}]*list.Element // key -> element holding *entry
	onEvict   EvictCallback                 // optional eviction notification
}

// entry is the payload stored in each evictList element.
type entry struct {
	key   interface{}
	value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
	if size < 1 {
		return nil, errors.New("Must provide a positive size")
	}
	return &LRU{
		size:      size,
		evictList: list.New(),
		items:     make(map[interface{}]*list.Element),
		onEvict:   onEvict,
	}, nil
}

// Purge is used to completely clear the cache, firing the eviction
// callback for every entry.
func (c *LRU) Purge() {
	for key, elem := range c.items {
		if c.onEvict != nil {
			c.onEvict(key, elem.Value.(*entry).value)
		}
		delete(c.items, key)
	}
	c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
	if elem, exists := c.items[key]; exists {
		// Update in place and mark most recently used.
		c.evictList.MoveToFront(elem)
		elem.Value.(*entry).value = value
		return false
	}
	c.items[key] = c.evictList.PushFront(&entry{key, value})
	if c.evictList.Len() <= c.size {
		return false
	}
	c.removeOldest()
	return true
}

// Get looks up a key's value from the cache, marking it most recently used.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
	elem, ok := c.items[key]
	if !ok {
		return nil, false
	}
	c.evictList.MoveToFront(elem)
	return elem.Value.(*entry).value, true
}

// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
	_, ok = c.items[key]
	return ok
}

// Peek returns the key value (or nil if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
	if elem, found := c.items[key]; found {
		return elem.Value.(*entry).value, true
	}
	return nil, false
}

// Remove removes the provided key from the cache, returning whether the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
	elem, present := c.items[key]
	if present {
		c.removeElement(elem)
	}
	return present
}

// RemoveOldest removes and returns the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
	elem := c.evictList.Back()
	if elem == nil {
		return nil, nil, false
	}
	kv := elem.Value.(*entry)
	c.removeElement(elem)
	return kv.key, kv.value, true
}

// GetOldest returns the oldest entry without removing it.
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
	if elem := c.evictList.Back(); elem != nil {
		kv := elem.Value.(*entry)
		return kv.key, kv.value, true
	}
	return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
	keys := make([]interface{}, 0, len(c.items))
	for elem := c.evictList.Back(); elem != nil; elem = elem.Prev() {
		keys = append(keys, elem.Value.(*entry).key)
	}
	return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
	return c.evictList.Len()
}

// removeOldest evicts the least recently used entry, if any.
func (c *LRU) removeOldest() {
	if elem := c.evictList.Back(); elem != nil {
		c.removeElement(elem)
	}
}

// removeElement unlinks e from the list, deletes its map entry, and
// fires the eviction callback.
func (c *LRU) removeElement(e *list.Element) {
	c.evictList.Remove(e)
	kv := e.Value.(*entry)
	delete(c.items, kv.key)
	if c.onEvict != nil {
		c.onEvict(kv.key, kv.value)
	}
}

View File

@ -0,0 +1,37 @@
package simplelru
// LRUCache is the interface for simple LRU cache implementations in
// this package; the thread-safe wrappers one level up program against it.
type LRUCache interface {
	// Add adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool
	// Get returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)
	// Contains checks if a key exists in the cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)
	// Peek returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)
	// Remove removes a key from the cache, reporting whether it was present.
	Remove(key interface{}) bool
	// RemoveOldest removes the oldest entry from the cache.
	RemoveOldest() (interface{}, interface{}, bool)
	// GetOldest returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)
	// Keys returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}
	// Len returns the number of items in the cache.
	Len() int
	// Purge clears all cache entries.
	Purge()
}

View File

@ -0,0 +1,167 @@
package simplelru
import "testing"
// TestLRU exercises the simple LRU end to end: capacity, eviction
// callback count, Keys ordering, Remove semantics, and Purge.
func TestLRU(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k interface{}, v interface{}) {
		if k != v {
			t.Fatalf("Evict values not equal (%v!=%v)", k, v)
		}
		evictCounter++
	}
	l, err := NewLRU(128, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if evictCounter != 128 {
		t.Fatalf("bad evict count: %v", evictCounter)
	}
	// Keys runs oldest to newest, so survivors must be 128..255 in order.
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		_, ok := l.Get(i)
		if ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		_, ok := l.Get(i)
		if !ok {
			t.Fatalf("should not be evicted")
		}
	}
	for i := 128; i < 192; i++ {
		ok := l.Remove(i)
		if !ok {
			t.Fatalf("should be contained")
		}
		// A second Remove of the same key must report absence.
		ok = l.Remove(i)
		if ok {
			t.Fatalf("should not be contained")
		}
		_, ok = l.Get(i)
		if ok {
			t.Fatalf("should be deleted")
		}
	}
	l.Get(192) // expect 192 to be last key in l.Keys()
	for i, k := range l.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}
	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}

// TestLRU_GetOldest_RemoveOldest verifies that GetOldest is
// non-destructive while RemoveOldest pops entries in LRU order.
func TestLRU_GetOldest_RemoveOldest(t *testing.T) {
	l, err := NewLRU(128, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	k, _, ok := l.GetOldest()
	if !ok {
		t.Fatalf("missing")
	}
	if k.(int) != 128 {
		t.Fatalf("bad: %v", k)
	}
	k, _, ok = l.RemoveOldest()
	if !ok {
		t.Fatalf("missing")
	}
	if k.(int) != 128 {
		t.Fatalf("bad: %v", k)
	}
	k, _, ok = l.RemoveOldest()
	if !ok {
		t.Fatalf("missing")
	}
	if k.(int) != 129 {
		t.Fatalf("bad: %v", k)
	}
}

// Test that Add returns true/false if an eviction occurred
func TestLRU_Add(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k interface{}, v interface{}) {
		evictCounter++
	}
	l, err := NewLRU(1, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if l.Add(1, 1) == true || evictCounter != 0 {
		t.Errorf("should not have an eviction")
	}
	if l.Add(2, 2) == false || evictCounter != 1 {
		t.Errorf("should have an eviction")
	}
}

// Test that Contains doesn't update recent-ness
func TestLRU_Contains(t *testing.T) {
	l, err := NewLRU(2, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}
	// If Contains had refreshed key 1, adding 3 would have evicted 2 instead.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}

// Test that Peek doesn't update recent-ness
func TestLRU_Peek(t *testing.T) {
	l, err := NewLRU(2, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}
	// If Peek had refreshed key 1, adding 3 would have evicted 2 instead.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}

View File

@ -1,9 +1,9 @@
y.output y.output
# ignore intellij files # ignore intellij files
.idea .idea
*.iml *.iml
*.ipr *.ipr
*.iws *.iws
*.test *.test

View File

@ -1,18 +1,18 @@
TEST?=./... TEST?=./...
default: test default: test
fmt: generate fmt: generate
go fmt ./... go fmt ./...
test: generate test: generate
go get -t ./... go get -t ./...
go test $(TEST) $(TESTARGS) go test $(TEST) $(TESTARGS)
generate: generate:
go generate ./... go generate ./...
updatedeps: updatedeps:
go get -u golang.org/x/tools/cmd/stringer go get -u golang.org/x/tools/cmd/stringer
.PHONY: default generate test updatedeps .PHONY: default generate test updatedeps

View File

@ -1,5 +1,5 @@
resource = [{ resource = [{
foo = [{ foo = [{
bar = {} bar = {}
}] }]
}] }]

View File

@ -1,15 +1,15 @@
// Foo // Foo
/* Bar */ /* Bar */
/* /*
/* /*
Baz Baz
*/ */
# Another # Another
# Multiple # Multiple
# Lines # Lines
foo = "bar" foo = "bar"

View File

@ -1,42 +1,42 @@
variable "foo" { variable "foo" {
default = "bar" default = "bar"
description = "bar" description = "bar"
} }
variable "groups" { } variable "groups" { }
provider "aws" { provider "aws" {
access_key = "foo" access_key = "foo"
secret_key = "bar" secret_key = "bar"
} }
provider "do" { provider "do" {
api_key = "${var.foo}" api_key = "${var.foo}"
} }
resource "aws_security_group" "firewall" { resource "aws_security_group" "firewall" {
count = 5 count = 5
} }
resource aws_instance "web" { resource aws_instance "web" {
ami = "${var.foo}" ami = "${var.foo}"
security_groups = [ security_groups = [
"foo", "foo",
"${aws_security_group.firewall.foo}", "${aws_security_group.firewall.foo}",
"${element(split(\",\", var.groups)}", "${element(split(\",\", var.groups)}",
] ]
network_interface = { network_interface = {
device_index = 0 device_index = 0
description = "Main network interface" description = "Main network interface"
} }
} }
resource "aws_instance" "db" { resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}" security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo" VPC = "foo"
depends_on = ["aws_instance.web"] depends_on = ["aws_instance.web"]
} }
output "web_ip" { output "web_ip" {
value = "${aws_instance.web.private_ip}" value = "${aws_instance.web.private_ip}"
} }

View File

@ -1 +1 @@
foo = [1, 2, "foo"] foo = [1, 2, "foo"]

View File

@ -1,2 +1,2 @@
foo = "bar" foo = "bar"
key = 7 key = 7

View File

@ -1,5 +1,5 @@
foo { foo {
value = 7 value = 7
"value" = 8 "value" = 8
"complex::value" = 9 "complex::value" = 9
} }

View File

@ -1 +1 @@
resource "foo" "bar" {} resource "foo" "bar" {}

View File

@ -1,7 +1,7 @@
foo = "bar" foo = "bar"
bar = 7 bar = 7
baz = [1,2,3] baz = [1,2,3]
foo = -12 foo = -12
bar = 3.14159 bar = 3.14159
foo = true foo = true
bar = false bar = false

View File

@ -1,37 +1,37 @@
// A standalone comment is a comment which is not attached to any kind of node // A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test // This comes from Terraform, as a test
variable "foo" { variable "foo" {
# Standalone comment should be still here # Standalone comment should be still here
default = "bar" default = "bar"
description = "bar" # yooo description = "bar" # yooo
} }
/* This is a multi line standalone /* This is a multi line standalone
comment*/ comment*/
// fatih arslan // fatih arslan
/* This is a developer test /* This is a developer test
account and a multine comment */ account and a multine comment */
developer = [ "fatih", "arslan"] // fatih arslan developer = [ "fatih", "arslan"] // fatih arslan
# One line here # One line here
numbers = [1,2] // another line here numbers = [1,2] // another line here
# Another comment # Another comment
variable = { variable = {
description = "bar" # another yooo description = "bar" # another yooo
foo { foo {
# Nested standalone # Nested standalone
bar = "fatih" bar = "fatih"
} }
} }
// lead comment // lead comment
foo { foo {
bar = "fatih" // line comment 2 bar = "fatih" // line comment 2
} // line comment 3 } // line comment 3

View File

@ -1,15 +1,15 @@
// Foo // Foo
/* Bar */ /* Bar */
/* /*
/* /*
Baz Baz
*/ */
# Another # Another
# Multiple # Multiple
# Lines # Lines
foo = "bar" foo = "bar"

View File

@ -1 +1 @@
foo = [1, 2, "foo"] foo = [1, 2, "foo"]

View File

@ -1,2 +1,2 @@
foo = "bar" foo = "bar"
key = 7 key = 7

View File

@ -1,5 +1,5 @@
foo { foo {
value = 7 value = 7
"value" = 8 "value" = 8
"complex::value" = 9 "complex::value" = 9
} }

View File

@ -1 +1 @@
resource "foo" "bar" {} resource "foo" "bar" {}

View File

@ -1,7 +1,7 @@
foo = "bar" foo = "bar"
bar = 7 bar = 7
baz = [1,2,3] baz = [1,2,3]
foo = -12 foo = -12
bar = 3.14159 bar = 3.14159
foo = true foo = true
bar = false bar = false

View File

@ -1,4 +1,4 @@
{ {
"foo": [1, 2, "bar"], "foo": [1, 2, "bar"],
"bar": "baz" "bar": "baz"
} }

View File

@ -1,3 +1,3 @@
{ {
"foo": "bar" "foo": "bar"
} }

View File

@ -1,5 +1,5 @@
{ {
"foo": { "foo": {
"bar": [1,2] "bar": [1,2]
} }
} }

View File

@ -1,10 +1,10 @@
{ {
"foo": "bar", "foo": "bar",
"bar": 7, "bar": 7,
"baz": [1,2,3], "baz": [1,2,3],
"foo": -12, "foo": -12,
"bar": 3.14159, "bar": 3.14159,
"foo": true, "foo": true,
"bar": false, "bar": false,
"foo": null "foo": null
} }

View File

@ -1,4 +1,4 @@
{ {
"foo": [1, 2, "bar"], "foo": [1, 2, "bar"],
"bar": "baz" "bar": "baz"
} }

View File

@ -1,3 +1,3 @@
{ {
"foo": "bar" "foo": "bar"
} }

View File

@ -1,5 +1,5 @@
{ {
"foo": { "foo": {
"bar": [1,2] "bar": [1,2]
} }
} }

View File

@ -1,10 +1,10 @@
{ {
"foo": "bar", "foo": "bar",
"bar": 7, "bar": 7,
"baz": [1,2,3], "baz": [1,2,3],
"foo": -12, "foo": -12,
"bar": 3.14159, "bar": 3.14159,
"foo": true, "foo": true,
"bar": false, "bar": false,
"foo": null "foo": null
} }

View File

@ -1,5 +1,5 @@
resource = [{ resource = [{
foo = [{ foo = [{
bar = {} bar = {}
}] }]
}] }]

View File

@ -1,2 +1,2 @@
foo = "bar" foo = "bar"
bar = "${file("bing/bong.txt")}" bar = "${file("bing/bong.txt")}"

View File

@ -1,4 +1,4 @@
{ {
"foo": "bar", "foo": "bar",
"bar": "${file(\"bing/bong.txt\")}" "bar": "${file(\"bing/bong.txt\")}"
} }

View File

@ -1,2 +1,2 @@
foo = "bar" foo = "bar"
Key = 7 Key = 7

View File

@ -1,8 +1,8 @@
{ {
"foo": [{ "foo": [{
"baz": [{ "baz": [{
"key": 7, "key": 7,
"foo": "bar" "foo": "bar"
}] }]
}] }]
} }

View File

@ -1,9 +1,9 @@
// This is a test structure for the lexer // This is a test structure for the lexer
foo "baz" { foo "baz" {
key = 7 key = 7
foo = "bar" foo = "bar"
} }
foo { foo {
key = 7 key = 7
} }

View File

@ -1,10 +1,10 @@
{ {
"foo": [{ "foo": [{
"baz": { "baz": {
"key": 7, "key": 7,
"foo": "bar" "foo": "bar"
} }
}, { }, {
"key": 7 "key": 7
}] }]
} }

View File

@ -1,8 +1,8 @@
{ {
"foo": { "foo": {
"baz": { "baz": {
"key": 7, "key": 7,
"foo": "bar" "foo": "bar"
} }
} }
} }

View File

@ -1,7 +1,7 @@
foo { foo {
key = 7 key = 7
} }
foo { foo {
foo = "bar" foo = "bar"
} }

View File

@ -1,211 +1,211 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<CATALOG> <CATALOG>
<CD> <CD>
<TITLE>Empire Burlesque</TITLE> <TITLE>Empire Burlesque</TITLE>
<ARTIST>Bob Dylan</ARTIST> <ARTIST>Bob Dylan</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>Columbia</COMPANY> <COMPANY>Columbia</COMPANY>
<PRICE>10.90</PRICE> <PRICE>10.90</PRICE>
<YEAR>1985</YEAR> <YEAR>1985</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Hide your heart</TITLE> <TITLE>Hide your heart</TITLE>
<ARTIST>Bonnie Tyler</ARTIST> <ARTIST>Bonnie Tyler</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>CBS Records</COMPANY> <COMPANY>CBS Records</COMPANY>
<PRICE>9.90</PRICE> <PRICE>9.90</PRICE>
<YEAR>1988</YEAR> <YEAR>1988</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Greatest Hits</TITLE> <TITLE>Greatest Hits</TITLE>
<ARTIST>Dolly Parton</ARTIST> <ARTIST>Dolly Parton</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>RCA</COMPANY> <COMPANY>RCA</COMPANY>
<PRICE>9.90</PRICE> <PRICE>9.90</PRICE>
<YEAR>1982</YEAR> <YEAR>1982</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Still got the blues</TITLE> <TITLE>Still got the blues</TITLE>
<ARTIST>Gary Moore</ARTIST> <ARTIST>Gary Moore</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Virgin records</COMPANY> <COMPANY>Virgin records</COMPANY>
<PRICE>10.20</PRICE> <PRICE>10.20</PRICE>
<YEAR>1990</YEAR> <YEAR>1990</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Eros</TITLE> <TITLE>Eros</TITLE>
<ARTIST>Eros Ramazzotti</ARTIST> <ARTIST>Eros Ramazzotti</ARTIST>
<COUNTRY>EU</COUNTRY> <COUNTRY>EU</COUNTRY>
<COMPANY>BMG</COMPANY> <COMPANY>BMG</COMPANY>
<PRICE>9.90</PRICE> <PRICE>9.90</PRICE>
<YEAR>1997</YEAR> <YEAR>1997</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>One night only</TITLE> <TITLE>One night only</TITLE>
<ARTIST>Bee Gees</ARTIST> <ARTIST>Bee Gees</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Polydor</COMPANY> <COMPANY>Polydor</COMPANY>
<PRICE>10.90</PRICE> <PRICE>10.90</PRICE>
<YEAR>1998</YEAR> <YEAR>1998</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Sylvias Mother</TITLE> <TITLE>Sylvias Mother</TITLE>
<ARTIST>Dr.Hook</ARTIST> <ARTIST>Dr.Hook</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>CBS</COMPANY> <COMPANY>CBS</COMPANY>
<PRICE>8.10</PRICE> <PRICE>8.10</PRICE>
<YEAR>1973</YEAR> <YEAR>1973</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Maggie May</TITLE> <TITLE>Maggie May</TITLE>
<ARTIST>Rod Stewart</ARTIST> <ARTIST>Rod Stewart</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Pickwick</COMPANY> <COMPANY>Pickwick</COMPANY>
<PRICE>8.50</PRICE> <PRICE>8.50</PRICE>
<YEAR>1990</YEAR> <YEAR>1990</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Romanza</TITLE> <TITLE>Romanza</TITLE>
<ARTIST>Andrea Bocelli</ARTIST> <ARTIST>Andrea Bocelli</ARTIST>
<COUNTRY>EU</COUNTRY> <COUNTRY>EU</COUNTRY>
<COMPANY>Polydor</COMPANY> <COMPANY>Polydor</COMPANY>
<PRICE>10.80</PRICE> <PRICE>10.80</PRICE>
<YEAR>1996</YEAR> <YEAR>1996</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>When a man loves a woman</TITLE> <TITLE>When a man loves a woman</TITLE>
<ARTIST>Percy Sledge</ARTIST> <ARTIST>Percy Sledge</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>Atlantic</COMPANY> <COMPANY>Atlantic</COMPANY>
<PRICE>8.70</PRICE> <PRICE>8.70</PRICE>
<YEAR>1987</YEAR> <YEAR>1987</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Black angel</TITLE> <TITLE>Black angel</TITLE>
<ARTIST>Savage Rose</ARTIST> <ARTIST>Savage Rose</ARTIST>
<COUNTRY>EU</COUNTRY> <COUNTRY>EU</COUNTRY>
<COMPANY>Mega</COMPANY> <COMPANY>Mega</COMPANY>
<PRICE>10.90</PRICE> <PRICE>10.90</PRICE>
<YEAR>1995</YEAR> <YEAR>1995</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>1999 Grammy Nominees</TITLE> <TITLE>1999 Grammy Nominees</TITLE>
<ARTIST>Many</ARTIST> <ARTIST>Many</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>Grammy</COMPANY> <COMPANY>Grammy</COMPANY>
<PRICE>10.20</PRICE> <PRICE>10.20</PRICE>
<YEAR>1999</YEAR> <YEAR>1999</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>For the good times</TITLE> <TITLE>For the good times</TITLE>
<ARTIST>Kenny Rogers</ARTIST> <ARTIST>Kenny Rogers</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Mucik Master</COMPANY> <COMPANY>Mucik Master</COMPANY>
<PRICE>8.70</PRICE> <PRICE>8.70</PRICE>
<YEAR>1995</YEAR> <YEAR>1995</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Big Willie style</TITLE> <TITLE>Big Willie style</TITLE>
<ARTIST>Will Smith</ARTIST> <ARTIST>Will Smith</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>Columbia</COMPANY> <COMPANY>Columbia</COMPANY>
<PRICE>9.90</PRICE> <PRICE>9.90</PRICE>
<YEAR>1997</YEAR> <YEAR>1997</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Tupelo Honey</TITLE> <TITLE>Tupelo Honey</TITLE>
<ARTIST>Van Morrison</ARTIST> <ARTIST>Van Morrison</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Polydor</COMPANY> <COMPANY>Polydor</COMPANY>
<PRICE>8.20</PRICE> <PRICE>8.20</PRICE>
<YEAR>1971</YEAR> <YEAR>1971</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Soulsville</TITLE> <TITLE>Soulsville</TITLE>
<ARTIST>Jorn Hoel</ARTIST> <ARTIST>Jorn Hoel</ARTIST>
<COUNTRY>Norway</COUNTRY> <COUNTRY>Norway</COUNTRY>
<COMPANY>WEA</COMPANY> <COMPANY>WEA</COMPANY>
<PRICE>7.90</PRICE> <PRICE>7.90</PRICE>
<YEAR>1996</YEAR> <YEAR>1996</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>The very best of</TITLE> <TITLE>The very best of</TITLE>
<ARTIST>Cat Stevens</ARTIST> <ARTIST>Cat Stevens</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Island</COMPANY> <COMPANY>Island</COMPANY>
<PRICE>8.90</PRICE> <PRICE>8.90</PRICE>
<YEAR>1990</YEAR> <YEAR>1990</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Stop</TITLE> <TITLE>Stop</TITLE>
<ARTIST>Sam Brown</ARTIST> <ARTIST>Sam Brown</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>A and M</COMPANY> <COMPANY>A and M</COMPANY>
<PRICE>8.90</PRICE> <PRICE>8.90</PRICE>
<YEAR>1988</YEAR> <YEAR>1988</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Bridge of Spies</TITLE> <TITLE>Bridge of Spies</TITLE>
<ARTIST>T'Pau</ARTIST> <ARTIST>T'Pau</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Siren</COMPANY> <COMPANY>Siren</COMPANY>
<PRICE>7.90</PRICE> <PRICE>7.90</PRICE>
<YEAR>1987</YEAR> <YEAR>1987</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Private Dancer</TITLE> <TITLE>Private Dancer</TITLE>
<ARTIST>Tina Turner</ARTIST> <ARTIST>Tina Turner</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>Capitol</COMPANY> <COMPANY>Capitol</COMPANY>
<PRICE>8.90</PRICE> <PRICE>8.90</PRICE>
<YEAR>1983</YEAR> <YEAR>1983</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Midt om natten</TITLE> <TITLE>Midt om natten</TITLE>
<ARTIST>Kim Larsen</ARTIST> <ARTIST>Kim Larsen</ARTIST>
<COUNTRY>EU</COUNTRY> <COUNTRY>EU</COUNTRY>
<COMPANY>Medley</COMPANY> <COMPANY>Medley</COMPANY>
<PRICE>7.80</PRICE> <PRICE>7.80</PRICE>
<YEAR>1983</YEAR> <YEAR>1983</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Pavarotti Gala Concert</TITLE> <TITLE>Pavarotti Gala Concert</TITLE>
<ARTIST>Luciano Pavarotti</ARTIST> <ARTIST>Luciano Pavarotti</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>DECCA</COMPANY> <COMPANY>DECCA</COMPANY>
<PRICE>9.90</PRICE> <PRICE>9.90</PRICE>
<YEAR>1991</YEAR> <YEAR>1991</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>The dock of the bay</TITLE> <TITLE>The dock of the bay</TITLE>
<ARTIST>Otis Redding</ARTIST> <ARTIST>Otis Redding</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>Stax Records</COMPANY> <COMPANY>Stax Records</COMPANY>
<PRICE>7.90</PRICE> <PRICE>7.90</PRICE>
<YEAR>1968</YEAR> <YEAR>1968</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Picture book</TITLE> <TITLE>Picture book</TITLE>
<ARTIST>Simply Red</ARTIST> <ARTIST>Simply Red</ARTIST>
<COUNTRY>EU</COUNTRY> <COUNTRY>EU</COUNTRY>
<COMPANY>Elektra</COMPANY> <COMPANY>Elektra</COMPANY>
<PRICE>7.20</PRICE> <PRICE>7.20</PRICE>
<YEAR>1985</YEAR> <YEAR>1985</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Red</TITLE> <TITLE>Red</TITLE>
<ARTIST>The Communards</ARTIST> <ARTIST>The Communards</ARTIST>
<COUNTRY>UK</COUNTRY> <COUNTRY>UK</COUNTRY>
<COMPANY>London</COMPANY> <COMPANY>London</COMPANY>
<PRICE>7.80</PRICE> <PRICE>7.80</PRICE>
<YEAR>1987</YEAR> <YEAR>1987</YEAR>
</CD> </CD>
<CD> <CD>
<TITLE>Unchain my heart</TITLE> <TITLE>Unchain my heart</TITLE>
<ARTIST>Joe Cocker</ARTIST> <ARTIST>Joe Cocker</ARTIST>
<COUNTRY>USA</COUNTRY> <COUNTRY>USA</COUNTRY>
<COMPANY>EMI</COMPANY> <COMPANY>EMI</COMPANY>
<PRICE>8.20</PRICE> <PRICE>8.20</PRICE>
<YEAR>1987</YEAR> <YEAR>1987</YEAR>
</CD> </CD>
</CATALOG> </CATALOG>

View File

@ -1,29 +1,29 @@
# This is a TOML document. Boom. # This is a TOML document. Boom.
title = "TOML Example" title = "TOML Example"
[owner] [owner]
name = "Tom Preston-Werner" name = "Tom Preston-Werner"
organization = "GitHub" organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not? dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database] [database]
server = "192.168.1.1" server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ] ports = [ 8001, 8001, 8002 ]
connection_max = 5000 connection_max = 5000
enabled = true enabled = true
[servers] [servers]
# You can indent as you please. Tabs or spaces. TOML don't care. # You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha] [servers.alpha]
ip = "10.0.0.1" ip = "10.0.0.1"
dc = "eqdc10" dc = "eqdc10"
[servers.beta] [servers.beta]
ip = "10.0.0.2" ip = "10.0.0.2"
dc = "eqdc10" dc = "eqdc10"
[clients] [clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

View File

@ -1,21 +1,21 @@
sudo: false sudo: false
language: go language: go
go: go:
- 1.7.5 - 1.7.5
- 1.8 - 1.8
- tip - tip
os: os:
- linux - linux
- osx - osx
matrix: matrix:
allow_failures: allow_failures:
- go: tip - go: tip
fast_finish: true fast_finish: true
script: script:
- go build - go build
- go test -race -v ./... - go test -race -v ./...

10
vendor/golang.org/x/crypto/.gitattributes generated vendored Normal file
View File

@ -0,0 +1,10 @@
# Treat all files in this repo as binary, with no git magic updating
# line endings. Windows users contributing to Go will need to use a
# modern version of git and editors capable of LF line endings.
#
# We'll prevent accidental CRLF line endings from entering the repo
# via the git-review gofmt checks.
#
# See golang.org/issue/9281
* -text

2
vendor/golang.org/x/crypto/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
# Add no patterns to .hgignore except for files generated by the build.
last-change

Some files were not shown because too many files have changed in this diff Show More