commit 7533e6d476

Gopkg.lock | 25  (generated)
@@ -22,8 +22,16 @@
    "common/hexutil",
    "common/math",
    "common/mclock",
    "consensus",
    "consensus/misc",
    "core",
    "core/state",
    "core/types",
    "core/vm",
    "crypto",
    "crypto/bn256",
    "crypto/bn256/cloudflare",
    "crypto/bn256/google",
    "crypto/ecies",
    "crypto/secp256k1",
    "crypto/sha3",
@@ -69,6 +77,15 @@
  packages = ["."]
  revision = "553a641470496b2327abcac10b36396bd98e45c9"

[[projects]]
  branch = "master"
  name = "github.com/hashicorp/golang-lru"
  packages = [
    ".",
    "simplelru"
  ]
  revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"

[[projects]]
  branch = "master"
  name = "github.com/hashicorp/hcl"
@@ -262,6 +279,12 @@
  ]
  revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ripemd160"]
  revision = "613d6eafa307c6881a737a3c35c0e312e8d3a8c5"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
@@ -340,6 +363,6 @@
[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "4dcddfb7fa2db8ba7e3d8e70ec8ba23a7e9b6a1d99c62933ed63624412967aeb"
  inputs-digest = "8e609f758d10041b1746db11078eca9cad29a7dbf9939d14e49d70ee172dfd2e"
  solver-name = "gps-cdcl"
  solver-version = 1

README.md | 22

@@ -25,7 +25,8 @@

## Configuration
- To use a local Ethereum node, copy `environments/public.toml.example` to
  `environments/public.toml` and update the `ipcPath` to the local node's IPC filepath:
  `environments/public.toml` and update the `ipcPath` and `levelDbPath`.
  - `ipcPath` should match the local node's IPC filepath:
    - when using geth:
      - The IPC file is called `geth.ipc`.
      - The geth IPC file path is printed to the console when you start geth.
@@ -39,13 +40,30 @@
      - Mac: `$HOME/Library/Application\ Support/io.parity.ethereum/`
      - Linux: `$HOME/.local/share/io.parity.ethereum/`

  - `levelDbPath` should match Geth's chaindata directory path.
    - The geth LevelDB chaindata path is printed to the console when you start geth.
    - The default location is:
      - Mac: `$HOME/Library/Ethereum/geth/chaindata`
      - Linux: `$HOME/.ethereum/geth/chaindata`
  - `levelDbPath` is irrelevant (and `coldImport` is currently unavailable) if only running parity.

- See `environments/infura.toml` to configure commands to run against infura, if a local node is unavailable
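For reference, a filled-in `environments/public.toml` following the settings above might look like the sketch below. Every concrete value here (database name, paths) is illustrative and must be adjusted to the local machine:

```toml
[database]
name     = "vulcanize_public"   # illustrative database name
hostname = "localhost"
port     = 5432

[client]
ipcPath     = "/Users/alice/Library/Ethereum/geth.ipc"         # illustrative geth IPC path (Mac default)
levelDbPath = "/Users/alice/Library/Ethereum/geth/chaindata"   # illustrative chaindata path (Mac default)
```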
## Start syncing with postgres
Syncs VulcanizeDB with the configured Ethereum node.
1. Start node (**if fast syncing wait for initial sync to finish**)
1. In a separate terminal start vulcanize_db
    - `vulcanizedb sync --config <config.toml> --starting-block-number <block-number>`
    - `./vulcanizedb sync --config <config.toml> --starting-block-number <block-number>`

## Alternatively, sync from Geth's underlying LevelDB
Sync VulcanizeDB from the LevelDB underlying a Geth node.
1. Ensure the node is not running and that it has synced to the desired block height.
1. Start vulcanize_db
    - `./vulcanizedb coldImport --config <config.toml> --starting-block-number <block-number> --ending-block-number <block-number>`
1. Optional flags:
    - `--starting-block-number`/`-s`: block number to start syncing from
    - `--ending-block-number`/`-e`: block number to sync to
    - `--all`/`-a`: sync all missing blocks

## Running the Tests


cmd/coldImport.go | 94  (new file)

@@ -0,0 +1,94 @@
// Copyright © 2018 Rob Mulholand <rmulholand@8thlight.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"log"

	"github.com/spf13/cobra"

	"github.com/vulcanize/vulcanizedb/pkg/crypto"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
	"github.com/vulcanize/vulcanizedb/pkg/fs"
	"github.com/vulcanize/vulcanizedb/pkg/geth/cold_import"
	"github.com/vulcanize/vulcanizedb/pkg/geth/converters/cold_db"
	vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
	"github.com/vulcanize/vulcanizedb/utils"
)

var coldImportCmd = &cobra.Command{
	Use:   "coldImport",
	Short: "Sync vulcanize from a cold instance of LevelDB.",
	Long: `Populate core vulcanize db data directly out of LevelDB, rather than over rpc calls. For example:

./vulcanizedb coldImport -s 0 -e 5000000

Geth must be synced over all of the desired blocks and must not be running in order to execute this command.`,
	Run: func(cmd *cobra.Command, args []string) {
		coldImport()
	},
}

func init() {
	rootCmd.AddCommand(coldImportCmd)
	coldImportCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "Number for first block to cold import.")
	coldImportCmd.Flags().Int64VarP(&endingBlockNumber, "ending-block-number", "e", 5500000, "Number for last block to cold import.")
	coldImportCmd.Flags().BoolVarP(&syncAll, "all", "a", false, "Option to sync all missing blocks.")
}

func coldImport() {
	// init eth db
	ethDBConfig := ethereum.CreateDatabaseConfig(ethereum.Level, levelDbPath)
	ethDB, err := ethereum.CreateDatabase(ethDBConfig)
	if err != nil {
		log.Fatal("Error connecting to ethereum db: ", err)
	}
	mostRecentBlockNumberInDb := ethDB.GetHeadBlockNumber()
	if syncAll {
		startingBlockNumber = 0
		endingBlockNumber = mostRecentBlockNumberInDb
	}
	if endingBlockNumber < startingBlockNumber {
		log.Fatal("Ending block number must be greater than starting block number for cold import.")
	}
	if endingBlockNumber > mostRecentBlockNumberInDb {
		log.Fatal("Ending block number is greater than most recent block in db: ", mostRecentBlockNumberInDb)
	}

	// init pg db
	genesisBlock := ethDB.GetBlockHash(0)
	reader := fs.FsReader{}
	parser := crypto.EthPublicKeyParser{}
	nodeBuilder := cold_import.NewColdImportNodeBuilder(reader, parser)
	coldNode, err := nodeBuilder.GetNode(genesisBlock, levelDbPath)
	if err != nil {
		log.Fatal("Error getting node: ", err)
	}
	pgDB := utils.LoadPostgres(databaseConfig, coldNode)

	// init cold importer deps
	blockRepository := repositories.NewBlockRepository(&pgDB)
	receiptRepository := repositories.ReceiptRepository{DB: &pgDB}
	transactionconverter := cold_db.NewColdDbTransactionConverter()
	blockConverter := vulcCommon.NewBlockConverter(transactionconverter)

	// init and execute cold importer
	coldImporter := cold_import.NewColdImporter(ethDB, blockRepository, receiptRepository, blockConverter)
	err = coldImporter.Execute(startingBlockNumber, endingBlockNumber, coldNode.ID)
	if err != nil {
		log.Fatal("Error executing cold import: ", err)
	}
}

cmd/root.go

@@ -14,6 +14,10 @@ var (
	cfgFile string
	databaseConfig config.Database
	ipc string
	levelDbPath string
	startingBlockNumber int64
	syncAll bool
	endingBlockNumber int64
)

var rootCmd = &cobra.Command{
@@ -30,6 +34,7 @@ func Execute() {

func database(cmd *cobra.Command, args []string) {
	ipc = viper.GetString("client.ipcpath")
	levelDbPath = viper.GetString("client.leveldbpath")
	databaseConfig = config.Database{
		Name:     viper.GetString("database.name"),
		Hostname: viper.GetString("database.hostname"),
@@ -46,12 +51,13 @@ func init() {
	rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
	rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
	rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file")
	rootCmd.PersistentFlags().String("client-levelDbPath", "", "location of levelDb chaindata")

	viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
	viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
	viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
	viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath"))

	viper.BindPFlag("client.levelDbPath", rootCmd.PersistentFlags().Lookup("client-levelDbPath"))
}

func initConfig() {

cmd/sync.go | 13

@@ -42,12 +42,10 @@ const (
	pollingInterval = 7 * time.Second
)

var startingBlockNumber int

func init() {
	rootCmd.AddCommand(syncCmd)

	syncCmd.Flags().IntVarP(&startingBlockNumber, "starting-block-number", "s", 0, "Block number to start syncing from")
	syncCmd.Flags().Int64VarP(&startingBlockNumber, "starting-block-number", "s", 0, "Block number to start syncing from")
}

func backFillAllBlocks(blockchain core.Blockchain, blockRepository datastore.BlockRepository, missingBlocksPopulated chan int, startingBlockNumber int64) {
@@ -65,16 +63,15 @@ func sync() {
	if lastBlock == 0 {
		log.Fatal("geth initial: state sync not finished")
	}
	_startingBlockNumber := int64(startingBlockNumber)
	if _startingBlockNumber > lastBlock {
	if startingBlockNumber > lastBlock {
		log.Fatal("starting block number > current block number")
	}

	db := utils.LoadPostgres(databaseConfig, blockchain.Node())
	blockRepository := repositories.BlockRepository{DB: &db}
	blockRepository := repositories.NewBlockRepository(&db)
	validator := history.NewBlockValidator(blockchain, blockRepository, 15)
	missingBlocksPopulated := make(chan int)
	go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, _startingBlockNumber)
	go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, startingBlockNumber)

	for {
		select {
@@ -82,7 +79,7 @@ func sync() {
			window := validator.ValidateBlocks()
			validator.Log(os.Stdout, window)
		case <-missingBlocksPopulated:
			go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, _startingBlockNumber)
			go backFillAllBlocks(blockchain, blockRepository, missingBlocksPopulated, startingBlockNumber)
		}
	}
}

@@ -0,0 +1,23 @@
BEGIN;

ALTER TABLE receipts
  ADD COLUMN transaction_id INT;

UPDATE receipts
  SET transaction_id = (
    SELECT id FROM transactions WHERE transactions.hash = receipts.tx_hash
  );

ALTER TABLE receipts
  ALTER COLUMN transaction_id SET NOT NULL;

ALTER TABLE receipts
  ADD CONSTRAINT transaction_fk
  FOREIGN KEY (transaction_id)
  REFERENCES transactions (id)
  ON DELETE CASCADE;

ALTER TABLE receipts
  DROP COLUMN block_id;

COMMIT;

@@ -0,0 +1,23 @@
BEGIN;

ALTER TABLE receipts
  ADD COLUMN block_id INT;

UPDATE receipts
  SET block_id = (
    SELECT block_id FROM transactions WHERE transactions.id = receipts.transaction_id
  );

ALTER TABLE receipts
  ALTER COLUMN block_id SET NOT NULL;

ALTER TABLE receipts
  ADD CONSTRAINT blocks_fk
  FOREIGN KEY (block_id)
  REFERENCES blocks (id)
  ON DELETE CASCADE;

ALTER TABLE receipts
  DROP COLUMN transaction_id;

COMMIT;

@@ -0,0 +1,2 @@
ALTER TABLE blocks
  DROP COLUMN eth_node_fingerprint;

@@ -0,0 +1,14 @@
BEGIN;

ALTER TABLE blocks
  ADD COLUMN eth_node_fingerprint VARCHAR(128);

UPDATE blocks
  SET eth_node_fingerprint = (
    SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = blocks.eth_node_id
  );

ALTER TABLE blocks
  ALTER COLUMN eth_node_fingerprint SET NOT NULL;

COMMIT;

@@ -83,7 +83,8 @@ CREATE TABLE public.blocks (
    miner character varying(42),
    extra_data character varying,
    reward double precision,
    uncles_reward double precision
    uncles_reward double precision,
    eth_node_fingerprint character varying(128) NOT NULL
);


@@ -206,13 +207,13 @@ ALTER SEQUENCE public.nodes_id_seq OWNED BY public.eth_nodes.id;

CREATE TABLE public.receipts (
    id integer NOT NULL,
    transaction_id integer NOT NULL,
    contract_address character varying(42),
    cumulative_gas_used numeric,
    gas_used numeric,
    state_root character varying(66),
    status integer,
    tx_hash character varying(66)
    tx_hash character varying(66),
    block_id integer NOT NULL
);


@@ -496,13 +497,6 @@ CREATE INDEX block_number_index ON public.blocks USING btree (number);
CREATE INDEX node_id_index ON public.blocks USING btree (eth_node_id);


--
-- Name: transaction_id_index; Type: INDEX; Schema: public; Owner: -
--

CREATE INDEX transaction_id_index ON public.receipts USING btree (transaction_id);


--
-- Name: tx_from_index; Type: INDEX; Schema: public; Owner: -
--
@@ -525,6 +519,14 @@ ALTER TABLE ONLY public.transactions
    ADD CONSTRAINT blocks_fk FOREIGN KEY (block_id) REFERENCES public.blocks(id) ON DELETE CASCADE;


--
-- Name: receipts blocks_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--

ALTER TABLE ONLY public.receipts
    ADD CONSTRAINT blocks_fk FOREIGN KEY (block_id) REFERENCES public.blocks(id) ON DELETE CASCADE;


--
-- Name: blocks node_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--
@@ -541,14 +543,6 @@ ALTER TABLE ONLY public.logs
    ADD CONSTRAINT receipts_fk FOREIGN KEY (receipt_id) REFERENCES public.receipts(id) ON DELETE CASCADE;


--
-- Name: receipts transaction_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--

ALTER TABLE ONLY public.receipts
    ADD CONSTRAINT transaction_fk FOREIGN KEY (transaction_id) REFERENCES public.transactions(id) ON DELETE CASCADE;


--
-- PostgreSQL database dump complete
--

environments/public.toml.example

@@ -5,3 +5,4 @@ port = 5432

[client]
ipcPath = <local node's IPC filepath>
levelDbPath = <local node's LevelDB chaindata filepath>

pkg/crypto/crypto_suite_test.go | 13  (new file)

@@ -0,0 +1,13 @@
package crypto_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestCrypto(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Crypto Suite")
}

pkg/crypto/parser.go | 21  (new file)

@@ -0,0 +1,21 @@
package crypto

import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

type PublicKeyParser interface {
	ParsePublicKey(privateKey string) (string, error)
}

type EthPublicKeyParser struct{}

func (EthPublicKeyParser) ParsePublicKey(privateKey string) (string, error) {
	np, err := crypto.HexToECDSA(privateKey)
	if err != nil {
		return "", err
	}
	pubKey := discover.PubkeyID(&np.PublicKey)
	return pubKey.String(), nil
}

pkg/crypto/parser_test.go | 19  (new file)

@@ -0,0 +1,19 @@
package crypto_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/pkg/crypto"
)

var _ = Describe("Public key parser", func() {
	It("parses public key from private key", func() {
		privKey := "0000000000000000000000000000000000000000000000000000000000000001"
		parser := crypto.EthPublicKeyParser{}

		pubKey, err := parser.ParsePublicKey(privKey)

		Expect(err).NotTo(HaveOccurred())
		Expect(pubKey).To(Equal("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"))
	})
})

pkg/datastore/ethereum/config.go | 19  (new file)

@@ -0,0 +1,19 @@
package ethereum

type DatabaseType int

const (
	Level DatabaseType = iota
)

type DatabaseConfig struct {
	Type DatabaseType
	Path string
}

func CreateDatabaseConfig(dbType DatabaseType, path string) DatabaseConfig {
	return DatabaseConfig{
		Type: dbType,
		Path: path,
	}
}

pkg/datastore/ethereum/database.go | 32  (new file)

@@ -0,0 +1,32 @@
package ethereum

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum/level"
)

type Database interface {
	GetBlock(hash []byte, blockNumber int64) *types.Block
	GetBlockHash(blockNumber int64) []byte
	GetBlockReceipts(blockHash []byte, blockNumber int64) types.Receipts
	GetHeadBlockNumber() int64
}

func CreateDatabase(config DatabaseConfig) (Database, error) {
	switch config.Type {
	case Level:
		levelDBConnection, err := ethdb.NewLDBDatabase(config.Path, 128, 1024)
		if err != nil {
			return nil, err
		}
		levelDBReader := level.NewLevelDatabaseReader(levelDBConnection)
		levelDB := level.NewLevelDatabase(levelDBReader)
		return levelDB, nil
	default:
		return nil, fmt.Errorf("Unknown ethereum database: %s", config.Path)
	}
}
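Taken together, `config.go` and `database.go` form a small factory for read-only access to Geth's LevelDB. A minimal standalone sketch of the intended usage, mirroring the calls made in `cmd/coldImport.go` (the chaindata path is illustrative, and geth must not be running while the LevelDB is open):

```go
package main

import (
	"fmt"
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum"
)

func main() {
	// Illustrative path; point this at the node's actual chaindata directory.
	cfg := ethereum.CreateDatabaseConfig(ethereum.Level, "/home/alice/.ethereum/geth/chaindata")
	ethDB, err := ethereum.CreateDatabase(cfg)
	if err != nil {
		log.Fatal("Error connecting to ethereum db: ", err)
	}

	// Read the head block number, resolve its canonical hash, and load the block.
	head := ethDB.GetHeadBlockNumber()
	hash := ethDB.GetBlockHash(head)
	fmt.Printf("head block: %d (hash %x)\n", head, hash)

	if block := ethDB.GetBlock(hash, head); block != nil {
		fmt.Printf("head block carries %d transactions\n", len(block.Transactions()))
	}
}
```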

pkg/datastore/ethereum/level/database.go | 40  (new file)

@@ -0,0 +1,40 @@
package level

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

type LevelDatabase struct {
	reader Reader
}

func NewLevelDatabase(ldbReader Reader) *LevelDatabase {
	return &LevelDatabase{
		reader: ldbReader,
	}
}

func (l LevelDatabase) GetBlock(blockHash []byte, blockNumber int64) *types.Block {
	n := uint64(blockNumber)
	h := common.BytesToHash(blockHash)
	return l.reader.GetBlock(h, n)
}

func (l LevelDatabase) GetBlockHash(blockNumber int64) []byte {
	n := uint64(blockNumber)
	h := l.reader.GetCanonicalHash(n)
	return h.Bytes()
}

func (l LevelDatabase) GetBlockReceipts(blockHash []byte, blockNumber int64) types.Receipts {
	n := uint64(blockNumber)
	h := common.BytesToHash(blockHash)
	return l.reader.GetBlockReceipts(h, n)
}

func (l LevelDatabase) GetHeadBlockNumber() int64 {
	h := l.reader.GetHeadBlockHash()
	n := l.reader.GetBlockNumber(h)
	return int64(n)
}

pkg/datastore/ethereum/level/database_reader.go | 43  (new file)

@@ -0,0 +1,43 @@
package level

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

type Reader interface {
	GetBlock(hash common.Hash, number uint64) *types.Block
	GetBlockNumber(hash common.Hash) uint64
	GetBlockReceipts(hash common.Hash, number uint64) types.Receipts
	GetCanonicalHash(number uint64) common.Hash
	GetHeadBlockHash() common.Hash
}

type LevelDatabaseReader struct {
	reader core.DatabaseReader
}

func NewLevelDatabaseReader(reader core.DatabaseReader) *LevelDatabaseReader {
	return &LevelDatabaseReader{reader: reader}
}

func (ldbr *LevelDatabaseReader) GetBlock(hash common.Hash, number uint64) *types.Block {
	return core.GetBlock(ldbr.reader, hash, number)
}

func (ldbr *LevelDatabaseReader) GetBlockNumber(hash common.Hash) uint64 {
	return core.GetBlockNumber(ldbr.reader, hash)
}

func (ldbr *LevelDatabaseReader) GetBlockReceipts(hash common.Hash, number uint64) types.Receipts {
	return core.GetBlockReceipts(ldbr.reader, hash, number)
}

func (ldbr *LevelDatabaseReader) GetCanonicalHash(number uint64) common.Hash {
	return core.GetCanonicalHash(ldbr.reader, number)
}

func (ldbr *LevelDatabaseReader) GetHeadBlockHash() common.Hash {
	return core.GetHeadBlockHash(ldbr.reader)
}

pkg/datastore/ethereum/level/database_test.go | 71  (new file)

@@ -0,0 +1,71 @@
package level_test

import (
	"github.com/ethereum/go-ethereum/common"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum/level"
	"github.com/vulcanize/vulcanizedb/pkg/fakes"
)

var _ = Describe("Level database", func() {
	Describe("Getting a block", func() {
		It("converts block number to uint64 and hash to common.Hash to fetch block from reader", func() {
			mockReader := fakes.NewMockLevelDatabaseReader()
			ldb := level.NewLevelDatabase(mockReader)
			blockHash := []byte{5, 4, 3, 2, 1}
			blockNumber := int64(12345)

			ldb.GetBlock(blockHash, blockNumber)

			expectedBlockHash := common.BytesToHash(blockHash)
			expectedBlockNumber := uint64(blockNumber)
			mockReader.AssertGetBlockCalledWith(expectedBlockHash, expectedBlockNumber)
		})
	})

	Describe("Getting a block hash", func() {
		It("converts block number to uint64 to fetch hash from reader", func() {
			mockReader := fakes.NewMockLevelDatabaseReader()
			ldb := level.NewLevelDatabase(mockReader)
			blockNumber := int64(12345)

			ldb.GetBlockHash(blockNumber)

			expectedBlockNumber := uint64(blockNumber)
			mockReader.AssertGetCanonicalHashCalledWith(expectedBlockNumber)
		})
	})

	Describe("Getting a block's receipts", func() {
		It("converts block number to uint64 and hash to common.Hash to fetch receipts from reader", func() {
			mockReader := fakes.NewMockLevelDatabaseReader()
			ldb := level.NewLevelDatabase(mockReader)
			blockHash := []byte{5, 4, 3, 2, 1}
			blockNumber := int64(12345)

			ldb.GetBlockReceipts(blockHash, blockNumber)

			expectedBlockHash := common.BytesToHash(blockHash)
			expectedBlockNumber := uint64(blockNumber)
			mockReader.AssertGetBlockReceiptsCalledWith(expectedBlockHash, expectedBlockNumber)
		})
	})

	Describe("Getting the latest block number", func() {
		It("invokes the database reader to get the latest block number by hash and converts result to int64", func() {
			mockReader := fakes.NewMockLevelDatabaseReader()
			fakeHash := common.BytesToHash([]byte{1, 2, 3, 4, 5})
			mockReader.SetHeadBlockHashReturnHash(fakeHash)
			fakeBlockNumber := uint64(123456789)
			mockReader.SetReturnBlockNumber(fakeBlockNumber)
			ldb := level.NewLevelDatabase(mockReader)

			result := ldb.GetHeadBlockNumber()

			mockReader.AssertGetHeadBlockHashCalled()
			mockReader.AssertGetBlockNumberCalledWith(fakeHash)
			Expect(result).To(Equal(int64(fakeBlockNumber)))
		})
	})
})

pkg/datastore/ethereum/level/level_suite_test.go | 13  (new file)

@@ -0,0 +1,13 @@
package level_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestLevel(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Level Suite")
}

@@ -9,14 +9,14 @@ type BlockRepository struct {
	*InMemory
}

func (blockRepository *BlockRepository) CreateOrUpdateBlock(block core.Block) error {
func (blockRepository *BlockRepository) CreateOrUpdateBlock(block core.Block) (int64, error) {
	blockRepository.CreateOrUpdateBlockCallCount++
	blockRepository.blocks[block.Number] = block
	for _, transaction := range block.Transactions {
		blockRepository.receipts[transaction.Hash] = transaction.Receipt
		blockRepository.logs[transaction.TxHash] = transaction.Logs
	}
	return nil
	return 0, nil
}

func (blockRepository *BlockRepository) GetBlock(blockNumber int64) (core.Block, error) {
@@ -26,7 +26,7 @@ func (blockRepository *BlockRepository) GetBlock(blockNumber int64) (core.Block,
	return core.Block{}, datastore.ErrBlockDoesNotExist(blockNumber)
}

func (blockRepository *BlockRepository) MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64) []int64 {
func (blockRepository *BlockRepository) MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64, nodeId string) []int64 {
	missingNumbers := []int64{}
	for blockNumber := int64(startingBlockNumber); blockNumber <= endingBlockNumber; blockNumber++ {
		if _, ok := blockRepository.blocks[blockNumber]; !ok {

@@ -23,7 +23,9 @@ var _ = Describe("Postgres DB", func() {
	It("connects to the database", func() {
		var err error
		pgConfig := config.DbConnectionString(test_config.DBConfig)

		sqlxdb, err = sqlx.Connect("postgres", pgConfig)

		Expect(err).Should(BeNil())
		Expect(sqlxdb).ShouldNot(BeNil())
	})
@@ -74,12 +76,12 @@ var _ = Describe("Postgres DB", func() {
		}
		node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"}
		db := test_config.NewTestDB(node)
		blocksRepository := repositories.BlockRepository{DB: db}
		blocksRepository := repositories.NewBlockRepository(db)

		err1 := blocksRepository.CreateOrUpdateBlock(badBlock)
		savedBlock, err2 := blocksRepository.GetBlock(123)
		_, err1 := blocksRepository.CreateOrUpdateBlock(badBlock)

		Expect(err1).To(HaveOccurred())
		savedBlock, err2 := blocksRepository.GetBlock(123)
		Expect(err2).To(HaveOccurred())
		Expect(savedBlock).To(BeZero())
	})
@@ -87,7 +89,9 @@ var _ = Describe("Postgres DB", func() {
	It("throws error when can't connect to the database", func() {
		invalidDatabase := config.Database{}
		node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"}

		_, err := postgres.NewDB(invalidDatabase, node)

		Expect(err).To(Equal(postgres.ErrDBConnectionFailed))
	})

@@ -110,10 +114,10 @@ var _ = Describe("Postgres DB", func() {
		db, _ := postgres.NewDB(test_config.DBConfig, node)
		logRepository := repositories.LogRepository{DB: db}

		err := logRepository.CreateLogs([]core.Log{badLog})
		savedBlock := logRepository.GetLogs("x123", 1)
		err := logRepository.CreateLogs([]core.Log{badLog}, 123)

		Expect(err).ToNot(BeNil())
		savedBlock := logRepository.GetLogs("x123", 1)
		Expect(savedBlock).To(BeNil())
	})

@@ -127,14 +131,13 @@ var _ = Describe("Postgres DB", func() {
		}
		node := core.Node{GenesisBlock: "GENESIS", NetworkID: 1, ID: "x123", ClientName: "geth"}
		db, _ := postgres.NewDB(test_config.DBConfig, node)
		blockRepository := repositories.BlockRepository{DB: db}
		blockRepository := repositories.NewBlockRepository(db)

		err1 := blockRepository.CreateOrUpdateBlock(block)
		savedBlock, err2 := blockRepository.GetBlock(123)
		_, err1 := blockRepository.CreateOrUpdateBlock(block)

		Expect(err1).To(HaveOccurred())
		savedBlock, err2 := blockRepository.GetBlock(123)
		Expect(err2).To(HaveOccurred())
		Expect(savedBlock).To(BeZero())
	})

})

@@ -3,10 +3,11 @@ package repositories
import (
	"context"
	"database/sql"

	"errors"
	"log"

	"github.com/jmoiron/sqlx"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
@@ -16,52 +17,57 @@ const (
	blocksFromHeadBeforeFinal = 20
)

var ErrBlockExists = errors.New("Won't add block that already exists.")

type BlockRepository struct {
	*postgres.DB
	database *postgres.DB
}

func NewBlockRepository(database *postgres.DB) *BlockRepository {
	return &BlockRepository{database: database}
}

func (blockRepository BlockRepository) SetBlocksStatus(chainHead int64) {
	cutoff := chainHead - blocksFromHeadBeforeFinal
	blockRepository.DB.Exec(`
	blockRepository.database.Exec(`
		UPDATE blocks SET is_final = TRUE
		WHERE is_final = FALSE AND number < $1`,
		cutoff)
}

func (blockRepository BlockRepository) CreateOrUpdateBlock(block core.Block) error {
func (blockRepository BlockRepository) CreateOrUpdateBlock(block core.Block) (int64, error) {
	var err error
	var blockId int64
	retrievedBlockHash, ok := blockRepository.getBlockHash(block)
	if !ok {
		err = blockRepository.insertBlock(block)
		return err
		return blockRepository.insertBlock(block)
	}
	if ok && retrievedBlockHash != block.Hash {
		err = blockRepository.removeBlock(block.Number)
		if err != nil {
			return err
			return 0, err
		}
		err = blockRepository.insertBlock(block)
		return err
		return blockRepository.insertBlock(block)
	}
	return nil
	return blockId, ErrBlockExists
}

func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber int64, highestBlockNumber int64) []int64 {
func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber int64, highestBlockNumber int64, nodeId string) []int64 {
	numbers := make([]int64, 0)
	blockRepository.DB.Select(&numbers,
	blockRepository.database.Select(&numbers,
		`SELECT all_block_numbers
		FROM (
			SELECT generate_series($1::INT, $2::INT) AS all_block_numbers) series
		LEFT JOIN blocks
			ON number = all_block_numbers
		WHERE number ISNULL`,
		WHERE number ISNULL OR eth_node_fingerprint != $3`,
		startingBlockNumber,
		highestBlockNumber)
		highestBlockNumber, nodeId)
	return numbers
}

func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block, error) {
	blockRows := blockRepository.DB.QueryRowx(
	blockRows := blockRepository.database.QueryRowx(
		`SELECT id,
			number,
			gaslimit,
@@ -79,7 +85,7 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
			reward,
			uncles_reward
		FROM blocks
		WHERE eth_node_id = $1 AND number = $2`, blockRepository.NodeID, blockNumber)
		WHERE eth_node_id = $1 AND number = $2`, blockRepository.database.NodeID, blockNumber)
	savedBlock, err := blockRepository.loadBlock(blockRows)
	if err != nil {
		switch err {
@@ -92,27 +98,29 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
	return savedBlock, nil
}

func (blockRepository BlockRepository) insertBlock(block core.Block) error {
func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, error) {
	var blockId int64
	tx, _ := blockRepository.DB.BeginTx(context.Background(), nil)
	tx, _ := blockRepository.database.BeginTx(context.Background(), nil)
	err := tx.QueryRow(
		`INSERT INTO blocks
			(eth_node_id, number, gaslimit, gasused, time, difficulty, hash, nonce, parenthash, size, uncle_hash, is_final, miner, extra_data, reward, uncles_reward)
			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
			(eth_node_id, number, gaslimit, gasused, time, difficulty, hash, nonce, parenthash, size, uncle_hash, is_final, miner, extra_data, reward, uncles_reward, eth_node_fingerprint)
			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
			RETURNING id `,
		blockRepository.NodeID, block.Number, block.GasLimit, block.GasUsed, block.Time, block.Difficulty, block.Hash, block.Nonce, block.ParentHash, block.Size, block.UncleHash, block.IsFinal, block.Miner, block.ExtraData, block.Reward, block.UnclesReward).
		blockRepository.database.NodeID, block.Number, block.GasLimit, block.GasUsed, block.Time, block.Difficulty, block.Hash, block.Nonce, block.ParentHash, block.Size, block.UncleHash, block.IsFinal, block.Miner, block.ExtraData, block.Reward, block.UnclesReward, blockRepository.database.Node.ID).
		Scan(&blockId)
	if err != nil {
		tx.Rollback()
		return postgres.ErrDBInsertFailed
		return 0, err
	}
	if len(block.Transactions) > 0 {
		err = blockRepository.createTransactions(tx, blockId, block.Transactions)
		if err != nil {
			tx.Rollback()
			return postgres.ErrDBInsertFailed
			return 0, postgres.ErrDBInsertFailed
		}
	}
	tx.Commit()
	return nil
	return blockId, nil
}

func (blockRepository BlockRepository) createTransactions(tx *sql.Tx, blockId int64, transactions []core.Transaction) error {
@@ -136,19 +144,17 @@ func nullStringToZero(s string) string {
}

func (blockRepository BlockRepository) createTransaction(tx *sql.Tx, blockId int64, transaction core.Transaction) error {
	var transactionId int
	err := tx.QueryRow(
	_, err := tx.Exec(
		`INSERT INTO transactions
		(block_id, hash, nonce, tx_to, tx_from, gaslimit, gasprice, value, input_data)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8::NUMERIC, $9)
		RETURNING id`,
		blockId, transaction.Hash, transaction.Nonce, transaction.To, transaction.From, transaction.GasLimit, transaction.GasPrice, nullStringToZero(transaction.Value), transaction.Data).
		Scan(&transactionId)
		blockId, transaction.Hash, transaction.Nonce, transaction.To, transaction.From, transaction.GasLimit, transaction.GasPrice, nullStringToZero(transaction.Value), transaction.Data)
	if err != nil {
		return err
	}
	if hasReceipt(transaction) {
		receiptId, err := blockRepository.createReceipt(tx, transactionId, transaction.Receipt)
		receiptId, err := blockRepository.createReceipt(tx, blockId, transaction.Receipt)
		if err != nil {
			return err
		}
@@ -170,15 +176,15 @@ func hasReceipt(transaction core.Transaction) bool {
	return transaction.Receipt.TxHash != ""
}

func (blockRepository BlockRepository) createReceipt(tx *sql.Tx, transactionId int, receipt core.Receipt) (int, error) {
func (blockRepository BlockRepository) createReceipt(tx *sql.Tx, blockId int64, receipt core.Receipt) (int, error) {
	//Not currently persisting log bloom filters
	var receiptId int
	err := tx.QueryRow(
		`INSERT INTO receipts
		(contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, transaction_id)
		(contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		RETURNING id`,
		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, transactionId).Scan(&receiptId)
		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId).Scan(&receiptId)
	if err != nil {
		return receiptId, err
	}
@@ -187,11 +193,11 @@ func (blockRepository BlockRepository) createReceipt(tx *sql.Tx, transactionId i

func (blockRepository BlockRepository) getBlockHash(block core.Block) (string, bool) {
	var retrievedBlockHash string
	blockRepository.DB.Get(&retrievedBlockHash,
	blockRepository.database.Get(&retrievedBlockHash,
		`SELECT hash
		FROM blocks
		WHERE number = $1 AND eth_node_id = $2`,
		block.Number, blockRepository.NodeID)
		block.Number, blockRepository.database.NodeID)
	return retrievedBlockHash, blockExists(retrievedBlockHash)
}

@@ -215,11 +221,11 @@ func blockExists(retrievedBlockHash string) bool {
}

func (blockRepository BlockRepository) removeBlock(blockNumber int64) error {
	_, err := blockRepository.DB.Exec(
	_, err := blockRepository.database.Exec(
		`DELETE FROM
		blocks
		WHERE number=$1 AND eth_node_id=$2`,
		blockNumber, blockRepository.NodeID)
		blockNumber, blockRepository.database.NodeID)
	if err != nil {
		return postgres.ErrDBDeleteFailed
	}
@@ -236,7 +242,7 @@ func (blockRepository BlockRepository) loadBlock(blockRows *sqlx.Row) (core.Bloc
	if err != nil {
		return core.Block{}, err
	}
	transactionRows, err := blockRepository.DB.Queryx(`
	transactionRows, err := blockRepository.database.Queryx(`
		SELECT hash,
			nonce,
			tx_to,
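For callers, the repository changes above boil down to two things: `CreateOrUpdateBlock` now returns the persisted block's id plus a sentinel `ErrBlockExists` for duplicates, and `MissingBlockNumbers` is scoped to a node fingerprint. A sketch of the caller-side handling, based on the tests in this commit (the `persistBlock` helper and its wiring are illustrative, not part of the commit):

```go
package example

import (
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
)

// persistBlock sketches how callers handle the new (int64, error) return values.
func persistBlock(db *postgres.DB, block core.Block, node core.Node) {
	blockRepository := repositories.NewBlockRepository(db)
	receiptRepository := repositories.ReceiptRepository{DB: db}

	blockId, err := blockRepository.CreateOrUpdateBlock(block)
	if err == repositories.ErrBlockExists {
		// A block with this hash from this node is already stored; nothing to do.
		return
	}
	if err != nil {
		log.Fatal("Error persisting block: ", err)
	}

	// The returned id now anchors receipts (and, through them, logs).
	for _, transaction := range block.Transactions {
		if _, receiptErr := receiptRepository.CreateReceipt(blockId, transaction.Receipt); receiptErr != nil {
			log.Fatal("Error persisting receipt: ", receiptErr)
		}
	}

	// Missing-block lookups are now scoped to the node's fingerprint.
	_ = blockRepository.MissingBlockNumbers(0, block.Number, node.ID)
}
```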

@@ -15,16 +15,18 @@ import (

var _ = Describe("Saving blocks", func() {
	var db *postgres.DB
	var node core.Node
	var blockRepository datastore.BlockRepository

	BeforeEach(func() {
		node := core.Node{
		node = core.Node{
			GenesisBlock: "GENESIS",
			NetworkID:    1,
			ID:           "b6f90c0fdd8ec9607aed8ee45c69322e47b7063f0bfb7a29c8ecafab24d0a22d24dd2329b5ee6ed4125a03cb14e57fd584e67f9e53e6c631055cbbd82f080845",
			ClientName:   "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9",
		}
		db = test_config.NewTestDB(node)
		blockRepository = repositories.BlockRepository{DB: db}
		blockRepository = repositories.NewBlockRepository(db)

	})

@@ -40,7 +42,7 @@ var _ = Describe("Saving blocks", func() {
			ClientName:   "Geth",
		}
		dbTwo := test_config.NewTestDB(nodeTwo)
		repositoryTwo := repositories.BlockRepository{DB: dbTwo}
		repositoryTwo := repositories.NewBlockRepository(dbTwo)

		_, err := repositoryTwo.GetBlock(123)
		Expect(err).To(HaveOccurred())
@@ -166,7 +168,7 @@ var _ = Describe("Saving blocks", func() {
			NetworkID:    1,
		}
		dbTwo := test_config.NewTestDB(nodeTwo)
		repositoryTwo := repositories.BlockRepository{DB: dbTwo}
		repositoryTwo := repositories.NewBlockRepository(dbTwo)

		blockRepository.CreateOrUpdateBlock(blockOne)
		repositoryTwo.CreateOrUpdateBlock(blockTwo)
@@ -177,6 +179,22 @@ var _ = Describe("Saving blocks", func() {
		Expect(retrievedBlockTwo.Transactions[0].Hash).To(Equal("x678"))
	})

	It("returns 'block exists' error if attempting to add duplicate block", func() {
		block := core.Block{
			Number: 12345,
			Hash:   "0x12345",
		}

		_, err := blockRepository.CreateOrUpdateBlock(block)

		Expect(err).NotTo(HaveOccurred())

		_, err = blockRepository.CreateOrUpdateBlock(block)

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(repositories.ErrBlockExists))
	})

	It("saves the attributes associated to a transaction", func() {
		gasLimit := uint64(5000)
		gasPrice := int64(3)
@@ -220,32 +238,39 @@ var _ = Describe("Saving blocks", func() {
	It("is empty the starting block number is the highest known block number", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 1})

		Expect(len(blockRepository.MissingBlockNumbers(1, 1))).To(Equal(0))
		Expect(len(blockRepository.MissingBlockNumbers(1, 1, node.ID))).To(Equal(0))
	})

	It("is the only missing block number", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 2})

		Expect(blockRepository.MissingBlockNumbers(1, 2)).To(Equal([]int64{1}))
		Expect(blockRepository.MissingBlockNumbers(1, 2, node.ID)).To(Equal([]int64{1}))
	})

	It("is both missing block numbers", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 3})

		Expect(blockRepository.MissingBlockNumbers(1, 3)).To(Equal([]int64{1, 2}))
		Expect(blockRepository.MissingBlockNumbers(1, 3, node.ID)).To(Equal([]int64{1, 2}))
	})

	It("goes back to the starting block number", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 6})

		Expect(blockRepository.MissingBlockNumbers(4, 6)).To(Equal([]int64{4, 5}))
		Expect(blockRepository.MissingBlockNumbers(4, 6, node.ID)).To(Equal([]int64{4, 5}))
	})

	It("only includes missing block numbers", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 4})
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 6})

		Expect(blockRepository.MissingBlockNumbers(4, 6)).To(Equal([]int64{5}))
		Expect(blockRepository.MissingBlockNumbers(4, 6, node.ID)).To(Equal([]int64{5}))
	})

	It("includes blocks created by a different node", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 4})
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 6})

		Expect(blockRepository.MissingBlockNumbers(4, 6, "Different node id")).To(Equal([]int64{4, 5, 6}))
	})

	It("is a list with multiple gaps", func() {
@@ -254,18 +279,18 @@ var _ = Describe("Saving blocks", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 8})
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 10})

		Expect(blockRepository.MissingBlockNumbers(3, 10)).To(Equal([]int64{3, 6, 7, 9}))
		Expect(blockRepository.MissingBlockNumbers(3, 10, node.ID)).To(Equal([]int64{3, 6, 7, 9}))
	})

	It("returns empty array when lower bound exceeds upper bound", func() {
		Expect(blockRepository.MissingBlockNumbers(10000, 1)).To(Equal([]int64{}))
		Expect(blockRepository.MissingBlockNumbers(10000, 1, node.ID)).To(Equal([]int64{}))
	})

	It("only returns requested range even when other gaps exist", func() {
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 3})
		blockRepository.CreateOrUpdateBlock(core.Block{Number: 8})

		Expect(blockRepository.MissingBlockNumbers(1, 5)).To(Equal([]int64{1, 2, 4, 5}))
		Expect(blockRepository.MissingBlockNumbers(1, 5, node.ID)).To(Equal([]int64{1, 2, 4, 5}))
	})
})


@@ -53,7 +53,7 @@ var _ = Describe("Creating contracts", func() {

	It("returns transactions 'To' a contract", func() {
		var blockRepository datastore.BlockRepository
		blockRepository = repositories.BlockRepository{DB: db}
		blockRepository = repositories.NewBlockRepository(db)
		block := core.Block{
			Number: 123,
			Transactions: []core.Transaction{

@@ -13,14 +13,14 @@ type LogRepository struct {
	*postgres.DB
}

func (logRepository LogRepository) CreateLogs(lgs []core.Log) error {
func (logRepository LogRepository) CreateLogs(lgs []core.Log, receiptId int64) error {
	tx, _ := logRepository.DB.BeginTx(context.Background(), nil)
	for _, tlog := range lgs {
		_, err := tx.Exec(
			`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data)
			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
			`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
			`,
			tlog.BlockNumber, tlog.Address, tlog.TxHash, tlog.Index, tlog.Topics[0], tlog.Topics[1], tlog.Topics[2], tlog.Topics[3], tlog.Data,
			tlog.BlockNumber, tlog.Address, tlog.TxHash, tlog.Index, tlog.Topics[0], tlog.Topics[1], tlog.Topics[2], tlog.Topics[3], tlog.Data, receiptId,
		)
		if err != nil {
			tx.Rollback()

@@ -13,9 +13,13 @@ import (
)

var _ = Describe("Logs Repository", func() {
	Describe("Saving logs", func() {
	var db *postgres.DB
	var blockRepository datastore.BlockRepository
	var logsRepository datastore.LogRepository
	var receiptRepository datastore.ReceiptRepository
	var node core.Node

	BeforeEach(func() {
		node = core.Node{
			GenesisBlock: "GENESIS",
@@ -24,25 +28,30 @@ var _ = Describe("Logs Repository", func() {
			ClientName: "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9",
		}
		db = test_config.NewTestDB(node)
		blockRepository = repositories.NewBlockRepository(db)
		logsRepository = repositories.LogRepository{DB: db}
		receiptRepository = repositories.ReceiptRepository{DB: db}
	})

	Describe("Saving logs", func() {
		It("returns the log when it exists", func() {
			blockNumber := int64(12345)
			blockId, err := blockRepository.CreateOrUpdateBlock(core.Block{Number: blockNumber})
			Expect(err).NotTo(HaveOccurred())
			receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{})
			Expect(err).NotTo(HaveOccurred())
			logsRepository.CreateLogs([]core.Log{{
				BlockNumber: 1,
				BlockNumber: blockNumber,
				Index:       0,
				Address:     "x123",
				TxHash:      "x456",
				Topics:      core.Topics{0: "x777", 1: "x888", 2: "x999"},
				Data:        "xabc",
			}},
			)
			}}, receiptId)

			log := logsRepository.GetLogs("x123", 1)
			log := logsRepository.GetLogs("x123", blockNumber)

			Expect(log).NotTo(BeNil())
			Expect(log[0].BlockNumber).To(Equal(int64(1)))
			Expect(log[0].BlockNumber).To(Equal(blockNumber))
			Expect(log[0].Address).To(Equal("x123"))
			Expect(log[0].Index).To(Equal(int64(0)))
			Expect(log[0].TxHash).To(Equal("x456"))
@@ -58,24 +67,27 @@ var _ = Describe("Logs Repository", func() {
		})

		It("filters to the correct block number and address", func() {
			blockNumber := int64(12345)
			blockId, err := blockRepository.CreateOrUpdateBlock(core.Block{Number: blockNumber})
			Expect(err).NotTo(HaveOccurred())
			receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{})
			Expect(err).NotTo(HaveOccurred())
			logsRepository.CreateLogs([]core.Log{{
				BlockNumber: 1,
				BlockNumber: blockNumber,
				Index:       0,
				Address:     "x123",
				TxHash:      "x456",
				Topics:      core.Topics{0: "x777", 1: "x888", 2: "x999"},
				Data:        "xabc",
			}},
			)
			}}, receiptId)
			logsRepository.CreateLogs([]core.Log{{
				BlockNumber: 1,
				BlockNumber: blockNumber,
				Index:       1,
				Address:     "x123",
				TxHash:      "x789",
				Topics:      core.Topics{0: "x111", 1: "x222", 2: "x333"},
				Data:        "xdef",
			}},
			)
			}}, receiptId)
			logsRepository.CreateLogs([]core.Log{{
				BlockNumber: 2,
				Index:       0,
@@ -83,10 +95,9 @@ var _ = Describe("Logs Repository", func() {
				TxHash:      "x456",
				Topics:      core.Topics{0: "x777", 1: "x888", 2: "x999"},
				Data:        "xabc",
			}},
			)
			}}, receiptId)

			log := logsRepository.GetLogs("x123", 1)
			log := logsRepository.GetLogs("x123", blockNumber)

			type logIndex struct {
				blockNumber int64
@@ -111,14 +122,12 @@ var _ = Describe("Logs Repository", func() {
			Expect(len(log)).To(Equal(2))
			Expect(uniqueBlockNumbers).To(Equal(
				[]logIndex{
					{blockNumber: 1, Index: 0},
					{blockNumber: 1, Index: 1}},
					{blockNumber: blockNumber, Index: 0},
					{blockNumber: blockNumber, Index: 1}},
			))
		})

		It("saves the logs attached to a receipt", func() {
			var blockRepository datastore.BlockRepository
			blockRepository = repositories.BlockRepository{DB: db}

			logs := []core.Log{{
				Address: "0x8a4774fe82c63484afef97ca8d89a6ea5e21f973",
@@ -171,7 +180,7 @@ var _ = Describe("Logs Repository", func() {
			}

			block := core.Block{Transactions: []core.Transaction{transaction}}
			err := blockRepository.CreateOrUpdateBlock(block)
			_, err := blockRepository.CreateOrUpdateBlock(block)
			Expect(err).To(Not(HaveOccurred()))
			retrievedLogs := logsRepository.GetLogs("0x99041f808d598b782d5a3e498681c2452a31da08", 4745407)


@@ -1,6 +1,7 @@
package repositories

import (
	"context"
	"database/sql"

	"github.com/vulcanize/vulcanizedb/pkg/core"
@@ -12,6 +13,73 @@ type ReceiptRepository struct {
	*postgres.DB
}

func (receiptRepository ReceiptRepository) CreateReceiptsAndLogs(blockId int64, receipts []core.Receipt) error {
	tx, err := receiptRepository.DB.BeginTx(context.Background(), nil)
	if err != nil {
		return err
	}
	for _, receipt := range receipts {
		receiptId, err := createReceipt(receipt, blockId, tx)
		if err != nil {
			tx.Rollback()
			return err
		}
		if len(receipt.Logs) > 0 {
			err = createLogs(receipt.Logs, receiptId, tx)
			if err != nil {
				tx.Rollback()
				return err
			}
		}
	}
	tx.Commit()
	return nil
}

func createReceipt(receipt core.Receipt, blockId int64, tx *sql.Tx) (int64, error) {
	var receiptId int64
	err := tx.QueryRow(
		`INSERT INTO receipts
		(contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		RETURNING id`,
		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId,
	).Scan(&receiptId)
	return receiptId, err
}

func createLogs(logs []core.Log, receiptId int64, tx *sql.Tx) error {
	for _, log := range logs {
		_, err := tx.Exec(
			`INSERT INTO logs (block_number, address, tx_hash, index, topic0, topic1, topic2, topic3, data, receipt_id)
			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
			`,
			log.BlockNumber, log.Address, log.TxHash, log.Index, log.Topics[0], log.Topics[1], log.Topics[2], log.Topics[3], log.Data, receiptId,
		)
		if err != nil {
			return err
		}
	}
	return nil
}

func (receiptRepository ReceiptRepository) CreateReceipt(blockId int64, receipt core.Receipt) (int64, error) {
	tx, _ := receiptRepository.DB.BeginTx(context.Background(), nil)
	var receiptId int64
	err := tx.QueryRow(
		`INSERT INTO receipts
		(contract_address, tx_hash, cumulative_gas_used, gas_used, state_root, status, block_id)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		RETURNING id`,
		receipt.ContractAddress, receipt.TxHash, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.StateRoot, receipt.Status, blockId).Scan(&receiptId)
	if err != nil {
		tx.Rollback()
		return receiptId, err
	}
	tx.Commit()
	return receiptId, nil
}

func (receiptRepository ReceiptRepository) GetReceipt(txHash string) (core.Receipt, error) {
	row := receiptRepository.DB.QueryRow(
		`SELECT contract_address,

@@ -10,7 +10,9 @@ import (
	"github.com/vulcanize/vulcanizedb/test_config"
)

var _ bool = Describe("Logs Repository", func() {
var _ = Describe("Receipts Repository", func() {
	var blockRepository datastore.BlockRepository
	var logRepository datastore.LogRepository
	var receiptRepository datastore.ReceiptRepository
	var db *postgres.DB
	var node core.Node
@@ -22,14 +24,67 @@ var _ bool = Describe("Logs Repository", func() {
			ClientName: "Geth/v1.7.2-stable-1db4ecdc/darwin-amd64/go1.9",
		}
		db = test_config.NewTestDB(node)
		blockRepository = repositories.NewBlockRepository(db)
		logRepository = repositories.LogRepository{DB: db}
		receiptRepository = repositories.ReceiptRepository{DB: db}
	})

	Describe("Saving receipts", func() {
	Describe("Saving multiple receipts", func() {
		It("persists each receipt and its logs", func() {
			blockNumber := int64(1234567)
			blockId, err := blockRepository.CreateOrUpdateBlock(core.Block{Number: blockNumber})
			Expect(err).NotTo(HaveOccurred())
			txHashOne := "0xTxHashOne"
			txHashTwo := "0xTxHashTwo"
			addressOne := "0xAddressOne"
			addressTwo := "0xAddressTwo"
			logsOne := []core.Log{{
				Address:     addressOne,
				BlockNumber: blockNumber,
				TxHash:      txHashOne,
			}, {
				Address:     addressOne,
				BlockNumber: blockNumber,
				TxHash:      txHashOne,
			}}
			logsTwo := []core.Log{{
				BlockNumber: blockNumber,
				TxHash:      txHashTwo,
				Address:     addressTwo,
			}}
			receiptOne := core.Receipt{
				Logs:   logsOne,
				TxHash: txHashOne,
			}
			receiptTwo := core.Receipt{
				Logs:   logsTwo,
				TxHash: txHashTwo,
			}
			receipts := []core.Receipt{receiptOne, receiptTwo}

			err = receiptRepository.CreateReceiptsAndLogs(blockId, receipts)

			Expect(err).NotTo(HaveOccurred())

			persistedReceiptOne, err := receiptRepository.GetReceipt(txHashOne)
			Expect(err).NotTo(HaveOccurred())
			Expect(persistedReceiptOne).NotTo(BeNil())
			Expect(persistedReceiptOne.TxHash).To(Equal(txHashOne))
			persistedReceiptTwo, err := receiptRepository.GetReceipt(txHashTwo)
			Expect(err).NotTo(HaveOccurred())
			Expect(persistedReceiptTwo).NotTo(BeNil())
			Expect(persistedReceiptTwo.TxHash).To(Equal(txHashTwo))
			persistedAddressOneLogs := logRepository.GetLogs(addressOne, blockNumber)
			Expect(persistedAddressOneLogs).NotTo(BeNil())
			Expect(len(persistedAddressOneLogs)).To(Equal(2))
			persistedAddressTwoLogs := logRepository.GetLogs(addressTwo, blockNumber)
			Expect(persistedAddressTwoLogs).NotTo(BeNil())
			Expect(len(persistedAddressTwoLogs)).To(Equal(1))
		})
	})

	Describe("Saving receipts on a block's transactions", func() {
		It("returns the receipt when it exists", func() {
			var blockRepository datastore.BlockRepository
			db := test_config.NewTestDB(node)
			blockRepository = repositories.BlockRepository{DB: db}
			expected := core.Receipt{
				ContractAddress:   "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
				CumulativeGasUsed: 7996119,
@@ -44,11 +99,12 @@ var _ bool = Describe("Logs Repository", func() {
				Hash:    expected.TxHash,
				Receipt: expected,
			}

			block := core.Block{Transactions: []core.Transaction{transaction}}
			blockRepository.CreateOrUpdateBlock(block)
			receipt, err := receiptRepository.GetReceipt("0xe340558980f89d5f86045ac11e5cc34e4bcec20f9f1e2a427aa39d87114e8223")

			_, err := blockRepository.CreateOrUpdateBlock(block)

			Expect(err).NotTo(HaveOccurred())
			receipt, err := receiptRepository.GetReceipt("0xe340558980f89d5f86045ac11e5cc34e4bcec20f9f1e2a427aa39d87114e8223")
			Expect(err).ToNot(HaveOccurred())
			//Not currently serializing bloom logs
			Expect(receipt.Bloom).To(Equal(core.Receipt{}.Bloom))
@@ -66,9 +122,6 @@ var _ bool = Describe("Logs Repository", func() {
		})

		It("still saves receipts without logs", func() {
			var blockRepository datastore.BlockRepository
			db := test_config.NewTestDB(node)
			blockRepository = repositories.BlockRepository{DB: db}
			receipt := core.Receipt{
				TxHash: "0x002c4799161d809b23f67884eb6598c9df5894929fe1a9ead97ca175d360f547",
			}
@@ -80,10 +133,11 @@ var _ bool = Describe("Logs Repository", func() {
			block := core.Block{
				Transactions: []core.Transaction{transaction},
			}
			blockRepository.CreateOrUpdateBlock(block)

			_, err := receiptRepository.GetReceipt(receipt.TxHash)
			_, err := blockRepository.CreateOrUpdateBlock(block)

			Expect(err).NotTo(HaveOccurred())
			_, err = receiptRepository.GetReceipt(receipt.TxHash)
			Expect(err).To(Not(HaveOccurred()))
		})
	})
@ -13,14 +13,18 @@ import (
|
||||
|
||||
var _ = Describe("Watched Events Repository", func() {
|
||||
var db *postgres.DB
|
||||
var logRepository datastore.LogRepository
|
||||
var blocksRepository datastore.BlockRepository
|
||||
var filterRepository datastore.FilterRepository
|
||||
var logRepository datastore.LogRepository
|
||||
var receiptRepository datastore.ReceiptRepository
|
||||
var watchedEventRepository datastore.WatchedEventRepository
|
||||
|
||||
BeforeEach(func() {
|
||||
db = test_config.NewTestDB(core.Node{})
|
||||
logRepository = repositories.LogRepository{DB: db}
|
||||
blocksRepository = repositories.NewBlockRepository(db)
|
||||
filterRepository = repositories.FilterRepository{DB: db}
|
||||
logRepository = repositories.LogRepository{DB: db}
|
||||
receiptRepository = repositories.ReceiptRepository{DB: db}
|
||||
watchedEventRepository = repositories.WatchedEventRepository{DB: db}
|
||||
})
|
||||
|
||||
@ -56,7 +60,11 @@ var _ = Describe("Watched Events Repository", func() {
|
||||
}
|
||||
err := filterRepository.CreateFilter(filter)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = logRepository.CreateLogs(logs)
|
||||
blockId, err := blocksRepository.CreateOrUpdateBlock(core.Block{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = logRepository.CreateLogs(logs, receiptId)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
matchingLogs, err := watchedEventRepository.GetWatchedEvents("Filter1")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@ -69,7 +77,6 @@ var _ = Describe("Watched Events Repository", func() {
|
||||
Expect(matchingLogs[0].Topic1).To(Equal(expectedWatchedEventLog[0].Topic1))
|
||||
Expect(matchingLogs[0].Topic2).To(Equal(expectedWatchedEventLog[0].Topic2))
|
||||
Expect(matchingLogs[0].Data).To(Equal(expectedWatchedEventLog[0].Data))
|
||||
|
||||
})
|
||||
|
||||
It("retrieves a watched event log by name", func() {
|
||||
@ -110,7 +117,11 @@ var _ = Describe("Watched Events Repository", func() {
|
||||
}}
|
||||
err := filterRepository.CreateFilter(filter)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = logRepository.CreateLogs(logs)
|
||||
blockId, err := blocksRepository.CreateOrUpdateBlock(core.Block{Hash: "0x123"})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
receiptId, err := receiptRepository.CreateReceipt(blockId, core.Receipt{TxHash: "0x123"})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = logRepository.CreateLogs(logs, receiptId)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
matchingLogs, err := watchedEventRepository.GetWatchedEvents("Filter1")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
@ -12,9 +12,9 @@ var ErrBlockDoesNotExist = func(blockNumber int64) error {
}

type BlockRepository interface {
    CreateOrUpdateBlock(block core.Block) error
    CreateOrUpdateBlock(block core.Block) (int64, error)
    GetBlock(blockNumber int64) (core.Block, error)
    MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64) []int64
    MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64, nodeId string) []int64
    SetBlocksStatus(chainHead int64)
}

@ -38,7 +38,7 @@ type FilterRepository interface {
}

type LogRepository interface {
    CreateLogs(logs []core.Log) error
    CreateLogs(logs []core.Log, receiptId int64) error
    GetLogs(address string, blockNumber int64) []core.Log
}

@ -47,6 +47,8 @@ var ErrReceiptDoesNotExist = func(txHash string) error {
}

type ReceiptRepository interface {
    CreateReceiptsAndLogs(blockId int64, receipts []core.Receipt) error
    CreateReceipt(blockId int64, receipt core.Receipt) (int64, error)
    GetReceipt(txHash string) (core.Receipt, error)
}
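
Taken together, these interface changes thread database ids through the whole write path: CreateOrUpdateBlock now returns the block's row id, CreateReceipt persists a receipt against that block id and returns the receipt's row id, and CreateLogs attaches logs to that receipt id. A minimal sketch of the resulting call order (error handling elided; variable names are illustrative, not from this commit):

    blockId, err := blockRepository.CreateOrUpdateBlock(block)          // block row id
    receiptId, err := receiptRepository.CreateReceipt(blockId, receipt) // receipt row id, keyed to the block
    err = logRepository.CreateLogs(receipt.Logs, receiptId)             // logs keyed to the receipt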
86
pkg/fakes/mock_block_repository.go
Normal file
@ -0,0 +1,86 @@
|
||||
package fakes
|
||||
|
||||
import (
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
type MockBlockRepository struct {
|
||||
createOrUpdateBlockCalled bool
|
||||
createOrUpdateBlockPassedBlock core.Block
|
||||
createOrUpdateBlockReturnInt int64
|
||||
createOrUpdateBlockReturnErr error
|
||||
missingBlockNumbersCalled bool
|
||||
missingBlockNumbersPassedStartingBlockNumber int64
|
||||
missingBlockNumbersPassedEndingBlockNumber int64
|
||||
missingBlockNumbersPassedNodeId string
|
||||
missingBlockNumbersReturnArray []int64
|
||||
setBlockStatusCalled bool
|
||||
setBlockStatusPassedChainHead int64
|
||||
}
|
||||
|
||||
func NewMockBlockRepository() *MockBlockRepository {
|
||||
return &MockBlockRepository{
|
||||
createOrUpdateBlockCalled: false,
|
||||
createOrUpdateBlockPassedBlock: core.Block{},
|
||||
createOrUpdateBlockReturnInt: 0,
|
||||
createOrUpdateBlockReturnErr: nil,
|
||||
missingBlockNumbersCalled: false,
|
||||
missingBlockNumbersPassedStartingBlockNumber: 0,
|
||||
missingBlockNumbersPassedEndingBlockNumber: 0,
|
||||
missingBlockNumbersPassedNodeId: "",
|
||||
missingBlockNumbersReturnArray: nil,
|
||||
setBlockStatusCalled: false,
|
||||
setBlockStatusPassedChainHead: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) SetCreateOrUpdateBlockReturnVals(i int64, err error) {
|
||||
mbr.createOrUpdateBlockReturnInt = i
|
||||
mbr.createOrUpdateBlockReturnErr = err
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) SetMissingBlockNumbersReturnArray(returnArray []int64) {
|
||||
mbr.missingBlockNumbersReturnArray = returnArray
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) CreateOrUpdateBlock(block core.Block) (int64, error) {
|
||||
mbr.createOrUpdateBlockCalled = true
|
||||
mbr.createOrUpdateBlockPassedBlock = block
|
||||
return mbr.createOrUpdateBlockReturnInt, mbr.createOrUpdateBlockReturnErr
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) GetBlock(blockNumber int64) (core.Block, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) MissingBlockNumbers(startingBlockNumber int64, endingBlockNumber int64, nodeId string) []int64 {
|
||||
mbr.missingBlockNumbersCalled = true
|
||||
mbr.missingBlockNumbersPassedStartingBlockNumber = startingBlockNumber
|
||||
mbr.missingBlockNumbersPassedEndingBlockNumber = endingBlockNumber
|
||||
mbr.missingBlockNumbersPassedNodeId = nodeId
|
||||
return mbr.missingBlockNumbersReturnArray
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) SetBlocksStatus(chainHead int64) {
|
||||
mbr.setBlockStatusCalled = true
|
||||
mbr.setBlockStatusPassedChainHead = chainHead
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) AssertCreateOrUpdateBlockCalledWith(block core.Block) {
|
||||
Expect(mbr.createOrUpdateBlockCalled).To(BeTrue())
|
||||
Expect(mbr.createOrUpdateBlockPassedBlock).To(Equal(block))
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) AssertMissingBlockNumbersCalledWith(startingBlockNumber int64, endingBlockNumber int64, nodeId string) {
|
||||
Expect(mbr.missingBlockNumbersCalled).To(BeTrue())
|
||||
Expect(mbr.missingBlockNumbersPassedStartingBlockNumber).To(Equal(startingBlockNumber))
|
||||
Expect(mbr.missingBlockNumbersPassedEndingBlockNumber).To(Equal(endingBlockNumber))
|
||||
Expect(mbr.missingBlockNumbersPassedNodeId).To(Equal(nodeId))
|
||||
}
|
||||
|
||||
func (mbr *MockBlockRepository) AssertSetBlockStatusCalledWith(chainHead int64) {
|
||||
Expect(mbr.setBlockStatusCalled).To(BeTrue())
|
||||
Expect(mbr.setBlockStatusPassedChainHead).To(Equal(chainHead))
|
||||
}
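
These hand-rolled fakes all follow the same arrange/act/assert shape: Set… methods stub return values, the interface methods record their arguments, and Assert… methods verify them with Gomega. A hedged example of exercising the block repository fake in a test (hypothetical test code, not part of this commit):

    mockBlockRepository := fakes.NewMockBlockRepository()
    mockBlockRepository.SetCreateOrUpdateBlockReturnVals(42, nil)

    blockId, err := mockBlockRepository.CreateOrUpdateBlock(core.Block{Number: 1})

    Expect(err).NotTo(HaveOccurred())
    Expect(blockId).To(Equal(int64(42)))
    mockBlockRepository.AssertCreateOrUpdateBlockCalledWith(core.Block{Number: 1})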
38
pkg/fakes/mock_crypto_parser.go
Normal file
@ -0,0 +1,38 @@
|
||||
package fakes
|
||||
|
||||
import . "github.com/onsi/gomega"
|
||||
|
||||
type MockCryptoParser struct {
|
||||
parsePublicKeyCalled bool
|
||||
parsePublicKeyPassedPrivateKey string
|
||||
parsePublicKeyReturnString string
|
||||
parsePublicKeyReturnErr error
|
||||
}
|
||||
|
||||
func NewMockCryptoParser() *MockCryptoParser {
|
||||
return &MockCryptoParser{
|
||||
parsePublicKeyCalled: false,
|
||||
parsePublicKeyPassedPrivateKey: "",
|
||||
parsePublicKeyReturnString: "",
|
||||
parsePublicKeyReturnErr: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (mcp *MockCryptoParser) SetReturnVal(pubKey string) {
|
||||
mcp.parsePublicKeyReturnString = pubKey
|
||||
}
|
||||
|
||||
func (mcp *MockCryptoParser) SetReturnErr(err error) {
|
||||
mcp.parsePublicKeyReturnErr = err
|
||||
}
|
||||
|
||||
func (mcp *MockCryptoParser) ParsePublicKey(privateKey string) (string, error) {
|
||||
mcp.parsePublicKeyCalled = true
|
||||
mcp.parsePublicKeyPassedPrivateKey = privateKey
|
||||
return mcp.parsePublicKeyReturnString, mcp.parsePublicKeyReturnErr
|
||||
}
|
||||
|
||||
func (mcp *MockCryptoParser) AssertParsePublicKeyCalledWith(privateKey string) {
|
||||
Expect(mcp.parsePublicKeyCalled).To(BeTrue())
|
||||
Expect(mcp.parsePublicKeyPassedPrivateKey).To(Equal(privateKey))
|
||||
}
|
95
pkg/fakes/mock_ethereum_database.go
Normal file
@ -0,0 +1,95 @@
|
||||
package fakes
|
||||
|
||||
import (
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
type MockEthereumDatabase struct {
|
||||
getBlockCalled bool
|
||||
getBlockPassedHash []byte
|
||||
getBlockPassedNumber int64
|
||||
getBlockReturnBlock *types.Block
|
||||
getBlockHashCalled bool
|
||||
getBlockHashPassedNumber int64
|
||||
getBlockHashReturnHash []byte
|
||||
getBlockReceiptsCalled bool
|
||||
getBlockReceiptsPassedHash []byte
|
||||
getBlockReceiptsPassedNumber int64
|
||||
getBlockReceiptsReturnReceipts types.Receipts
|
||||
getHeadBlockNumberCalled bool
|
||||
getHeadBlockNumberReturnVal int64
|
||||
}
|
||||
|
||||
func NewMockEthereumDatabase() *MockEthereumDatabase {
|
||||
return &MockEthereumDatabase{
|
||||
getBlockCalled: false,
|
||||
getBlockPassedHash: nil,
|
||||
getBlockPassedNumber: 0,
|
||||
getBlockReturnBlock: nil,
|
||||
getBlockHashCalled: false,
|
||||
getBlockHashPassedNumber: 0,
|
||||
getBlockHashReturnHash: nil,
|
||||
getBlockReceiptsCalled: false,
|
||||
getBlockReceiptsPassedHash: nil,
|
||||
getBlockReceiptsPassedNumber: 0,
|
||||
getBlockReceiptsReturnReceipts: nil,
|
||||
getHeadBlockNumberCalled: false,
|
||||
getHeadBlockNumberReturnVal: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) SetReturnBlock(block *types.Block) {
|
||||
med.getBlockReturnBlock = block
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) SetReturnHash(hash []byte) {
|
||||
med.getBlockHashReturnHash = hash
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) SetReturnReceipts(receipts types.Receipts) {
|
||||
med.getBlockReceiptsReturnReceipts = receipts
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) GetBlock(hash []byte, blockNumber int64) *types.Block {
|
||||
med.getBlockCalled = true
|
||||
med.getBlockPassedHash = hash
|
||||
med.getBlockPassedNumber = blockNumber
|
||||
return med.getBlockReturnBlock
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) GetBlockHash(blockNumber int64) []byte {
|
||||
med.getBlockHashCalled = true
|
||||
med.getBlockHashPassedNumber = blockNumber
|
||||
return med.getBlockHashReturnHash
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) GetBlockReceipts(blockHash []byte, blockNumber int64) types.Receipts {
|
||||
med.getBlockReceiptsCalled = true
|
||||
med.getBlockReceiptsPassedHash = blockHash
|
||||
med.getBlockReceiptsPassedNumber = blockNumber
|
||||
return med.getBlockReceiptsReturnReceipts
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) GetHeadBlockNumber() int64 {
|
||||
med.getHeadBlockNumberCalled = true
|
||||
return med.getHeadBlockNumberReturnVal
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) AssertGetBlockCalledWith(hash []byte, blockNumber int64) {
|
||||
Expect(med.getBlockCalled).To(BeTrue())
|
||||
Expect(med.getBlockPassedHash).To(Equal(hash))
|
||||
Expect(med.getBlockPassedNumber).To(Equal(blockNumber))
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) AssertGetBlockHashCalledWith(blockNumber int64) {
|
||||
Expect(med.getBlockHashCalled).To(BeTrue())
|
||||
Expect(med.getBlockHashPassedNumber).To(Equal(blockNumber))
|
||||
}
|
||||
|
||||
func (med *MockEthereumDatabase) AssertGetBlockReceiptsCalledWith(blockHash []byte, blockNumber int64) {
|
||||
Expect(med.getBlockReceiptsCalled).To(BeTrue())
|
||||
Expect(med.getBlockReceiptsPassedHash).To(Equal(blockHash))
|
||||
Expect(med.getBlockReceiptsPassedNumber).To(Equal(blockNumber))
|
||||
}
|
38
pkg/fakes/mock_fs_reader.go
Normal file
@ -0,0 +1,38 @@
|
||||
package fakes
|
||||
|
||||
import . "github.com/onsi/gomega"
|
||||
|
||||
type MockFsReader struct {
|
||||
readCalled bool
|
||||
readPassedPath string
|
||||
readReturnBytes []byte
|
||||
readReturnErr error
|
||||
}
|
||||
|
||||
func NewMockFsReader() *MockFsReader {
|
||||
return &MockFsReader{
|
||||
readCalled: false,
|
||||
readPassedPath: "",
|
||||
readReturnBytes: nil,
|
||||
readReturnErr: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (mfr *MockFsReader) SetReturnBytes(returnBytes []byte) {
|
||||
mfr.readReturnBytes = returnBytes
|
||||
}
|
||||
|
||||
func (mfr *MockFsReader) SetReturnErr(err error) {
|
||||
mfr.readReturnErr = err
|
||||
}
|
||||
|
||||
func (mfr *MockFsReader) Read(path string) ([]byte, error) {
|
||||
mfr.readCalled = true
|
||||
mfr.readPassedPath = path
|
||||
return mfr.readReturnBytes, mfr.readReturnErr
|
||||
}
|
||||
|
||||
func (mfr *MockFsReader) AssertReadCalledWith(path string) {
|
||||
Expect(mfr.readCalled).To(BeTrue())
|
||||
Expect(mfr.readPassedPath).To(Equal(path))
|
||||
}
|
126
pkg/fakes/mock_level_database_reader.go
Normal file
@ -0,0 +1,126 @@
|
||||
package fakes
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
type MockLevelDatabaseReader struct {
|
||||
getBlockCalled bool
|
||||
getBlockNumberCalled bool
|
||||
getBlockNumberPassedHash common.Hash
|
||||
getBlockPassedHash common.Hash
|
||||
getBlockPassedNumber uint64
|
||||
getBlockReceiptsCalled bool
|
||||
getBlockReceiptsPassedHash common.Hash
|
||||
getBlockReceiptsPassedNumber uint64
|
||||
getCanonicalHashCalled bool
|
||||
getCanonicalHashPassedNumber uint64
|
||||
getCanonicalHashReturnHash common.Hash
|
||||
getHeadBlockHashCalled bool
|
||||
getHeadBlockHashReturnHash common.Hash
|
||||
passedHash common.Hash
|
||||
returnBlock *types.Block
|
||||
returnBlockNumber uint64
|
||||
returnReceipts types.Receipts
|
||||
}
|
||||
|
||||
func NewMockLevelDatabaseReader() *MockLevelDatabaseReader {
|
||||
return &MockLevelDatabaseReader{
|
||||
getBlockCalled: false,
|
||||
getBlockNumberCalled: false,
|
||||
getBlockNumberPassedHash: common.Hash{},
|
||||
getBlockPassedHash: common.Hash{},
|
||||
getBlockPassedNumber: 0,
|
||||
getBlockReceiptsCalled: false,
|
||||
getBlockReceiptsPassedHash: common.Hash{},
|
||||
getBlockReceiptsPassedNumber: 0,
|
||||
getCanonicalHashCalled: false,
|
||||
getCanonicalHashPassedNumber: 0,
|
||||
getCanonicalHashReturnHash: common.Hash{},
|
||||
getHeadBlockHashCalled: false,
|
||||
getHeadBlockHashReturnHash: common.Hash{},
|
||||
passedHash: common.Hash{},
|
||||
returnBlock: nil,
|
||||
returnBlockNumber: 0,
|
||||
returnReceipts: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) SetReturnBlock(block *types.Block) {
|
||||
mldr.returnBlock = block
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) SetReturnBlockNumber(n uint64) {
|
||||
mldr.returnBlockNumber = n
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) SetGetCanonicalHashReturnHash(hash common.Hash) {
|
||||
mldr.getCanonicalHashReturnHash = hash
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) SetHeadBlockHashReturnHash(hash common.Hash) {
|
||||
mldr.getHeadBlockHashReturnHash = hash
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) SetReturnReceipts(receipts types.Receipts) {
|
||||
mldr.returnReceipts = receipts
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) GetBlock(hash common.Hash, number uint64) *types.Block {
|
||||
mldr.getBlockCalled = true
|
||||
mldr.getBlockPassedHash = hash
|
||||
mldr.getBlockPassedNumber = number
|
||||
return mldr.returnBlock
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) GetBlockReceipts(hash common.Hash, number uint64) types.Receipts {
|
||||
mldr.getBlockReceiptsCalled = true
|
||||
mldr.getBlockReceiptsPassedHash = hash
|
||||
mldr.getBlockReceiptsPassedNumber = number
|
||||
return mldr.returnReceipts
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) GetBlockNumber(hash common.Hash) uint64 {
|
||||
mldr.getBlockNumberCalled = true
|
||||
mldr.getBlockNumberPassedHash = hash
|
||||
return mldr.returnBlockNumber
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) GetCanonicalHash(number uint64) common.Hash {
|
||||
mldr.getCanonicalHashCalled = true
|
||||
mldr.getCanonicalHashPassedNumber = number
|
||||
return mldr.getCanonicalHashReturnHash
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) GetHeadBlockHash() common.Hash {
|
||||
mldr.getHeadBlockHashCalled = true
|
||||
return mldr.getHeadBlockHashReturnHash
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) AssertGetBlockCalledWith(hash common.Hash, number uint64) {
|
||||
Expect(mldr.getBlockCalled).To(BeTrue())
|
||||
Expect(mldr.getBlockPassedHash).To(Equal(hash))
|
||||
Expect(mldr.getBlockPassedNumber).To(Equal(number))
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) AssertGetBlockNumberCalledWith(hash common.Hash) {
|
||||
Expect(mldr.getBlockNumberCalled).To(BeTrue())
|
||||
Expect(mldr.getBlockNumberPassedHash).To(Equal(hash))
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) AssertGetBlockReceiptsCalledWith(hash common.Hash, number uint64) {
|
||||
Expect(mldr.getBlockReceiptsCalled).To(BeTrue())
|
||||
Expect(mldr.getBlockReceiptsPassedHash).To(Equal(hash))
|
||||
Expect(mldr.getBlockReceiptsPassedNumber).To(Equal(number))
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) AssertGetCanonicalHashCalledWith(number uint64) {
|
||||
Expect(mldr.getCanonicalHashCalled).To(BeTrue())
|
||||
Expect(mldr.getCanonicalHashPassedNumber).To(Equal(number))
|
||||
}
|
||||
|
||||
func (mldr *MockLevelDatabaseReader) AssertGetHeadBlockHashCalled() {
|
||||
Expect(mldr.getHeadBlockHashCalled).To(BeTrue())
|
||||
}
|
52
pkg/fakes/mock_receipt_repository.go
Normal file
@ -0,0 +1,52 @@
|
||||
package fakes
|
||||
|
||||
import (
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
type MockReceiptRepository struct {
|
||||
createReceiptsAndLogsCalled bool
|
||||
createReceiptsAndLogsPassedBlockId int64
|
||||
createReceiptsAndLogsPassedReceipts []core.Receipt
|
||||
createReceiptsAndLogsReturnErr error
|
||||
}
|
||||
|
||||
func NewMockReceiptRepository() *MockReceiptRepository {
|
||||
return &MockReceiptRepository{
|
||||
createReceiptsAndLogsCalled: false,
|
||||
createReceiptsAndLogsPassedBlockId: 0,
|
||||
createReceiptsAndLogsPassedReceipts: nil,
|
||||
createReceiptsAndLogsReturnErr: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (mrr *MockReceiptRepository) SetCreateReceiptsAndLogsReturnErr(err error) {
|
||||
mrr.createReceiptsAndLogsReturnErr = err
|
||||
}
|
||||
|
||||
func (mrr *MockReceiptRepository) CreateReceiptsAndLogs(blockId int64, receipts []core.Receipt) error {
|
||||
mrr.createReceiptsAndLogsCalled = true
|
||||
mrr.createReceiptsAndLogsPassedBlockId = blockId
|
||||
mrr.createReceiptsAndLogsPassedReceipts = receipts
|
||||
return mrr.createReceiptsAndLogsReturnErr
|
||||
}
|
||||
|
||||
func (mrr *MockReceiptRepository) CreateReceipt(blockId int64, receipt core.Receipt) (int64, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mrr *MockReceiptRepository) GetReceipt(txHash string) (core.Receipt, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mrr *MockReceiptRepository) AssertCreateReceiptsAndLogsCalledWith(blockId int64, receipts []core.Receipt) {
|
||||
Expect(mrr.createReceiptsAndLogsCalled).To(BeTrue())
|
||||
Expect(mrr.createReceiptsAndLogsPassedBlockId).To(Equal(blockId))
|
||||
Expect(mrr.createReceiptsAndLogsPassedReceipts).To(Equal(receipts))
|
||||
}
|
||||
|
||||
func (mrr *MockReceiptRepository) AssertCreateReceiptsAndLogsNotCalled() {
|
||||
Expect(mrr.createReceiptsAndLogsCalled).To(BeFalse())
|
||||
}
|
40
pkg/fakes/mock_transaction_converter.go
Normal file
@ -0,0 +1,40 @@
|
||||
package fakes
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
type MockTransactionConverter struct {
|
||||
convertTransactionsToCoreCalled bool
|
||||
convertTransactionsToCorePassedBlock *types.Block
|
||||
convertTransactionsToCoreReturnTransactions []core.Transaction
|
||||
convertTransactionsToCoreReturnError error
|
||||
}
|
||||
|
||||
func NewMockTransactionConverter() *MockTransactionConverter {
|
||||
return &MockTransactionConverter{
|
||||
convertTransactionsToCoreCalled: false,
|
||||
convertTransactionsToCorePassedBlock: nil,
|
||||
convertTransactionsToCoreReturnTransactions: nil,
|
||||
convertTransactionsToCoreReturnError: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (mtc *MockTransactionConverter) SetConvertTransactionsToCoreReturnVals(transactions []core.Transaction, err error) {
|
||||
mtc.convertTransactionsToCoreReturnTransactions = transactions
|
||||
mtc.convertTransactionsToCoreReturnError = err
|
||||
}
|
||||
|
||||
func (mtc *MockTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
|
||||
mtc.convertTransactionsToCoreCalled = true
|
||||
mtc.convertTransactionsToCorePassedBlock = gethBlock
|
||||
return mtc.convertTransactionsToCoreReturnTransactions, mtc.convertTransactionsToCoreReturnError
|
||||
}
|
||||
|
||||
func (mtc *MockTransactionConverter) AssertConvertTransactionsToCoreCalledWith(gethBlock *types.Block) {
|
||||
Expect(mtc.convertTransactionsToCoreCalled).To(BeTrue())
|
||||
Expect(mtc.convertTransactionsToCorePassedBlock).To(Equal(gethBlock))
|
||||
}
|
14
pkg/fs/reader.go
Normal file
@ -0,0 +1,14 @@
package fs

import "io/ioutil"

type Reader interface {
    Read(path string) ([]byte, error)
}

type FsReader struct {
}

func (FsReader) Read(path string) ([]byte, error) {
    return ioutil.ReadFile(path)
}
@ -2,7 +2,6 @@ package geth

import (
    "errors"
    "io/ioutil"
    "strings"

    "encoding/json"
@ -11,6 +10,7 @@ import (
    "time"

    "github.com/ethereum/go-ethereum/accounts/abi"
    "github.com/vulcanize/vulcanizedb/pkg/fs"
)

var (
@ -80,7 +80,8 @@ func ParseAbi(abiString string) (abi.ABI, error) {
}

func ReadAbiFile(abiFilePath string) (string, error) {
    filesBytes, err := ioutil.ReadFile(abiFilePath)
    reader := fs.FsReader{}
    filesBytes, err := reader.Read(abiFilePath)
    if err != nil {
        return "", ErrMissingAbiFile
    }
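
Swapping ioutil.ReadFile for the new fs.Reader seam keeps ReadAbiFile's behaviour identical while giving other components an injectable file reader; the cold-import node builder below takes an fs.Reader, so tests can hand it the MockFsReader fake instead of touching disk. A brief hedged sketch of the seam (not code from this commit):

    var reader fs.Reader = fs.FsReader{} // production: reads from disk via ioutil.ReadFile
    contents, err := reader.Read("/path/to/file")
    // in tests: reader = fakes.NewMockFsReader(), with canned bytes set via SetReturnBytes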
@ -10,13 +10,17 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth/node"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
|
||||
vulcRpc "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth/node"
|
||||
)
|
||||
|
||||
type Blockchain struct {
|
||||
client *ethclient.Client
|
||||
blockConverter vulcCommon.BlockConverter
|
||||
readGethHeaders chan *types.Header
|
||||
outputBlocks chan core.Block
|
||||
newHeadSubscription ethereum.Subscription
|
||||
@ -33,6 +37,8 @@ func NewBlockchain(ipcPath string) *Blockchain {
|
||||
clientWrapper := node.ClientWrapper{ContextCaller: rpcClient, IPCPath: ipcPath}
|
||||
blockchain.node = node.MakeNode(clientWrapper)
|
||||
blockchain.client = client
|
||||
transactionConverter := vulcRpc.NewRpcTransactionConverter(client)
|
||||
blockchain.blockConverter = vulcCommon.NewBlockConverter(transactionConverter)
|
||||
return &blockchain
|
||||
}
|
||||
|
||||
@ -50,7 +56,7 @@ func (blockchain *Blockchain) GetLogs(contract core.Contract, startingBlockNumbe
|
||||
if err != nil {
|
||||
return []core.Log{}, err
|
||||
}
|
||||
logs := ToCoreLogs(gethLogs)
|
||||
logs := vulcCommon.ToCoreLogs(gethLogs)
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
@ -63,7 +69,7 @@ func (blockchain *Blockchain) GetBlockByNumber(blockNumber int64) (core.Block, e
|
||||
if err != nil {
|
||||
return core.Block{}, err
|
||||
}
|
||||
block, err := ToCoreBlock(gethBlock, blockchain.client)
|
||||
block, err := blockchain.blockConverter.ToCoreBlock(gethBlock)
|
||||
if err != nil {
|
||||
return core.Block{}, err
|
||||
}
|
||||
|
13
pkg/geth/cold_import/cold_import_suite_test.go
Normal file
@ -0,0 +1,13 @@
|
||||
package cold_import_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestColdImport(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "ColdImport Suite")
|
||||
}
|
56
pkg/geth/cold_import/importer.go
Normal file
@ -0,0 +1,56 @@
package cold_import

import (
    "github.com/vulcanize/vulcanizedb/pkg/datastore"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/ethereum"
    "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)

type ColdImporter struct {
    blockRepository   datastore.BlockRepository
    converter         common.BlockConverter
    ethDB             ethereum.Database
    receiptRepository datastore.ReceiptRepository
}

func NewColdImporter(ethDB ethereum.Database, blockRepository datastore.BlockRepository, receiptRepository datastore.ReceiptRepository, converter common.BlockConverter) *ColdImporter {
    return &ColdImporter{
        blockRepository:   blockRepository,
        converter:         converter,
        ethDB:             ethDB,
        receiptRepository: receiptRepository,
    }
}

func (ci *ColdImporter) Execute(startingBlockNumber int64, endingBlockNumber int64, nodeId string) error {
    missingBlocks := ci.blockRepository.MissingBlockNumbers(startingBlockNumber, endingBlockNumber, nodeId)
    for _, n := range missingBlocks {
        hash := ci.ethDB.GetBlockHash(n)

        blockId, err := ci.createBlocksAndTransactions(hash, n)
        if err != nil {
            return err
        }
        err = ci.createReceiptsAndLogs(hash, n, blockId)
        if err != nil {
            return err
        }
    }
    ci.blockRepository.SetBlocksStatus(endingBlockNumber)
    return nil
}

func (ci *ColdImporter) createBlocksAndTransactions(hash []byte, i int64) (int64, error) {
    block := ci.ethDB.GetBlock(hash, i)
    coreBlock, err := ci.converter.ToCoreBlock(block)
    if err != nil {
        return 0, err
    }
    return ci.blockRepository.CreateOrUpdateBlock(coreBlock)
}

func (ci *ColdImporter) createReceiptsAndLogs(hash []byte, number int64, blockId int64) error {
    receipts := ci.ethDB.GetBlockReceipts(hash, number)
    coreReceipts := common.ToCoreReceipts(receipts)
    return ci.receiptRepository.CreateReceiptsAndLogs(blockId, coreReceipts)
}
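
A hedged sketch of how the cold importer might be wired up from the pieces in this commit; the ethDB value (an ethereum.Database over Geth's LevelDB) and the block-number variables are assumptions, while every constructor shown here appears elsewhere in this diff:

    blockRepository := repositories.NewBlockRepository(db)
    receiptRepository := repositories.ReceiptRepository{DB: db}
    blockConverter := vulcCommon.NewBlockConverter(cold_db.NewColdDbTransactionConverter())

    importer := cold_import.NewColdImporter(ethDB, blockRepository, receiptRepository, blockConverter)
    err := importer.Execute(startingBlockNumber, endingBlockNumber, nodeId) // only fills blocks missing for this node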
133
pkg/geth/cold_import/importer_test.go
Normal file
@ -0,0 +1,133 @@
|
||||
package cold_import_test
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth/cold_import"
|
||||
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
|
||||
)
|
||||
|
||||
var _ = Describe("Geth cold importer", func() {
|
||||
var fakeGethBlock *types.Block
|
||||
|
||||
BeforeEach(func() {
|
||||
header := &types.Header{}
|
||||
transactions := []*types.Transaction{}
|
||||
uncles := []*types.Header{}
|
||||
receipts := []*types.Receipt{}
|
||||
fakeGethBlock = types.NewBlock(header, transactions, uncles, receipts)
|
||||
})
|
||||
|
||||
It("only populates missing blocks", func() {
|
||||
mockEthereumDatabase := fakes.NewMockEthereumDatabase()
|
||||
mockBlockRepository := fakes.NewMockBlockRepository()
|
||||
mockReceiptRepository := fakes.NewMockReceiptRepository()
|
||||
mockTransactionConverter := fakes.NewMockTransactionConverter()
|
||||
blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
|
||||
|
||||
nodeId := "node_id"
|
||||
startingBlockNumber := int64(120)
|
||||
missingBlockNumber := int64(123)
|
||||
endingBlockNumber := int64(125)
|
||||
fakeHash := []byte{1, 2, 3, 4, 5}
|
||||
mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{missingBlockNumber})
|
||||
mockEthereumDatabase.SetReturnHash(fakeHash)
|
||||
mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
|
||||
importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
|
||||
|
||||
importer.Execute(startingBlockNumber, endingBlockNumber, nodeId)
|
||||
|
||||
mockBlockRepository.AssertMissingBlockNumbersCalledWith(startingBlockNumber, endingBlockNumber, nodeId)
|
||||
mockEthereumDatabase.AssertGetBlockHashCalledWith(missingBlockNumber)
|
||||
mockEthereumDatabase.AssertGetBlockCalledWith(fakeHash, missingBlockNumber)
|
||||
})
|
||||
|
||||
It("fetches missing blocks from level db and persists them to pg", func() {
|
||||
mockEthereumDatabase := fakes.NewMockEthereumDatabase()
|
||||
mockBlockRepository := fakes.NewMockBlockRepository()
|
||||
mockReceiptRepository := fakes.NewMockReceiptRepository()
|
||||
mockTransactionConverter := fakes.NewMockTransactionConverter()
|
||||
blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
|
||||
|
||||
blockNumber := int64(123)
|
||||
fakeHash := []byte{1, 2, 3, 4, 5}
|
||||
mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{blockNumber})
|
||||
mockEthereumDatabase.SetReturnHash(fakeHash)
|
||||
mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
|
||||
importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
|
||||
|
||||
importer.Execute(blockNumber, blockNumber, "node_id")
|
||||
|
||||
mockEthereumDatabase.AssertGetBlockHashCalledWith(blockNumber)
|
||||
mockEthereumDatabase.AssertGetBlockCalledWith(fakeHash, blockNumber)
|
||||
mockTransactionConverter.AssertConvertTransactionsToCoreCalledWith(fakeGethBlock)
|
||||
convertedBlock, err := blockConverter.ToCoreBlock(fakeGethBlock)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
mockBlockRepository.AssertCreateOrUpdateBlockCalledWith(convertedBlock)
|
||||
})
|
||||
|
||||
It("sets is_final status on populated blocks", func() {
|
||||
mockEthereumDatabase := fakes.NewMockEthereumDatabase()
|
||||
mockBlockRepository := fakes.NewMockBlockRepository()
|
||||
mockReceiptRepository := fakes.NewMockReceiptRepository()
|
||||
mockTransactionConverter := fakes.NewMockTransactionConverter()
|
||||
blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
|
||||
|
||||
startingBlockNumber := int64(120)
|
||||
endingBlockNumber := int64(125)
|
||||
fakeHash := []byte{1, 2, 3, 4, 5}
|
||||
mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{startingBlockNumber})
|
||||
mockEthereumDatabase.SetReturnHash(fakeHash)
|
||||
mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
|
||||
importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
|
||||
|
||||
importer.Execute(startingBlockNumber, endingBlockNumber, "node_id")
|
||||
|
||||
mockBlockRepository.AssertSetBlockStatusCalledWith(endingBlockNumber)
|
||||
})
|
||||
|
||||
It("fetches receipts from level db and persists them to pg", func() {
|
||||
mockEthereumDatabase := fakes.NewMockEthereumDatabase()
|
||||
mockBlockRepository := fakes.NewMockBlockRepository()
|
||||
mockReceiptRepository := fakes.NewMockReceiptRepository()
|
||||
mockTransactionConverter := fakes.NewMockTransactionConverter()
|
||||
blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
|
||||
|
||||
blockNumber := int64(123)
|
||||
blockId := int64(999)
|
||||
mockBlockRepository.SetCreateOrUpdateBlockReturnVals(blockId, nil)
|
||||
fakeReceipts := types.Receipts{{}}
|
||||
mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{blockNumber})
|
||||
mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
|
||||
mockEthereumDatabase.SetReturnReceipts(fakeReceipts)
|
||||
importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
|
||||
|
||||
importer.Execute(blockNumber, blockNumber, "node_id")
|
||||
|
||||
expectedReceipts := vulcCommon.ToCoreReceipts(fakeReceipts)
|
||||
mockReceiptRepository.AssertCreateReceiptsAndLogsCalledWith(blockId, expectedReceipts)
|
||||
})
|
||||
|
||||
It("does not fetch receipts if block already exists", func() {
|
||||
mockEthereumDatabase := fakes.NewMockEthereumDatabase()
|
||||
mockBlockRepository := fakes.NewMockBlockRepository()
|
||||
mockReceiptRepository := fakes.NewMockReceiptRepository()
|
||||
mockTransactionConverter := fakes.NewMockTransactionConverter()
|
||||
blockConverter := vulcCommon.NewBlockConverter(mockTransactionConverter)
|
||||
|
||||
blockNumber := int64(123)
|
||||
mockBlockRepository.SetMissingBlockNumbersReturnArray([]int64{})
|
||||
mockEthereumDatabase.SetReturnBlock(fakeGethBlock)
|
||||
mockBlockRepository.SetCreateOrUpdateBlockReturnVals(0, repositories.ErrBlockExists)
|
||||
importer := cold_import.NewColdImporter(mockEthereumDatabase, mockBlockRepository, mockReceiptRepository, blockConverter)
|
||||
|
||||
err := importer.Execute(blockNumber, blockNumber, "node_id")
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
mockReceiptRepository.AssertCreateReceiptsAndLogsNotCalled()
|
||||
})
|
||||
})
|
68
pkg/geth/cold_import/node_builder.go
Normal file
@ -0,0 +1,68 @@
package cold_import

import (
    "errors"
    "github.com/ethereum/go-ethereum/common"
    "github.com/vulcanize/vulcanizedb/pkg/core"
    "github.com/vulcanize/vulcanizedb/pkg/crypto"
    "github.com/vulcanize/vulcanizedb/pkg/fs"
    "strings"
)

const (
    ColdImportClientName         = "LevelDbColdImport"
    ColdImportNetworkId  float64 = 1
)

var (
    NoChainDataErr = errors.New("Level DB path does not include chaindata extension.")
    NoGethRootErr  = errors.New("Level DB path does not include root path to geth.")
)

type ColdImportNodeBuilder struct {
    reader fs.Reader
    parser crypto.PublicKeyParser
}

func NewColdImportNodeBuilder(reader fs.Reader, parser crypto.PublicKeyParser) ColdImportNodeBuilder {
    return ColdImportNodeBuilder{reader: reader, parser: parser}
}

func (cinb ColdImportNodeBuilder) GetNode(genesisBlock []byte, levelPath string) (core.Node, error) {
    var coldNode core.Node
    nodeKeyPath, err := getNodeKeyPath(levelPath)
    if err != nil {
        return coldNode, err
    }
    nodeKey, err := cinb.reader.Read(nodeKeyPath)
    if err != nil {
        return coldNode, err
    }
    nodeId, err := cinb.parser.ParsePublicKey(string(nodeKey))
    if err != nil {
        return coldNode, err
    }
    genesisBlockHash := common.BytesToHash(genesisBlock).String()
    coldNode = core.Node{
        GenesisBlock: genesisBlockHash,
        NetworkID:    ColdImportNetworkId,
        ID:           nodeId,
        ClientName:   ColdImportClientName,
    }
    return coldNode, nil
}

func getNodeKeyPath(levelPath string) (string, error) {
    chaindataExtension := "chaindata"
    if !strings.Contains(levelPath, chaindataExtension) {
        return "", NoChainDataErr
    }
    chaindataExtensionLength := len(chaindataExtension)
    gethRootPathLength := len(levelPath) - chaindataExtensionLength
    if gethRootPathLength <= chaindataExtensionLength {
        return "", NoGethRootErr
    }
    gethRootPath := levelPath[:gethRootPathLength]
    nodeKeyPath := gethRootPath + "nodekey"
    return nodeKeyPath, nil
}
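
A hedged usage sketch for the node builder: given the genesis block hash bytes and Geth's chaindata path, it resolves "<geth root>/nodekey", reads the key through fs.Reader, and parses it into the node id. The concrete crypto.PublicKeyParser implementation is not shown in this diff, so it is an assumption here:

    var parser crypto.PublicKeyParser = newPublicKeyParser() // assumed constructor; only the interface appears in this diff
    nodeBuilder := cold_import.NewColdImportNodeBuilder(fs.FsReader{}, parser)
    coldNode, err := nodeBuilder.GetNode(genesisBlockBytes, "/home/user/.ethereum/geth/chaindata")
    // coldNode.ID holds the parsed public key; ClientName is "LevelDbColdImport"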
97
pkg/geth/cold_import/node_builder_test.go
Normal file
@ -0,0 +1,97 @@
|
||||
package cold_import_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth/cold_import"
|
||||
)
|
||||
|
||||
var _ = Describe("Cold importer node builder", func() {
|
||||
Describe("when level path is not valid", func() {
|
||||
It("returns error if no chaindata extension", func() {
|
||||
gethPath := "path/to/geth"
|
||||
mockReader := fakes.NewMockFsReader()
|
||||
mockParser := fakes.NewMockCryptoParser()
|
||||
nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
|
||||
|
||||
_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, gethPath)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(cold_import.NoChainDataErr))
|
||||
})
|
||||
|
||||
It("returns error if no root geth path", func() {
|
||||
chaindataPath := "chaindata"
|
||||
mockReader := fakes.NewMockFsReader()
|
||||
mockParser := fakes.NewMockCryptoParser()
|
||||
nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
|
||||
|
||||
_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, chaindataPath)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(cold_import.NoGethRootErr))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("when reader fails", func() {
|
||||
It("returns err", func() {
|
||||
mockReader := fakes.NewMockFsReader()
|
||||
fakeError := errors.New("Failed")
|
||||
mockReader.SetReturnErr(fakeError)
|
||||
mockParser := fakes.NewMockCryptoParser()
|
||||
nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
|
||||
|
||||
_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, "path/to/geth/chaindata")
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakeError))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("when parser fails", func() {
|
||||
It("returns err", func() {
|
||||
mockReader := fakes.NewMockFsReader()
|
||||
mockParser := fakes.NewMockCryptoParser()
|
||||
fakeErr := errors.New("Failed")
|
||||
mockParser.SetReturnErr(fakeErr)
|
||||
nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
|
||||
|
||||
_, err := nodeBuilder.GetNode([]byte{1, 2, 3, 4, 5}, "path/to/geth/chaindata")
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(fakeErr))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("when path is valid and reader and parser succeed", func() {
|
||||
It("builds a node", func() {
|
||||
fakeGenesisBlock := []byte{1, 2, 3, 4, 5}
|
||||
fakeRootGethPath := "root/path/to/geth/"
|
||||
fakeLevelPath := fakeRootGethPath + "chaindata"
|
||||
fakeNodeKeyPath := fakeRootGethPath + "nodekey"
|
||||
fakePublicKeyBytes := []byte{5, 4, 3, 2, 1}
|
||||
fakePublicKeyString := "public_key"
|
||||
mockReader := fakes.NewMockFsReader()
|
||||
mockReader.SetReturnBytes(fakePublicKeyBytes)
|
||||
mockParser := fakes.NewMockCryptoParser()
|
||||
mockParser.SetReturnVal(fakePublicKeyString)
|
||||
nodeBuilder := cold_import.NewColdImportNodeBuilder(mockReader, mockParser)
|
||||
|
||||
result, err := nodeBuilder.GetNode(fakeGenesisBlock, fakeLevelPath)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
mockReader.AssertReadCalledWith(fakeNodeKeyPath)
|
||||
mockParser.AssertParsePublicKeyCalledWith(string(fakePublicKeyBytes))
|
||||
Expect(result).NotTo(BeNil())
|
||||
Expect(result.ClientName).To(Equal(cold_import.ColdImportClientName))
|
||||
expectedGenesisBlock := common.BytesToHash(fakeGenesisBlock).String()
|
||||
Expect(result.GenesisBlock).To(Equal(expectedGenesisBlock))
|
||||
Expect(result.ID).To(Equal(fakePublicKeyString))
|
||||
Expect(result.NetworkID).To(Equal(cold_import.ColdImportNetworkId))
|
||||
})
|
||||
})
|
||||
|
||||
})
|
69
pkg/geth/converters/cold_db/transaction_converter.go
Normal file
@ -0,0 +1,69 @@
|
||||
package cold_db
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ColdDbTransactionConverter struct{}
|
||||
|
||||
func NewColdDbTransactionConverter() *ColdDbTransactionConverter {
|
||||
return &ColdDbTransactionConverter{}
|
||||
}
|
||||
|
||||
func (cdtc *ColdDbTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
|
||||
var g errgroup.Group
|
||||
coreTransactions := make([]core.Transaction, len(gethBlock.Transactions()))
|
||||
|
||||
for gethTransactionIndex, gethTransaction := range gethBlock.Transactions() {
|
||||
transaction := gethTransaction
|
||||
transactionIndex := uint(gethTransactionIndex)
|
||||
g.Go(func() error {
|
||||
signer := getSigner(transaction)
|
||||
sender, err := signer.Sender(transaction)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
coreTransaction := transToCoreTrans(transaction, &sender)
|
||||
coreTransactions[transactionIndex] = coreTransaction
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
return coreTransactions, err
|
||||
}
|
||||
return coreTransactions, nil
|
||||
}
|
||||
|
||||
func getSigner(tx *types.Transaction) types.Signer {
|
||||
v, _, _ := tx.RawSignatureValues()
|
||||
if v.Sign() != 0 && tx.Protected() {
|
||||
return types.NewEIP155Signer(tx.ChainId())
|
||||
}
|
||||
return types.HomesteadSigner{}
|
||||
}
|
||||
|
||||
func transToCoreTrans(transaction *types.Transaction, from *common.Address) core.Transaction {
|
||||
data := hexutil.Encode(transaction.Data())
|
||||
return core.Transaction{
|
||||
Hash: transaction.Hash().Hex(),
|
||||
Nonce: transaction.Nonce(),
|
||||
To: strings.ToLower(addressToHex(transaction.To())),
|
||||
From: strings.ToLower(addressToHex(from)),
|
||||
GasLimit: transaction.Gas(),
|
||||
GasPrice: transaction.GasPrice().Int64(),
|
||||
Value: transaction.Value().String(),
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func addressToHex(to *common.Address) string {
|
||||
if to == nil {
|
||||
return ""
|
||||
}
|
||||
return to.Hex()
|
||||
}
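
Unlike the RPC converter, the cold-db converter derives each sender locally from the transaction signature (EIP-155 signer for protected transactions, Homestead otherwise) and fans the work out across an errgroup, so no live node is needed. Either converter can sit behind the shared BlockConverter; a brief hedged sketch (ethClient is assumed to be an *ethclient.Client as used elsewhere in this diff):

    coldConverter := vulcCommon.NewBlockConverter(cold_db.NewColdDbTransactionConverter()) // cold import from LevelDB
    rpcConverter := vulcCommon.NewBlockConverter(rpc.NewRpcTransactionConverter(ethClient)) // live node over RPC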
41
pkg/geth/converters/common/block_converter.go
Normal file
@ -0,0 +1,41 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type BlockConverter struct {
|
||||
transactionConverter TransactionConverter
|
||||
}
|
||||
|
||||
func NewBlockConverter(transactionConverter TransactionConverter) BlockConverter {
|
||||
return BlockConverter{transactionConverter: transactionConverter}
|
||||
}
|
||||
|
||||
func (bc BlockConverter) ToCoreBlock(gethBlock *types.Block) (core.Block, error) {
|
||||
transactions, err := bc.transactionConverter.ConvertTransactionsToCore(gethBlock)
|
||||
if err != nil {
|
||||
return core.Block{}, err
|
||||
}
|
||||
coreBlock := core.Block{
|
||||
Difficulty: gethBlock.Difficulty().Int64(),
|
||||
ExtraData: hexutil.Encode(gethBlock.Extra()),
|
||||
GasLimit: gethBlock.GasLimit(),
|
||||
GasUsed: gethBlock.GasUsed(),
|
||||
Hash: gethBlock.Hash().Hex(),
|
||||
Miner: strings.ToLower(gethBlock.Coinbase().Hex()),
|
||||
Nonce: hexutil.Encode(gethBlock.Header().Nonce[:]),
|
||||
Number: gethBlock.Number().Int64(),
|
||||
ParentHash: gethBlock.ParentHash().Hex(),
|
||||
Size: gethBlock.Size().String(),
|
||||
Time: gethBlock.Time().Int64(),
|
||||
Transactions: transactions,
|
||||
UncleHash: gethBlock.UncleHash().Hex(),
|
||||
}
|
||||
coreBlock.Reward = CalcBlockReward(coreBlock, gethBlock.Uncles())
|
||||
coreBlock.UnclesReward = CalcUnclesReward(coreBlock, gethBlock.Uncles())
|
||||
return coreBlock, nil
|
||||
}
|
@ -1,15 +1,11 @@
|
||||
package geth_test
|
||||
package common_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"context"
|
||||
|
||||
"fmt"
|
||||
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
"math/big"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -17,7 +13,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth"
|
||||
|
||||
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
|
||||
)
|
||||
|
||||
type FakeGethClient struct {
|
||||
@ -93,27 +91,29 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
}
|
||||
block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{})
|
||||
client := &FakeGethClient{}
|
||||
gethBlock, err := geth.ToCoreBlock(block, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethBlock.Difficulty).To(Equal(difficulty.Int64()))
|
||||
Expect(gethBlock.GasLimit).To(Equal(gasLimit))
|
||||
Expect(gethBlock.Miner).To(Equal(miner.Hex()))
|
||||
Expect(gethBlock.GasUsed).To(Equal(gasUsed))
|
||||
Expect(gethBlock.Hash).To(Equal(block.Hash().Hex()))
|
||||
Expect(gethBlock.Nonce).To(Equal(hexutil.Encode(header.Nonce[:])))
|
||||
Expect(gethBlock.Number).To(Equal(number))
|
||||
Expect(gethBlock.ParentHash).To(Equal(block.ParentHash().Hex()))
|
||||
Expect(gethBlock.ExtraData).To(Equal(hexutil.Encode(block.Extra())))
|
||||
Expect(gethBlock.Size).To(Equal(block.Size().String()))
|
||||
Expect(gethBlock.Time).To(Equal(time))
|
||||
Expect(gethBlock.UncleHash).To(Equal(block.UncleHash().Hex()))
|
||||
Expect(gethBlock.IsFinal).To(BeFalse())
|
||||
Expect(coreBlock.Difficulty).To(Equal(difficulty.Int64()))
|
||||
Expect(coreBlock.GasLimit).To(Equal(gasLimit))
|
||||
Expect(coreBlock.Miner).To(Equal(miner.Hex()))
|
||||
Expect(coreBlock.GasUsed).To(Equal(gasUsed))
|
||||
Expect(coreBlock.Hash).To(Equal(block.Hash().Hex()))
|
||||
Expect(coreBlock.Nonce).To(Equal(hexutil.Encode(header.Nonce[:])))
|
||||
Expect(coreBlock.Number).To(Equal(number))
|
||||
Expect(coreBlock.ParentHash).To(Equal(block.ParentHash().Hex()))
|
||||
Expect(coreBlock.ExtraData).To(Equal(hexutil.Encode(block.Extra())))
|
||||
Expect(coreBlock.Size).To(Equal(block.Size().String()))
|
||||
Expect(coreBlock.Time).To(Equal(time))
|
||||
Expect(coreBlock.UncleHash).To(Equal(block.UncleHash().Hex()))
|
||||
Expect(coreBlock.IsFinal).To(BeFalse())
|
||||
})
|
||||
|
||||
Describe("The block and uncle rewards calculations", func() {
|
||||
It("calculates block rewards for a block", func() {
|
||||
|
||||
transaction := types.NewTransaction(
|
||||
uint64(226823),
|
||||
common.HexToAddress("0x108fedb097c1dcfed441480170144d8e19bb217f"),
|
||||
@ -141,10 +141,13 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
}
|
||||
uncles := []*types.Header{{Number: big.NewInt(1071817)}, {Number: big.NewInt(1071818)}}
|
||||
block := types.NewBlock(&header, transactions, uncles, []*types.Receipt{&receipt})
|
||||
coreBlock, err := geth.ToCoreBlock(block, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(geth.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(5.31355))
|
||||
Expect(vulcCommon.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(5.31355))
|
||||
})
|
||||
|
||||
It("calculates the uncles reward for a block", func() {
|
||||
@ -175,11 +178,13 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
|
||||
client := NewFakeClient(nil)
|
||||
client.AddReceipts(receipts)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := geth.ToCoreBlock(block, client)
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(geth.CalcUnclesReward(coreBlock, block.Uncles())).To(Equal(6.875))
|
||||
Expect(vulcCommon.CalcUnclesReward(coreBlock, block.Uncles())).To(Equal(6.875))
|
||||
})
|
||||
|
||||
It("decreases the static block reward from 5 to 3 for blocks after block 4,269,999", func() {
|
||||
@ -222,10 +227,13 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
|
||||
client := NewFakeClient(nil)
|
||||
client.AddReceipts(receipts)
|
||||
coreBlock, err := geth.ToCoreBlock(block, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(geth.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(3.024990672))
|
||||
Expect(vulcCommon.CalcBlockReward(coreBlock, block.Uncles())).To(Equal(3.024990672))
|
||||
})
|
||||
})
|
||||
|
||||
@ -234,7 +242,10 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
header := types.Header{}
|
||||
block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{})
|
||||
client := &FakeGethClient{}
|
||||
coreBlock, err := geth.ToCoreBlock(block, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(coreBlock.Transactions)).To(Equal(0))
|
||||
@ -263,13 +274,16 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
client.AddReceipts([]*types.Receipt{gethReceipt})
|
||||
|
||||
header := types.Header{}
|
||||
gethBlock := types.NewBlock(
|
||||
block := types.NewBlock(
|
||||
&header,
|
||||
[]*types.Transaction{gethTransaction},
|
||||
[]*types.Header{},
|
||||
[]*types.Receipt{gethReceipt},
|
||||
)
|
||||
coreBlock, err := geth.ToCoreBlock(gethBlock, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(coreBlock.Transactions)).To(Equal(1))
|
||||
@ -283,9 +297,8 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
Expect(coreTransaction.Nonce).To(Equal(gethTransaction.Nonce()))
|
||||
|
||||
coreReceipt := coreTransaction.Receipt
|
||||
expectedReceipt := geth.ReceiptToCoreReceipt(gethReceipt)
|
||||
expectedReceipt := vulcCommon.ToCoreReceipt(gethReceipt)
|
||||
Expect(coreReceipt).To(Equal(expectedReceipt))
|
||||
|
||||
})
|
||||
|
||||
It("has an empty 'To' field when transaction creates a new contract", func() {
|
||||
@ -307,21 +320,23 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
client := NewFakeClient(nil)
|
||||
client.AddReceipts([]*types.Receipt{gethReceipt})
|
||||
|
||||
gethBlock := types.NewBlock(
|
||||
block := types.NewBlock(
|
||||
&types.Header{},
|
||||
[]*types.Transaction{gethTransaction},
|
||||
[]*types.Header{},
|
||||
[]*types.Receipt{gethReceipt},
|
||||
)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
coreBlock, err := geth.ToCoreBlock(gethBlock, client)
|
||||
coreBlock, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
coreTransaction := coreBlock.Transactions[0]
|
||||
Expect(coreTransaction.To).To(Equal(""))
|
||||
|
||||
coreReceipt := coreTransaction.Receipt
|
||||
expectedReceipt := geth.ReceiptToCoreReceipt(gethReceipt)
|
||||
expectedReceipt := vulcCommon.ToCoreReceipt(gethReceipt)
|
||||
Expect(coreReceipt).To(Equal(expectedReceipt))
|
||||
})
|
||||
})
|
||||
@ -330,7 +345,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
var gethTransaction *types.Transaction
|
||||
var gethReceipt *types.Receipt
|
||||
var header *types.Header
|
||||
var gethBlock *types.Block
|
||||
var block *types.Block
|
||||
|
||||
BeforeEach(func() {
|
||||
log.SetOutput(ioutil.Discard)
|
||||
@ -344,7 +359,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
)
|
||||
gethReceipt = &types.Receipt{}
|
||||
header = &types.Header{}
|
||||
gethBlock = types.NewBlock(
|
||||
block = types.NewBlock(
|
||||
header,
|
||||
[]*types.Transaction{gethTransaction},
|
||||
[]*types.Header{},
|
||||
@ -360,14 +375,22 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
It("returns an error when transaction sender call fails", func() {
|
||||
client := NewFakeClient(TransactionSenderError{})
|
||||
client.AddReceipts([]*types.Receipt{})
|
||||
_, err := geth.ToCoreBlock(gethBlock, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
_, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).To(Equal(TransactionSenderError{}))
|
||||
})
|
||||
|
||||
It("returns an error when transaction receipt call fails", func() {
|
||||
client := NewFakeClient(TransActionReceiptError{})
|
||||
client.AddReceipts([]*types.Receipt{})
|
||||
_, err := geth.ToCoreBlock(gethBlock, client)
|
||||
transactionConverter := rpc.NewRpcTransactionConverter(client)
|
||||
blockConverter := vulcCommon.NewBlockConverter(transactionConverter)
|
||||
|
||||
_, err := blockConverter.ToCoreBlock(block)
|
||||
|
||||
Expect(err).To(Equal(TransActionReceiptError{}))
|
||||
})
|
||||
})
|
@ -1,4 +1,4 @@
|
||||
package geth
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
13
pkg/geth/converters/common/common_suite_test.go
Normal file
@ -0,0 +1,13 @@
|
||||
package common_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestCommon(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Common Suite")
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
package geth
|
||||
package common
|
||||
|
||||
import (
|
||||
"strings"
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
@ -31,7 +32,6 @@ func ToCoreLog(gethLog types.Log) core.Log {
|
||||
hexTopics := makeTopics(topics)
|
||||
return core.Log{
|
||||
Address: strings.ToLower(gethLog.Address.Hex()),
|
||||
|
||||
BlockNumber: int64(gethLog.BlockNumber),
|
||||
Topics: hexTopics,
|
||||
TxHash: gethLog.TxHash.Hex(),
|
@ -1,4 +1,4 @@
|
||||
package geth_test
|
||||
package common_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
@ -8,8 +8,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth"
|
||||
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
|
||||
)
|
||||
|
||||
var _ = Describe("Conversion of GethLog to core.Log", func() {
|
||||
@ -41,7 +42,7 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
|
||||
},
|
||||
}
|
||||
|
||||
coreLog := geth.ToCoreLog(gethLog)
|
||||
coreLog := vulcCommon.ToCoreLog(gethLog)
|
||||
|
||||
Expect(coreLog.Address).To(Equal(expected.Address))
|
||||
Expect(coreLog.BlockNumber).To(Equal(expected.BlockNumber))
|
||||
@ -81,15 +82,13 @@ var _ = Describe("Conversion of GethLog to core.Log", func() {
|
||||
},
|
||||
}
|
||||
|
||||
expectedOne := geth.ToCoreLog(gethLogOne)
|
||||
expectedTwo := geth.ToCoreLog(gethLogTwo)
|
||||
expectedOne := vulcCommon.ToCoreLog(gethLogOne)
|
||||
expectedTwo := vulcCommon.ToCoreLog(gethLogTwo)
|
||||
|
||||
coreLogs := geth.ToCoreLogs([]types.Log{gethLogOne, gethLogTwo})
|
||||
coreLogs := vulcCommon.ToCoreLogs([]types.Log{gethLogOne, gethLogTwo})
|
||||
|
||||
Expect(len(coreLogs)).To(Equal(2))
|
||||
Expect(coreLogs[0]).To(Equal(expectedOne))
|
||||
Expect(coreLogs[1]).To(Equal(expectedTwo))
|
||||
|
||||
})
|
||||
|
||||
})
|
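The conversions exercised by the specs above can be called directly; a small sketch follows. The sample log values are placeholders, not fixture data, and the helper names are made up for illustration.

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)

// toCoreLogs converts a batch of geth logs the same way the specs do:
// ToCoreLogs simply applies ToCoreLog to each entry.
func toCoreLogs(gethLogs []types.Log) []core.Log {
	return vulcCommon.ToCoreLogs(gethLogs)
}

// sampleLog builds a minimal geth log; the address, hash, and block number
// are placeholders.
func sampleLog() types.Log {
	return types.Log{
		Address:     common.HexToAddress("0x1"),
		BlockNumber: 100,
		TxHash:      common.HexToHash("0x2"),
	}
}
```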
@ -1,24 +1,25 @@
|
||||
package geth
|
||||
package common
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"bytes"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
func BigTo64(n *big.Int) int64 {
|
||||
if n != nil {
|
||||
return n.Int64()
|
||||
func ToCoreReceipts(gethReceipts types.Receipts) []core.Receipt {
|
||||
var coreReceipts []core.Receipt
|
||||
for _, receipt := range gethReceipts {
|
||||
coreReceipt := ToCoreReceipt(receipt)
|
||||
coreReceipts = append(coreReceipts, coreReceipt)
|
||||
}
|
||||
return 0
|
||||
return coreReceipts
|
||||
}
|
||||
|
||||
func ReceiptToCoreReceipt(gethReceipt *types.Receipt) core.Receipt {
|
||||
func ToCoreReceipt(gethReceipt *types.Receipt) core.Receipt {
|
||||
bloom := hexutil.Encode(gethReceipt.Bloom.Bytes())
|
||||
var postState string
|
||||
var status int
|
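ToCoreReceipts above is a straightforward map over ToCoreReceipt; a short usage sketch, assuming nothing beyond the signatures shown in this file (the wrapper names are illustrative).

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)

// convertReceipts converts every geth receipt in a block using the batch
// helper shown above.
func convertReceipts(gethReceipts types.Receipts) []core.Receipt {
	return vulcCommon.ToCoreReceipts(gethReceipts)
}

// convertReceipt converts a single receipt, as the specs below do.
func convertReceipt(gethReceipt *types.Receipt) core.Receipt {
	return vulcCommon.ToCoreReceipt(gethReceipt)
}
```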
@ -1,4 +1,4 @@
|
||||
package geth_test
|
||||
package common_test
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -6,8 +6,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/geth"
|
||||
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
|
||||
)
|
||||
|
||||
var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
|
||||
@ -34,7 +35,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
|
||||
TxHash: receipt.TxHash.Hex(),
|
||||
}
|
||||
|
||||
coreReceipt := geth.ReceiptToCoreReceipt(&receipt)
|
||||
coreReceipt := vulcCommon.ToCoreReceipt(&receipt)
|
||||
Expect(coreReceipt.Bloom).To(Equal(expected.Bloom))
|
||||
Expect(coreReceipt.ContractAddress).To(Equal(expected.ContractAddress))
|
||||
Expect(coreReceipt.CumulativeGasUsed).To(Equal(expected.CumulativeGasUsed))
|
||||
@ -68,7 +69,7 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
|
||||
TxHash: receipt.TxHash.Hex(),
|
||||
}
|
||||
|
||||
coreReceipt := geth.ReceiptToCoreReceipt(&receipt)
|
||||
coreReceipt := vulcCommon.ToCoreReceipt(&receipt)
|
||||
Expect(coreReceipt.Bloom).To(Equal(expected.Bloom))
|
||||
Expect(coreReceipt.ContractAddress).To(Equal(""))
|
||||
Expect(coreReceipt.CumulativeGasUsed).To(Equal(expected.CumulativeGasUsed))
|
||||
@ -77,7 +78,5 @@ var _ = Describe("Conversion of GethReceipt to core.Receipt", func() {
|
||||
Expect(coreReceipt.StateRoot).To(Equal(expected.StateRoot))
|
||||
Expect(coreReceipt.Status).To(Equal(expected.Status))
|
||||
Expect(coreReceipt.TxHash).To(Equal(expected.TxHash))
|
||||
|
||||
})
|
||||
|
||||
})
|
10
pkg/geth/converters/common/transaction_converter.go
Normal file
@ -0,0 +1,10 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
type TransactionConverter interface {
|
||||
ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error)
|
||||
}
|
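Because BlockConverter depends only on this interface, any implementation can stand in for the RPC-backed one. A hedged sketch of a trivial stub (the stub name is made up) illustrates the seam:

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
)

// stubTransactionConverter is a hypothetical no-op implementation of the
// TransactionConverter interface declared above; anything satisfying the
// interface can be handed to NewBlockConverter.
type stubTransactionConverter struct{}

func (stubTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
	// Return an empty slice so blocks convert with no transactions attached.
	return []core.Transaction{}, nil
}

// Compile-time check that the stub satisfies the interface.
var _ vulcCommon.TransactionConverter = stubTransactionConverter{}
```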
@ -1,16 +1,17 @@
|
||||
package geth
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"context"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
vulcCommon "github.com/vulcanize/vulcanizedb/pkg/geth/converters/common"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
@ -18,32 +19,15 @@ type Client interface {
|
||||
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
|
||||
}
|
||||
|
||||
func ToCoreBlock(gethBlock *types.Block, client Client) (core.Block, error) {
|
||||
transactions, err := convertTransactionsToCore(gethBlock, client)
|
||||
if err != nil {
|
||||
return core.Block{}, err
|
||||
}
|
||||
coreBlock := core.Block{
|
||||
Difficulty: gethBlock.Difficulty().Int64(),
|
||||
ExtraData: hexutil.Encode(gethBlock.Extra()),
|
||||
GasLimit: gethBlock.GasLimit(),
|
||||
GasUsed: gethBlock.GasUsed(),
|
||||
Hash: gethBlock.Hash().Hex(),
|
||||
Miner: strings.ToLower(gethBlock.Coinbase().Hex()),
|
||||
Nonce: hexutil.Encode(gethBlock.Header().Nonce[:]),
|
||||
Number: gethBlock.Number().Int64(),
|
||||
ParentHash: gethBlock.ParentHash().Hex(),
|
||||
Size: gethBlock.Size().String(),
|
||||
Time: gethBlock.Time().Int64(),
|
||||
Transactions: transactions,
|
||||
UncleHash: gethBlock.UncleHash().Hex(),
|
||||
}
|
||||
coreBlock.Reward = CalcBlockReward(coreBlock, gethBlock.Uncles())
|
||||
coreBlock.UnclesReward = CalcUnclesReward(coreBlock, gethBlock.Uncles())
|
||||
return coreBlock, nil
|
||||
type RpcTransactionConverter struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
func convertTransactionsToCore(gethBlock *types.Block, client Client) ([]core.Transaction, error) {
|
||||
func NewRpcTransactionConverter(client Client) *RpcTransactionConverter {
|
||||
return &RpcTransactionConverter{client: client}
|
||||
}
|
||||
|
||||
func (rtc *RpcTransactionConverter) ConvertTransactionsToCore(gethBlock *types.Block) ([]core.Transaction, error) {
|
||||
var g errgroup.Group
|
||||
coreTransactions := make([]core.Transaction, len(gethBlock.Transactions()))
|
||||
|
||||
@ -52,13 +36,13 @@ func convertTransactionsToCore(gethBlock *types.Block, client Client) ([]core.Tr
|
||||
transaction := gethTransaction
|
||||
transactionIndex := uint(gethTransactionIndex)
|
||||
g.Go(func() error {
|
||||
from, err := client.TransactionSender(context.Background(), transaction, gethBlock.Hash(), transactionIndex)
|
||||
from, err := rtc.client.TransactionSender(context.Background(), transaction, gethBlock.Hash(), transactionIndex)
|
||||
if err != nil {
|
||||
log.Println("transaction sender: ", err)
|
||||
return err
|
||||
}
|
||||
coreTransaction := transToCoreTrans(transaction, &from)
|
||||
coreTransaction, err = appendReceiptToTransaction(client, coreTransaction)
|
||||
coreTransaction, err = appendReceiptToTransaction(rtc.client, coreTransaction)
|
||||
if err != nil {
|
||||
log.Println("receipt: ", err)
|
||||
return err
|
||||
@ -79,7 +63,7 @@ func appendReceiptToTransaction(client Client, transaction core.Transaction) (co
|
||||
if err != nil {
|
||||
return transaction, err
|
||||
}
|
||||
receipt := ReceiptToCoreReceipt(gethReceipt)
|
||||
receipt := vulcCommon.ToCoreReceipt(gethReceipt)
|
||||
transaction.Receipt = receipt
|
||||
return transaction, nil
|
||||
}
|
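ConvertTransactionsToCore above fans out one goroutine per transaction with errgroup, writes each result back at its original index, and lets g.Wait() surface the first non-nil error. A standalone sketch of that pattern, with purely illustrative names and element types:

```go
package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// fanOut mirrors the errgroup pattern used above: convert each item
// concurrently, store results at their original index, and return the first
// error reported by any goroutine.
func fanOut(ctx context.Context, items []string, convert func(context.Context, string) (int, error)) ([]int, error) {
	var g errgroup.Group
	results := make([]int, len(items))
	for i, item := range items {
		i, item := i, item // capture loop variables for the goroutine
		g.Go(func() error {
			converted, err := convert(ctx, item)
			if err != nil {
				return err
			}
			results[i] = converted
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return results, nil
}
```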
@ -9,7 +9,7 @@ import (
|
||||
|
||||
func PopulateMissingBlocks(blockchain core.Blockchain, blockRepository datastore.BlockRepository, startingBlockNumber int64) int {
|
||||
lastBlock := blockchain.LastBlock().Int64()
|
||||
blockRange := blockRepository.MissingBlockNumbers(startingBlockNumber, lastBlock-1)
|
||||
blockRange := blockRepository.MissingBlockNumbers(startingBlockNumber, lastBlock-1, blockchain.Node().ID)
|
||||
log.SetPrefix("")
|
||||
log.Printf("Backfilling %d blocks\n\n", len(blockRange))
|
||||
RetrieveAndUpdateBlocks(blockchain, blockRepository, blockRange)
|
||||
@ -23,6 +23,7 @@ func RetrieveAndUpdateBlocks(blockchain core.Blockchain, blockRepository datasto
|
||||
log.Printf("failed to retrieve block number: %d\n", blockNumber)
|
||||
return 0
|
||||
}
|
||||
// TODO: handle possible error here
|
||||
blockRepository.CreateOrUpdateBlock(block)
|
||||
}
|
||||
return len(blockNumbers)
|
||||
|
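PopulateMissingBlocks and RetrieveAndUpdateBlocks above implement the backfill loop; a condensed sketch follows. The datastore import path, the element type returned by MissingBlockNumbers, and the fetchBlock callback are assumptions for illustration; the rest mirrors the calls shown in the diff.

```go
package example

import (
	"log"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore" // assumed path
)

// backfill sketches the flow above: find the gaps recorded for this node,
// fetch each missing block, and upsert it. fetchBlock stands in for whatever
// retrieval call the blockchain interface exposes (its name is not shown here).
func backfill(
	blockchain core.Blockchain,
	repo datastore.BlockRepository,
	startingBlockNumber int64,
	fetchBlock func(int64) (core.Block, error),
) int {
	lastBlock := blockchain.LastBlock().Int64()
	// Missing block numbers are now scoped to the current node's ID.
	missing := repo.MissingBlockNumbers(startingBlockNumber, lastBlock-1, blockchain.Node().ID)
	for _, blockNumber := range missing {
		block, err := fetchBlock(blockNumber)
		if err != nil {
			log.Printf("failed to retrieve block number: %d\n", blockNumber)
			return 0
		}
		repo.CreateOrUpdateBlock(block)
	}
	return len(missing)
}
```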
23
vendor/github.com/hashicorp/golang-lru/.gitignore
generated
vendored
Normal file

@ -0,0 +1,23 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
223
vendor/github.com/hashicorp/golang-lru/2q.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
const (
|
||||
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
|
||||
// to recently added entries that have only been accessed once.
|
||||
Default2QRecentRatio = 0.25
|
||||
|
||||
// Default2QGhostEntries is the default ratio of ghost
|
||||
// entries kept to track entries recently evicted
|
||||
Default2QGhostEntries = 0.50
|
||||
)
|
||||
|
||||
// TwoQueueCache is a thread-safe fixed size 2Q cache.
|
||||
// 2Q is an enhancement over the standard LRU cache
|
||||
// in that it tracks both frequently and recently used
|
||||
// entries separately. This avoids a burst in access to new
|
||||
// entries from evicting frequently used entries. It adds some
|
||||
// additional tracking overhead to the standard LRU cache, and is
|
||||
// computationally about 2x the cost, and adds some metadata over
|
||||
// head. The ARCCache is similar, but does not require setting any
|
||||
// parameters.
|
||||
type TwoQueueCache struct {
|
||||
size int
|
||||
recentSize int
|
||||
|
||||
recent simplelru.LRUCache
|
||||
frequent simplelru.LRUCache
|
||||
recentEvict simplelru.LRUCache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// New2Q creates a new TwoQueueCache using the default
|
||||
// values for the parameters.
|
||||
func New2Q(size int) (*TwoQueueCache, error) {
|
||||
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
|
||||
}
|
||||
|
||||
// New2QParams creates a new TwoQueueCache using the provided
|
||||
// parameter values.
|
||||
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
|
||||
if size <= 0 {
|
||||
return nil, fmt.Errorf("invalid size")
|
||||
}
|
||||
if recentRatio < 0.0 || recentRatio > 1.0 {
|
||||
return nil, fmt.Errorf("invalid recent ratio")
|
||||
}
|
||||
if ghostRatio < 0.0 || ghostRatio > 1.0 {
|
||||
return nil, fmt.Errorf("invalid ghost ratio")
|
||||
}
|
||||
|
||||
// Determine the sub-sizes
|
||||
recentSize := int(float64(size) * recentRatio)
|
||||
evictSize := int(float64(size) * ghostRatio)
|
||||
|
||||
// Allocate the LRUs
|
||||
recent, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frequent, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
recentEvict, err := simplelru.NewLRU(evictSize, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the cache
|
||||
c := &TwoQueueCache{
|
||||
size: size,
|
||||
recentSize: recentSize,
|
||||
recent: recent,
|
||||
frequent: frequent,
|
||||
recentEvict: recentEvict,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if this is a frequent value
|
||||
if val, ok := c.frequent.Get(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// If the value is contained in recent, then we
|
||||
// promote it to frequent
|
||||
if val, ok := c.recent.Peek(key); ok {
|
||||
c.recent.Remove(key)
|
||||
c.frequent.Add(key, val)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// No hit
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *TwoQueueCache) Add(key, value interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if the value is frequently used already,
|
||||
// and just update the value
|
||||
if c.frequent.Contains(key) {
|
||||
c.frequent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the value is recently used, and promote
|
||||
// the value into the frequent list
|
||||
if c.recent.Contains(key) {
|
||||
c.recent.Remove(key)
|
||||
c.frequent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// If the value was recently evicted, add it to the
|
||||
// frequently used list
|
||||
if c.recentEvict.Contains(key) {
|
||||
c.ensureSpace(true)
|
||||
c.recentEvict.Remove(key)
|
||||
c.frequent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Add to the recently seen list
|
||||
c.ensureSpace(false)
|
||||
c.recent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// ensureSpace is used to ensure we have space in the cache
|
||||
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
|
||||
// If we have space, nothing to do
|
||||
recentLen := c.recent.Len()
|
||||
freqLen := c.frequent.Len()
|
||||
if recentLen+freqLen < c.size {
|
||||
return
|
||||
}
|
||||
|
||||
// If the recent buffer is larger than
|
||||
// the target, evict from there
|
||||
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
|
||||
k, _, _ := c.recent.RemoveOldest()
|
||||
c.recentEvict.Add(k, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove from the frequent list otherwise
|
||||
c.frequent.RemoveOldest()
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *TwoQueueCache) Len() int {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.recent.Len() + c.frequent.Len()
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache.
|
||||
// The frequently used keys are first in the returned slice.
|
||||
func (c *TwoQueueCache) Keys() []interface{} {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
k1 := c.frequent.Keys()
|
||||
k2 := c.recent.Keys()
|
||||
return append(k1, k2...)
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *TwoQueueCache) Remove(key interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if c.frequent.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.recent.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.recentEvict.Remove(key) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *TwoQueueCache) Purge() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.recent.Purge()
|
||||
c.frequent.Purge()
|
||||
c.recentEvict.Purge()
|
||||
}
|
||||
|
||||
// Contains is used to check if the cache contains a key
|
||||
// without updating recency or frequency.
|
||||
func (c *TwoQueueCache) Contains(key interface{}) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.frequent.Contains(key) || c.recent.Contains(key)
|
||||
}
|
||||
|
||||
// Peek is used to inspect the cache value of a key
|
||||
// without updating recency or frequency.
|
||||
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if val, ok := c.frequent.Peek(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
return c.recent.Peek(key)
|
||||
}
|
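The TwoQueueCache added above keeps separate recent and frequent LRUs and promotes keys between them on repeated access; a small usage sketch against the exported API shown in this file:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// A 2Q cache of 128 entries, split between the recent and frequent
	// lists using the default ratios.
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1) // lands in the recent list
	cache.Add("a", 2) // a second Add promotes the key to the frequent list

	if v, ok := cache.Get("a"); ok {
		fmt.Println("a =", v) // a = 2
	}

	// Contains and Peek inspect the cache without updating recency.
	fmt.Println(cache.Contains("a"), cache.Len())
}
```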
306
vendor/github.com/hashicorp/golang-lru/2q_test.go
generated
vendored
Normal file
@ -0,0 +1,306 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Benchmark2Q_Rand(b *testing.B) {
|
||||
l, err := New2Q(8192)
|
||||
if err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
trace := make([]int64, b.N*2)
|
||||
for i := 0; i < b.N*2; i++ {
|
||||
trace[i] = rand.Int63() % 32768
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
var hit, miss int
|
||||
for i := 0; i < 2*b.N; i++ {
|
||||
if i%2 == 0 {
|
||||
l.Add(trace[i], trace[i])
|
||||
} else {
|
||||
_, ok := l.Get(trace[i])
|
||||
if ok {
|
||||
hit++
|
||||
} else {
|
||||
miss++
|
||||
}
|
||||
}
|
||||
}
|
||||
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||
}
|
||||
|
||||
func Benchmark2Q_Freq(b *testing.B) {
|
||||
l, err := New2Q(8192)
|
||||
if err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
trace := make([]int64, b.N*2)
|
||||
for i := 0; i < b.N*2; i++ {
|
||||
if i%2 == 0 {
|
||||
trace[i] = rand.Int63() % 16384
|
||||
} else {
|
||||
trace[i] = rand.Int63() % 32768
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
l.Add(trace[i], trace[i])
|
||||
}
|
||||
var hit, miss int
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, ok := l.Get(trace[i])
|
||||
if ok {
|
||||
hit++
|
||||
} else {
|
||||
miss++
|
||||
}
|
||||
}
|
||||
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||
}
|
||||
|
||||
func Test2Q_RandomOps(t *testing.T) {
|
||||
size := 128
|
||||
l, err := New2Q(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
n := 200000
|
||||
for i := 0; i < n; i++ {
|
||||
key := rand.Int63() % 512
|
||||
r := rand.Int63()
|
||||
switch r % 3 {
|
||||
case 0:
|
||||
l.Add(key, key)
|
||||
case 1:
|
||||
l.Get(key)
|
||||
case 2:
|
||||
l.Remove(key)
|
||||
}
|
||||
|
||||
if l.recent.Len()+l.frequent.Len() > size {
|
||||
t.Fatalf("bad: recent: %d freq: %d",
|
||||
l.recent.Len(), l.frequent.Len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test2Q_Get_RecentToFrequent(t *testing.T) {
|
||||
l, err := New2Q(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Touch all the entries, should be in t1
|
||||
for i := 0; i < 128; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if n := l.recent.Len(); n != 128 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Get should upgrade to t2
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("missing: %d", i)
|
||||
}
|
||||
}
|
||||
if n := l.recent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 128 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Get be from t2
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("missing: %d", i)
|
||||
}
|
||||
}
|
||||
if n := l.recent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 128 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func Test2Q_Add_RecentToFrequent(t *testing.T) {
|
||||
l, err := New2Q(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Add initially to recent
|
||||
l.Add(1, 1)
|
||||
if n := l.recent.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Add should upgrade to frequent
|
||||
l.Add(1, 1)
|
||||
if n := l.recent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Add should remain in frequent
|
||||
l.Add(1, 1)
|
||||
if n := l.recent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func Test2Q_Add_RecentEvict(t *testing.T) {
|
||||
l, err := New2Q(4)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Add 1,2,3,4,5 -> Evict 1
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
l.Add(3, 3)
|
||||
l.Add(4, 4)
|
||||
l.Add(5, 5)
|
||||
if n := l.recent.Len(); n != 4 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.recentEvict.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Pull in the recently evicted
|
||||
l.Add(1, 1)
|
||||
if n := l.recent.Len(); n != 3 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.recentEvict.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Add 6, should cause another recent evict
|
||||
l.Add(6, 6)
|
||||
if n := l.recent.Len(); n != 3 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.recentEvict.Len(); n != 2 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.frequent.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func Test2Q(t *testing.T) {
|
||||
l, err := New2Q(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
|
||||
for i, k := range l.Keys() {
|
||||
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||
t.Fatalf("bad key: %v", k)
|
||||
}
|
||||
}
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 256; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("should not be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 192; i++ {
|
||||
l.Remove(i)
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be deleted")
|
||||
}
|
||||
}
|
||||
|
||||
l.Purge()
|
||||
if l.Len() != 0 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
if _, ok := l.Get(200); ok {
|
||||
t.Fatalf("should contain nothing")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Contains doesn't update recent-ness
|
||||
func Test2Q_Contains(t *testing.T) {
|
||||
l, err := New2Q(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if !l.Contains(1) {
|
||||
t.Errorf("1 should be contained")
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Peek doesn't update recent-ness
|
||||
func Test2Q_Peek(t *testing.T) {
|
||||
l, err := New2Q(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
362
vendor/github.com/hashicorp/golang-lru/LICENSE
generated
vendored
Normal file
@ -0,0 +1,362 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
25
vendor/github.com/hashicorp/golang-lru/README.md
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
golang-lru
|
||||
==========
|
||||
|
||||
This provides the `lru` package which implements a fixed-size
|
||||
thread safe LRU cache. It is based on the cache in Groupcache.
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
||||
Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
|
||||
|
||||
Example
|
||||
=======
|
||||
|
||||
Using the LRU is very simple:
|
||||
|
||||
```go
|
||||
l, _ := New(128)
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, nil)
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
panic(fmt.Sprintf("bad len: %v", l.Len()))
|
||||
}
|
||||
```
|
257
vendor/github.com/hashicorp/golang-lru/arc.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
|
||||
// ARC is an enhancement over the standard LRU cache in that tracks both
|
||||
// frequency and recency of use. This avoids a burst in access to new
|
||||
// entries from evicting the frequently used older entries. It adds some
|
||||
// additional tracking overhead to a standard LRU cache, computationally
|
||||
// it is roughly 2x the cost, and the extra memory overhead is linear
|
||||
// with the size of the cache. ARC has been patented by IBM, but is
|
||||
// similar to the TwoQueueCache (2Q) which requires setting parameters.
|
||||
type ARCCache struct {
|
||||
size int // Size is the total capacity of the cache
|
||||
p int // P is the dynamic preference towards T1 or T2
|
||||
|
||||
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
|
||||
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
|
||||
|
||||
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
|
||||
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
|
||||
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewARC creates an ARC of the given size
|
||||
func NewARC(size int) (*ARCCache, error) {
|
||||
// Create the sub LRUs
|
||||
b1, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b2, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t1, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t2, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the ARC
|
||||
c := &ARCCache{
|
||||
size: size,
|
||||
p: 0,
|
||||
t1: t1,
|
||||
b1: b1,
|
||||
t2: t2,
|
||||
b2: b2,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// If the value is contained in T1 (recent), then
|
||||
// promote it to T2 (frequent)
|
||||
if val, ok := c.t1.Peek(key); ok {
|
||||
c.t1.Remove(key)
|
||||
c.t2.Add(key, val)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// Check if the value is contained in T2 (frequent)
|
||||
if val, ok := c.t2.Get(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// No hit
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *ARCCache) Add(key, value interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if the value is contained in T1 (recent), and potentially
|
||||
// promote it to frequent T2
|
||||
if c.t1.Contains(key) {
|
||||
c.t1.Remove(key)
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the value is already in T2 (frequent) and update it
|
||||
if c.t2.Contains(key) {
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this value was recently evicted as part of the
|
||||
// recently used list
|
||||
if c.b1.Contains(key) {
|
||||
// T1 set is too small, increase P appropriately
|
||||
delta := 1
|
||||
b1Len := c.b1.Len()
|
||||
b2Len := c.b2.Len()
|
||||
if b2Len > b1Len {
|
||||
delta = b2Len / b1Len
|
||||
}
|
||||
if c.p+delta >= c.size {
|
||||
c.p = c.size
|
||||
} else {
|
||||
c.p += delta
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(false)
|
||||
}
|
||||
|
||||
// Remove from B1
|
||||
c.b1.Remove(key)
|
||||
|
||||
// Add the key to the frequently used list
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this value was recently evicted as part of the
|
||||
// frequently used list
|
||||
if c.b2.Contains(key) {
|
||||
// T2 set is too small, decrease P appropriately
|
||||
delta := 1
|
||||
b1Len := c.b1.Len()
|
||||
b2Len := c.b2.Len()
|
||||
if b1Len > b2Len {
|
||||
delta = b1Len / b2Len
|
||||
}
|
||||
if delta >= c.p {
|
||||
c.p = 0
|
||||
} else {
|
||||
c.p -= delta
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(true)
|
||||
}
|
||||
|
||||
// Remove from B2
|
||||
c.b2.Remove(key)
|
||||
|
||||
// Add the key to the frequently used list
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(false)
|
||||
}
|
||||
|
||||
// Keep the size of the ghost buffers trim
|
||||
if c.b1.Len() > c.size-c.p {
|
||||
c.b1.RemoveOldest()
|
||||
}
|
||||
if c.b2.Len() > c.p {
|
||||
c.b2.RemoveOldest()
|
||||
}
|
||||
|
||||
// Add to the recently seen list
|
||||
c.t1.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// replace is used to adaptively evict from either T1 or T2
|
||||
// based on the current learned value of P
|
||||
func (c *ARCCache) replace(b2ContainsKey bool) {
|
||||
t1Len := c.t1.Len()
|
||||
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
|
||||
k, _, ok := c.t1.RemoveOldest()
|
||||
if ok {
|
||||
c.b1.Add(k, nil)
|
||||
}
|
||||
} else {
|
||||
k, _, ok := c.t2.RemoveOldest()
|
||||
if ok {
|
||||
c.b2.Add(k, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of cached entries
|
||||
func (c *ARCCache) Len() int {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.t1.Len() + c.t2.Len()
|
||||
}
|
||||
|
||||
// Keys returns all the cached keys
|
||||
func (c *ARCCache) Keys() []interface{} {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
k1 := c.t1.Keys()
|
||||
k2 := c.t2.Keys()
|
||||
return append(k1, k2...)
|
||||
}
|
||||
|
||||
// Remove is used to purge a key from the cache
|
||||
func (c *ARCCache) Remove(key interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if c.t1.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.t2.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.b1.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.b2.Remove(key) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Purge is used to clear the cache
|
||||
func (c *ARCCache) Purge() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.t1.Purge()
|
||||
c.t2.Purge()
|
||||
c.b1.Purge()
|
||||
c.b2.Purge()
|
||||
}
|
||||
|
||||
// Contains is used to check if the cache contains a key
|
||||
// without updating recency or frequency.
|
||||
func (c *ARCCache) Contains(key interface{}) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.t1.Contains(key) || c.t2.Contains(key)
|
||||
}
|
||||
|
||||
// Peek is used to inspect the cache value of a key
|
||||
// without updating recency or frequency.
|
||||
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if val, ok := c.t1.Peek(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
return c.t2.Peek(key)
|
||||
}
|
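ARCCache above adaptively shifts capacity between its recent (t1) and frequent (t2) lists via the parameter p, remembering recent evictions in the b1/b2 ghost lists; a brief usage sketch against the API shown in this file:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// An ARC cache of 4 entries.
	arc, err := lru.NewARC(4)
	if err != nil {
		panic(err)
	}

	for i := 0; i < 4; i++ {
		arc.Add(i, i) // fills the recent list (t1)
	}
	arc.Get(0)    // a hit promotes the entry from t1 to t2
	arc.Add(4, 4) // evicts the oldest t1 entry; its key is remembered in b1

	fmt.Println(arc.Len(), arc.Contains(0))
}
```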
377
vendor/github.com/hashicorp/golang-lru/arc_test.go
generated
vendored
Normal file
@ -0,0 +1,377 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
}
|
||||
|
||||
func BenchmarkARC_Rand(b *testing.B) {
|
||||
l, err := NewARC(8192)
|
||||
if err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
trace := make([]int64, b.N*2)
|
||||
for i := 0; i < b.N*2; i++ {
|
||||
trace[i] = rand.Int63() % 32768
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
var hit, miss int
|
||||
for i := 0; i < 2*b.N; i++ {
|
||||
if i%2 == 0 {
|
||||
l.Add(trace[i], trace[i])
|
||||
} else {
|
||||
_, ok := l.Get(trace[i])
|
||||
if ok {
|
||||
hit++
|
||||
} else {
|
||||
miss++
|
||||
}
|
||||
}
|
||||
}
|
||||
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||
}
|
||||
|
||||
func BenchmarkARC_Freq(b *testing.B) {
|
||||
l, err := NewARC(8192)
|
||||
if err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
trace := make([]int64, b.N*2)
|
||||
for i := 0; i < b.N*2; i++ {
|
||||
if i%2 == 0 {
|
||||
trace[i] = rand.Int63() % 16384
|
||||
} else {
|
||||
trace[i] = rand.Int63() % 32768
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
l.Add(trace[i], trace[i])
|
||||
}
|
||||
var hit, miss int
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, ok := l.Get(trace[i])
|
||||
if ok {
|
||||
hit++
|
||||
} else {
|
||||
miss++
|
||||
}
|
||||
}
|
||||
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||
}
|
||||
|
||||
func TestARC_RandomOps(t *testing.T) {
|
||||
size := 128
|
||||
l, err := NewARC(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
n := 200000
|
||||
for i := 0; i < n; i++ {
|
||||
key := rand.Int63() % 512
|
||||
r := rand.Int63()
|
||||
switch r % 3 {
|
||||
case 0:
|
||||
l.Add(key, key)
|
||||
case 1:
|
||||
l.Get(key)
|
||||
case 2:
|
||||
l.Remove(key)
|
||||
}
|
||||
|
||||
if l.t1.Len()+l.t2.Len() > size {
|
||||
t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
|
||||
l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
|
||||
}
|
||||
if l.b1.Len()+l.b2.Len() > size {
|
||||
t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
|
||||
l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestARC_Get_RecentToFrequent(t *testing.T) {
|
||||
l, err := NewARC(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Touch all the entries, should be in t1
|
||||
for i := 0; i < 128; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if n := l.t1.Len(); n != 128 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Get should upgrade to t2
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("missing: %d", i)
|
||||
}
|
||||
}
|
||||
if n := l.t1.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 128 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Get be from t2
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("missing: %d", i)
|
||||
}
|
||||
}
|
||||
if n := l.t1.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 128 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestARC_Add_RecentToFrequent(t *testing.T) {
|
||||
l, err := NewARC(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Add initially to t1
|
||||
l.Add(1, 1)
|
||||
if n := l.t1.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Add should upgrade to t2
|
||||
l.Add(1, 1)
|
||||
if n := l.t1.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Add should remain in t2
|
||||
l.Add(1, 1)
|
||||
if n := l.t1.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestARC_Adaptive(t *testing.T) {
|
||||
l, err := NewARC(4)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Fill t1
|
||||
for i := 0; i < 4; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if n := l.t1.Len(); n != 4 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Move to t2
|
||||
l.Get(0)
|
||||
l.Get(1)
|
||||
if n := l.t2.Len(); n != 2 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Evict from t1
|
||||
l.Add(4, 4)
|
||||
if n := l.b1.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Current state
|
||||
// t1 : (MRU) [4, 3] (LRU)
|
||||
// t2 : (MRU) [1, 0] (LRU)
|
||||
// b1 : (MRU) [2] (LRU)
|
||||
// b2 : (MRU) [] (LRU)
|
||||
|
||||
// Add 2, should cause hit on b1
|
||||
l.Add(2, 2)
|
||||
if n := l.b1.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if l.p != 1 {
|
||||
t.Fatalf("bad: %d", l.p)
|
||||
}
|
||||
if n := l.t2.Len(); n != 3 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Current state
|
||||
// t1 : (MRU) [4] (LRU)
|
||||
// t2 : (MRU) [2, 1, 0] (LRU)
|
||||
// b1 : (MRU) [3] (LRU)
|
||||
// b2 : (MRU) [] (LRU)
|
||||
|
||||
// Add 4, should migrate to t2
|
||||
l.Add(4, 4)
|
||||
if n := l.t1.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 4 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Current state
|
||||
// t1 : (MRU) [] (LRU)
|
||||
// t2 : (MRU) [4, 2, 1, 0] (LRU)
|
||||
// b1 : (MRU) [3] (LRU)
|
||||
// b2 : (MRU) [] (LRU)
|
||||
|
||||
// Add 5, should evict to b2
|
||||
l.Add(5, 5)
|
||||
if n := l.t1.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 3 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.b2.Len(); n != 1 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
|
||||
// Current state
|
||||
// t1 : (MRU) [5] (LRU)
|
||||
// t2 : (MRU) [4, 2, 1] (LRU)
|
||||
// b1 : (MRU) [3] (LRU)
|
||||
// b2 : (MRU) [0] (LRU)
|
||||
|
||||
// Add 0, should decrease p
|
||||
l.Add(0, 0)
|
||||
if n := l.t1.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.t2.Len(); n != 4 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.b1.Len(); n != 2 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if n := l.b2.Len(); n != 0 {
|
||||
t.Fatalf("bad: %d", n)
|
||||
}
|
||||
if l.p != 0 {
|
||||
t.Fatalf("bad: %d", l.p)
|
||||
}
|
||||
|
||||
// Current state
|
||||
// t1 : (MRU) [] (LRU)
|
||||
// t2 : (MRU) [0, 4, 2, 1] (LRU)
|
||||
// b1 : (MRU) [5, 3] (LRU)
|
||||
// b2 : (MRU) [0] (LRU)
|
||||
}
|
||||
|
||||
func TestARC(t *testing.T) {
|
||||
l, err := NewARC(128)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
|
||||
for i, k := range l.Keys() {
|
||||
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||
t.Fatalf("bad key: %v", k)
|
||||
}
|
||||
}
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 256; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("should not be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 192; i++ {
|
||||
l.Remove(i)
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be deleted")
|
||||
}
|
||||
}
|
||||
|
||||
l.Purge()
|
||||
if l.Len() != 0 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
if _, ok := l.Get(200); ok {
|
||||
t.Fatalf("should contain nothing")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Contains doesn't update recent-ness
|
||||
func TestARC_Contains(t *testing.T) {
|
||||
l, err := NewARC(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if !l.Contains(1) {
|
||||
t.Errorf("1 should be contained")
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Peek doesn't update recent-ness
|
||||
func TestARC_Peek(t *testing.T) {
|
||||
l, err := NewARC(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
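The two tests directly above pin down a subtle point of the ARC API: `Get` refreshes an entry's recency, while `Contains` and `Peek` deliberately do not. A minimal sketch of that behaviour through the exported `ARCCache` API (not part of the vendored code; the import path is the one this commit vendors, and the keys/sizes are placeholders):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.NewARC(2)
	if err != nil {
		panic(err)
	}
	cache.Add(1, 1)
	cache.Add(2, 2)

	// Peek reads the value and Contains checks membership, but neither
	// refreshes recency, so key 1 remains the eviction candidate.
	_, _ = cache.Peek(1)
	_ = cache.Contains(1)

	// Adding a third key therefore evicts key 1 (this is exactly what
	// TestARC_Contains and TestARC_Peek assert above).
	cache.Add(3, 3)
	fmt.Println(cache.Contains(1)) // false
}
```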
21
vendor/github.com/hashicorp/golang-lru/doc.go
generated
vendored
Normal file
21
vendor/github.com/hashicorp/golang-lru/doc.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
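The package comment above distinguishes the three cache types; a short usage sketch (not part of the vendored code) of how each is constructed from the root package. The sizes and keys are arbitrary placeholders:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Cache: plain LRU, the cheapest option.
	simple, _ := lru.New(128)
	simple.Add("k", 1)

	// TwoQueueCache: keeps recently used and frequently used entries apart.
	twoQueue, _ := lru.New2Q(128)
	twoQueue.Add("k", 2)

	// ARCCache: adapts the recent/frequent split at runtime (note the
	// patent caveat in the package comment).
	arc, _ := lru.NewARC(128)
	arc.Add("k", 3)

	if v, ok := arc.Get("k"); ok {
		fmt.Println("hit:", v)
	}
}
```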
110
vendor/github.com/hashicorp/golang-lru/lru.go
generated
vendored
Normal file
110
vendor/github.com/hashicorp/golang-lru/lru.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
// Cache is a thread-safe fixed size LRU cache.
|
||||
type Cache struct {
|
||||
lru simplelru.LRUCache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// New creates an LRU of the given size.
|
||||
func New(size int) (*Cache, error) {
|
||||
return NewWithEvict(size, nil)
|
||||
}
|
||||
|
||||
// NewWithEvict constructs a fixed size cache with the given eviction
|
||||
// callback.
|
||||
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
|
||||
lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Cache{
|
||||
lru: lru,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *Cache) Purge() {
|
||||
c.lock.Lock()
|
||||
c.lru.Purge()
|
||||
c.lock.Unlock()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
func (c *Cache) Add(key, value interface{}) (evicted bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.lru.Add(key, value)
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.lru.Get(key)
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the
|
||||
// recent-ness or deleting it for being stale.
|
||||
func (c *Cache) Contains(key interface{}) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.lru.Contains(key)
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.lru.Peek(key)
|
||||
}
|
||||
|
||||
// ContainsOrAdd checks if a key is in the cache without updating the
|
||||
// recent-ness or deleting it for being stale, and if not, adds the value.
|
||||
// Returns whether found and whether an eviction occurred.
|
||||
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.lru.Contains(key) {
|
||||
return true, false
|
||||
}
|
||||
evicted = c.lru.Add(key, value)
|
||||
return false, evicted
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *Cache) Remove(key interface{}) {
|
||||
c.lock.Lock()
|
||||
c.lru.Remove(key)
|
||||
c.lock.Unlock()
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *Cache) RemoveOldest() {
|
||||
c.lock.Lock()
|
||||
c.lru.RemoveOldest()
|
||||
c.lock.Unlock()
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *Cache) Keys() []interface{} {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.lru.Keys()
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache) Len() int {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.lru.Len()
|
||||
}
|
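Cache above is simplelru.LRU guarded by a sync.RWMutex, so the interesting surface is the constructor with an eviction callback and the combined ContainsOrAdd check. A hedged usage sketch (keys and sizes are placeholders):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// The eviction callback receives the key/value pair that was dropped.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used entry

	// ContainsOrAdd reports whether the key was already present and,
	// if it was not, whether inserting it evicted something.
	found, evicted := cache.ContainsOrAdd("b", 2)
	fmt.Println(found, evicted) // true false
}
```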
221
vendor/github.com/hashicorp/golang-lru/lru_test.go
generated
vendored
Normal file
221
vendor/github.com/hashicorp/golang-lru/lru_test.go
generated
vendored
Normal file
@ -0,0 +1,221 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkLRU_Rand(b *testing.B) {
|
||||
l, err := New(8192)
|
||||
if err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
trace := make([]int64, b.N*2)
|
||||
for i := 0; i < b.N*2; i++ {
|
||||
trace[i] = rand.Int63() % 32768
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
var hit, miss int
|
||||
for i := 0; i < 2*b.N; i++ {
|
||||
if i%2 == 0 {
|
||||
l.Add(trace[i], trace[i])
|
||||
} else {
|
||||
_, ok := l.Get(trace[i])
|
||||
if ok {
|
||||
hit++
|
||||
} else {
|
||||
miss++
|
||||
}
|
||||
}
|
||||
}
|
||||
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||
}
|
||||
|
||||
func BenchmarkLRU_Freq(b *testing.B) {
|
||||
l, err := New(8192)
|
||||
if err != nil {
|
||||
b.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
trace := make([]int64, b.N*2)
|
||||
for i := 0; i < b.N*2; i++ {
|
||||
if i%2 == 0 {
|
||||
trace[i] = rand.Int63() % 16384
|
||||
} else {
|
||||
trace[i] = rand.Int63() % 32768
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
l.Add(trace[i], trace[i])
|
||||
}
|
||||
var hit, miss int
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, ok := l.Get(trace[i])
|
||||
if ok {
|
||||
hit++
|
||||
} else {
|
||||
miss++
|
||||
}
|
||||
}
|
||||
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||
}
|
||||
|
||||
func TestLRU(t *testing.T) {
|
||||
evictCounter := 0
|
||||
onEvicted := func(k interface{}, v interface{}) {
|
||||
if k != v {
|
||||
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
|
||||
}
|
||||
evictCounter++
|
||||
}
|
||||
l, err := NewWithEvict(128, onEvicted)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
|
||||
if evictCounter != 128 {
|
||||
t.Fatalf("bad evict count: %v", evictCounter)
|
||||
}
|
||||
|
||||
for i, k := range l.Keys() {
|
||||
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||
t.Fatalf("bad key: %v", k)
|
||||
}
|
||||
}
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 256; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("should not be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 192; i++ {
|
||||
l.Remove(i)
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be deleted")
|
||||
}
|
||||
}
|
||||
|
||||
l.Get(192) // expect 192 to be last key in l.Keys()
|
||||
|
||||
for i, k := range l.Keys() {
|
||||
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
|
||||
t.Fatalf("out of order key: %v", k)
|
||||
}
|
||||
}
|
||||
|
||||
l.Purge()
|
||||
if l.Len() != 0 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
if _, ok := l.Get(200); ok {
|
||||
t.Fatalf("should contain nothing")
|
||||
}
|
||||
}
|
||||
|
||||
// test that Add returns true/false if an eviction occurred
|
||||
func TestLRUAdd(t *testing.T) {
|
||||
evictCounter := 0
|
||||
onEvicted := func(k interface{}, v interface{}) {
|
||||
evictCounter++
|
||||
}
|
||||
|
||||
l, err := NewWithEvict(1, onEvicted)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if l.Add(1, 1) == true || evictCounter != 0 {
|
||||
t.Errorf("should not have an eviction")
|
||||
}
|
||||
if l.Add(2, 2) == false || evictCounter != 1 {
|
||||
t.Errorf("should have an eviction")
|
||||
}
|
||||
}
|
||||
|
||||
// test that Contains doesn't update recent-ness
|
||||
func TestLRUContains(t *testing.T) {
|
||||
l, err := New(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if !l.Contains(1) {
|
||||
t.Errorf("1 should be contained")
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
||||
|
||||
// test that Contains doesn't update recent-ness
|
||||
func TestLRUContainsOrAdd(t *testing.T) {
|
||||
l, err := New(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
contains, evict := l.ContainsOrAdd(1, 1)
|
||||
if !contains {
|
||||
t.Errorf("1 should be contained")
|
||||
}
|
||||
if evict {
|
||||
t.Errorf("nothing should be evicted here")
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
contains, evict = l.ContainsOrAdd(1, 1)
|
||||
if contains {
|
||||
t.Errorf("1 should not have been contained")
|
||||
}
|
||||
if !evict {
|
||||
t.Errorf("an eviction should have occurred")
|
||||
}
|
||||
if !l.Contains(1) {
|
||||
t.Errorf("now 1 should be contained")
|
||||
}
|
||||
}
|
||||
|
||||
// test that Peek doesn't update recent-ness
|
||||
func TestLRUPeek(t *testing.T) {
|
||||
l, err := New(2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
161
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
generated
vendored
Normal file
161
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
package simplelru
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
||||
type EvictCallback func(key interface{}, value interface{})
|
||||
|
||||
// LRU implements a non-thread safe fixed size LRU cache
|
||||
type LRU struct {
|
||||
size int
|
||||
evictList *list.List
|
||||
items map[interface{}]*list.Element
|
||||
onEvict EvictCallback
|
||||
}
|
||||
|
||||
// entry is used to hold a value in the evictList
|
||||
type entry struct {
|
||||
key interface{}
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// NewLRU constructs an LRU of the given size
|
||||
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
|
||||
if size <= 0 {
|
||||
return nil, errors.New("Must provide a positive size")
|
||||
}
|
||||
c := &LRU{
|
||||
size: size,
|
||||
evictList: list.New(),
|
||||
items: make(map[interface{}]*list.Element),
|
||||
onEvict: onEvict,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *LRU) Purge() {
|
||||
for k, v := range c.items {
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(k, v.Value.(*entry).value)
|
||||
}
|
||||
delete(c.items, k)
|
||||
}
|
||||
c.evictList.Init()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
func (c *LRU) Add(key, value interface{}) (evicted bool) {
|
||||
// Check for existing item
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
ent.Value.(*entry).value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Add new item
|
||||
ent := &entry{key, value}
|
||||
entry := c.evictList.PushFront(ent)
|
||||
c.items[key] = entry
|
||||
|
||||
evict := c.evictList.Len() > c.size
|
||||
// Verify size not exceeded
|
||||
if evict {
|
||||
c.removeOldest()
|
||||
}
|
||||
return evict
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the recent-ness
|
||||
// or deleting it for being stale.
|
||||
func (c *LRU) Contains(key interface{}) (ok bool) {
|
||||
_, ok = c.items[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
var ent *list.Element
|
||||
if ent, ok = c.items[key]; ok {
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return nil, ok
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache, returning if the
|
||||
// key was contained.
|
||||
func (c *LRU) Remove(key interface{}) (present bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.removeElement(ent)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
kv := ent.Value.(*entry)
|
||||
return kv.key, kv.value, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
kv := ent.Value.(*entry)
|
||||
return kv.key, kv.value, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *LRU) Keys() []interface{} {
|
||||
keys := make([]interface{}, len(c.items))
|
||||
i := 0
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
|
||||
keys[i] = ent.Value.(*entry).key
|
||||
i++
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *LRU) Len() int {
|
||||
return c.evictList.Len()
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (c *LRU) removeOldest() {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
}
|
||||
|
||||
// removeElement is used to remove a given list element from the cache
|
||||
func (c *LRU) removeElement(e *list.Element) {
|
||||
c.evictList.Remove(e)
|
||||
kv := e.Value.(*entry)
|
||||
delete(c.items, kv.key)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(kv.key, kv.value)
|
||||
}
|
||||
}
|
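The implementation above keeps every entry in two places: `items` maps a key to its `*list.Element`, and `evictList` orders those elements by recency, so `Get`/`Add` are a map lookup plus `MoveToFront`, and eviction pops `evictList.Back()`. A small sketch of using this non-thread-safe type directly (the parent `lru` package is the locked wrapper; names and sizes here are placeholders):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// simplelru.LRU is not safe for concurrent use on its own.
	c, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	c.Add("a", 1)
	c.Add("b", 2)

	// GetOldest peeks at the eviction candidate (the back of the internal list).
	if k, v, ok := c.GetOldest(); ok {
		fmt.Println("oldest:", k, v)
	}

	// Adding a third entry evicts the oldest one and fires the callback.
	c.Add("c", 3)
	fmt.Println(c.Len()) // 2
}
```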
37
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
generated
vendored
Normal file
37
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
package simplelru


// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
	// Adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool

	// Returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)

	// Check if a key exists in cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)

	// Returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)

	// Removes a key from the cache.
	Remove(key interface{}) bool

	// Removes the oldest entry from cache.
	RemoveOldest() (interface{}, interface{}, bool)

	// Returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)

	// Returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}

	// Returns the number of items in the cache.
	Len() int

	// Clear all cache entries
	Purge()
}
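Code that only needs the behaviour above can accept the interface rather than a concrete type. At this revision `*simplelru.LRU` satisfies `LRUCache` (the locked `Cache` in the parent package does not, since its `Remove` and `RemoveOldest` return nothing). A minimal sketch, not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

// warm pre-populates any LRUCache implementation.
func warm(c simplelru.LRUCache, n int) {
	for i := 0; i < n; i++ {
		c.Add(i, i)
	}
}

func main() {
	c, err := simplelru.NewLRU(8, nil)
	if err != nil {
		panic(err)
	}
	warm(c, 16) // only the 8 most recently added keys survive
	fmt.Println(c.Keys())
}
```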
167
vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go
generated
vendored
Normal file
167
vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
package simplelru
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestLRU(t *testing.T) {
|
||||
evictCounter := 0
|
||||
onEvicted := func(k interface{}, v interface{}) {
|
||||
if k != v {
|
||||
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
|
||||
}
|
||||
evictCounter++
|
||||
}
|
||||
l, err := NewLRU(128, onEvicted)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
if l.Len() != 128 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
|
||||
if evictCounter != 128 {
|
||||
t.Fatalf("bad evict count: %v", evictCounter)
|
||||
}
|
||||
|
||||
for i, k := range l.Keys() {
|
||||
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||
t.Fatalf("bad key: %v", k)
|
||||
}
|
||||
}
|
||||
for i := 0; i < 128; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 256; i++ {
|
||||
_, ok := l.Get(i)
|
||||
if !ok {
|
||||
t.Fatalf("should not be evicted")
|
||||
}
|
||||
}
|
||||
for i := 128; i < 192; i++ {
|
||||
ok := l.Remove(i)
|
||||
if !ok {
|
||||
t.Fatalf("should be contained")
|
||||
}
|
||||
ok = l.Remove(i)
|
||||
if ok {
|
||||
t.Fatalf("should not be contained")
|
||||
}
|
||||
_, ok = l.Get(i)
|
||||
if ok {
|
||||
t.Fatalf("should be deleted")
|
||||
}
|
||||
}
|
||||
|
||||
l.Get(192) // expect 192 to be last key in l.Keys()
|
||||
|
||||
for i, k := range l.Keys() {
|
||||
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
|
||||
t.Fatalf("out of order key: %v", k)
|
||||
}
|
||||
}
|
||||
|
||||
l.Purge()
|
||||
if l.Len() != 0 {
|
||||
t.Fatalf("bad len: %v", l.Len())
|
||||
}
|
||||
if _, ok := l.Get(200); ok {
|
||||
t.Fatalf("should contain nothing")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRU_GetOldest_RemoveOldest(t *testing.T) {
|
||||
l, err := NewLRU(128, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
for i := 0; i < 256; i++ {
|
||||
l.Add(i, i)
|
||||
}
|
||||
k, _, ok := l.GetOldest()
|
||||
if !ok {
|
||||
t.Fatalf("missing")
|
||||
}
|
||||
if k.(int) != 128 {
|
||||
t.Fatalf("bad: %v", k)
|
||||
}
|
||||
|
||||
k, _, ok = l.RemoveOldest()
|
||||
if !ok {
|
||||
t.Fatalf("missing")
|
||||
}
|
||||
if k.(int) != 128 {
|
||||
t.Fatalf("bad: %v", k)
|
||||
}
|
||||
|
||||
k, _, ok = l.RemoveOldest()
|
||||
if !ok {
|
||||
t.Fatalf("missing")
|
||||
}
|
||||
if k.(int) != 129 {
|
||||
t.Fatalf("bad: %v", k)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Add returns true/false if an eviction occurred
|
||||
func TestLRU_Add(t *testing.T) {
|
||||
evictCounter := 0
|
||||
onEvicted := func(k interface{}, v interface{}) {
|
||||
evictCounter++
|
||||
}
|
||||
|
||||
l, err := NewLRU(1, onEvicted)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if l.Add(1, 1) == true || evictCounter != 0 {
|
||||
t.Errorf("should not have an eviction")
|
||||
}
|
||||
if l.Add(2, 2) == false || evictCounter != 1 {
|
||||
t.Errorf("should have an eviction")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Contains doesn't update recent-ness
|
||||
func TestLRU_Contains(t *testing.T) {
|
||||
l, err := NewLRU(2, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if !l.Contains(1) {
|
||||
t.Errorf("1 should be contained")
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Peek doesn't update recent-ness
|
||||
func TestLRU_Peek(t *testing.T) {
|
||||
l, err := NewLRU(2, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
10
vendor/golang.org/x/crypto/.gitattributes
generated
vendored
Normal file
10
vendor/golang.org/x/crypto/.gitattributes
generated
vendored
Normal file
@ -0,0 +1,10 @@
# Treat all files in this repo as binary, with no git magic updating
# line endings. Windows users contributing to Go will need to use a
# modern version of git and editors capable of LF line endings.
#
# We'll prevent accidental CRLF line endings from entering the repo
# via the git-review gofmt checks.
#
# See golang.org/issue/9281

* -text
2
vendor/golang.org/x/crypto/.gitignore
generated
vendored
Normal file
2
vendor/golang.org/x/crypto/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
# Add no patterns to .hgignore except for files generated by the build.
last-change
3
vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
3
vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
26
vendor/golang.org/x/crypto/CONTRIBUTING.md
generated
vendored
Normal file
26
vendor/golang.org/x/crypto/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,26 @@
# Contributing to Go

Go is an open source project.

It is the work of hundreds of contributors. We appreciate your help!

## Filing issues

When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.

## Contributing code

Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.

Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
3
vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
Normal file
3
vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
21
vendor/golang.org/x/crypto/README.md
generated
vendored
Normal file
21
vendor/golang.org/x/crypto/README.md
generated
vendored
Normal file
@ -0,0 +1,21 @@
# Go Cryptography

This repository holds supplementary Go cryptography libraries.

## Download/Install

The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You
can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the crypto repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the
subject line, so it is easy to find.

Note that contributions to the cryptography package receive additional scrutiny
due to their sensitive nature. Patches may take longer than normal to receive
feedback.
1065
vendor/golang.org/x/crypto/acme/acme.go
generated
vendored
Normal file
1065
vendor/golang.org/x/crypto/acme/acme.go
generated
vendored
Normal file
File diff suppressed because it is too large
1380
vendor/golang.org/x/crypto/acme/acme_test.go
generated
vendored
Normal file
1380
vendor/golang.org/x/crypto/acme/acme_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
962
vendor/golang.org/x/crypto/acme/autocert/autocert.go
generated
vendored
Normal file
962
vendor/golang.org/x/crypto/acme/autocert/autocert.go
generated
vendored
Normal file
@ -0,0 +1,962 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package autocert provides automatic access to certificates from Let's Encrypt
|
||||
// and any other ACME-based CA.
|
||||
//
|
||||
// This package is a work in progress and makes no API stability promises.
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
)
|
||||
|
||||
// createCertRetryAfter is how much time to wait before removing a failed state
|
||||
// entry due to an unsuccessful createCert call.
|
||||
// This is a variable instead of a const for testing.
|
||||
// TODO: Consider making it configurable or an exp backoff?
|
||||
var createCertRetryAfter = time.Minute
|
||||
|
||||
// pseudoRand is safe for concurrent use.
|
||||
var pseudoRand *lockedMathRand
|
||||
|
||||
func init() {
|
||||
src := mathrand.NewSource(timeNow().UnixNano())
|
||||
pseudoRand = &lockedMathRand{rnd: mathrand.New(src)}
|
||||
}
|
||||
|
||||
// AcceptTOS is a Manager.Prompt function that always returns true to
|
||||
// indicate acceptance of the CA's Terms of Service during account
|
||||
// registration.
|
||||
func AcceptTOS(tosURL string) bool { return true }
|
||||
|
||||
// HostPolicy specifies which host names the Manager is allowed to respond to.
|
||||
// It returns a non-nil error if the host should be rejected.
|
||||
// The returned error is accessible via tls.Conn.Handshake and its callers.
|
||||
// See Manager's HostPolicy field and GetCertificate method docs for more details.
|
||||
type HostPolicy func(ctx context.Context, host string) error
|
||||
|
||||
// HostWhitelist returns a policy where only the specified host names are allowed.
|
||||
// Only exact matches are currently supported. Subdomains, regexp or wildcard
|
||||
// will not match.
|
||||
func HostWhitelist(hosts ...string) HostPolicy {
|
||||
whitelist := make(map[string]bool, len(hosts))
|
||||
for _, h := range hosts {
|
||||
whitelist[h] = true
|
||||
}
|
||||
return func(_ context.Context, host string) error {
|
||||
if !whitelist[host] {
|
||||
return errors.New("acme/autocert: host not configured")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
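HostWhitelist only does exact matches, so anything fancier means writing a HostPolicy by hand. A hedged sketch of one such custom policy (the zone name, package name and error text are illustrative, not part of the vendored package):

```go
package policy

import (
	"context"
	"errors"
	"strings"

	"golang.org/x/crypto/acme/autocert"
)

// subdomainPolicy allows a zone and any of its subdomains, which the
// exact-match HostWhitelist above cannot express.
func subdomainPolicy(zone string) autocert.HostPolicy {
	suffix := "." + zone
	return func(_ context.Context, host string) error {
		if host == zone || strings.HasSuffix(host, suffix) {
			return nil
		}
		return errors.New("acme/autocert: host " + host + " not allowed")
	}
}
```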
|
||||
|
||||
// defaultHostPolicy is used when Manager.HostPolicy is not set.
|
||||
func defaultHostPolicy(context.Context, string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Manager is a stateful certificate manager built on top of acme.Client.
|
||||
// It obtains and refreshes certificates automatically using "tls-sni-01",
|
||||
// "tls-sni-02" and "http-01" challenge types, as well as providing them
|
||||
// to a TLS server via tls.Config.
|
||||
//
|
||||
// You must specify a cache implementation, such as DirCache,
|
||||
// to reuse obtained certificates across program restarts.
|
||||
// Otherwise your server is very likely to exceed the certificate
|
||||
// issuer's request rate limits.
|
||||
type Manager struct {
|
||||
// Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS).
|
||||
// The registration may require the caller to agree to the CA's TOS.
|
||||
// If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report
|
||||
// whether the caller agrees to the terms.
|
||||
//
|
||||
// To always accept the terms, the callers can use AcceptTOS.
|
||||
Prompt func(tosURL string) bool
|
||||
|
||||
// Cache optionally stores and retrieves previously-obtained certificates.
|
||||
// If nil, certs will only be cached for the lifetime of the Manager.
|
||||
//
|
||||
// Manager passes the Cache certificates data encoded in PEM, with private/public
|
||||
// parts combined in a single Cache.Put call, private key first.
|
||||
Cache Cache
|
||||
|
||||
// HostPolicy controls which domains the Manager will attempt
|
||||
// to retrieve new certificates for. It does not affect cached certs.
|
||||
//
|
||||
// If non-nil, HostPolicy is called before requesting a new cert.
|
||||
// If nil, all hosts are currently allowed. This is not recommended,
|
||||
// as it opens a potential attack where clients connect to a server
|
||||
// by IP address and pretend to be asking for an incorrect host name.
|
||||
// Manager will attempt to obtain a certificate for that host, incorrectly,
|
||||
// eventually reaching the CA's rate limit for certificate requests
|
||||
// and making it impossible to obtain actual certificates.
|
||||
//
|
||||
// See GetCertificate for more details.
|
||||
HostPolicy HostPolicy
|
||||
|
||||
// RenewBefore optionally specifies how early certificates should
|
||||
// be renewed before they expire.
|
||||
//
|
||||
// If zero, they're renewed 30 days before expiration.
|
||||
RenewBefore time.Duration
|
||||
|
||||
// Client is used to perform low-level operations, such as account registration
|
||||
// and requesting new certificates.
|
||||
// If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL
|
||||
// directory endpoint and a newly-generated ECDSA P-256 key.
|
||||
//
|
||||
// Mutating the field after the first call of GetCertificate method will have no effect.
|
||||
Client *acme.Client
|
||||
|
||||
// Email optionally specifies a contact email address.
|
||||
// This is used by CAs, such as Let's Encrypt, to notify about problems
|
||||
// with issued certificates.
|
||||
//
|
||||
// If the Client's account key is already registered, Email is not used.
|
||||
Email string
|
||||
|
||||
// ForceRSA makes the Manager generate certificates with 2048-bit RSA keys.
|
||||
//
|
||||
// If false, a default is used. Currently the default
|
||||
// is EC-based keys using the P-256 curve.
|
||||
ForceRSA bool
|
||||
|
||||
clientMu sync.Mutex
|
||||
client *acme.Client // initialized by acmeClient method
|
||||
|
||||
stateMu sync.Mutex
|
||||
state map[string]*certState // keyed by domain name
|
||||
|
||||
// renewal tracks the set of domains currently running renewal timers.
|
||||
// It is keyed by domain name.
|
||||
renewalMu sync.Mutex
|
||||
renewal map[string]*domainRenewal
|
||||
|
||||
// tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens.
|
||||
tokensMu sync.RWMutex
|
||||
// tryHTTP01 indicates whether the Manager should try "http-01" challenge type
|
||||
// during the authorization flow.
|
||||
tryHTTP01 bool
|
||||
// httpTokens contains response body values for http-01 challenges
|
||||
// and is keyed by the URL path at which a challenge response is expected
|
||||
// to be provisioned.
|
||||
// The entries are stored for the duration of the authorization flow.
|
||||
httpTokens map[string][]byte
|
||||
// certTokens contains temporary certificates for tls-sni challenges
|
||||
// and is keyed by token domain name, which matches server name of ClientHello.
|
||||
// Keys always have ".acme.invalid" suffix.
|
||||
// The entries are stored for the duration of the authorization flow.
|
||||
certTokens map[string]*tls.Certificate
|
||||
}
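The struct above is normally filled in once at startup and handed to a TLS server via GetCertificate (defined just below). A hedged end-to-end sketch; the domain, email address and cache directory are placeholders, and DirCache is the disk-backed Cache implementation mentioned in the type comment above:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,                    // agree to the CA's terms of service
		Cache:      autocert.DirCache("/var/cache/certs"), // persist certs across restarts
		HostPolicy: autocert.HostWhitelist("example.com"), // refuse other server names
		Email:      "admin@example.com",
	}

	// Port 80: serve "http-01" challenge responses; the nil fallback
	// redirects everything else to HTTPS (see HTTPHandler below).
	go func() {
		log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(nil)))
	}()

	srv := &http.Server{
		Addr:      ":https",
		TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("hello over TLS\n"))
		}),
	}
	// Certificates are requested lazily on the first handshake for an
	// allowed host, cached, and renewed before they expire.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```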
|
||||
|
||||
// GetCertificate implements the tls.Config.GetCertificate hook.
|
||||
// It provides a TLS certificate for hello.ServerName host, including answering
|
||||
// *.acme.invalid (TLS-SNI) challenges. All other fields of hello are ignored.
|
||||
//
|
||||
// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting
|
||||
// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation.
|
||||
// The error is propagated back to the caller of GetCertificate and is user-visible.
|
||||
// This does not affect cached certs. See HostPolicy field description for more details.
|
||||
func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
if m.Prompt == nil {
|
||||
return nil, errors.New("acme/autocert: Manager.Prompt not set")
|
||||
}
|
||||
|
||||
name := hello.ServerName
|
||||
if name == "" {
|
||||
return nil, errors.New("acme/autocert: missing server name")
|
||||
}
|
||||
if !strings.Contains(strings.Trim(name, "."), ".") {
|
||||
return nil, errors.New("acme/autocert: server name component count invalid")
|
||||
}
|
||||
if strings.ContainsAny(name, `/\`) {
|
||||
return nil, errors.New("acme/autocert: server name contains invalid character")
|
||||
}
|
||||
|
||||
// In the worst-case scenario, the timeout needs to account for caching, host policy,
|
||||
// domain ownership verification and certificate issuance.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// check whether this is a token cert requested for TLS-SNI challenge
|
||||
if strings.HasSuffix(name, ".acme.invalid") {
|
||||
m.tokensMu.RLock()
|
||||
defer m.tokensMu.RUnlock()
|
||||
if cert := m.certTokens[name]; cert != nil {
|
||||
return cert, nil
|
||||
}
|
||||
if cert, err := m.cacheGet(ctx, name); err == nil {
|
||||
return cert, nil
|
||||
}
|
||||
// TODO: cache error results?
|
||||
return nil, fmt.Errorf("acme/autocert: no token cert for %q", name)
|
||||
}
|
||||
|
||||
// regular domain
|
||||
name = strings.TrimSuffix(name, ".") // golang.org/issue/18114
|
||||
cert, err := m.cert(ctx, name)
|
||||
if err == nil {
|
||||
return cert, nil
|
||||
}
|
||||
if err != ErrCacheMiss {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// first-time
|
||||
if err := m.hostPolicy()(ctx, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err = m.createCert(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.cachePut(ctx, name, cert)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses.
|
||||
// It returns an http.Handler that responds to the challenges and must be
|
||||
// running on port 80. If it receives a request that is not an ACME challenge,
|
||||
// it delegates the request to the optional fallback handler.
|
||||
//
|
||||
// If fallback is nil, the returned handler redirects all GET and HEAD requests
|
||||
// to the default TLS port 443 with 302 Found status code, preserving the original
|
||||
// request path and query. It responds with 400 Bad Request to all other HTTP methods.
|
||||
// The fallback is not protected by the optional HostPolicy.
|
||||
//
|
||||
// Because the fallback handler is run with unencrypted port 80 requests,
|
||||
// the fallback should not serve TLS-only requests.
|
||||
//
|
||||
// If HTTPHandler is never called, the Manager will only use TLS SNI
|
||||
// challenges for domain verification.
|
||||
func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
m.tryHTTP01 = true
|
||||
|
||||
if fallback == nil {
|
||||
fallback = http.HandlerFunc(handleHTTPRedirect)
|
||||
}
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
|
||||
fallback.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
// A reasonable context timeout for cache and host policy only,
|
||||
// because we don't wait for a new certificate issuance here.
|
||||
ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
|
||||
defer cancel()
|
||||
if err := m.hostPolicy()(ctx, r.Host); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
data, err := m.httpToken(ctx, r.URL.Path)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
})
|
||||
}
|
||||
|
||||
func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != "GET" && r.Method != "HEAD" {
|
||||
http.Error(w, "Use HTTPS", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
target := "https://" + stripPort(r.Host) + r.URL.RequestURI()
|
||||
http.Redirect(w, r, target, http.StatusFound)
|
||||
}
|
||||
|
||||
func stripPort(hostport string) string {
|
||||
host, _, err := net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
return hostport
|
||||
}
|
||||
return net.JoinHostPort(host, "443")
|
||||
}
|
||||
|
||||
// cert returns an existing certificate either from m.state or cache.
|
||||
// If a certificate is found in cache but not in m.state, the latter will be filled
|
||||
// with the cached value.
|
||||
func (m *Manager) cert(ctx context.Context, name string) (*tls.Certificate, error) {
|
||||
m.stateMu.Lock()
|
||||
if s, ok := m.state[name]; ok {
|
||||
m.stateMu.Unlock()
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.tlscert()
|
||||
}
|
||||
defer m.stateMu.Unlock()
|
||||
cert, err := m.cacheGet(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signer, ok := cert.PrivateKey.(crypto.Signer)
|
||||
if !ok {
|
||||
return nil, errors.New("acme/autocert: private key cannot sign")
|
||||
}
|
||||
if m.state == nil {
|
||||
m.state = make(map[string]*certState)
|
||||
}
|
||||
s := &certState{
|
||||
key: signer,
|
||||
cert: cert.Certificate,
|
||||
leaf: cert.Leaf,
|
||||
}
|
||||
m.state[name] = s
|
||||
go m.renew(name, s.key, s.leaf.NotAfter)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// cacheGet always returns a valid certificate, or an error otherwise.
|
||||
// If a cached certificate exists but is not valid, ErrCacheMiss is returned.
|
||||
func (m *Manager) cacheGet(ctx context.Context, domain string) (*tls.Certificate, error) {
|
||||
if m.Cache == nil {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
data, err := m.Cache.Get(ctx, domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// private
|
||||
priv, pub := pem.Decode(data)
|
||||
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
privKey, err := parsePrivateKey(priv.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// public
|
||||
var pubDER [][]byte
|
||||
for len(pub) > 0 {
|
||||
var b *pem.Block
|
||||
b, pub = pem.Decode(pub)
|
||||
if b == nil {
|
||||
break
|
||||
}
|
||||
pubDER = append(pubDER, b.Bytes)
|
||||
}
|
||||
if len(pub) > 0 {
|
||||
// Leftover content not consumed by pem.Decode. Corrupt. Ignore.
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
|
||||
// verify and create TLS cert
|
||||
leaf, err := validCert(domain, pubDER, privKey)
|
||||
if err != nil {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
tlscert := &tls.Certificate{
|
||||
Certificate: pubDER,
|
||||
PrivateKey: privKey,
|
||||
Leaf: leaf,
|
||||
}
|
||||
return tlscert, nil
|
||||
}
|
||||
|
||||
func (m *Manager) cachePut(ctx context.Context, domain string, tlscert *tls.Certificate) error {
|
||||
if m.Cache == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// contains PEM-encoded data
|
||||
var buf bytes.Buffer
|
||||
|
||||
// private
|
||||
switch key := tlscert.PrivateKey.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
if err := encodeECDSAKey(&buf, key); err != nil {
|
||||
return err
|
||||
}
|
||||
case *rsa.PrivateKey:
|
||||
b := x509.MarshalPKCS1PrivateKey(key)
|
||||
pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b}
|
||||
if err := pem.Encode(&buf, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("acme/autocert: unknown private key type")
|
||||
}
|
||||
|
||||
// public
|
||||
for _, b := range tlscert.Certificate {
|
||||
pb := &pem.Block{Type: "CERTIFICATE", Bytes: b}
|
||||
if err := pem.Encode(&buf, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return m.Cache.Put(ctx, domain, buf.Bytes())
|
||||
}
|
||||
|
||||
func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
|
||||
b, err := x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
|
||||
return pem.Encode(w, pb)
|
||||
}
|
||||
|
||||
// createCert starts the domain ownership verification and returns a certificate
|
||||
// for that domain upon success.
|
||||
//
|
||||
// If the domain is already being verified, it waits for the existing verification to complete.
|
||||
// Either way, createCert blocks for the duration of the whole process.
|
||||
func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certificate, error) {
|
||||
// TODO: maybe rewrite this whole piece using sync.Once
|
||||
state, err := m.certState(domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// state may exist if another goroutine is already working on it
|
||||
// in which case just wait for it to finish
|
||||
if !state.locked {
|
||||
state.RLock()
|
||||
defer state.RUnlock()
|
||||
return state.tlscert()
|
||||
}
|
||||
|
||||
// We are the first; state is locked.
|
||||
// Unblock the readers when domain ownership is verified
|
||||
// and we got the cert or the process failed.
|
||||
defer state.Unlock()
|
||||
state.locked = false
|
||||
|
||||
der, leaf, err := m.authorizedCert(ctx, state.key, domain)
|
||||
if err != nil {
|
||||
// Remove the failed state after some time,
|
||||
// making the manager call createCert again on the following TLS hello.
|
||||
time.AfterFunc(createCertRetryAfter, func() {
|
||||
defer testDidRemoveState(domain)
|
||||
m.stateMu.Lock()
|
||||
defer m.stateMu.Unlock()
|
||||
// Verify the state hasn't changed and it's still invalid
|
||||
// before deleting.
|
||||
s, ok := m.state[domain]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if _, err := validCert(domain, s.cert, s.key); err == nil {
|
||||
return
|
||||
}
|
||||
delete(m.state, domain)
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
state.cert = der
|
||||
state.leaf = leaf
|
||||
go m.renew(domain, state.key, state.leaf.NotAfter)
|
||||
return state.tlscert()
|
||||
}
|
||||
|
||||
// certState returns a new or existing certState.
|
||||
// If a new certState is returned, state.exist is false and the state is locked.
|
||||
// The returned error is non-nil only in the case where a new state could not be created.
|
||||
func (m *Manager) certState(domain string) (*certState, error) {
|
||||
m.stateMu.Lock()
|
||||
defer m.stateMu.Unlock()
|
||||
if m.state == nil {
|
||||
m.state = make(map[string]*certState)
|
||||
}
|
||||
// existing state
|
||||
if state, ok := m.state[domain]; ok {
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// new locked state
|
||||
var (
|
||||
err error
|
||||
key crypto.Signer
|
||||
)
|
||||
if m.ForceRSA {
|
||||
key, err = rsa.GenerateKey(rand.Reader, 2048)
|
||||
} else {
|
||||
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state := &certState{
|
||||
key: key,
|
||||
locked: true,
|
||||
}
|
||||
state.Lock() // will be unlocked by m.certState caller
|
||||
m.state[domain] = state
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
|
||||
// The key argument is the certificate private key.
|
||||
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) {
|
||||
client, err := m.acmeClient(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := m.verify(ctx, client, domain); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
csr, err := certRequest(key, domain)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
der, _, err = client.CreateCert(ctx, csr, 0, true)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
leaf, err = validCert(domain, der, key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return der, leaf, nil
|
||||
}
|
||||
|
||||
// verify runs the identifier (domain) authorization flow
|
||||
// using each applicable ACME challenge type.
|
||||
func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error {
|
||||
// The list of challenge types we'll try to fulfill
|
||||
// in this specific order.
|
||||
challengeTypes := []string{"tls-sni-02", "tls-sni-01"}
|
||||
m.tokensMu.RLock()
|
||||
if m.tryHTTP01 {
|
||||
challengeTypes = append(challengeTypes, "http-01")
|
||||
}
|
||||
m.tokensMu.RUnlock()
|
||||
|
||||
var nextTyp int // challengeType index of the next challenge type to try
|
||||
for {
|
||||
// Start domain authorization and get the challenge.
|
||||
authz, err := client.Authorize(ctx, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// No point in accepting challenges if the authorization status
|
||||
// is in a final state.
|
||||
switch authz.Status {
|
||||
case acme.StatusValid:
|
||||
return nil // already authorized
|
||||
case acme.StatusInvalid:
|
||||
return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI)
|
||||
}
|
||||
|
||||
// Pick the next preferred challenge.
|
||||
var chal *acme.Challenge
|
||||
for chal == nil && nextTyp < len(challengeTypes) {
|
||||
chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges)
|
||||
nextTyp++
|
||||
}
|
||||
if chal == nil {
|
||||
return fmt.Errorf("acme/autocert: unable to authorize %q; tried %q", domain, challengeTypes)
|
||||
}
|
||||
cleanup, err := m.fulfill(ctx, client, chal)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer cleanup()
|
||||
if _, err := client.Accept(ctx, chal); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// A challenge is fulfilled and accepted: wait for the CA to validate.
|
||||
if _, err := client.WaitAuthorization(ctx, authz.URI); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fulfill provisions a response to the challenge chal.
|
||||
// The cleanup is non-nil only if provisioning succeeded.
|
||||
func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge) (cleanup func(), err error) {
|
||||
switch chal.Type {
|
||||
case "tls-sni-01":
|
||||
cert, name, err := client.TLSSNI01ChallengeCert(chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.putCertToken(ctx, name, &cert)
|
||||
return func() { go m.deleteCertToken(name) }, nil
|
||||
case "tls-sni-02":
|
||||
cert, name, err := client.TLSSNI02ChallengeCert(chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.putCertToken(ctx, name, &cert)
|
||||
return func() { go m.deleteCertToken(name) }, nil
|
||||
case "http-01":
|
||||
resp, err := client.HTTP01ChallengeResponse(chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := client.HTTP01ChallengePath(chal.Token)
|
||||
m.putHTTPToken(ctx, p, resp)
|
||||
return func() { go m.deleteHTTPToken(p) }, nil
|
||||
}
|
||||
return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type)
|
||||
}
|
||||
|
||||
func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge {
|
||||
for _, c := range chal {
|
||||
if c.Type == typ {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// putCertToken stores the cert under the named key in both m.certTokens map
|
||||
// and m.Cache.
|
||||
func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
if m.certTokens == nil {
|
||||
m.certTokens = make(map[string]*tls.Certificate)
|
||||
}
|
||||
m.certTokens[name] = cert
|
||||
m.cachePut(ctx, name, cert)
|
||||
}
|
||||
|
||||
// deleteCertToken removes the token certificate for the specified domain name
|
||||
// from both m.certTokens map and m.Cache.
|
||||
func (m *Manager) deleteCertToken(name string) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
delete(m.certTokens, name)
|
||||
if m.Cache != nil {
|
||||
m.Cache.Delete(context.Background(), name)
|
||||
}
|
||||
}
|
||||
|
||||
// httpToken retrieves an existing http-01 token value from an in-memory map
|
||||
// or the optional cache.
|
||||
func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) {
|
||||
m.tokensMu.RLock()
|
||||
defer m.tokensMu.RUnlock()
|
||||
if v, ok := m.httpTokens[tokenPath]; ok {
|
||||
return v, nil
|
||||
}
|
||||
if m.Cache == nil {
|
||||
return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath)
|
||||
}
|
||||
return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath))
|
||||
}
|
||||
|
||||
// putHTTPToken stores an http-01 token value using tokenPath as key
|
||||
// in both in-memory map and the optional Cache.
|
||||
//
|
||||
// It ignores any error returned from Cache.Put.
|
||||
func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
if m.httpTokens == nil {
|
||||
m.httpTokens = make(map[string][]byte)
|
||||
}
|
||||
b := []byte(val)
|
||||
m.httpTokens[tokenPath] = b
|
||||
if m.Cache != nil {
|
||||
m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteHTTPToken removes an http-01 token value from both in-memory map
|
||||
// and the optional Cache, ignoring any error returned from the latter.
|
||||
//
|
||||
// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout.
|
||||
func (m *Manager) deleteHTTPToken(tokenPath string) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
delete(m.httpTokens, tokenPath)
|
||||
if m.Cache != nil {
|
||||
m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath))
|
||||
}
|
||||
}
|
||||
|
||||
// httpTokenCacheKey returns a key at which an http-01 token value may be stored
|
||||
// in the Manager's optional Cache.
|
||||
func httpTokenCacheKey(tokenPath string) string {
|
||||
return "http-01-" + path.Base(tokenPath)
|
||||
}
|
||||
|
||||
// renew starts a cert renewal timer loop, one per domain.
|
||||
//
|
||||
// The loop is scheduled in two cases:
|
||||
// - a cert was fetched from cache for the first time (wasn't in m.state)
|
||||
// - a new cert was created by m.createCert
|
||||
//
|
||||
// The key argument is a certificate private key.
|
||||
// The exp argument is the cert expiration time (NotAfter).
|
||||
func (m *Manager) renew(domain string, key crypto.Signer, exp time.Time) {
|
||||
m.renewalMu.Lock()
|
||||
defer m.renewalMu.Unlock()
|
||||
if m.renewal[domain] != nil {
|
||||
// another goroutine is already on it
|
||||
return
|
||||
}
|
||||
if m.renewal == nil {
|
||||
m.renewal = make(map[string]*domainRenewal)
|
||||
}
|
||||
dr := &domainRenewal{m: m, domain: domain, key: key}
|
||||
m.renewal[domain] = dr
|
||||
dr.start(exp)
|
||||
}
|
||||
|
||||
// stopRenew stops all currently running cert renewal timers.
|
||||
// The timers are not restarted during the lifetime of the Manager.
|
||||
func (m *Manager) stopRenew() {
|
||||
m.renewalMu.Lock()
|
||||
defer m.renewalMu.Unlock()
|
||||
for name, dr := range m.renewal {
|
||||
delete(m.renewal, name)
|
||||
dr.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) {
|
||||
const keyName = "acme_account.key"
|
||||
|
||||
genKey := func() (*ecdsa.PrivateKey, error) {
|
||||
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
}
|
||||
|
||||
if m.Cache == nil {
|
||||
return genKey()
|
||||
}
|
||||
|
||||
data, err := m.Cache.Get(ctx, keyName)
|
||||
if err == ErrCacheMiss {
|
||||
key, err := genKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := encodeECDSAKey(&buf, key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
priv, _ := pem.Decode(data)
|
||||
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
|
||||
return nil, errors.New("acme/autocert: invalid account key found in cache")
|
||||
}
|
||||
return parsePrivateKey(priv.Bytes)
|
||||
}
|
||||
|
||||
func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) {
|
||||
m.clientMu.Lock()
|
||||
defer m.clientMu.Unlock()
|
||||
if m.client != nil {
|
||||
return m.client, nil
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = &acme.Client{DirectoryURL: acme.LetsEncryptURL}
|
||||
}
|
||||
if client.Key == nil {
|
||||
var err error
|
||||
client.Key, err = m.accountKey(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var contact []string
|
||||
if m.Email != "" {
|
||||
contact = []string{"mailto:" + m.Email}
|
||||
}
|
||||
a := &acme.Account{Contact: contact}
|
||||
_, err := client.Register(ctx, a, m.Prompt)
|
||||
if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict {
|
||||
// conflict indicates the key is already registered
|
||||
m.client = client
|
||||
err = nil
|
||||
}
|
||||
return m.client, err
|
||||
}
|
||||
|
||||
func (m *Manager) hostPolicy() HostPolicy {
|
||||
if m.HostPolicy != nil {
|
||||
return m.HostPolicy
|
||||
}
|
||||
return defaultHostPolicy
|
||||
}
|
||||
|
||||
func (m *Manager) renewBefore() time.Duration {
|
||||
if m.RenewBefore > renewJitter {
|
||||
return m.RenewBefore
|
||||
}
|
||||
return 720 * time.Hour // 30 days
|
||||
}
|
||||
|
||||
// certState is ready when its mutex is unlocked for reading.
|
||||
type certState struct {
|
||||
sync.RWMutex
|
||||
locked bool // locked for read/write
|
||||
key crypto.Signer // private key for cert
|
||||
cert [][]byte // DER encoding
|
||||
leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil
|
||||
}
|
||||
|
||||
// tlscert creates a tls.Certificate from s.key and s.cert.
|
||||
// Callers should wrap it in s.RLock() and s.RUnlock().
|
||||
func (s *certState) tlscert() (*tls.Certificate, error) {
|
||||
if s.key == nil {
|
||||
return nil, errors.New("acme/autocert: missing signer")
|
||||
}
|
||||
if len(s.cert) == 0 {
|
||||
return nil, errors.New("acme/autocert: missing certificate")
|
||||
}
|
||||
return &tls.Certificate{
|
||||
PrivateKey: s.key,
|
||||
Certificate: s.cert,
|
||||
Leaf: s.leaf,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// certRequest creates a certificate request for the given common name cn
|
||||
// and optional SANs.
|
||||
func certRequest(key crypto.Signer, cn string, san ...string) ([]byte, error) {
|
||||
req := &x509.CertificateRequest{
|
||||
Subject: pkix.Name{CommonName: cn},
|
||||
DNSNames: san,
|
||||
}
|
||||
return x509.CreateCertificateRequest(rand.Reader, req, key)
|
||||
}
|
||||
|
||||
// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
|
||||
// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys.
|
||||
// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
|
||||
//
|
||||
// Inspired by parsePrivateKey in crypto/tls/tls.go.
|
||||
func parsePrivateKey(der []byte) (crypto.Signer, error) {
|
||||
if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return key, nil
|
||||
default:
|
||||
return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping")
|
||||
}
|
||||
}
|
||||
if key, err := x509.ParseECPrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("acme/autocert: failed to parse private key")
|
||||
}
|
||||
|
||||
// validCert parses a cert chain provided as der argument and verifies the leaf, der[0],
|
||||
// corresponds to the private key, as well as the domain match and expiration dates.
|
||||
// It doesn't do any revocation checking.
|
||||
//
|
||||
// The returned value is the verified leaf cert.
|
||||
func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certificate, err error) {
|
||||
// parse public part(s)
|
||||
var n int
|
||||
for _, b := range der {
|
||||
n += len(b)
|
||||
}
|
||||
pub := make([]byte, n)
|
||||
n = 0
|
||||
for _, b := range der {
|
||||
n += copy(pub[n:], b)
|
||||
}
|
||||
x509Cert, err := x509.ParseCertificates(pub)
|
||||
if len(x509Cert) == 0 {
|
||||
return nil, errors.New("acme/autocert: no public key found")
|
||||
}
|
||||
// verify the leaf is not expired and matches the domain name
|
||||
leaf = x509Cert[0]
|
||||
now := timeNow()
|
||||
if now.Before(leaf.NotBefore) {
|
||||
return nil, errors.New("acme/autocert: certificate is not valid yet")
|
||||
}
|
||||
if now.After(leaf.NotAfter) {
|
||||
return nil, errors.New("acme/autocert: expired certificate")
|
||||
}
|
||||
if err := leaf.VerifyHostname(domain); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// ensure the leaf corresponds to the private key
|
||||
switch pub := leaf.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
prv, ok := key.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("acme/autocert: private key type does not match public key type")
|
||||
}
|
||||
if pub.N.Cmp(prv.N) != 0 {
|
||||
return nil, errors.New("acme/autocert: private key does not match public key")
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
prv, ok := key.(*ecdsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("acme/autocert: private key type does not match public key type")
|
||||
}
|
||||
if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 {
|
||||
return nil, errors.New("acme/autocert: private key does not match public key")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("acme/autocert: unknown public key algorithm")
|
||||
}
|
||||
return leaf, nil
|
||||
}
|
||||
|
||||
type lockedMathRand struct {
|
||||
sync.Mutex
|
||||
rnd *mathrand.Rand
|
||||
}
|
||||
|
||||
func (r *lockedMathRand) int63n(max int64) int64 {
|
||||
r.Lock()
|
||||
n := r.rnd.Int63n(max)
|
||||
r.Unlock()
|
||||
return n
|
||||
}
|
||||
|
||||
// For easier testing.
|
||||
var (
|
||||
timeNow = time.Now
|
||||
|
||||
// Called when a state is removed.
|
||||
testDidRemoveState = func(domain string) {}
|
||||
)
|
757
vendor/golang.org/x/crypto/acme/autocert/autocert_test.go
generated
vendored
Normal file
@ -0,0 +1,757 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
)
|
||||
|
||||
var discoTmpl = template.Must(template.New("disco").Parse(`{
|
||||
"new-reg": "{{.}}/new-reg",
|
||||
"new-authz": "{{.}}/new-authz",
|
||||
"new-cert": "{{.}}/new-cert"
|
||||
}`))
|
||||
|
||||
var authzTmpl = template.Must(template.New("authz").Parse(`{
|
||||
"status": "pending",
|
||||
"challenges": [
|
||||
{
|
||||
"uri": "{{.}}/challenge/1",
|
||||
"type": "tls-sni-01",
|
||||
"token": "token-01"
|
||||
},
|
||||
{
|
||||
"uri": "{{.}}/challenge/2",
|
||||
"type": "tls-sni-02",
|
||||
"token": "token-02"
|
||||
},
|
||||
{
|
||||
"uri": "{{.}}/challenge/dns-01",
|
||||
"type": "dns-01",
|
||||
"token": "token-dns-01"
|
||||
},
|
||||
{
|
||||
"uri": "{{.}}/challenge/http-01",
|
||||
"type": "http-01",
|
||||
"token": "token-http-01"
|
||||
}
|
||||
]
|
||||
}`))
|
||||
|
||||
type memCache struct {
|
||||
mu sync.Mutex
|
||||
keyData map[string][]byte
|
||||
}
|
||||
|
||||
func (m *memCache) Get(ctx context.Context, key string) ([]byte, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
v, ok := m.keyData[key]
|
||||
if !ok {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (m *memCache) Put(ctx context.Context, key string, data []byte) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.keyData[key] = data
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *memCache) Delete(ctx context.Context, key string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
delete(m.keyData, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func newMemCache() *memCache {
|
||||
return &memCache{
|
||||
keyData: make(map[string][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
func dummyCert(pub interface{}, san ...string) ([]byte, error) {
|
||||
return dateDummyCert(pub, time.Now(), time.Now().Add(90*24*time.Hour), san...)
|
||||
}
|
||||
|
||||
func dateDummyCert(pub interface{}, start, end time.Time, san ...string) ([]byte, error) {
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
NotBefore: start,
|
||||
NotAfter: end,
|
||||
BasicConstraintsValid: true,
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment,
|
||||
DNSNames: san,
|
||||
}
|
||||
if pub == nil {
|
||||
pub = &key.PublicKey
|
||||
}
|
||||
return x509.CreateCertificate(rand.Reader, t, t, pub, key)
|
||||
}
|
||||
|
||||
func decodePayload(v interface{}, r io.Reader) error {
|
||||
var req struct{ Payload string }
|
||||
if err := json.NewDecoder(r).Decode(&req); err != nil {
|
||||
return err
|
||||
}
|
||||
payload, err := base64.RawURLEncoding.DecodeString(req.Payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(payload, v)
|
||||
}
|
||||
|
||||
func TestGetCertificate(t *testing.T) {
|
||||
man := &Manager{Prompt: AcceptTOS}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
}
|
||||
|
||||
func TestGetCertificate_trailingDot(t *testing.T) {
|
||||
man := &Manager{Prompt: AcceptTOS}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org."}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
}
|
||||
|
||||
func TestGetCertificate_ForceRSA(t *testing.T) {
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: newMemCache(),
|
||||
ForceRSA: true,
|
||||
}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
|
||||
cert, err := man.cacheGet(context.Background(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if _, ok := cert.PrivateKey.(*rsa.PrivateKey); !ok {
|
||||
t.Errorf("cert.PrivateKey is %T; want *rsa.PrivateKey", cert.PrivateKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCertificate_nilPrompt(t *testing.T) {
|
||||
man := &Manager{}
|
||||
defer man.stopRenew()
|
||||
url, finish := startACMEServerStub(t, man, "example.org")
|
||||
defer finish()
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man.Client = &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: url,
|
||||
}
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
if _, err := man.GetCertificate(hello); err == nil {
|
||||
t.Error("got certificate for example.org; wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCertificate_expiredCache(t *testing.T) {
|
||||
// Make an expired cert and cache it.
|
||||
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmpl := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "example.org"},
|
||||
NotAfter: time.Now(),
|
||||
}
|
||||
pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &pk.PublicKey, pk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tlscert := &tls.Certificate{
|
||||
Certificate: [][]byte{pub},
|
||||
PrivateKey: pk,
|
||||
}
|
||||
|
||||
man := &Manager{Prompt: AcceptTOS, Cache: newMemCache()}
|
||||
defer man.stopRenew()
|
||||
if err := man.cachePut(context.Background(), "example.org", tlscert); err != nil {
|
||||
t.Fatalf("man.cachePut: %v", err)
|
||||
}
|
||||
|
||||
// The expired cached cert should trigger a new cert issuance
|
||||
// and return without an error.
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
}
|
||||
|
||||
func TestGetCertificate_failedAttempt(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
const example = "example.org"
|
||||
d := createCertRetryAfter
|
||||
f := testDidRemoveState
|
||||
defer func() {
|
||||
createCertRetryAfter = d
|
||||
testDidRemoveState = f
|
||||
}()
|
||||
createCertRetryAfter = 0
|
||||
done := make(chan struct{})
|
||||
testDidRemoveState = func(domain string) {
|
||||
if domain != example {
|
||||
t.Errorf("testDidRemoveState: domain = %q; want %q", domain, example)
|
||||
}
|
||||
close(done)
|
||||
}
|
||||
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: ts.URL,
|
||||
},
|
||||
}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: example}
|
||||
if _, err := man.GetCertificate(hello); err == nil {
|
||||
t.Error("GetCertificate: err is nil")
|
||||
}
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("took too long to remove the %q state", example)
|
||||
case <-done:
|
||||
man.stateMu.Lock()
|
||||
defer man.stateMu.Unlock()
|
||||
if v, exist := man.state[example]; exist {
|
||||
t.Errorf("state exists for %q: %+v", example, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startACMEServerStub runs an ACME server
|
||||
// The domain argument is the expected domain name of a certificate request.
|
||||
func startACMEServerStub(t *testing.T, man *Manager, domain string) (url string, finish func()) {
|
||||
// echo token-02 | shasum -a 256
|
||||
// then divide result in 2 parts separated by dot
|
||||
tokenCertName := "4e8eb87631187e9ff2153b56b13a4dec.13a35d002e485d60ff37354b32f665d9.token.acme.invalid"
|
||||
verifyTokenCert := func() {
|
||||
hello := &tls.ClientHelloInfo{ServerName: tokenCertName}
|
||||
_, err := man.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Errorf("verifyTokenCert: GetCertificate(%q): %v", tokenCertName, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ACME CA server stub
|
||||
var ca *httptest.Server
|
||||
ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Replay-Nonce", "nonce")
|
||||
if r.Method == "HEAD" {
|
||||
// a nonce request
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
// discovery
|
||||
case "/":
|
||||
if err := discoTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("discoTmpl: %v", err)
|
||||
}
|
||||
// client key registration
|
||||
case "/new-reg":
|
||||
w.Write([]byte("{}"))
|
||||
// domain authorization
|
||||
case "/new-authz":
|
||||
w.Header().Set("Location", ca.URL+"/authz/1")
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
if err := authzTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("authzTmpl: %v", err)
|
||||
}
|
||||
// accept tls-sni-02 challenge
|
||||
case "/challenge/2":
|
||||
verifyTokenCert()
|
||||
w.Write([]byte("{}"))
|
||||
// authorization status
|
||||
case "/authz/1":
|
||||
w.Write([]byte(`{"status": "valid"}`))
|
||||
// cert request
|
||||
case "/new-cert":
|
||||
var req struct {
|
||||
CSR string `json:"csr"`
|
||||
}
|
||||
decodePayload(&req, r.Body)
|
||||
b, _ := base64.RawURLEncoding.DecodeString(req.CSR)
|
||||
csr, err := x509.ParseCertificateRequest(b)
|
||||
if err != nil {
|
||||
t.Errorf("new-cert: CSR: %v", err)
|
||||
}
|
||||
if csr.Subject.CommonName != domain {
|
||||
t.Errorf("CommonName in CSR = %q; want %q", csr.Subject.CommonName, domain)
|
||||
}
|
||||
der, err := dummyCert(csr.PublicKey, domain)
|
||||
if err != nil {
|
||||
t.Errorf("new-cert: dummyCert: %v", err)
|
||||
}
|
||||
chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL)
|
||||
w.Header().Set("Link", chainUp)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
w.Write(der)
|
||||
// CA chain cert
|
||||
case "/ca-cert":
|
||||
der, err := dummyCert(nil, "ca")
|
||||
if err != nil {
|
||||
t.Errorf("ca-cert: dummyCert: %v", err)
|
||||
}
|
||||
w.Write(der)
|
||||
default:
|
||||
t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
|
||||
}
|
||||
}))
|
||||
finish = func() {
|
||||
ca.Close()
|
||||
|
||||
// make sure token cert was removed
|
||||
cancel := make(chan struct{})
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
tick := time.NewTicker(100 * time.Millisecond)
|
||||
defer tick.Stop()
|
||||
for {
|
||||
hello := &tls.ClientHelloInfo{ServerName: tokenCertName}
|
||||
if _, err := man.GetCertificate(hello); err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-tick.C:
|
||||
case <-cancel:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(5 * time.Second):
|
||||
close(cancel)
|
||||
t.Error("token cert was not removed")
|
||||
<-done
|
||||
}
|
||||
}
|
||||
return ca.URL, finish
|
||||
}
|
||||
|
||||
// tests man.GetCertificate flow using the provided hello argument.
|
||||
// The domain argument is the expected domain name of a certificate request.
|
||||
func testGetCertificate(t *testing.T, man *Manager, domain string, hello *tls.ClientHelloInfo) {
|
||||
url, finish := startACMEServerStub(t, man, domain)
|
||||
defer finish()
|
||||
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man.Client = &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: url,
|
||||
}
|
||||
|
||||
// simulate tls.Config.GetCertificate
|
||||
var tlscert *tls.Certificate
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
tlscert, err = man.GetCertificate(hello)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(time.Minute):
|
||||
t.Fatal("man.GetCertificate took too long to return")
|
||||
case <-done:
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("man.GetCertificate: %v", err)
|
||||
}
|
||||
|
||||
// verify the tlscert is the same we responded with from the CA stub
|
||||
if len(tlscert.Certificate) == 0 {
|
||||
t.Fatal("len(tlscert.Certificate) is 0")
|
||||
}
|
||||
cert, err := x509.ParseCertificate(tlscert.Certificate[0])
|
||||
if err != nil {
|
||||
t.Fatalf("x509.ParseCertificate: %v", err)
|
||||
}
|
||||
if len(cert.DNSNames) == 0 || cert.DNSNames[0] != domain {
|
||||
t.Errorf("cert.DNSNames = %v; want %q", cert.DNSNames, domain)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestVerifyHTTP01(t *testing.T) {
|
||||
var (
|
||||
http01 http.Handler
|
||||
|
||||
authzCount int // num. of created authorizations
|
||||
didAcceptHTTP01 bool
|
||||
)
|
||||
|
||||
verifyHTTPToken := func() {
|
||||
r := httptest.NewRequest("GET", "/.well-known/acme-challenge/token-http-01", nil)
|
||||
w := httptest.NewRecorder()
|
||||
http01.ServeHTTP(w, r)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("http token: w.Code = %d; want %d", w.Code, http.StatusOK)
|
||||
}
|
||||
if v := string(w.Body.Bytes()); !strings.HasPrefix(v, "token-http-01.") {
|
||||
t.Errorf("http token value = %q; want 'token-http-01.' prefix", v)
|
||||
}
|
||||
}
|
||||
|
||||
// ACME CA server stub, only the needed bits.
|
||||
// TODO: Merge this with startACMEServerStub, making it a configurable CA for testing.
|
||||
var ca *httptest.Server
|
||||
ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Replay-Nonce", "nonce")
|
||||
if r.Method == "HEAD" {
|
||||
// a nonce request
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
// Discovery.
|
||||
case "/":
|
||||
if err := discoTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("discoTmpl: %v", err)
|
||||
}
|
||||
// Client key registration.
|
||||
case "/new-reg":
|
||||
w.Write([]byte("{}"))
|
||||
// New domain authorization.
|
||||
case "/new-authz":
|
||||
authzCount++
|
||||
w.Header().Set("Location", fmt.Sprintf("%s/authz/%d", ca.URL, authzCount))
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
if err := authzTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("authzTmpl: %v", err)
|
||||
}
|
||||
// Accept tls-sni-02.
|
||||
case "/challenge/2":
|
||||
w.Write([]byte("{}"))
|
||||
// Reject tls-sni-01.
|
||||
case "/challenge/1":
|
||||
http.Error(w, "won't accept tls-sni-01", http.StatusBadRequest)
|
||||
// Should not accept dns-01.
|
||||
case "/challenge/dns-01":
|
||||
t.Errorf("dns-01 challenge was accepted")
|
||||
http.Error(w, "won't accept dns-01", http.StatusBadRequest)
|
||||
// Accept http-01.
|
||||
case "/challenge/http-01":
|
||||
didAcceptHTTP01 = true
|
||||
verifyHTTPToken()
|
||||
w.Write([]byte("{}"))
|
||||
// Authorization statuses.
|
||||
// Make tls-sni-xxx invalid.
|
||||
case "/authz/1", "/authz/2":
|
||||
w.Write([]byte(`{"status": "invalid"}`))
|
||||
case "/authz/3", "/authz/4":
|
||||
w.Write([]byte(`{"status": "valid"}`))
|
||||
default:
|
||||
http.NotFound(w, r)
|
||||
t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
|
||||
}
|
||||
}))
|
||||
defer ca.Close()
|
||||
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
m := &Manager{
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: ca.URL,
|
||||
},
|
||||
}
|
||||
http01 = m.HTTPHandler(nil)
|
||||
if err := m.verify(context.Background(), m.Client, "example.org"); err != nil {
|
||||
t.Errorf("m.verify: %v", err)
|
||||
}
|
||||
// Only tls-sni-01, tls-sni-02 and http-01 must be accepted
|
||||
// The dns-01 challenge is unsupported.
|
||||
if authzCount != 3 {
|
||||
t.Errorf("authzCount = %d; want 3", authzCount)
|
||||
}
|
||||
if !didAcceptHTTP01 {
|
||||
t.Error("did not accept http-01 challenge")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPHandlerDefaultFallback(t *testing.T) {
|
||||
tt := []struct {
|
||||
method, url string
|
||||
wantCode int
|
||||
wantLocation string
|
||||
}{
|
||||
{"GET", "http://example.org", 302, "https://example.org/"},
|
||||
{"GET", "http://example.org/foo", 302, "https://example.org/foo"},
|
||||
{"GET", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"},
|
||||
{"GET", "http://example.org/?a=b", 302, "https://example.org/?a=b"},
|
||||
{"GET", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"},
|
||||
{"GET", "http://example.org:80/foo?a=b", 302, "https://example.org:443/foo?a=b"},
|
||||
{"GET", "http://example.org:80/foo%20bar", 302, "https://example.org:443/foo%20bar"},
|
||||
{"GET", "http://[2602:d1:xxxx::c60a]:1234", 302, "https://[2602:d1:xxxx::c60a]:443/"},
|
||||
{"GET", "http://[2602:d1:xxxx::c60a]", 302, "https://[2602:d1:xxxx::c60a]/"},
|
||||
{"GET", "http://[2602:d1:xxxx::c60a]/foo?a=b", 302, "https://[2602:d1:xxxx::c60a]/foo?a=b"},
|
||||
{"HEAD", "http://example.org", 302, "https://example.org/"},
|
||||
{"HEAD", "http://example.org/foo", 302, "https://example.org/foo"},
|
||||
{"HEAD", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"},
|
||||
{"HEAD", "http://example.org/?a=b", 302, "https://example.org/?a=b"},
|
||||
{"HEAD", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"},
|
||||
{"POST", "http://example.org", 400, ""},
|
||||
{"PUT", "http://example.org", 400, ""},
|
||||
{"GET", "http://example.org/.well-known/acme-challenge/x", 404, ""},
|
||||
}
|
||||
var m Manager
|
||||
h := m.HTTPHandler(nil)
|
||||
for i, test := range tt {
|
||||
r := httptest.NewRequest(test.method, test.url, nil)
|
||||
w := httptest.NewRecorder()
|
||||
h.ServeHTTP(w, r)
|
||||
if w.Code != test.wantCode {
|
||||
t.Errorf("%d: w.Code = %d; want %d", i, w.Code, test.wantCode)
|
||||
t.Errorf("%d: body: %s", i, w.Body.Bytes())
|
||||
}
|
||||
if v := w.Header().Get("Location"); v != test.wantLocation {
|
||||
t.Errorf("%d: Location = %q; want %q", i, v, test.wantLocation)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccountKeyCache(t *testing.T) {
|
||||
m := Manager{Cache: newMemCache()}
|
||||
ctx := context.Background()
|
||||
k1, err := m.accountKey(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
k2, err := m.accountKey(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(k1, k2) {
|
||||
t.Errorf("account keys don't match: k1 = %#v; k2 = %#v", k1, k2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmpl := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "example.org"},
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
}
|
||||
pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &privKey.PublicKey, privKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tlscert := &tls.Certificate{
|
||||
Certificate: [][]byte{pub},
|
||||
PrivateKey: privKey,
|
||||
}
|
||||
|
||||
man := &Manager{Cache: newMemCache()}
|
||||
defer man.stopRenew()
|
||||
ctx := context.Background()
|
||||
if err := man.cachePut(ctx, "example.org", tlscert); err != nil {
|
||||
t.Fatalf("man.cachePut: %v", err)
|
||||
}
|
||||
res, err := man.cacheGet(ctx, "example.org")
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if res == nil {
|
||||
t.Fatal("res is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostWhitelist(t *testing.T) {
|
||||
policy := HostWhitelist("example.com", "example.org", "*.example.net")
|
||||
tt := []struct {
|
||||
host string
|
||||
allow bool
|
||||
}{
|
||||
{"example.com", true},
|
||||
{"example.org", true},
|
||||
{"one.example.com", false},
|
||||
{"two.example.org", false},
|
||||
{"three.example.net", false},
|
||||
{"dummy", false},
|
||||
}
|
||||
for i, test := range tt {
|
||||
err := policy(nil, test.host)
|
||||
if err != nil && test.allow {
|
||||
t.Errorf("%d: policy(%q): %v; want nil", i, test.host, err)
|
||||
}
|
||||
if err == nil && !test.allow {
|
||||
t.Errorf("%d: policy(%q): nil; want an error", i, test.host)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidCert(t *testing.T) {
|
||||
key1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
key2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
key3, err := rsa.GenerateKey(rand.Reader, 512)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cert1, err := dummyCert(key1.Public(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cert2, err := dummyCert(key2.Public(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cert3, err := dummyCert(key3.Public(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
now := time.Now()
|
||||
early, err := dateDummyCert(key1.Public(), now.Add(time.Hour), now.Add(2*time.Hour), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expired, err := dateDummyCert(key1.Public(), now.Add(-2*time.Hour), now.Add(-time.Hour), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tt := []struct {
|
||||
domain string
|
||||
key crypto.Signer
|
||||
cert [][]byte
|
||||
ok bool
|
||||
}{
|
||||
{"example.org", key1, [][]byte{cert1}, true},
|
||||
{"example.org", key3, [][]byte{cert3}, true},
|
||||
{"example.org", key1, [][]byte{cert1, cert2, cert3}, true},
|
||||
{"example.org", key1, [][]byte{cert1, {1}}, false},
|
||||
{"example.org", key1, [][]byte{{1}}, false},
|
||||
{"example.org", key1, [][]byte{cert2}, false},
|
||||
{"example.org", key2, [][]byte{cert1}, false},
|
||||
{"example.org", key1, [][]byte{cert3}, false},
|
||||
{"example.org", key3, [][]byte{cert1}, false},
|
||||
{"example.net", key1, [][]byte{cert1}, false},
|
||||
{"example.org", key1, [][]byte{early}, false},
|
||||
{"example.org", key1, [][]byte{expired}, false},
|
||||
}
|
||||
for i, test := range tt {
|
||||
leaf, err := validCert(test.domain, test.cert, test.key)
|
||||
if err != nil && test.ok {
|
||||
t.Errorf("%d: err = %v", i, err)
|
||||
}
|
||||
if err == nil && !test.ok {
|
||||
t.Errorf("%d: err is nil", i)
|
||||
}
|
||||
if err == nil && test.ok && leaf == nil {
|
||||
t.Errorf("%d: leaf is nil", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type cacheGetFunc func(ctx context.Context, key string) ([]byte, error)
|
||||
|
||||
func (f cacheGetFunc) Get(ctx context.Context, key string) ([]byte, error) {
|
||||
return f(ctx, key)
|
||||
}
|
||||
|
||||
func (f cacheGetFunc) Put(ctx context.Context, key string, data []byte) error {
|
||||
return fmt.Errorf("unsupported Put of %q = %q", key, data)
|
||||
}
|
||||
|
||||
func (f cacheGetFunc) Delete(ctx context.Context, key string) error {
|
||||
return fmt.Errorf("unsupported Delete of %q", key)
|
||||
}
|
||||
|
||||
func TestManagerGetCertificateBogusSNI(t *testing.T) {
|
||||
m := Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: cacheGetFunc(func(ctx context.Context, key string) ([]byte, error) {
|
||||
return nil, fmt.Errorf("cache.Get of %s", key)
|
||||
}),
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
wantErr string
|
||||
}{
|
||||
{"foo.com", "cache.Get of foo.com"},
|
||||
{"foo.com.", "cache.Get of foo.com"},
|
||||
{`a\b.com`, "acme/autocert: server name contains invalid character"},
|
||||
{`a/b.com`, "acme/autocert: server name contains invalid character"},
|
||||
{"", "acme/autocert: missing server name"},
|
||||
{"foo", "acme/autocert: server name component count invalid"},
|
||||
{".foo", "acme/autocert: server name component count invalid"},
|
||||
{"foo.", "acme/autocert: server name component count invalid"},
|
||||
{"fo.o", "cache.Get of fo.o"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
_, err := m.GetCertificate(&tls.ClientHelloInfo{ServerName: tt.name})
|
||||
got := fmt.Sprint(err)
|
||||
if got != tt.wantErr {
|
||||
t.Errorf("GetCertificate(SNI = %q) = %q; want %q", tt.name, got, tt.wantErr)
|
||||
}
|
||||
}
|
||||
}
|
130
vendor/golang.org/x/crypto/acme/autocert/cache.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// ErrCacheMiss is returned when a certificate is not found in cache.
|
||||
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")
|
||||
|
||||
// Cache is used by Manager to store and retrieve previously obtained certificates
|
||||
// as opaque data.
|
||||
//
|
||||
// The key argument of the methods refers to a domain name but need not be an FQDN.
|
||||
// Cache implementations should not rely on the key naming pattern.
|
||||
type Cache interface {
|
||||
// Get returns a certificate data for the specified key.
|
||||
// If there's no such key, Get returns ErrCacheMiss.
|
||||
Get(ctx context.Context, key string) ([]byte, error)
|
||||
|
||||
// Put stores the data in the cache under the specified key.
|
||||
// Underlying implementations may use any data storage format,
|
||||
// as long as the reverse operation, Get, results in the original data.
|
||||
Put(ctx context.Context, key string, data []byte) error
|
||||
|
||||
// Delete removes a certificate data from the cache under the specified key.
|
||||
// If there's no such key in the cache, Delete returns nil.
|
||||
Delete(ctx context.Context, key string) error
|
||||
}
|
||||
|
||||
// DirCache implements Cache using a directory on the local filesystem.
|
||||
// If the directory does not exist, it will be created with 0700 permissions.
|
||||
type DirCache string
|
||||
|
||||
// Get reads a certificate data from the specified file name.
|
||||
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
|
||||
name = filepath.Join(string(d), name)
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
done = make(chan struct{})
|
||||
)
|
||||
go func() {
|
||||
data, err = ioutil.ReadFile(name)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
|
||||
// Put writes the certificate data to the specified file name.
|
||||
// The file will be created with 0600 permissions.
|
||||
func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
|
||||
if err := os.MkdirAll(string(d), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
var err error
|
||||
go func() {
|
||||
defer close(done)
|
||||
var tmp string
|
||||
if tmp, err = d.writeTempFile(name, data); err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Don't overwrite the file if the context was canceled.
|
||||
default:
|
||||
newName := filepath.Join(string(d), name)
|
||||
err = os.Rename(tmp, newName)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes the specified file name.
|
||||
func (d DirCache) Delete(ctx context.Context, name string) error {
|
||||
name = filepath.Join(string(d), name)
|
||||
var (
|
||||
err error
|
||||
done = make(chan struct{})
|
||||
)
|
||||
go func() {
|
||||
err = os.Remove(name)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeTempFile writes b to a temporary file, closes the file and returns its path.
|
||||
func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) {
|
||||
// TempFile uses 0600 permissions
|
||||
f, err := ioutil.TempFile(string(d), prefix)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := f.Write(b); err != nil {
|
||||
f.Close()
|
||||
return "", err
|
||||
}
|
||||
return f.Name(), f.Close()
|
||||
}
|
58
vendor/golang.org/x/crypto/acme/autocert/cache_test.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package autocert

import (
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"testing"
)

// make sure DirCache satisfies Cache interface
var _ Cache = DirCache("/")

func TestDirCache(t *testing.T) {
	dir, err := ioutil.TempDir("", "autocert")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	dir = filepath.Join(dir, "certs") // a nonexistent dir
	cache := DirCache(dir)
	ctx := context.Background()

	// test cache miss
	if _, err := cache.Get(ctx, "nonexistent"); err != ErrCacheMiss {
		t.Errorf("get: %v; want ErrCacheMiss", err)
	}

	// test put/get
	b1 := []byte{1}
	if err := cache.Put(ctx, "dummy", b1); err != nil {
		t.Fatalf("put: %v", err)
	}
	b2, err := cache.Get(ctx, "dummy")
	if err != nil {
		t.Fatalf("get: %v", err)
	}
	if !reflect.DeepEqual(b1, b2) {
		t.Errorf("b1 = %v; want %v", b1, b2)
	}
	name := filepath.Join(dir, "dummy")
	if _, err := os.Stat(name); err != nil {
		t.Error(err)
	}

	// test delete
	if err := cache.Delete(ctx, "dummy"); err != nil {
		t.Fatalf("delete: %v", err)
	}
	if _, err := cache.Get(ctx, "dummy"); err != ErrCacheMiss {
		t.Errorf("get: %v; want ErrCacheMiss", err)
	}
}
|
36
vendor/golang.org/x/crypto/acme/autocert/example_test.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package autocert_test

import (
	"crypto/tls"
	"fmt"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func ExampleNewListener() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Hello, TLS user! Your config: %+v", r.TLS)
	})
	log.Fatal(http.Serve(autocert.NewListener("example.com"), mux))
}

func ExampleManager() {
	m := &autocert.Manager{
		Cache:      autocert.DirCache("secret-dir"),
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.org"),
	}
	go http.ListenAndServe(":http", m.HTTPHandler(nil))
	s := &http.Server{
		Addr:      ":https",
		TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
	}
	s.ListenAndServeTLS("", "")
}
|
160
vendor/golang.org/x/crypto/acme/autocert/listener.go
generated
vendored
Normal file
160
vendor/golang.org/x/crypto/acme/autocert/listener.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewListener returns a net.Listener that listens on the standard TLS
|
||||
// port (443) on all interfaces and returns *tls.Conn connections with
|
||||
// LetsEncrypt certificates for the provided domain or domains.
|
||||
//
|
||||
// It enables one-line HTTPS servers:
|
||||
//
|
||||
// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler))
|
||||
//
|
||||
// NewListener is a convenience function for a common configuration.
|
||||
// More complex or custom configurations can use the autocert.Manager
|
||||
// type instead.
|
||||
//
|
||||
// Use of this function implies acceptance of the LetsEncrypt Terms of
|
||||
// Service. If domains is not empty, the provided domains are passed
|
||||
// to HostWhitelist. If domains is empty, the listener will do
|
||||
// LetsEncrypt challenges for any requested domain, which is not
|
||||
// recommended.
|
||||
//
|
||||
// Certificates are cached in a "golang-autocert" directory under an
|
||||
// operating system-specific cache or temp directory. This may not
|
||||
// be suitable for servers spanning multiple machines.
|
||||
//
|
||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
||||
// should only be used with servers that support HTTP/2.
|
||||
//
|
||||
// The returned Listener also enables TCP keep-alives on the accepted
|
||||
// connections. The returned *tls.Conn are returned before their TLS
|
||||
// handshake has completed.
|
||||
func NewListener(domains ...string) net.Listener {
|
||||
m := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
}
|
||||
if len(domains) > 0 {
|
||||
m.HostPolicy = HostWhitelist(domains...)
|
||||
}
|
||||
dir := cacheDir()
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
log.Printf("warning: autocert.NewListener not using a cache: %v", err)
|
||||
} else {
|
||||
m.Cache = DirCache(dir)
|
||||
}
|
||||
return m.Listener()
|
||||
}
|
||||
|
||||
// Listener listens on the standard TLS port (443) on all interfaces
|
||||
// and returns a net.Listener returning *tls.Conn connections.
|
||||
//
|
||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
||||
// should only be used with servers that support HTTP/2.
|
||||
//
|
||||
// The returned Listener also enables TCP keep-alives on the accepted
|
||||
// connections. The returned *tls.Conn are returned before their TLS
|
||||
// handshake has completed.
|
||||
//
|
||||
// Unlike NewListener, it is the caller's responsibility to initialize
|
||||
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
|
||||
func (m *Manager) Listener() net.Listener {
|
||||
ln := &listener{
|
||||
m: m,
|
||||
conf: &tls.Config{
|
||||
GetCertificate: m.GetCertificate, // bonus: panic on nil m
|
||||
NextProtos: []string{"h2", "http/1.1"}, // Enable HTTP/2
|
||||
},
|
||||
}
|
||||
ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
|
||||
return ln
|
||||
}
|
||||
|
||||
type listener struct {
|
||||
m *Manager
|
||||
conf *tls.Config
|
||||
|
||||
tcpListener net.Listener
|
||||
tcpListenErr error
|
||||
}
|
||||
|
||||
func (ln *listener) Accept() (net.Conn, error) {
|
||||
if ln.tcpListenErr != nil {
|
||||
return nil, ln.tcpListenErr
|
||||
}
|
||||
conn, err := ln.tcpListener.Accept()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tcpConn := conn.(*net.TCPConn)
|
||||
|
||||
// Because Listener is a convenience function, help out with
|
||||
// this too. This is not possible for the caller to set once
|
||||
// we return a *tcp.Conn wrapping an inaccessible net.Conn.
|
||||
// If callers don't want this, they can do things the manual
|
||||
// way and tweak as needed. But this is what net/http does
|
||||
// itself, so copy that. If net/http changes, we can change
|
||||
// here too.
|
||||
tcpConn.SetKeepAlive(true)
|
||||
tcpConn.SetKeepAlivePeriod(3 * time.Minute)
|
||||
|
||||
return tls.Server(tcpConn, ln.conf), nil
|
||||
}
|
||||
|
||||
func (ln *listener) Addr() net.Addr {
|
||||
if ln.tcpListener != nil {
|
||||
return ln.tcpListener.Addr()
|
||||
}
|
||||
// net.Listen failed. Return something non-nil in case callers
|
||||
// call Addr before Accept:
|
||||
return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443}
|
||||
}
|
||||
|
||||
func (ln *listener) Close() error {
|
||||
if ln.tcpListenErr != nil {
|
||||
return ln.tcpListenErr
|
||||
}
|
||||
return ln.tcpListener.Close()
|
||||
}
|
||||
|
||||
func homeDir() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
|
||||
}
|
||||
if h := os.Getenv("HOME"); h != "" {
|
||||
return h
|
||||
}
|
||||
return "/"
|
||||
}
|
||||
|
||||
func cacheDir() string {
|
||||
const base = "golang-autocert"
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
return filepath.Join(homeDir(), "Library", "Caches", base)
|
||||
case "windows":
|
||||
for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} {
|
||||
if v := os.Getenv(ev); v != "" {
|
||||
return filepath.Join(v, base)
|
||||
}
|
||||
}
|
||||
// Worst case:
|
||||
return filepath.Join(homeDir(), base)
|
||||
}
|
||||
if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
|
||||
return filepath.Join(xdg, base)
|
||||
}
|
||||
return filepath.Join(homeDir(), ".cache", base)
|
||||
}
|
141
vendor/golang.org/x/crypto/acme/autocert/renewal.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// renewJitter is the maximum deviation from Manager.RenewBefore.
|
||||
const renewJitter = time.Hour
|
||||
|
||||
// domainRenewal tracks the state used by the periodic timers
|
||||
// renewing a single domain's cert.
|
||||
type domainRenewal struct {
|
||||
m *Manager
|
||||
domain string
|
||||
key crypto.Signer
|
||||
|
||||
timerMu sync.Mutex
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
// start starts a cert renewal timer at the time
|
||||
// defined by the certificate expiration time exp.
|
||||
//
|
||||
// If the timer is already started, calling start is a noop.
|
||||
func (dr *domainRenewal) start(exp time.Time) {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer != nil {
|
||||
return
|
||||
}
|
||||
dr.timer = time.AfterFunc(dr.next(exp), dr.renew)
|
||||
}
|
||||
|
||||
// stop stops the cert renewal timer.
|
||||
// If the timer is already stopped, calling stop is a noop.
|
||||
func (dr *domainRenewal) stop() {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer == nil {
|
||||
return
|
||||
}
|
||||
dr.timer.Stop()
|
||||
dr.timer = nil
|
||||
}
|
||||
|
||||
// renew is called periodically by a timer.
|
||||
// The first renew call is kicked off by dr.start.
|
||||
func (dr *domainRenewal) renew() {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
// TODO: rotate dr.key at some point?
|
||||
next, err := dr.do(ctx)
|
||||
if err != nil {
|
||||
next = renewJitter / 2
|
||||
next += time.Duration(pseudoRand.int63n(int64(next)))
|
||||
}
|
||||
dr.timer = time.AfterFunc(next, dr.renew)
|
||||
testDidRenewLoop(next, err)
|
||||
}
|
||||
|
||||
// updateState locks and replaces the relevant Manager.state item with the given
|
||||
// state. It additionally updates dr.key with the given state's key.
|
||||
func (dr *domainRenewal) updateState(state *certState) {
|
||||
dr.m.stateMu.Lock()
|
||||
defer dr.m.stateMu.Unlock()
|
||||
dr.key = state.key
|
||||
dr.m.state[dr.domain] = state
|
||||
}
|
||||
|
||||
// do is similar to Manager.createCert but it doesn't lock a Manager.state item.
|
||||
// Instead, it requests a new certificate independently and, upon success,
|
||||
// replaces dr.m.state item with a new one and updates cache for the given domain.
|
||||
//
|
||||
// It may lock and update the Manager.state if the expiration date of the currently
|
||||
// cached cert is far enough in the future.
|
||||
//
|
||||
// The returned value is a time interval after which the renewal should occur again.
|
||||
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
|
||||
// a race is likely unavoidable in a distributed environment
|
||||
// but we try nonetheless
|
||||
if tlscert, err := dr.m.cacheGet(ctx, dr.domain); err == nil {
|
||||
next := dr.next(tlscert.Leaf.NotAfter)
|
||||
if next > dr.m.renewBefore()+renewJitter {
|
||||
signer, ok := tlscert.PrivateKey.(crypto.Signer)
|
||||
if ok {
|
||||
state := &certState{
|
||||
key: signer,
|
||||
cert: tlscert.Certificate,
|
||||
leaf: tlscert.Leaf,
|
||||
}
|
||||
dr.updateState(state)
|
||||
return next, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.domain)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
state := &certState{
|
||||
key: dr.key,
|
||||
cert: der,
|
||||
leaf: leaf,
|
||||
}
|
||||
tlscert, err := state.tlscert()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := dr.m.cachePut(ctx, dr.domain, tlscert); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dr.updateState(state)
|
||||
return dr.next(leaf.NotAfter), nil
|
||||
}
|
||||
|
||||
func (dr *domainRenewal) next(expiry time.Time) time.Duration {
|
||||
d := expiry.Sub(timeNow()) - dr.m.renewBefore()
|
||||
// add a bit of randomness to renew deadline
|
||||
n := pseudoRand.int63n(int64(renewJitter))
|
||||
d -= time.Duration(n)
|
||||
if d < 0 {
|
||||
return 0
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
var testDidRenewLoop = func(next time.Duration, err error) {}
|
337
vendor/golang.org/x/crypto/acme/autocert/renewal_test.go
generated
vendored
Normal file
@ -0,0 +1,337 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
)
|
||||
|
||||
func TestRenewalNext(t *testing.T) {
|
||||
now := time.Now()
|
||||
timeNow = func() time.Time { return now }
|
||||
defer func() { timeNow = time.Now }()
|
||||
|
||||
man := &Manager{RenewBefore: 7 * 24 * time.Hour}
|
||||
defer man.stopRenew()
|
||||
tt := []struct {
|
||||
expiry time.Time
|
||||
min, max time.Duration
|
||||
}{
|
||||
{now.Add(90 * 24 * time.Hour), 83*24*time.Hour - renewJitter, 83 * 24 * time.Hour},
|
||||
{now.Add(time.Hour), 0, 1},
|
||||
{now, 0, 1},
|
||||
{now.Add(-time.Hour), 0, 1},
|
||||
}
|
||||
|
||||
dr := &domainRenewal{m: man}
|
||||
for i, test := range tt {
|
||||
next := dr.next(test.expiry)
|
||||
if next < test.min || test.max < next {
|
||||
t.Errorf("%d: next = %v; want between %v and %v", i, next, test.min, test.max)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenewFromCache(t *testing.T) {
|
||||
const domain = "example.org"
|
||||
|
||||
// ACME CA server stub
|
||||
var ca *httptest.Server
|
||||
ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Replay-Nonce", "nonce")
|
||||
if r.Method == "HEAD" {
|
||||
// a nonce request
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
// discovery
|
||||
case "/":
|
||||
if err := discoTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Fatalf("discoTmpl: %v", err)
|
||||
}
|
||||
// client key registration
|
||||
case "/new-reg":
|
||||
w.Write([]byte("{}"))
|
||||
// domain authorization
|
||||
case "/new-authz":
|
||||
w.Header().Set("Location", ca.URL+"/authz/1")
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
w.Write([]byte(`{"status": "valid"}`))
|
||||
// cert request
|
||||
case "/new-cert":
|
||||
var req struct {
|
||||
CSR string `json:"csr"`
|
||||
}
|
||||
decodePayload(&req, r.Body)
|
||||
b, _ := base64.RawURLEncoding.DecodeString(req.CSR)
|
||||
csr, err := x509.ParseCertificateRequest(b)
|
||||
if err != nil {
|
||||
t.Fatalf("new-cert: CSR: %v", err)
|
||||
}
|
||||
der, err := dummyCert(csr.PublicKey, domain)
|
||||
if err != nil {
|
||||
t.Fatalf("new-cert: dummyCert: %v", err)
|
||||
}
|
||||
chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL)
|
||||
w.Header().Set("Link", chainUp)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
w.Write(der)
|
||||
// CA chain cert
|
||||
case "/ca-cert":
|
||||
der, err := dummyCert(nil, "ca")
|
||||
if err != nil {
|
||||
t.Fatalf("ca-cert: dummyCert: %v", err)
|
||||
}
|
||||
w.Write(der)
|
||||
default:
|
||||
t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
|
||||
}
|
||||
}))
|
||||
defer ca.Close()
|
||||
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: newMemCache(),
|
||||
RenewBefore: 24 * time.Hour,
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: ca.URL,
|
||||
},
|
||||
}
|
||||
defer man.stopRenew()
|
||||
|
||||
// cache an almost expired cert
|
||||
now := time.Now()
|
||||
cert, err := dateDummyCert(key.Public(), now.Add(-2*time.Hour), now.Add(time.Minute), domain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tlscert := &tls.Certificate{PrivateKey: key, Certificate: [][]byte{cert}}
|
||||
if err := man.cachePut(context.Background(), domain, tlscert); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// verify the renewal happened
|
||||
defer func() {
|
||||
testDidRenewLoop = func(next time.Duration, err error) {}
|
||||
}()
|
||||
done := make(chan struct{})
|
||||
testDidRenewLoop = func(next time.Duration, err error) {
|
||||
defer close(done)
|
||||
if err != nil {
|
||||
t.Errorf("testDidRenewLoop: %v", err)
|
||||
}
|
||||
// Next should be about 90 days:
|
||||
// dummyCert creates a 90-day expiry; account for man.RenewBefore.
|
||||
// Previous expiration was within 1 min.
|
||||
future := 88 * 24 * time.Hour
|
||||
if next < future {
|
||||
t.Errorf("testDidRenewLoop: next = %v; want >= %v", next, future)
|
||||
}
|
||||
|
||||
// ensure the new cert is cached
|
||||
after := time.Now().Add(future)
|
||||
tlscert, err := man.cacheGet(context.Background(), domain)
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if !tlscert.Leaf.NotAfter.After(after) {
|
||||
t.Errorf("cache leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after)
|
||||
}
|
||||
|
||||
// verify the old cert is also replaced in memory
|
||||
man.stateMu.Lock()
|
||||
defer man.stateMu.Unlock()
|
||||
s := man.state[domain]
|
||||
if s == nil {
|
||||
t.Fatalf("m.state[%q] is nil", domain)
|
||||
}
|
||||
tlscert, err = s.tlscert()
|
||||
if err != nil {
|
||||
t.Fatalf("s.tlscert: %v", err)
|
||||
}
|
||||
if !tlscert.Leaf.NotAfter.After(after) {
|
||||
t.Errorf("state leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after)
|
||||
}
|
||||
}
|
||||
|
||||
// trigger renew
|
||||
hello := &tls.ClientHelloInfo{ServerName: domain}
|
||||
if _, err := man.GetCertificate(hello); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for renew loop
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("renew took too long to occur")
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenewFromCacheAlreadyRenewed(t *testing.T) {
|
||||
const domain = "example.org"
|
||||
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: newMemCache(),
|
||||
RenewBefore: 24 * time.Hour,
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: "invalid",
|
||||
},
|
||||
}
|
||||
defer man.stopRenew()
|
||||
|
||||
// cache a recently renewed cert with a different private key
|
||||
newKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
now := time.Now()
|
||||
newCert, err := dateDummyCert(newKey.Public(), now.Add(-2*time.Hour), now.Add(time.Hour*24*90), domain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
newLeaf, err := validCert(domain, [][]byte{newCert}, newKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
newTLSCert := &tls.Certificate{PrivateKey: newKey, Certificate: [][]byte{newCert}, Leaf: newLeaf}
|
||||
if err := man.cachePut(context.Background(), domain, newTLSCert); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// set internal state to an almost expired cert
|
||||
oldCert, err := dateDummyCert(key.Public(), now.Add(-2*time.Hour), now.Add(time.Minute), domain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
oldLeaf, err := validCert(domain, [][]byte{oldCert}, key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man.stateMu.Lock()
|
||||
if man.state == nil {
|
||||
man.state = make(map[string]*certState)
|
||||
}
|
||||
s := &certState{
|
||||
key: key,
|
||||
cert: [][]byte{oldCert},
|
||||
leaf: oldLeaf,
|
||||
}
|
||||
man.state[domain] = s
|
||||
man.stateMu.Unlock()
|
||||
|
||||
// verify the renewal accepted the newer cached cert
|
||||
defer func() {
|
||||
testDidRenewLoop = func(next time.Duration, err error) {}
|
||||
}()
|
||||
done := make(chan struct{})
|
||||
testDidRenewLoop = func(next time.Duration, err error) {
|
||||
defer close(done)
|
||||
if err != nil {
|
||||
t.Errorf("testDidRenewLoop: %v", err)
|
||||
}
|
||||
// Next should be about 90 days
|
||||
// Previous expiration was within 1 min.
|
||||
future := 88 * 24 * time.Hour
|
||||
if next < future {
|
||||
t.Errorf("testDidRenewLoop: next = %v; want >= %v", next, future)
|
||||
}
|
||||
|
||||
// ensure the cached cert was not modified
|
||||
tlscert, err := man.cacheGet(context.Background(), domain)
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if !tlscert.Leaf.NotAfter.Equal(newLeaf.NotAfter) {
|
||||
t.Errorf("cache leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, newLeaf.NotAfter)
|
||||
}
|
||||
|
||||
// verify the old cert is also replaced in memory
|
||||
man.stateMu.Lock()
|
||||
defer man.stateMu.Unlock()
|
||||
s := man.state[domain]
|
||||
if s == nil {
|
||||
t.Fatalf("m.state[%q] is nil", domain)
|
||||
}
|
||||
stateKey := s.key.Public().(*ecdsa.PublicKey)
|
||||
if stateKey.X.Cmp(newKey.X) != 0 || stateKey.Y.Cmp(newKey.Y) != 0 {
|
||||
t.Fatalf("state key was not updated from cache x: %v y: %v; want x: %v y: %v", stateKey.X, stateKey.Y, newKey.X, newKey.Y)
|
||||
}
|
||||
tlscert, err = s.tlscert()
|
||||
if err != nil {
|
||||
t.Fatalf("s.tlscert: %v", err)
|
||||
}
|
||||
if !tlscert.Leaf.NotAfter.Equal(newLeaf.NotAfter) {
|
||||
t.Errorf("state leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, newLeaf.NotAfter)
|
||||
}
|
||||
|
||||
// verify the private key is replaced in the renewal state
|
||||
r := man.renewal[domain]
|
||||
if r == nil {
|
||||
t.Fatalf("m.renewal[%q] is nil", domain)
|
||||
}
|
||||
renewalKey := r.key.Public().(*ecdsa.PublicKey)
|
||||
if renewalKey.X.Cmp(newKey.X) != 0 || renewalKey.Y.Cmp(newKey.Y) != 0 {
|
||||
t.Fatalf("renewal private key was not updated from cache x: %v y: %v; want x: %v y: %v", renewalKey.X, renewalKey.Y, newKey.X, newKey.Y)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// assert the expiring cert is returned from state
|
||||
hello := &tls.ClientHelloInfo{ServerName: domain}
|
||||
tlscert, err := man.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !oldLeaf.NotAfter.Equal(tlscert.Leaf.NotAfter) {
|
||||
t.Errorf("state leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, oldLeaf.NotAfter)
|
||||
}
|
||||
|
||||
// trigger renew
|
||||
go man.renew(domain, s.key, s.leaf.NotAfter)
|
||||
|
||||
// wait for renew loop
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("renew took too long to occur")
|
||||
case <-done:
|
||||
// assert the new cert is returned from state after renew
|
||||
hello := &tls.ClientHelloInfo{ServerName: domain}
|
||||
tlscert, err := man.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !newTLSCert.Leaf.NotAfter.Equal(tlscert.Leaf.NotAfter) {
|
||||
t.Errorf("state leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, newTLSCert.Leaf.NotAfter)
|
||||
}
|
||||
}
|
||||
}
|
153  vendor/golang.org/x/crypto/acme/jws.go  generated  vendored  Normal file
@ -0,0 +1,153 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
_ "crypto/sha512" // need for EC keys
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// jwsEncodeJSON signs claimset using provided key and a nonce.
|
||||
// The result is serialized in JSON format.
|
||||
// See https://tools.ietf.org/html/rfc7515#section-7.
|
||||
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) {
|
||||
jwk, err := jwkEncode(key.Public())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
alg, sha := jwsHasher(key)
|
||||
if alg == "" || !sha.Available() {
|
||||
return nil, ErrUnsupportedKey
|
||||
}
|
||||
phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce)
|
||||
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
|
||||
cs, err := json.Marshal(claimset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload := base64.RawURLEncoding.EncodeToString(cs)
|
||||
hash := sha.New()
|
||||
hash.Write([]byte(phead + "." + payload))
|
||||
sig, err := jwsSign(key, sha, hash.Sum(nil))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
enc := struct {
|
||||
Protected string `json:"protected"`
|
||||
Payload string `json:"payload"`
|
||||
Sig string `json:"signature"`
|
||||
}{
|
||||
Protected: phead,
|
||||
Payload: payload,
|
||||
Sig: base64.RawURLEncoding.EncodeToString(sig),
|
||||
}
|
||||
return json.Marshal(&enc)
|
||||
}
|
||||
|
||||
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
|
||||
// The result is also suitable for creating a JWK thumbprint.
|
||||
// https://tools.ietf.org/html/rfc7517
|
||||
func jwkEncode(pub crypto.PublicKey) (string, error) {
|
||||
switch pub := pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.3.1
|
||||
n := pub.N
|
||||
e := big.NewInt(int64(pub.E))
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
|
||||
base64.RawURLEncoding.EncodeToString(e.Bytes()),
|
||||
base64.RawURLEncoding.EncodeToString(n.Bytes()),
|
||||
), nil
|
||||
case *ecdsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1
|
||||
p := pub.Curve.Params()
|
||||
n := p.BitSize / 8
|
||||
if p.BitSize%8 != 0 {
|
||||
n++
|
||||
}
|
||||
x := pub.X.Bytes()
|
||||
if n > len(x) {
|
||||
x = append(make([]byte, n-len(x)), x...)
|
||||
}
|
||||
y := pub.Y.Bytes()
|
||||
if n > len(y) {
|
||||
y = append(make([]byte, n-len(y)), y...)
|
||||
}
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
|
||||
p.Name,
|
||||
base64.RawURLEncoding.EncodeToString(x),
|
||||
base64.RawURLEncoding.EncodeToString(y),
|
||||
), nil
|
||||
}
|
||||
return "", ErrUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsSign signs the digest using the given key.
|
||||
// It returns ErrUnsupportedKey if the key type is unknown.
|
||||
// The hash is used only for RSA keys.
|
||||
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key.Sign(rand.Reader, digest, hash)
|
||||
case *ecdsa.PrivateKey:
|
||||
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb, sb := r.Bytes(), s.Bytes()
|
||||
size := key.Params().BitSize / 8
|
||||
if size%8 > 0 {
|
||||
size++
|
||||
}
|
||||
sig := make([]byte, size*2)
|
||||
copy(sig[size-len(rb):], rb)
|
||||
copy(sig[size*2-len(sb):], sb)
|
||||
return sig, nil
|
||||
}
|
||||
return nil, ErrUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsHasher indicates suitable JWS algorithm name and a hash function
|
||||
// to use for signing a digest with the provided key.
|
||||
// It returns ("", 0) if the key is not supported.
|
||||
func jwsHasher(key crypto.Signer) (string, crypto.Hash) {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return "RS256", crypto.SHA256
|
||||
case *ecdsa.PrivateKey:
|
||||
switch key.Params().Name {
|
||||
case "P-256":
|
||||
return "ES256", crypto.SHA256
|
||||
case "P-384":
|
||||
return "ES384", crypto.SHA384
|
||||
case "P-521":
|
||||
return "ES512", crypto.SHA512
|
||||
}
|
||||
}
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// JWKThumbprint creates a JWK thumbprint out of pub
|
||||
// as specified in https://tools.ietf.org/html/rfc7638.
|
||||
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
|
||||
jwk, err := jwkEncode(pub)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b := sha256.Sum256([]byte(jwk))
|
||||
return base64.RawURLEncoding.EncodeToString(b[:]), nil
|
||||
}
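
The exported `JWKThumbprint` helper above is the part of this file that application code is most likely to call directly. A minimal, hedged sketch of using it from outside the package (the throwaway ECDSA key and the `main` wrapper are illustrative assumptions, not part of the vendored file):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/acme"
)

func main() {
	// Generate a throwaway account key; any RSA or ECDSA public key works.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// The thumbprint is the base64url-encoded SHA-256 of the canonical JWK,
	// as used in ACME key authorizations (RFC 7638).
	th, err := acme.JWKThumbprint(key.Public())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(th)
}
```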
|
319  vendor/golang.org/x/crypto/acme/jws_test.go  generated  vendored  Normal file
@ -0,0 +1,319 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
testKeyPEM = `
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEA4xgZ3eRPkwoRvy7qeRUbmMDe0V+xH9eWLdu0iheeLlrmD2mq
|
||||
WXfP9IeSKApbn34g8TuAS9g5zhq8ELQ3kmjr+KV86GAMgI6VAcGlq3QrzpTCf/30
|
||||
Ab7+zawrfRaFONa1HwEzPY1KHnGVkxJc85gNkwYI9SY2RHXtvln3zs5wITNrdosq
|
||||
EXeaIkVYBEhbhNu54pp3kxo6TuWLi9e6pXeWetEwmlBwtWZlPoib2j3TxLBksKZf
|
||||
oyFyek380mHgJAumQ/I2fjj98/97mk3ihOY4AgVdCDj1z/GCoZkG5Rq7nbCGyosy
|
||||
KWyDX00Zs+nNqVhoLeIvXC4nnWdJMZ6rogxyQQIDAQABAoIBACIEZTOI1Kao9nmV
|
||||
9IeIsuaR1Y61b9neOF/MLmIVIZu+AAJFCMB4Iw11FV6sFodwpEyeZhx2WkpWVN+H
|
||||
r19eGiLX3zsL0DOdqBJoSIHDWCCMxgnYJ6nvS0nRxX3qVrBp8R2g12Ub+gNPbmFm
|
||||
ecf/eeERIVxfifd9VsyRu34eDEvcmKFuLYbElFcPh62xE3x12UZvV/sN7gXbawpP
|
||||
G+w255vbE5MoaKdnnO83cTFlcHvhn24M/78qP7Te5OAeelr1R89kYxQLpuGe4fbS
|
||||
zc6E3ym5Td6urDetGGrSY1Eu10/8sMusX+KNWkm+RsBRbkyKq72ks/qKpOxOa+c6
|
||||
9gm+Y8ECgYEA/iNUyg1ubRdH11p82l8KHtFC1DPE0V1gSZsX29TpM5jS4qv46K+s
|
||||
8Ym1zmrORM8x+cynfPx1VQZQ34EYeCMIX212ryJ+zDATl4NE0I4muMvSiH9vx6Xc
|
||||
7FmhNnaYzPsBL5Tm9nmtQuP09YEn8poiOJFiDs/4olnD5ogA5O4THGkCgYEA5MIL
|
||||
qWYBUuqbEWLRtMruUtpASclrBqNNsJEsMGbeqBJmoMxdHeSZckbLOrqm7GlMyNRJ
|
||||
Ne/5uWRGSzaMYuGmwsPpERzqEvYFnSrpjW5YtXZ+JtxFXNVfm9Z1gLLgvGpOUCIU
|
||||
RbpoDckDe1vgUuk3y5+DjZihs+rqIJ45XzXTzBkCgYBWuf3segruJZy5rEKhTv+o
|
||||
JqeUvRn0jNYYKFpLBeyTVBrbie6GkbUGNIWbrK05pC+c3K9nosvzuRUOQQL1tJbd
|
||||
4gA3oiD9U4bMFNr+BRTHyZ7OQBcIXdz3t1qhuHVKtnngIAN1p25uPlbRFUNpshnt
|
||||
jgeVoHlsBhApcs5DUc+pyQKBgDzeHPg/+g4z+nrPznjKnktRY1W+0El93kgi+J0Q
|
||||
YiJacxBKEGTJ1MKBb8X6sDurcRDm22wMpGfd9I5Cv2v4GsUsF7HD/cx5xdih+G73
|
||||
c4clNj/k0Ff5Nm1izPUno4C+0IOl7br39IPmfpSuR6wH/h6iHQDqIeybjxyKvT1G
|
||||
N0rRAoGBAKGD+4ZI/E1MoJ5CXB8cDDMHagbE3cq/DtmYzE2v1DFpQYu5I4PCm5c7
|
||||
EQeIP6dZtv8IMgtGIb91QX9pXvP0aznzQKwYIA8nZgoENCPfiMTPiEDT9e/0lObO
|
||||
9XWsXpbSTsRPj0sv1rB+UzBJ0PgjK4q2zOF0sNo7b1+6nlM3BWPx
|
||||
-----END RSA PRIVATE KEY-----
|
||||
`
|
||||
|
||||
// This thumbprint is for the testKey defined above.
|
||||
testKeyThumbprint = "6nicxzh6WETQlrvdchkz-U3e3DOQZ4heJKU63rfqMqQ"
|
||||
|
||||
// openssl ecparam -name secp256k1 -genkey -noout
|
||||
testKeyECPEM = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEIK07hGLr0RwyUdYJ8wbIiBS55CjnkMD23DWr+ccnypWLoAoGCCqGSM49
|
||||
AwEHoUQDQgAE5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HThqIrvawF5
|
||||
QAaS/RNouybCiRhRjI3EaxLkQwgrCw0gqQ==
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
// openssl ecparam -name secp384r1 -genkey -noout
|
||||
testKeyEC384PEM = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDAQ4lNtXRORWr1bgKR1CGysr9AJ9SyEk4jiVnlUWWUChmSNL+i9SLSD
|
||||
Oe/naPqXJ6CgBwYFK4EEACKhZANiAAQzKtj+Ms0vHoTX5dzv3/L5YMXOWuI5UKRj
|
||||
JigpahYCqXD2BA1j0E/2xt5vlPf+gm0PL+UHSQsCokGnIGuaHCsJAp3ry0gHQEke
|
||||
WYXapUUFdvaK1R2/2hn5O+eiQM8YzCg=
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
// openssl ecparam -name secp521r1 -genkey -noout
|
||||
testKeyEC512PEM = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIHcAgEBBEIBSNZKFcWzXzB/aJClAb305ibalKgtDA7+70eEkdPt28/3LZMM935Z
|
||||
KqYHh/COcxuu3Kt8azRAUz3gyr4zZKhlKUSgBwYFK4EEACOhgYkDgYYABAHUNKbx
|
||||
7JwC7H6pa2sV0tERWhHhB3JmW+OP6SUgMWryvIKajlx73eS24dy4QPGrWO9/ABsD
|
||||
FqcRSkNVTXnIv6+0mAF25knqIBIg5Q8M9BnOu9GGAchcwt3O7RDHmqewnJJDrbjd
|
||||
GGnm6rb+NnWR9DIopM0nKNkToWoF/hzopxu4Ae/GsQ==
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
// 1. openssl ec -in key.pem -noout -text
|
||||
// 2. remove first byte, 04 (the header); the rest is X and Y
|
||||
// 3. convert each with: echo <val> | xxd -r -p | base64 -w 100 | tr -d '=' | tr '/+' '_-'
|
||||
testKeyECPubX = "5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HQ"
|
||||
testKeyECPubY = "4aiK72sBeUAGkv0TaLsmwokYUYyNxGsS5EMIKwsNIKk"
|
||||
testKeyEC384PubX = "MyrY_jLNLx6E1-Xc79_y-WDFzlriOVCkYyYoKWoWAqlw9gQNY9BP9sbeb5T3_oJt"
|
||||
testKeyEC384PubY = "Dy_lB0kLAqJBpyBrmhwrCQKd68tIB0BJHlmF2qVFBXb2itUdv9oZ-TvnokDPGMwo"
|
||||
testKeyEC512PubX = "AdQ0pvHsnALsfqlraxXS0RFaEeEHcmZb44_pJSAxavK8gpqOXHvd5Lbh3LhA8atY738AGwMWpxFKQ1VNeci_r7SY"
|
||||
testKeyEC512PubY = "AXbmSeogEiDlDwz0Gc670YYByFzC3c7tEMeap7CckkOtuN0Yaebqtv42dZH0MiikzSco2ROhagX-HOinG7gB78ax"
|
||||
|
||||
// echo -n '{"crv":"P-256","kty":"EC","x":"<testKeyECPubX>","y":"<testKeyECPubY>"}' | \
|
||||
// openssl dgst -binary -sha256 | base64 | tr -d '=' | tr '/+' '_-'
|
||||
testKeyECThumbprint = "zedj-Bd1Zshp8KLePv2MB-lJ_Hagp7wAwdkA0NUTniU"
|
||||
)
|
||||
|
||||
var (
|
||||
testKey *rsa.PrivateKey
|
||||
testKeyEC *ecdsa.PrivateKey
|
||||
testKeyEC384 *ecdsa.PrivateKey
|
||||
testKeyEC512 *ecdsa.PrivateKey
|
||||
)
|
||||
|
||||
func init() {
|
||||
testKey = parseRSA(testKeyPEM, "testKeyPEM")
|
||||
testKeyEC = parseEC(testKeyECPEM, "testKeyECPEM")
|
||||
testKeyEC384 = parseEC(testKeyEC384PEM, "testKeyEC384PEM")
|
||||
testKeyEC512 = parseEC(testKeyEC512PEM, "testKeyEC512PEM")
|
||||
}
|
||||
|
||||
func decodePEM(s, name string) []byte {
|
||||
d, _ := pem.Decode([]byte(s))
|
||||
if d == nil {
|
||||
panic("no block found in " + name)
|
||||
}
|
||||
return d.Bytes
|
||||
}
|
||||
|
||||
func parseRSA(s, name string) *rsa.PrivateKey {
|
||||
b := decodePEM(s, name)
|
||||
k, err := x509.ParsePKCS1PrivateKey(b)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%s: %v", name, err))
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
func parseEC(s, name string) *ecdsa.PrivateKey {
|
||||
b := decodePEM(s, name)
|
||||
k, err := x509.ParseECPrivateKey(b)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%s: %v", name, err))
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
func TestJWSEncodeJSON(t *testing.T) {
|
||||
claims := struct{ Msg string }{"Hello JWS"}
|
||||
// JWS signed with testKey and "nonce" as the nonce value
|
||||
// JSON-serialized JWS fields are split for easier testing
|
||||
const (
|
||||
// {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce"}
|
||||
protected = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" +
|
||||
"IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" +
|
||||
"SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" +
|
||||
"QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" +
|
||||
"VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" +
|
||||
"NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" +
|
||||
"QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" +
|
||||
"bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" +
|
||||
"ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" +
|
||||
"b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" +
|
||||
"UVEifSwibm9uY2UiOiJub25jZSJ9"
|
||||
// {"Msg":"Hello JWS"}
|
||||
payload = "eyJNc2ciOiJIZWxsbyBKV1MifQ"
|
||||
signature = "eAGUikStX_UxyiFhxSLMyuyBcIB80GeBkFROCpap2sW3EmkU_ggF" +
|
||||
"knaQzxrTfItICSAXsCLIquZ5BbrSWA_4vdEYrwWtdUj7NqFKjHRa" +
|
||||
"zpLHcoR7r1rEHvkoP1xj49lS5fc3Wjjq8JUhffkhGbWZ8ZVkgPdC" +
|
||||
"4tMBWiQDoth-x8jELP_3LYOB_ScUXi2mETBawLgOT2K8rA0Vbbmx" +
|
||||
"hWNlOWuUf-8hL5YX4IOEwsS8JK_TrTq5Zc9My0zHJmaieqDV0UlP" +
|
||||
"k0onFjPFkGm7MrPSgd0MqRG-4vSAg2O4hDo7rKv4n8POjjXlNQvM" +
|
||||
"9IPLr8qZ7usYBKhEGwX3yq_eicAwBw"
|
||||
)
|
||||
|
||||
b, err := jwsEncodeJSON(claims, testKey, "nonce")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var jws struct{ Protected, Payload, Signature string }
|
||||
if err := json.Unmarshal(b, &jws); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if jws.Protected != protected {
|
||||
t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected)
|
||||
}
|
||||
if jws.Payload != payload {
|
||||
t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload)
|
||||
}
|
||||
if jws.Signature != signature {
|
||||
t.Errorf("signature:\n%s\nwant:\n%s", jws.Signature, signature)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWSEncodeJSONEC(t *testing.T) {
|
||||
tt := []struct {
|
||||
key *ecdsa.PrivateKey
|
||||
x, y string
|
||||
alg, crv string
|
||||
}{
|
||||
{testKeyEC, testKeyECPubX, testKeyECPubY, "ES256", "P-256"},
|
||||
{testKeyEC384, testKeyEC384PubX, testKeyEC384PubY, "ES384", "P-384"},
|
||||
{testKeyEC512, testKeyEC512PubX, testKeyEC512PubY, "ES512", "P-521"},
|
||||
}
|
||||
for i, test := range tt {
|
||||
claims := struct{ Msg string }{"Hello JWS"}
|
||||
b, err := jwsEncodeJSON(claims, test.key, "nonce")
|
||||
if err != nil {
|
||||
t.Errorf("%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
var jws struct{ Protected, Payload, Signature string }
|
||||
if err := json.Unmarshal(b, &jws); err != nil {
|
||||
t.Errorf("%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
b, err = base64.RawURLEncoding.DecodeString(jws.Protected)
|
||||
if err != nil {
|
||||
t.Errorf("%d: jws.Protected: %v", i, err)
|
||||
}
|
||||
var head struct {
|
||||
Alg string
|
||||
Nonce string
|
||||
JWK struct {
|
||||
Crv string
|
||||
Kty string
|
||||
X string
|
||||
Y string
|
||||
} `json:"jwk"`
|
||||
}
|
||||
if err := json.Unmarshal(b, &head); err != nil {
|
||||
t.Errorf("%d: jws.Protected: %v", i, err)
|
||||
}
|
||||
if head.Alg != test.alg {
|
||||
t.Errorf("%d: head.Alg = %q; want %q", i, head.Alg, test.alg)
|
||||
}
|
||||
if head.Nonce != "nonce" {
|
||||
t.Errorf("%d: head.Nonce = %q; want nonce", i, head.Nonce)
|
||||
}
|
||||
if head.JWK.Crv != test.crv {
|
||||
t.Errorf("%d: head.JWK.Crv = %q; want %q", i, head.JWK.Crv, test.crv)
|
||||
}
|
||||
if head.JWK.Kty != "EC" {
|
||||
t.Errorf("%d: head.JWK.Kty = %q; want EC", i, head.JWK.Kty)
|
||||
}
|
||||
if head.JWK.X != test.x {
|
||||
t.Errorf("%d: head.JWK.X = %q; want %q", i, head.JWK.X, test.x)
|
||||
}
|
||||
if head.JWK.Y != test.y {
|
||||
t.Errorf("%d: head.JWK.Y = %q; want %q", i, head.JWK.Y, test.y)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWKThumbprintRSA(t *testing.T) {
|
||||
// Key example from RFC 7638
|
||||
const base64N = "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAt" +
|
||||
"VT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn6" +
|
||||
"4tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FD" +
|
||||
"W2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n9" +
|
||||
"1CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINH" +
|
||||
"aQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw"
|
||||
const base64E = "AQAB"
|
||||
const expected = "NzbLsXh8uDCcd-6MNwXF4W_7noWXFZAfHkxZsRGC9Xs"
|
||||
|
||||
b, err := base64.RawURLEncoding.DecodeString(base64N)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key N: %v", err)
|
||||
}
|
||||
n := new(big.Int).SetBytes(b)
|
||||
|
||||
b, err = base64.RawURLEncoding.DecodeString(base64E)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key E: %v", err)
|
||||
}
|
||||
e := new(big.Int).SetBytes(b)
|
||||
|
||||
pub := &rsa.PublicKey{N: n, E: int(e.Uint64())}
|
||||
th, err := JWKThumbprint(pub)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if th != expected {
|
||||
t.Errorf("thumbprint = %q; want %q", th, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWKThumbprintEC(t *testing.T) {
|
||||
// Key example from RFC 7520
|
||||
// expected was computed with
|
||||
// echo -n '{"crv":"P-521","kty":"EC","x":"<base64X>","y":"<base64Y>"}' | \
|
||||
// openssl dgst -binary -sha256 | \
|
||||
// base64 | \
|
||||
// tr -d '=' | tr '/+' '_-'
|
||||
const (
|
||||
base64X = "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkT" +
|
||||
"KqjqvjyekWF-7ytDyRXYgCF5cj0Kt"
|
||||
base64Y = "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUda" +
|
||||
"QkAgDPrwQrJmbnX9cwlGfP-HqHZR1"
|
||||
expected = "dHri3SADZkrush5HU_50AoRhcKFryN-PI6jPBtPL55M"
|
||||
)
|
||||
|
||||
b, err := base64.RawURLEncoding.DecodeString(base64X)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key X: %v", err)
|
||||
}
|
||||
x := new(big.Int).SetBytes(b)
|
||||
|
||||
b, err = base64.RawURLEncoding.DecodeString(base64Y)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key Y: %v", err)
|
||||
}
|
||||
y := new(big.Int).SetBytes(b)
|
||||
|
||||
pub := &ecdsa.PublicKey{Curve: elliptic.P521(), X: x, Y: y}
|
||||
th, err := JWKThumbprint(pub)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if th != expected {
|
||||
t.Errorf("thumbprint = %q; want %q", th, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWKThumbprintErrUnsupportedKey(t *testing.T) {
|
||||
_, err := JWKThumbprint(struct{}{})
|
||||
if err != ErrUnsupportedKey {
|
||||
t.Errorf("err = %q; want %q", err, ErrUnsupportedKey)
|
||||
}
|
||||
}
|
329  vendor/golang.org/x/crypto/acme/types.go  generated  vendored  Normal file
@ -0,0 +1,329 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ACME server response statuses used to describe Authorization and Challenge states.
|
||||
const (
|
||||
StatusUnknown = "unknown"
|
||||
StatusPending = "pending"
|
||||
StatusProcessing = "processing"
|
||||
StatusValid = "valid"
|
||||
StatusInvalid = "invalid"
|
||||
StatusRevoked = "revoked"
|
||||
)
|
||||
|
||||
// CRLReasonCode identifies the reason for a certificate revocation.
|
||||
type CRLReasonCode int
|
||||
|
||||
// CRL reason codes as defined in RFC 5280.
|
||||
const (
|
||||
CRLReasonUnspecified CRLReasonCode = 0
|
||||
CRLReasonKeyCompromise CRLReasonCode = 1
|
||||
CRLReasonCACompromise CRLReasonCode = 2
|
||||
CRLReasonAffiliationChanged CRLReasonCode = 3
|
||||
CRLReasonSuperseded CRLReasonCode = 4
|
||||
CRLReasonCessationOfOperation CRLReasonCode = 5
|
||||
CRLReasonCertificateHold CRLReasonCode = 6
|
||||
CRLReasonRemoveFromCRL CRLReasonCode = 8
|
||||
CRLReasonPrivilegeWithdrawn CRLReasonCode = 9
|
||||
CRLReasonAACompromise CRLReasonCode = 10
|
||||
)
|
||||
|
||||
// ErrUnsupportedKey is returned when an unsupported key type is encountered.
|
||||
var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported")
|
||||
|
||||
// Error is an ACME error, defined in Problem Details for HTTP APIs doc
|
||||
// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem.
|
||||
type Error struct {
|
||||
// StatusCode is the HTTP status code generated by the origin server.
|
||||
StatusCode int
|
||||
// ProblemType is a URI reference that identifies the problem type,
|
||||
// typically in a "urn:acme:error:xxx" form.
|
||||
ProblemType string
|
||||
// Detail is a human-readable explanation specific to this occurrence of the problem.
|
||||
Detail string
|
||||
// Header is the original server error response headers.
|
||||
// It may be nil.
|
||||
Header http.Header
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail)
|
||||
}
|
||||
|
||||
// AuthorizationError indicates that an authorization for an identifier
|
||||
// did not succeed.
|
||||
// It contains all errors from Challenge items of the failed Authorization.
|
||||
type AuthorizationError struct {
|
||||
// URI uniquely identifies the failed Authorization.
|
||||
URI string
|
||||
|
||||
// Identifier is an AuthzID.Value of the failed Authorization.
|
||||
Identifier string
|
||||
|
||||
// Errors is a collection of non-nil error values of Challenge items
|
||||
// of the failed Authorization.
|
||||
Errors []error
|
||||
}
|
||||
|
||||
func (a *AuthorizationError) Error() string {
|
||||
e := make([]string, len(a.Errors))
|
||||
for i, err := range a.Errors {
|
||||
e[i] = err.Error()
|
||||
}
|
||||
return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; "))
|
||||
}
|
||||
|
||||
// RateLimit reports whether err represents a rate limit error and
|
||||
// any Retry-After duration returned by the server.
|
||||
//
|
||||
// See the following for more details on rate limiting:
|
||||
// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6
|
||||
func RateLimit(err error) (time.Duration, bool) {
|
||||
e, ok := err.(*Error)
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
// Some CA implementations may return incorrect values.
|
||||
// Use case-insensitive comparison.
|
||||
if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") {
|
||||
return 0, false
|
||||
}
|
||||
if e.Header == nil {
|
||||
return 0, true
|
||||
}
|
||||
return retryAfter(e.Header.Get("Retry-After"), 0), true
|
||||
}
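
`RateLimit` is the exported hook for backoff handling. A small illustrative sketch of how a caller might use it to decide whether to retry after an ACME error; the `retryAfterRateLimit` helper, its package name, and the one-minute fallback are assumptions, not part of the vendored package:

```go
package example

import (
	"context"
	"time"

	"golang.org/x/crypto/acme"
)

// retryAfterRateLimit waits for the duration suggested by an ACME rate-limit
// error (or a fallback when no Retry-After header was present) and reports
// whether the caller should retry.
func retryAfterRateLimit(ctx context.Context, err error) bool {
	d, ok := acme.RateLimit(err)
	if !ok {
		return false // not a rate-limit error; handle it elsewhere
	}
	if d == 0 {
		d = time.Minute // server gave no Retry-After; pick a conservative default
	}
	select {
	case <-time.After(d):
		return true
	case <-ctx.Done():
		return false
	}
}
```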
|
||||
|
||||
// Account is a user account. It is associated with a private key.
|
||||
type Account struct {
|
||||
// URI is the account unique ID, which is also a URL used to retrieve
|
||||
// account data from the CA.
|
||||
URI string
|
||||
|
||||
// Contact is a slice of contact info used during registration.
|
||||
Contact []string
|
||||
|
||||
// The terms the user has agreed to.
|
||||
// A value not matching CurrentTerms indicates that the user hasn't agreed
|
||||
// to the actual Terms of Service of the CA.
|
||||
AgreedTerms string
|
||||
|
||||
// Actual terms of a CA.
|
||||
CurrentTerms string
|
||||
|
||||
// Authz is the authorization URL used to initiate a new authz flow.
|
||||
Authz string
|
||||
|
||||
// Authorizations is a URI from which a list of authorizations
|
||||
// granted to this account can be fetched via a GET request.
|
||||
Authorizations string
|
||||
|
||||
// Certificates is a URI from which a list of certificates
|
||||
// issued for this account can be fetched via a GET request.
|
||||
Certificates string
|
||||
}
|
||||
|
||||
// Directory is ACME server discovery data.
|
||||
type Directory struct {
|
||||
// RegURL is an account endpoint URL, allowing for creating new
|
||||
// and modifying existing accounts.
|
||||
RegURL string
|
||||
|
||||
// AuthzURL is used to initiate Identifier Authorization flow.
|
||||
AuthzURL string
|
||||
|
||||
// CertURL is a new certificate issuance endpoint URL.
|
||||
CertURL string
|
||||
|
||||
// RevokeURL is used to initiate a certificate revocation flow.
|
||||
RevokeURL string
|
||||
|
||||
// Terms is a URI identifying the current terms of service.
|
||||
Terms string
|
||||
|
||||
// Website is an HTTP or HTTPS URL locating a website
|
||||
// providing more information about the ACME server.
|
||||
Website string
|
||||
|
||||
// CAA consists of lowercase hostname elements, which the ACME server
|
||||
// recognises as referring to itself for the purposes of CAA record validation
|
||||
// as defined in RFC6844.
|
||||
CAA []string
|
||||
}
|
||||
|
||||
// Challenge encodes a returned CA challenge.
|
||||
// Its Error field may be non-nil if the challenge is part of an Authorization
|
||||
// with StatusInvalid.
|
||||
type Challenge struct {
|
||||
// Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01".
|
||||
Type string
|
||||
|
||||
// URI is where a challenge response can be posted to.
|
||||
URI string
|
||||
|
||||
// Token is a random value that uniquely identifies the challenge.
|
||||
Token string
|
||||
|
||||
// Status identifies the status of this challenge.
|
||||
Status string
|
||||
|
||||
// Error indicates the reason for an authorization failure
|
||||
// when this challenge was used.
|
||||
// The type of a non-nil value is *Error.
|
||||
Error error
|
||||
}
|
||||
|
||||
// Authorization encodes an authorization response.
|
||||
type Authorization struct {
|
||||
// URI uniquely identifies an authorization.
|
||||
URI string
|
||||
|
||||
// Status identifies the status of an authorization.
|
||||
Status string
|
||||
|
||||
// Identifier is what the account is authorized to represent.
|
||||
Identifier AuthzID
|
||||
|
||||
// Challenges that the client needs to fulfill in order to prove possession
|
||||
// of the identifier (for pending authorizations).
|
||||
// For final authorizations, the challenges that were used.
|
||||
Challenges []*Challenge
|
||||
|
||||
// A collection of sets of challenges, each of which would be sufficient
|
||||
// to prove possession of the identifier.
|
||||
// Clients must complete a set of challenges that covers at least one set.
|
||||
// Challenges are identified by their indices in the challenges array.
|
||||
// If this field is empty, the client needs to complete all challenges.
|
||||
Combinations [][]int
|
||||
}
|
||||
|
||||
// AuthzID is an identifier that an account is authorized to represent.
|
||||
type AuthzID struct {
|
||||
Type string // The type of identifier, e.g. "dns".
|
||||
Value string // The identifier itself, e.g. "example.org".
|
||||
}
|
||||
|
||||
// wireAuthz is ACME JSON representation of Authorization objects.
|
||||
type wireAuthz struct {
|
||||
Status string
|
||||
Challenges []wireChallenge
|
||||
Combinations [][]int
|
||||
Identifier struct {
|
||||
Type string
|
||||
Value string
|
||||
}
|
||||
}
|
||||
|
||||
func (z *wireAuthz) authorization(uri string) *Authorization {
|
||||
a := &Authorization{
|
||||
URI: uri,
|
||||
Status: z.Status,
|
||||
Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value},
|
||||
Combinations: z.Combinations, // shallow copy
|
||||
Challenges: make([]*Challenge, len(z.Challenges)),
|
||||
}
|
||||
for i, v := range z.Challenges {
|
||||
a.Challenges[i] = v.challenge()
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func (z *wireAuthz) error(uri string) *AuthorizationError {
|
||||
err := &AuthorizationError{
|
||||
URI: uri,
|
||||
Identifier: z.Identifier.Value,
|
||||
}
|
||||
for _, raw := range z.Challenges {
|
||||
if raw.Error != nil {
|
||||
err.Errors = append(err.Errors, raw.Error.error(nil))
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// wireChallenge is ACME JSON challenge representation.
|
||||
type wireChallenge struct {
|
||||
URI string `json:"uri"`
|
||||
Type string
|
||||
Token string
|
||||
Status string
|
||||
Error *wireError
|
||||
}
|
||||
|
||||
func (c *wireChallenge) challenge() *Challenge {
|
||||
v := &Challenge{
|
||||
URI: c.URI,
|
||||
Type: c.Type,
|
||||
Token: c.Token,
|
||||
Status: c.Status,
|
||||
}
|
||||
if v.Status == "" {
|
||||
v.Status = StatusPending
|
||||
}
|
||||
if c.Error != nil {
|
||||
v.Error = c.Error.error(nil)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// wireError is a subset of fields of the Problem Details object
|
||||
// as described in https://tools.ietf.org/html/rfc7807#section-3.1.
|
||||
type wireError struct {
|
||||
Status int
|
||||
Type string
|
||||
Detail string
|
||||
}
|
||||
|
||||
func (e *wireError) error(h http.Header) *Error {
|
||||
return &Error{
|
||||
StatusCode: e.Status,
|
||||
ProblemType: e.Type,
|
||||
Detail: e.Detail,
|
||||
Header: h,
|
||||
}
|
||||
}
|
||||
|
||||
// CertOption is an optional argument type for the TLSSNIxChallengeCert methods for
|
||||
// customizing a temporary certificate for TLS-SNI challenges.
|
||||
type CertOption interface {
|
||||
privateCertOpt()
|
||||
}
|
||||
|
||||
// WithKey creates an option holding a private/public key pair.
|
||||
// The private part signs a certificate, and the public part represents the signee.
|
||||
func WithKey(key crypto.Signer) CertOption {
|
||||
return &certOptKey{key}
|
||||
}
|
||||
|
||||
type certOptKey struct {
|
||||
key crypto.Signer
|
||||
}
|
||||
|
||||
func (*certOptKey) privateCertOpt() {}
|
||||
|
||||
// WithTemplate creates an option for specifying a certificate template.
|
||||
// See x509.CreateCertificate for template usage details.
|
||||
//
|
||||
// In TLSSNIxChallengeCert methods, the template is also used as parent,
|
||||
// resulting in a self-signed certificate.
|
||||
// The DNSNames field of t is always overwritten for tls-sni challenge certs.
|
||||
func WithTemplate(t *x509.Certificate) CertOption {
|
||||
return (*certOptTemplate)(t)
|
||||
}
|
||||
|
||||
type certOptTemplate x509.Certificate
|
||||
|
||||
func (*certOptTemplate) privateCertOpt() {}
|
63  vendor/golang.org/x/crypto/acme/types_test.go  generated  vendored  Normal file
@ -0,0 +1,63 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRateLimit(t *testing.T) {
|
||||
now := time.Date(2017, 04, 27, 10, 0, 0, 0, time.UTC)
|
||||
f := timeNow
|
||||
defer func() { timeNow = f }()
|
||||
timeNow = func() time.Time { return now }
|
||||
|
||||
h120, hTime := http.Header{}, http.Header{}
|
||||
h120.Set("Retry-After", "120")
|
||||
hTime.Set("Retry-After", "Tue Apr 27 11:00:00 2017")
|
||||
|
||||
err1 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:nolimit",
|
||||
Header: h120,
|
||||
}
|
||||
err2 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:rateLimited",
|
||||
Header: h120,
|
||||
}
|
||||
err3 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:rateLimited",
|
||||
Header: nil,
|
||||
}
|
||||
err4 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:rateLimited",
|
||||
Header: hTime,
|
||||
}
|
||||
|
||||
tt := []struct {
|
||||
err error
|
||||
res time.Duration
|
||||
ok bool
|
||||
}{
|
||||
{nil, 0, false},
|
||||
{errors.New("dummy"), 0, false},
|
||||
{err1, 0, false},
|
||||
{err2, 2 * time.Minute, true},
|
||||
{err3, 0, true},
|
||||
{err4, time.Hour, true},
|
||||
}
|
||||
for i, test := range tt {
|
||||
res, ok := RateLimit(test.err)
|
||||
if ok != test.ok {
|
||||
t.Errorf("%d: RateLimit(%+v): ok = %v; want %v", i, test.err, ok, test.ok)
|
||||
continue
|
||||
}
|
||||
if res != test.res {
|
||||
t.Errorf("%d: RateLimit(%+v) = %v; want %v", i, test.err, res, test.res)
|
||||
}
|
||||
}
|
||||
}
|
285  vendor/golang.org/x/crypto/argon2/argon2.go  generated  vendored  Normal file
@ -0,0 +1,285 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package argon2 implements the key derivation function Argon2.
|
||||
// Argon2 was selected as the winner of the Password Hashing Competition and can
|
||||
// be used to derive cryptographic keys from passwords.
|
||||
//
|
||||
// For a detailed specification of Argon2 see [1].
|
||||
//
|
||||
// If you aren't sure which function you need, use Argon2id (IDKey) and
|
||||
// the parameter recommendations for your scenario.
|
||||
//
|
||||
//
|
||||
// Argon2i
|
||||
//
|
||||
// Argon2i (implemented by Key) is the side-channel resistant version of Argon2.
|
||||
// It uses data-independent memory access, which is preferred for password
|
||||
// hashing and password-based key derivation. Argon2i requires more passes over
|
||||
// memory than Argon2id to protect from trade-off attacks. The recommended
|
||||
// parameters (taken from [2]) for non-interactive operations are time=3 and to
|
||||
// use the maximum available memory.
|
||||
//
|
||||
//
|
||||
// Argon2id
|
||||
//
|
||||
// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining
|
||||
// Argon2i and Argon2d. It uses data-independent memory access for the first
|
||||
// half of the first iteration over the memory and data-dependent memory access
|
||||
// for the rest. Argon2id is side-channel resistant and provides better brute-
|
||||
// force cost savings due to time-memory tradeoffs than Argon2i. The recommended
|
||||
// parameters for non-interactive operations (taken from [2]) are time=1 and to
|
||||
// use the maximum available memory.
|
||||
//
|
||||
// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
|
||||
// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
|
||||
package argon2
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
)
|
||||
|
||||
// The Argon2 version implemented by this package.
|
||||
const Version = 0x13
|
||||
|
||||
const (
|
||||
argon2d = iota
|
||||
argon2i
|
||||
argon2id
|
||||
)
|
||||
|
||||
// Key derives a key from the password, salt, and cost parameters using Argon2i
|
||||
// returning a byte slice of length keyLen that can be used as a cryptographic
|
||||
// key. The CPU cost and parallelism degree must be greater than zero.
|
||||
//
|
||||
// For example, you can get a derived key for e.g. AES-256 (which needs a
|
||||
// 32-byte key) by doing:
|
||||
//
|
||||
// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
|
||||
//
|
||||
// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
|
||||
// If using that amount of memory (32 MB) is not possible in some contexts then
|
||||
// the time parameter can be increased to compensate.
|
||||
//
|
||||
// The time parameter specifies the number of passes over the memory and the
|
||||
// memory parameter specifies the size of the memory in KiB. For example
|
||||
// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
|
||||
// adjusted to the number of available CPUs. The cost parameters should be
|
||||
// increased as memory latency and CPU parallelism increases. Remember to get a
|
||||
// good random salt.
|
||||
func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
|
||||
return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
|
||||
}
|
||||
|
||||
// IDKey derives a key from the password, salt, and cost parameters using
|
||||
// Argon2id returning a byte slice of length keyLen that can be used as a
|
||||
// cryptographic key. The CPU cost and parallelism degree must be greater than
|
||||
// zero.
|
||||
//
|
||||
// For example, you can get a derived key for e.g. AES-256 (which needs a
|
||||
// 32-byte key) by doing:
|
||||
//
|
||||
// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
|
||||
//
|
||||
// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
|
||||
// If using that amount of memory (64 MB) is not possible in some contexts then
|
||||
// the time parameter can be increased to compensate.
|
||||
//
|
||||
// The time parameter specifies the number of passes over the memory and the
|
||||
// memory parameter specifies the size of the memory in KiB. For example
|
||||
// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
|
||||
// adjusted to the number of available CPUs. The cost parameters should be
|
||||
// increased as memory latency and CPU parallelism increases. Remember to get a
|
||||
// good random salt.
|
||||
func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
|
||||
return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen)
|
||||
}
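
Putting the documented recommendations together, here is a short self-contained sketch of hashing and verifying a password with `IDKey`, using the suggested time=1 and 64 MB of memory. The `hashPassword`/`verifyPassword` helpers, the 16-byte salt, and the 32-byte key length are illustrative choices, not something the package mandates:

```go
package example

import (
	"crypto/rand"
	"crypto/subtle"
	"runtime"

	"golang.org/x/crypto/argon2"
)

// hashPassword derives a 32-byte key with Argon2id and returns it together
// with the random salt that was used.
func hashPassword(password []byte) (salt, key []byte, err error) {
	salt = make([]byte, 16)
	if _, err = rand.Read(salt); err != nil {
		return nil, nil, err
	}
	key = argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32)
	return salt, key, nil
}

// verifyPassword re-derives the key with the stored salt and parameters and
// compares it to the stored key in constant time.
func verifyPassword(password, salt, key []byte) bool {
	candidate := argon2.IDKey(password, salt, 1, 64*1024, uint8(runtime.NumCPU()), 32)
	return subtle.ConstantTimeCompare(candidate, key) == 1
}
```

Both derivation and verification must use identical cost parameters, so in practice the time, memory, and thread settings are stored or versioned alongside the salt.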
|
||||
|
||||
func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
|
||||
if time < 1 {
|
||||
panic("argon2: number of rounds too small")
|
||||
}
|
||||
if threads < 1 {
|
||||
panic("argon2: parallelism degree too low")
|
||||
}
|
||||
h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode)
|
||||
|
||||
memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads))
|
||||
if memory < 2*syncPoints*uint32(threads) {
|
||||
memory = 2 * syncPoints * uint32(threads)
|
||||
}
|
||||
B := initBlocks(&h0, memory, uint32(threads))
|
||||
processBlocks(B, time, memory, uint32(threads), mode)
|
||||
return extractKey(B, memory, uint32(threads), keyLen)
|
||||
}
|
||||
|
||||
const (
|
||||
blockLength = 128
|
||||
syncPoints = 4
|
||||
)
|
||||
|
||||
type block [blockLength]uint64
|
||||
|
||||
func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte {
|
||||
var (
|
||||
h0 [blake2b.Size + 8]byte
|
||||
params [24]byte
|
||||
tmp [4]byte
|
||||
)
|
||||
|
||||
b2, _ := blake2b.New512(nil)
|
||||
binary.LittleEndian.PutUint32(params[0:4], threads)
|
||||
binary.LittleEndian.PutUint32(params[4:8], keyLen)
|
||||
binary.LittleEndian.PutUint32(params[8:12], memory)
|
||||
binary.LittleEndian.PutUint32(params[12:16], time)
|
||||
binary.LittleEndian.PutUint32(params[16:20], uint32(Version))
|
||||
binary.LittleEndian.PutUint32(params[20:24], uint32(mode))
|
||||
b2.Write(params[:])
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(password)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(password)
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(salt)
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(key)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(key)
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(data)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(data)
|
||||
b2.Sum(h0[:0])
|
||||
return h0
|
||||
}
|
||||
|
||||
func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block {
|
||||
var block0 [1024]byte
|
||||
B := make([]block, memory)
|
||||
for lane := uint32(0); lane < threads; lane++ {
|
||||
j := lane * (memory / threads)
|
||||
binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane)
|
||||
|
||||
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0)
|
||||
blake2bHash(block0[:], h0[:])
|
||||
for i := range B[j+0] {
|
||||
B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:])
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1)
|
||||
blake2bHash(block0[:], h0[:])
|
||||
for i := range B[j+1] {
|
||||
B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:])
|
||||
}
|
||||
}
|
||||
return B
|
||||
}
|
||||
|
||||
func processBlocks(B []block, time, memory, threads uint32, mode int) {
|
||||
lanes := memory / threads
|
||||
segments := lanes / syncPoints
|
||||
|
||||
processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) {
|
||||
var addresses, in, zero block
|
||||
if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
|
||||
in[0] = uint64(n)
|
||||
in[1] = uint64(lane)
|
||||
in[2] = uint64(slice)
|
||||
in[3] = uint64(memory)
|
||||
in[4] = uint64(time)
|
||||
in[5] = uint64(mode)
|
||||
}
|
||||
|
||||
index := uint32(0)
|
||||
if n == 0 && slice == 0 {
|
||||
index = 2 // we have already generated the first two blocks
|
||||
if mode == argon2i || mode == argon2id {
|
||||
in[6]++
|
||||
processBlock(&addresses, &in, &zero)
|
||||
processBlock(&addresses, &addresses, &zero)
|
||||
}
|
||||
}
|
||||
|
||||
offset := lane*lanes + slice*segments + index
|
||||
var random uint64
|
||||
for index < segments {
|
||||
prev := offset - 1
|
||||
if index == 0 && slice == 0 {
|
||||
prev += lanes // last block in lane
|
||||
}
|
||||
if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
|
||||
if index%blockLength == 0 {
|
||||
in[6]++
|
||||
processBlock(&addresses, &in, &zero)
|
||||
processBlock(&addresses, &addresses, &zero)
|
||||
}
|
||||
random = addresses[index%blockLength]
|
||||
} else {
|
||||
random = B[prev][0]
|
||||
}
|
||||
newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index)
|
||||
processBlockXOR(&B[offset], &B[prev], &B[newOffset])
|
||||
index, offset = index+1, offset+1
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
for n := uint32(0); n < time; n++ {
|
||||
for slice := uint32(0); slice < syncPoints; slice++ {
|
||||
var wg sync.WaitGroup
|
||||
for lane := uint32(0); lane < threads; lane++ {
|
||||
wg.Add(1)
|
||||
go processSegment(n, slice, lane, &wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func extractKey(B []block, memory, threads, keyLen uint32) []byte {
|
||||
lanes := memory / threads
|
||||
for lane := uint32(0); lane < threads-1; lane++ {
|
||||
for i, v := range B[(lane*lanes)+lanes-1] {
|
||||
B[memory-1][i] ^= v
|
||||
}
|
||||
}
|
||||
|
||||
var block [1024]byte
|
||||
for i, v := range B[memory-1] {
|
||||
binary.LittleEndian.PutUint64(block[i*8:], v)
|
||||
}
|
||||
key := make([]byte, keyLen)
|
||||
blake2bHash(key, block[:])
|
||||
return key
|
||||
}
|
||||
|
||||
func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 {
|
||||
refLane := uint32(rand>>32) % threads
|
||||
if n == 0 && slice == 0 {
|
||||
refLane = lane
|
||||
}
|
||||
m, s := 3*segments, ((slice+1)%syncPoints)*segments
|
||||
if lane == refLane {
|
||||
m += index
|
||||
}
|
||||
if n == 0 {
|
||||
m, s = slice*segments, 0
|
||||
if slice == 0 || lane == refLane {
|
||||
m += index
|
||||
}
|
||||
}
|
||||
if index == 0 || lane == refLane {
|
||||
m--
|
||||
}
|
||||
return phi(rand, uint64(m), uint64(s), refLane, lanes)
|
||||
}
|
||||
|
||||
func phi(rand, m, s uint64, lane, lanes uint32) uint32 {
|
||||
p := rand & 0xFFFFFFFF
|
||||
p = (p * p) >> 32
|
||||
p = (p * m) >> 32
|
||||
return lane*lanes + uint32((s+m-(p+1))%uint64(lanes))
|
||||
}
|
233  vendor/golang.org/x/crypto/argon2/argon2_test.go  generated  vendored  Normal file
@ -0,0 +1,233 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package argon2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
genKatPassword = []byte{
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
}
|
||||
genKatSalt = []byte{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}
|
||||
genKatSecret = []byte{0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}
|
||||
genKatAAD = []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}
|
||||
)
|
||||
|
||||
func TestArgon2(t *testing.T) {
|
||||
defer func(sse4 bool) { useSSE4 = sse4 }(useSSE4)
|
||||
|
||||
if useSSE4 {
|
||||
t.Log("SSE4.1 version")
|
||||
testArgon2i(t)
|
||||
testArgon2d(t)
|
||||
testArgon2id(t)
|
||||
useSSE4 = false
|
||||
}
|
||||
t.Log("generic version")
|
||||
testArgon2i(t)
|
||||
testArgon2d(t)
|
||||
testArgon2id(t)
|
||||
}
|
||||
|
||||
func testArgon2d(t *testing.T) {
|
||||
want := []byte{
|
||||
0x51, 0x2b, 0x39, 0x1b, 0x6f, 0x11, 0x62, 0x97,
|
||||
0x53, 0x71, 0xd3, 0x09, 0x19, 0x73, 0x42, 0x94,
|
        0xf8, 0x68, 0xe3, 0xbe, 0x39, 0x84, 0xf3, 0xc1,
        0xa1, 0x3a, 0x4d, 0xb9, 0xfa, 0xbe, 0x4a, 0xcb,
    }
    hash := deriveKey(argon2d, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32)
    if !bytes.Equal(hash, want) {
        t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want))
    }
}

func testArgon2i(t *testing.T) {
    want := []byte{
        0xc8, 0x14, 0xd9, 0xd1, 0xdc, 0x7f, 0x37, 0xaa,
        0x13, 0xf0, 0xd7, 0x7f, 0x24, 0x94, 0xbd, 0xa1,
        0xc8, 0xde, 0x6b, 0x01, 0x6d, 0xd3, 0x88, 0xd2,
        0x99, 0x52, 0xa4, 0xc4, 0x67, 0x2b, 0x6c, 0xe8,
    }
    hash := deriveKey(argon2i, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32)
    if !bytes.Equal(hash, want) {
        t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want))
    }
}

func testArgon2id(t *testing.T) {
    want := []byte{
        0x0d, 0x64, 0x0d, 0xf5, 0x8d, 0x78, 0x76, 0x6c,
        0x08, 0xc0, 0x37, 0xa3, 0x4a, 0x8b, 0x53, 0xc9,
        0xd0, 0x1e, 0xf0, 0x45, 0x2d, 0x75, 0xb6, 0x5e,
        0xb5, 0x25, 0x20, 0xe9, 0x6b, 0x01, 0xe6, 0x59,
    }
    hash := deriveKey(argon2id, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32)
    if !bytes.Equal(hash, want) {
        t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want))
    }
}

func TestVectors(t *testing.T) {
    password, salt := []byte("password"), []byte("somesalt")
    for i, v := range testVectors {
        want, err := hex.DecodeString(v.hash)
        if err != nil {
            t.Fatalf("Test %d: failed to decode hash: %v", i, err)
        }
        hash := deriveKey(v.mode, password, salt, nil, nil, v.time, v.memory, v.threads, uint32(len(want)))
        if !bytes.Equal(hash, want) {
            t.Errorf("Test %d - got: %s want: %s", i, hex.EncodeToString(hash), hex.EncodeToString(want))
        }
    }
}

func benchmarkArgon2(mode int, time, memory uint32, threads uint8, keyLen uint32, b *testing.B) {
    password := []byte("password")
    salt := []byte("choosing random salts is hard")
    b.ReportAllocs()
    for i := 0; i < b.N; i++ {
        deriveKey(mode, password, salt, nil, nil, time, memory, threads, keyLen)
    }
}

func BenchmarkArgon2i(b *testing.B) {
    b.Run(" Time: 3 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 32*1024, 1, 32, b) })
    b.Run(" Time: 4 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 32*1024, 1, 32, b) })
    b.Run(" Time: 5 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 32*1024, 1, 32, b) })
    b.Run(" Time: 3 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 64*1024, 4, 32, b) })
    b.Run(" Time: 4 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 64*1024, 4, 32, b) })
    b.Run(" Time: 5 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 64*1024, 4, 32, b) })
}

func BenchmarkArgon2d(b *testing.B) {
    b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 32*1024, 1, 32, b) })
    b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 32*1024, 1, 32, b) })
    b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 32*1024, 1, 32, b) })
    b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 64*1024, 4, 32, b) })
    b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 64*1024, 4, 32, b) })
    b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 64*1024, 4, 32, b) })
}

func BenchmarkArgon2id(b *testing.B) {
    b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 32*1024, 1, 32, b) })
    b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 32*1024, 1, 32, b) })
    b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 32*1024, 1, 32, b) })
    b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 64*1024, 4, 32, b) })
    b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 64*1024, 4, 32, b) })
    b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 64*1024, 4, 32, b) })
}

// Generated with the CLI of https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
var testVectors = []struct {
    mode         int
    time, memory uint32
    threads      uint8
    hash         string
}{
    {
        mode: argon2i, time: 1, memory: 64, threads: 1,
        hash: "b9c401d1844a67d50eae3967dc28870b22e508092e861a37",
    },
    {
        mode: argon2d, time: 1, memory: 64, threads: 1,
        hash: "8727405fd07c32c78d64f547f24150d3f2e703a89f981a19",
    },
    {
        mode: argon2id, time: 1, memory: 64, threads: 1,
        hash: "655ad15eac652dc59f7170a7332bf49b8469be1fdb9c28bb",
    },
    {
        mode: argon2i, time: 2, memory: 64, threads: 1,
        hash: "8cf3d8f76a6617afe35fac48eb0b7433a9a670ca4a07ed64",
    },
    {
        mode: argon2d, time: 2, memory: 64, threads: 1,
        hash: "3be9ec79a69b75d3752acb59a1fbb8b295a46529c48fbb75",
    },
    {
        mode: argon2id, time: 2, memory: 64, threads: 1,
        hash: "068d62b26455936aa6ebe60060b0a65870dbfa3ddf8d41f7",
    },
    {
        mode: argon2i, time: 2, memory: 64, threads: 2,
        hash: "2089f3e78a799720f80af806553128f29b132cafe40d059f",
    },
    {
        mode: argon2d, time: 2, memory: 64, threads: 2,
        hash: "68e2462c98b8bc6bb60ec68db418ae2c9ed24fc6748a40e9",
    },
    {
        mode: argon2id, time: 2, memory: 64, threads: 2,
        hash: "350ac37222f436ccb5c0972f1ebd3bf6b958bf2071841362",
    },
    {
        mode: argon2i, time: 3, memory: 256, threads: 2,
        hash: "f5bbf5d4c3836af13193053155b73ec7476a6a2eb93fd5e6",
    },
    {
        mode: argon2d, time: 3, memory: 256, threads: 2,
        hash: "f4f0669218eaf3641f39cc97efb915721102f4b128211ef2",
    },
    {
        mode: argon2id, time: 3, memory: 256, threads: 2,
        hash: "4668d30ac4187e6878eedeacf0fd83c5a0a30db2cc16ef0b",
    },
    {
        mode: argon2i, time: 4, memory: 4096, threads: 4,
        hash: "a11f7b7f3f93f02ad4bddb59ab62d121e278369288a0d0e7",
    },
    {
        mode: argon2d, time: 4, memory: 4096, threads: 4,
        hash: "935598181aa8dc2b720914aa6435ac8d3e3a4210c5b0fb2d",
    },
    {
        mode: argon2id, time: 4, memory: 4096, threads: 4,
        hash: "145db9733a9f4ee43edf33c509be96b934d505a4efb33c5a",
    },
    {
        mode: argon2i, time: 4, memory: 1024, threads: 8,
        hash: "0cdd3956aa35e6b475a7b0c63488822f774f15b43f6e6e17",
    },
    {
        mode: argon2d, time: 4, memory: 1024, threads: 8,
        hash: "83604fc2ad0589b9d055578f4d3cc55bc616df3578a896e9",
    },
    {
        mode: argon2id, time: 4, memory: 1024, threads: 8,
        hash: "8dafa8e004f8ea96bf7c0f93eecf67a6047476143d15577f",
    },
    {
        mode: argon2i, time: 2, memory: 64, threads: 3,
        hash: "5cab452fe6b8479c8661def8cd703b611a3905a6d5477fe6",
    },
    {
        mode: argon2d, time: 2, memory: 64, threads: 3,
        hash: "22474a423bda2ccd36ec9afd5119e5c8949798cadf659f51",
    },
    {
        mode: argon2id, time: 2, memory: 64, threads: 3,
        hash: "4a15b31aec7c2590b87d1f520be7d96f56658172deaa3079",
    },
    {
        mode: argon2i, time: 3, memory: 1024, threads: 6,
        hash: "d236b29c2b2a09babee842b0dec6aa1e83ccbdea8023dced",
    },
    {
        mode: argon2d, time: 3, memory: 1024, threads: 6,
        hash: "a3351b0319a53229152023d9206902f4ef59661cdca89481",
    },
    {
        mode: argon2id, time: 3, memory: 1024, threads: 6,
        hash: "1640b932f4b60e272f5d2207b9a9c626ffa1bd88d2349016",
    },
}
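The tests and vectors above drive the unexported deriveKey core directly. Outside the package, the same parameters are reached through the exported wrappers; a minimal usage sketch, assuming this snapshot already exports argon2.Key (the Argon2i wrapper) with the signature Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32):

package main

import (
    "encoding/hex"
    "fmt"

    "golang.org/x/crypto/argon2"
)

func main() {
    password := []byte("password")
    salt := []byte("somesalt") // illustrative only; use a fresh random salt per derived key
    // 3 passes over 32 MiB with 4 lanes and a 32-byte key, mirroring the
    // parameters exercised by the known-answer tests above.
    key := argon2.Key(password, salt, 3, 32*1024, 4, 32)
    fmt.Println(hex.EncodeToString(key))
}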
53
vendor/golang.org/x/crypto/argon2/blake2b.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package argon2

import (
    "encoding/binary"
    "hash"

    "golang.org/x/crypto/blake2b"
)

// blake2bHash computes an arbitrary long hash value of in
// and writes the hash to out.
func blake2bHash(out []byte, in []byte) {
    var b2 hash.Hash
    if n := len(out); n < blake2b.Size {
        b2, _ = blake2b.New(n, nil)
    } else {
        b2, _ = blake2b.New512(nil)
    }

    var buffer [blake2b.Size]byte
    binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out)))
    b2.Write(buffer[:4])
    b2.Write(in)

    if len(out) <= blake2b.Size {
        b2.Sum(out[:0])
        return
    }

    outLen := len(out)
    b2.Sum(buffer[:0])
    b2.Reset()
    copy(out, buffer[:32])
    out = out[32:]
    for len(out) > blake2b.Size {
        b2.Write(buffer[:])
        b2.Sum(buffer[:0])
        copy(out, buffer[:32])
        out = out[32:]
        b2.Reset()
    }

    if outLen%blake2b.Size > 0 { // outLen > 64
        r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2
        b2, _ = blake2b.New(outLen-32*r, nil)
    }
    b2.Write(buffer[:])
    b2.Sum(out[:0])
}
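For outputs of at most blake2b.Size bytes, the construction above reduces to a single length-prefixed BLAKE2b call. A hedged consistency sketch, assuming it is dropped into package argon2 next to blake2b.go (it additionally needs the bytes import); the helper name is mine, not part of the upstream file:

// blake2bHashShortMatchesDirect checks that, for a 32-byte output,
// blake2bHash(out, in) equals BLAKE2b-256(LE32(32) || in) computed directly.
func blake2bHashShortMatchesDirect(in []byte) bool {
    const n = 32
    out := make([]byte, n)
    blake2bHash(out, in)

    h, _ := blake2b.New(n, nil)
    var prefix [4]byte
    binary.LittleEndian.PutUint32(prefix[:], n)
    h.Write(prefix[:])
    h.Write(in)
    return bytes.Equal(out, h.Sum(nil))
}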
60
vendor/golang.org/x/crypto/argon2/blamka_amd64.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine

package argon2

import "golang.org/x/sys/cpu"

func init() {
    useSSE4 = cpu.X86.HasSSE41
}

//go:noescape
func mixBlocksSSE2(out, a, b, c *block)

//go:noescape
func xorBlocksSSE2(out, a, b, c *block)

//go:noescape
func blamkaSSE4(b *block)

func processBlockSSE(out, in1, in2 *block, xor bool) {
    var t block
    mixBlocksSSE2(&t, in1, in2, &t)
    if useSSE4 {
        blamkaSSE4(&t)
    } else {
        for i := 0; i < blockLength; i += 16 {
            blamkaGeneric(
                &t[i+0], &t[i+1], &t[i+2], &t[i+3],
                &t[i+4], &t[i+5], &t[i+6], &t[i+7],
                &t[i+8], &t[i+9], &t[i+10], &t[i+11],
                &t[i+12], &t[i+13], &t[i+14], &t[i+15],
            )
        }
        for i := 0; i < blockLength/8; i += 2 {
            blamkaGeneric(
                &t[i], &t[i+1], &t[16+i], &t[16+i+1],
                &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
                &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
                &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
            )
        }
    }
    if xor {
        xorBlocksSSE2(out, in1, in2, &t)
    } else {
        mixBlocksSSE2(out, in1, in2, &t)
    }
}

func processBlock(out, in1, in2 *block) {
    processBlockSSE(out, in1, in2, false)
}

func processBlockXOR(out, in1, in2 *block) {
    processBlockSSE(out, in1, in2, true)
}
243
vendor/golang.org/x/crypto/argon2/blamka_amd64.s
generated
vendored
Normal file
@ -0,0 +1,243 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine

#include "textflag.h"

DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16

DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16

#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
    MOVO v4, t1; \
    MOVO v5, v4; \
    MOVO t1, v5; \
    MOVO v6, t1; \
    PUNPCKLQDQ v6, t2; \
    PUNPCKHQDQ v7, v6; \
    PUNPCKHQDQ t2, v6; \
    PUNPCKLQDQ v7, t2; \
    MOVO t1, v7; \
    MOVO v2, t1; \
    PUNPCKHQDQ t2, v7; \
    PUNPCKLQDQ v3, t2; \
    PUNPCKHQDQ t2, v2; \
    PUNPCKLQDQ t1, t2; \
    PUNPCKHQDQ t2, v3

#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
    MOVO v4, t1; \
    MOVO v5, v4; \
    MOVO t1, v5; \
    MOVO v2, t1; \
    PUNPCKLQDQ v2, t2; \
    PUNPCKHQDQ v3, v2; \
    PUNPCKHQDQ t2, v2; \
    PUNPCKLQDQ v3, t2; \
    MOVO t1, v3; \
    MOVO v6, t1; \
    PUNPCKHQDQ t2, v3; \
    PUNPCKLQDQ v7, t2; \
    PUNPCKHQDQ t2, v6; \
    PUNPCKLQDQ t1, t2; \
    PUNPCKHQDQ t2, v7

#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
    MOVO v0, t0; \
    PMULULQ v2, t0; \
    PADDQ v2, v0; \
    PADDQ t0, v0; \
    PADDQ t0, v0; \
    PXOR v0, v6; \
    PSHUFD $0xB1, v6, v6; \
    MOVO v4, t0; \
    PMULULQ v6, t0; \
    PADDQ v6, v4; \
    PADDQ t0, v4; \
    PADDQ t0, v4; \
    PXOR v4, v2; \
    PSHUFB c40, v2; \
    MOVO v0, t0; \
    PMULULQ v2, t0; \
    PADDQ v2, v0; \
    PADDQ t0, v0; \
    PADDQ t0, v0; \
    PXOR v0, v6; \
    PSHUFB c48, v6; \
    MOVO v4, t0; \
    PMULULQ v6, t0; \
    PADDQ v6, v4; \
    PADDQ t0, v4; \
    PADDQ t0, v4; \
    PXOR v4, v2; \
    MOVO v2, t0; \
    PADDQ v2, t0; \
    PSRLQ $63, v2; \
    PXOR t0, v2; \
    MOVO v1, t0; \
    PMULULQ v3, t0; \
    PADDQ v3, v1; \
    PADDQ t0, v1; \
    PADDQ t0, v1; \
    PXOR v1, v7; \
    PSHUFD $0xB1, v7, v7; \
    MOVO v5, t0; \
    PMULULQ v7, t0; \
    PADDQ v7, v5; \
    PADDQ t0, v5; \
    PADDQ t0, v5; \
    PXOR v5, v3; \
    PSHUFB c40, v3; \
    MOVO v1, t0; \
    PMULULQ v3, t0; \
    PADDQ v3, v1; \
    PADDQ t0, v1; \
    PADDQ t0, v1; \
    PXOR v1, v7; \
    PSHUFB c48, v7; \
    MOVO v5, t0; \
    PMULULQ v7, t0; \
    PADDQ v7, v5; \
    PADDQ t0, v5; \
    PADDQ t0, v5; \
    PXOR v5, v3; \
    MOVO v3, t0; \
    PADDQ v3, t0; \
    PSRLQ $63, v3; \
    PXOR t0, v3

#define LOAD_MSG_0(block, off) \
    MOVOU 8*(off+0)(block), X0; \
    MOVOU 8*(off+2)(block), X1; \
    MOVOU 8*(off+4)(block), X2; \
    MOVOU 8*(off+6)(block), X3; \
    MOVOU 8*(off+8)(block), X4; \
    MOVOU 8*(off+10)(block), X5; \
    MOVOU 8*(off+12)(block), X6; \
    MOVOU 8*(off+14)(block), X7

#define STORE_MSG_0(block, off) \
    MOVOU X0, 8*(off+0)(block); \
    MOVOU X1, 8*(off+2)(block); \
    MOVOU X2, 8*(off+4)(block); \
    MOVOU X3, 8*(off+6)(block); \
    MOVOU X4, 8*(off+8)(block); \
    MOVOU X5, 8*(off+10)(block); \
    MOVOU X6, 8*(off+12)(block); \
    MOVOU X7, 8*(off+14)(block)

#define LOAD_MSG_1(block, off) \
    MOVOU 8*off+0*8(block), X0; \
    MOVOU 8*off+16*8(block), X1; \
    MOVOU 8*off+32*8(block), X2; \
    MOVOU 8*off+48*8(block), X3; \
    MOVOU 8*off+64*8(block), X4; \
    MOVOU 8*off+80*8(block), X5; \
    MOVOU 8*off+96*8(block), X6; \
    MOVOU 8*off+112*8(block), X7

#define STORE_MSG_1(block, off) \
    MOVOU X0, 8*off+0*8(block); \
    MOVOU X1, 8*off+16*8(block); \
    MOVOU X2, 8*off+32*8(block); \
    MOVOU X3, 8*off+48*8(block); \
    MOVOU X4, 8*off+64*8(block); \
    MOVOU X5, 8*off+80*8(block); \
    MOVOU X6, 8*off+96*8(block); \
    MOVOU X7, 8*off+112*8(block)

#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
    LOAD_MSG_0(block, off); \
    HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
    SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
    HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
    SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
    STORE_MSG_0(block, off)

#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
    LOAD_MSG_1(block, off); \
    HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
    SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
    HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
    SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
    STORE_MSG_1(block, off)

// func blamkaSSE4(b *block)
TEXT ·blamkaSSE4(SB), 4, $0-8
    MOVQ b+0(FP), AX

    MOVOU ·c40<>(SB), X10
    MOVOU ·c48<>(SB), X11

    BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
    BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)

    BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
    BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
    RET

// func mixBlocksSSE2(out, a, b, c *block)
TEXT ·mixBlocksSSE2(SB), 4, $0-32
    MOVQ out+0(FP), DX
    MOVQ a+8(FP), AX
    MOVQ b+16(FP), BX
    MOVQ a+24(FP), CX
    MOVQ $128, BP

loop:
    MOVOU 0(AX), X0
    MOVOU 0(BX), X1
    MOVOU 0(CX), X2
    PXOR X1, X0
    PXOR X2, X0
    MOVOU X0, 0(DX)
    ADDQ $16, AX
    ADDQ $16, BX
    ADDQ $16, CX
    ADDQ $16, DX
    SUBQ $2, BP
    JA loop
    RET

// func xorBlocksSSE2(out, a, b, c *block)
TEXT ·xorBlocksSSE2(SB), 4, $0-32
    MOVQ out+0(FP), DX
    MOVQ a+8(FP), AX
    MOVQ b+16(FP), BX
    MOVQ a+24(FP), CX
    MOVQ $128, BP

loop:
    MOVOU 0(AX), X0
    MOVOU 0(BX), X1
    MOVOU 0(CX), X2
    MOVOU 0(DX), X3
    PXOR X1, X0
    PXOR X2, X0
    PXOR X3, X0
    MOVOU X0, 0(DX)
    ADDQ $16, AX
    ADDQ $16, BX
    ADDQ $16, CX
    ADDQ $16, DX
    SUBQ $2, BP
    JA loop
    RET
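Each MOVO/PMULULQ/PADDQ/PADDQ/PADDQ group in HALF_ROUND above is one blaMka addition, the Argon2 variant of BLAKE2b's G mixing step. In my notation (a summary, not text from the source), with a and b taken as 64-bit lanes and lo(x) the low 32 bits of x, it computes

a \leftarrow a + b + 2 \cdot \mathrm{lo}(a) \cdot \mathrm{lo}(b) \pmod{2^{64}}

which corresponds to the v += w + 2*uint64(uint32(v))*uint64(uint32(w)) lines of the generic implementation that follows.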
163
vendor/golang.org/x/crypto/argon2/blamka_generic.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package argon2

var useSSE4 bool

func processBlockGeneric(out, in1, in2 *block, xor bool) {
    var t block
    for i := range t {
        t[i] = in1[i] ^ in2[i]
    }
    for i := 0; i < blockLength; i += 16 {
        blamkaGeneric(
            &t[i+0], &t[i+1], &t[i+2], &t[i+3],
            &t[i+4], &t[i+5], &t[i+6], &t[i+7],
            &t[i+8], &t[i+9], &t[i+10], &t[i+11],
            &t[i+12], &t[i+13], &t[i+14], &t[i+15],
        )
    }
    for i := 0; i < blockLength/8; i += 2 {
        blamkaGeneric(
            &t[i], &t[i+1], &t[16+i], &t[16+i+1],
            &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
            &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
            &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
        )
    }
    if xor {
        for i := range t {
            out[i] ^= in1[i] ^ in2[i] ^ t[i]
        }
    } else {
        for i := range t {
            out[i] = in1[i] ^ in2[i] ^ t[i]
        }
    }
}

func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) {
    v00, v01, v02, v03 := *t00, *t01, *t02, *t03
    v04, v05, v06, v07 := *t04, *t05, *t06, *t07
    v08, v09, v10, v11 := *t08, *t09, *t10, *t11
    v12, v13, v14, v15 := *t12, *t13, *t14, *t15

    v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
    v12 ^= v00
    v12 = v12>>32 | v12<<32
    v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
    v04 ^= v08
    v04 = v04>>24 | v04<<40

    v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
    v12 ^= v00
    v12 = v12>>16 | v12<<48
    v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
    v04 ^= v08
    v04 = v04>>63 | v04<<1

    v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
    v13 ^= v01
    v13 = v13>>32 | v13<<32
    v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
    v05 ^= v09
    v05 = v05>>24 | v05<<40

    v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
    v13 ^= v01
    v13 = v13>>16 | v13<<48
    v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
    v05 ^= v09
    v05 = v05>>63 | v05<<1

    v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
    v14 ^= v02
    v14 = v14>>32 | v14<<32
    v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
    v06 ^= v10
    v06 = v06>>24 | v06<<40

    v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
    v14 ^= v02
    v14 = v14>>16 | v14<<48
    v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
    v06 ^= v10
    v06 = v06>>63 | v06<<1

    v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
    v15 ^= v03
    v15 = v15>>32 | v15<<32
    v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
    v07 ^= v11
    v07 = v07>>24 | v07<<40

    v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
    v15 ^= v03
    v15 = v15>>16 | v15<<48
    v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
    v07 ^= v11
    v07 = v07>>63 | v07<<1

    v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
    v15 ^= v00
    v15 = v15>>32 | v15<<32
    v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
    v05 ^= v10
    v05 = v05>>24 | v05<<40

    v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
    v15 ^= v00
    v15 = v15>>16 | v15<<48
    v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
    v05 ^= v10
    v05 = v05>>63 | v05<<1

    v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
    v12 ^= v01
    v12 = v12>>32 | v12<<32
    v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
    v06 ^= v11
    v06 = v06>>24 | v06<<40

    v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
    v12 ^= v01
    v12 = v12>>16 | v12<<48
    v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
    v06 ^= v11
    v06 = v06>>63 | v06<<1

    v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
    v13 ^= v02
    v13 = v13>>32 | v13<<32
    v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
    v07 ^= v08
    v07 = v07>>24 | v07<<40

    v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
    v13 ^= v02
    v13 = v13>>16 | v13<<48
    v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
    v07 ^= v08
    v07 = v07>>63 | v07<<1

    v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
    v14 ^= v03
    v14 = v14>>32 | v14<<32
    v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
    v04 ^= v09
    v04 = v04>>24 | v04<<40

    v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
    v14 ^= v03
    v14 = v14>>16 | v14<<48
    v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
    v04 ^= v09
    v04 = v04>>63 | v04<<1

    *t00, *t01, *t02, *t03 = v00, v01, v02, v03
    *t04, *t05, *t06, *t07 = v04, v05, v06, v07
    *t08, *t09, *t10, *t11 = v08, v09, v10, v11
    *t12, *t13, *t14, *t15 = v12, v13, v14, v15
}
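The xor flag above only changes how the permuted block is folded into out. A small illustrative check of that invariant, assuming it sits in package argon2 where the unexported block type and processBlockGeneric are visible (the helper is mine, not upstream code):

// xorVariantIsConsistent verifies that processBlockGeneric with xor=true
// equals the xor=false result XORed into out's previous contents.
func xorVariantIsConsistent(a, b, prev *block) bool {
    var plain block
    processBlockGeneric(&plain, a, b, false)

    acc := *prev
    processBlockGeneric(&acc, a, b, true)

    for i := range acc {
        if acc[i] != prev[i]^plain[i] {
            return false
        }
    }
    return true
}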
15
vendor/golang.org/x/crypto/argon2/blamka_ref.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64 appengine gccgo

package argon2

func processBlock(out, in1, in2 *block) {
    processBlockGeneric(out, in1, in2, false)
}

func processBlockXOR(out, in1, in2 *block) {
    processBlockGeneric(out, in1, in2, true)
}
Some files were not shown because too many files have changed in this diff.