work on mocks and unit tests

This commit is contained in:
Ian Norden 2019-04-30 12:48:31 -05:00
parent d702cb720c
commit 15e044403d
753 changed files with 201600 additions and 135 deletions

View File

@ -65,3 +65,7 @@
[prune]
go-tests = true
unused-packages = true
[[prune.project]]
name = "github.com/ethereum/go-ethereum"
unused-packages = false

View File

@ -60,13 +60,13 @@ func syncAndPublish() {
db := utils.LoadPostgres(databaseConfig, blockChain.Node())
quitChan := make(chan bool)
indexer, err := ipfs.NewIPFSIndexer(ipfsPath, &db, ethClient, rpcClient, quitChan)
processor, err := ipfs.NewIPFSProcessor(ipfsPath, &db, ethClient, rpcClient, quitChan)
if err != nil {
log.Fatal(err)
}
wg := &syn.WaitGroup{}
err = indexer.Index(wg)
err = processor.Process(wg)
if err != nil {
log.Fatal(err)
}

View File

@ -4,6 +4,8 @@ CREATE TABLE public.transaction_cids (
header_id INTEGER NOT NULL REFERENCES header_cids (id) ON DELETE CASCADE,
tx_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
dst VARCHAR(66) NOT NULL,
src VARCHAR(66) NOT NULL,
UNIQUE (header_id, tx_hash)
);
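With dst and src stored alongside each transaction CID, transactions can be filtered by address directly in Postgres. A minimal sketch using sqlx (the driver the repository code uses); the package and helper name are hypothetical, not part of this commit:

package example // illustrative only

import "github.com/jmoiron/sqlx"

// transactionCIDsBySender is a hypothetical helper that returns the CIDs of
// transactions sent from a given address, using the src column added above.
func transactionCIDsBySender(db *sqlx.DB, src string) ([]string, error) {
	var cids []string
	err := db.Select(&cids, `SELECT cid FROM public.transaction_cids WHERE src = $1`, src)
	return cids, err
}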

View File

@ -2,7 +2,8 @@
CREATE TABLE public.receipt_cids (
id SERIAL PRIMARY KEY,
tx_id INTEGER NOT NULL REFERENCES transaction_cids (id) ON DELETE CASCADE,
cid TEXT NOT NULL
cid TEXT NOT NULL,
topic0s VARCHAR(66)[]
);
-- +goose Down
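The topic0s array lets receipt CIDs be looked up by event signature without fetching the receipts from IPFS. A sketch of such a lookup, again assuming sqlx and the schema above; the helper is hypothetical:

package example // illustrative only

import "github.com/jmoiron/sqlx"

// receiptCIDsByTopic0 is a hypothetical helper that returns receipt CIDs whose
// logs contain the given topic0 (event signature hash), via Postgres array membership.
func receiptCIDsByTopic0(db *sqlx.DB, topic0 string) ([]string, error) {
	var cids []string
	err := db.Select(&cids, `SELECT cid FROM public.receipt_cids WHERE $1 = ANY(topic0s)`, topic0)
	return cids, err
}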

View File

@ -18,7 +18,6 @@ package ipfs
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@ -28,36 +27,25 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// Converter interface is used to convert a geth statediff.Payload to our IPLDPayload type
type Converter interface {
// PayloadConverter interface is used to convert a geth statediff.Payload to our IPLDPayload type
type PayloadConverter interface {
Convert(payload statediff.Payload) (*IPLDPayload, error)
}
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
// Converter is the underlying struct for the PayloadConverter interface
type Converter struct {
client core.EthClient
}
// IPLDPayload is a custom type which packages ETH data for the IPFS publisher
type IPLDPayload struct {
HeaderRLP []byte
BlockNumber *big.Int
BlockHash common.Hash
BlockBody *types.Body
Receipts types.Receipts
StateLeafs map[common.Hash][]byte
StorageLeafs map[common.Hash]map[common.Hash][]byte
}
// NewPayloadConverter creates a pointer to a new PayloadConverter which satisfies the Converter interface
func NewPayloadConverter(client core.EthClient) *PayloadConverter {
return &PayloadConverter{
// NewPayloadConverter creates a pointer to a new Converter which satisfies the PayloadConverter interface
func NewPayloadConverter(client core.EthClient) *Converter {
return &Converter{
client: client,
}
}
// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
func (pc *PayloadConverter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Unpack block rlp to access fields
block := new(types.Block)
err := rlp.DecodeBytes(payload.BlockRlp, block)
@ -66,21 +54,49 @@ func (pc *PayloadConverter) Convert(payload statediff.Payload) (*IPLDPayload, er
if err != nil {
return nil, err
}
trxLen := len(block.Transactions())
convertedPayload := &IPLDPayload{
BlockHash: block.Hash(),
BlockNumber: block.Number(),
HeaderRLP: headerRlp,
BlockBody: block.Body(),
Receipts: make(types.Receipts, 0),
StateLeafs: make(map[common.Hash][]byte),
StorageLeafs: make(map[common.Hash]map[common.Hash][]byte),
BlockHash: block.Hash(),
BlockNumber: block.Number(),
HeaderRLP: headerRlp,
BlockBody: block.Body(),
TrxMetaData: make([]*TrxMetaData, 0, trxLen),
Receipts: make(types.Receipts, 0, trxLen),
ReceiptMetaData: make([]*ReceiptMetaData, 0, trxLen),
StateLeafs: make(map[common.Hash][]byte),
StorageLeafs: make(map[common.Hash]map[common.Hash][]byte),
}
for _, trx := range block.Transactions() {
for gethTransactionIndex, trx := range block.Transactions() {
// Extract to and from data from the transactions for indexing
from, err := pc.client.TransactionSender(context.Background(), trx, block.Hash(), uint(gethTransactionIndex))
if err != nil {
return nil, err
}
txMeta := &TrxMetaData{
To: trx.To().Hex(),
From: from.Hex(),
}
// txMeta will have the same index as its corresponding trx in the convertedPayload.BlockBody
convertedPayload.TrxMetaData = append(convertedPayload.TrxMetaData, txMeta)
// Retrieve receipt for this transaction
gethReceipt, err := pc.client.TransactionReceipt(context.Background(), trx.Hash())
if err != nil {
return nil, err
}
// Extract topic0 data from the receipt's logs for indexing
rctMeta := &ReceiptMetaData{
Topic0s: make([]string, 0, len(gethReceipt.Logs)),
}
for _, log := range gethReceipt.Logs {
// Skip logs that carry no topics at all
if len(log.Topics) < 1 {
continue
}
rctMeta.Topic0s = append(rctMeta.Topic0s, log.Topics[0].Hex())
}
// receipt and rctMeta will have the same indexes
convertedPayload.Receipts = append(convertedPayload.Receipts, gethReceipt)
convertedPayload.ReceiptMetaData = append(convertedPayload.ReceiptMetaData, rctMeta)
}
// Unpack state diff rlp to access fields

View File

@ -15,3 +15,28 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers/mocks"
)
var _ = Describe("Converter", func() {
Describe("Convert", func() {
It("Converts StatediffPayloads into IPLDPayloads", func() {
mockConverter := mocks.PayloadConverter{}
mockConverter.ReturnIPLDPayload = &test_helpers.MockIPLDPayload
ipldPayload, err := mockConverter.Convert(test_helpers.MockStatediffPayload)
Expect(err).ToNot(HaveOccurred())
Expect(ipldPayload).To(Equal(&test_helpers.MockIPLDPayload))
Expect(mockConverter.PassedStatediffPayload).To(Equal(test_helpers.MockStatediffPayload))
})
It("Fails if", func() {
})
})
})

View File

@ -15,3 +15,21 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
import (
"github.com/sirupsen/logrus"
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestIPFS(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "IPFS Suite Test")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

View File

@ -28,31 +28,36 @@ import (
const payloadChanBufferSize = 800 // 1/10th max eth sub buffer size
// Indexer is an interface for streaming, converting to IPLDs, publishing, and indexing all Ethereum data
// SyncAndPublish is an interface for streaming, converting to IPLDs, publishing, and indexing all Ethereum data
// This is the top-level interface used by the syncAndPublish command
type Indexer interface {
Index(wg *sync.WaitGroup) error
type SyncAndPublish interface {
Process(wg *sync.WaitGroup) error
}
// ipfsIndexer is the underlying struct for the Indexer interface
// we are not exporting this to enforce proper initialization through the NewIPFSIndexer function
type ipfsIndexer struct {
Streamer Streamer
Converter Converter
Publisher Publisher
Repository Repository
// Processor is the underlying struct for the SyncAndPublish interface
type Processor struct {
// Interface for streaming statediff payloads over a geth rpc subscription
Streamer StateDiffStreamer
// Interface for converting statediff payloads into ETH-IPLD object payloads
Converter PayloadConverter
// Interface for publishing the ETH-IPLD payloads to IPFS
Publisher IPLDPublisher
// Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
Repository CIDRepository
// Chan the processor uses to subscribe to state diff payloads from the Streamer
PayloadChan chan statediff.Payload
QuitChan chan bool
// Chan used to shut down the Processor
QuitChan chan bool
}
// NewIPFSIndexer creates a new Indexer interface using an underlying ipfsIndexer struct
func NewIPFSIndexer(ipfsPath string, db *postgres.DB, ethClient core.EthClient, rpcClient core.RpcClient, qc chan bool) (Indexer, error) {
// NewIPFSProcessor creates a new Processor interface using an underlying Processor struct
func NewIPFSProcessor(ipfsPath string, db *postgres.DB, ethClient core.EthClient, rpcClient core.RpcClient, qc chan bool) (SyncAndPublish, error) {
publisher, err := NewIPLDPublisher(ipfsPath)
if err != nil {
return nil, err
}
return &ipfsIndexer{
Streamer: NewStateDiffSyncer(rpcClient),
return &Processor{
Streamer: NewStateDiffStreamer(rpcClient),
Repository: NewCIDRepository(db),
Converter: NewPayloadConverter(ethClient),
Publisher: publisher,
@ -61,15 +66,14 @@ func NewIPFSIndexer(ipfsPath string, db *postgres.DB, ethClient core.EthClient,
}, nil
}
// Index is the main processing loop
func (i *ipfsIndexer) Index(wg *sync.WaitGroup) error {
// Process is the main processing loop
func (i *Processor) Process(wg *sync.WaitGroup) error {
sub, err := i.Streamer.Stream(i.PayloadChan)
if err != nil {
return err
}
go func() {
wg.Add(1)
defer wg.Done()
for {
select {
case payload := <-i.PayloadChan:
@ -80,10 +84,12 @@ func (i *ipfsIndexer) Index(wg *sync.WaitGroup) error {
ipldPayload, err := i.Converter.Convert(payload)
if err != nil {
log.Error(err)
continue
}
cidPayload, err := i.Publisher.Publish(ipldPayload)
if err != nil {
log.Error(err)
continue
}
err = i.Repository.Index(cidPayload)
if err != nil {
@ -92,7 +98,8 @@ func (i *ipfsIndexer) Index(wg *sync.WaitGroup) error {
case err = <-sub.Err():
log.Error(err)
case <-i.QuitChan:
log.Info("quiting IPFSIndexer")
log.Info("quiting IPFSProcessor")
wg.Done()
return
}
}
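For reference, a minimal sketch of how a caller shuts this loop down, assuming the quit channel and WaitGroup wiring shown in the syncAndPublish command; the helper itself is hypothetical and not part of this commit:

package example // illustrative only

import "sync"

// stopProcessor signals the quit channel, which unblocks the <-i.QuitChan case
// above, then waits for the Process goroutine to call wg.Done() and return.
func stopProcessor(quitChan chan bool, wg *sync.WaitGroup) {
	quitChan <- true
	wg.Wait()
}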

View File

@ -0,0 +1,69 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
import (
"sync"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers/mocks"
)
var _ = Describe("Processor", func() {
Describe("Process", func() {
It("Streams StatediffPayloads, converts them to IPLDPayloads, publishes IPLDPayloads, and indexes CIDPayloads", func() {
wg := new(sync.WaitGroup)
payloadChan := make(chan statediff.Payload, 800)
processor := ipfs.Processor{
Repository: &mocks.CIDRepository{
ReturnErr: nil,
},
Publisher: &mocks.IPLDPublisher{
ReturnCIDPayload: &test_helpers.MockCIDPayload,
ReturnErr: nil,
},
Streamer: &mocks.StateDiffStreamer{
ReturnSub: &rpc.ClientSubscription{},
StreamPayloads: []statediff.Payload{
test_helpers.MockStatediffPayload,
},
ReturnErr: nil,
WaitGroup: wg,
},
Converter: &mocks.PayloadConverter{
ReturnIPLDPayload: &test_helpers.MockIPLDPayload,
ReturnErr: nil,
},
PayloadChan: payloadChan,
}
err := processor.Process(wg)
Expect(err).ToNot(HaveOccurred())
wg.Wait()
})
It("Fails if", func() {
})
})
})

View File

@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
rlp2 "github.com/ethereum/go-ethereum/rlp"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs"
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header"
@ -31,13 +32,13 @@ import (
"github.com/vulcanize/eth-block-extractor/pkg/wrappers/rlp"
)
// Publisher is the interface for publishing an IPLD payload
type Publisher interface {
// IPLDPublisher is the interface for publishing an IPLD payload
type IPLDPublisher interface {
Publish(payload *IPLDPayload) (*CIDPayload, error)
}
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
// Publisher is the underlying struct for the IPLDPublisher interface
type Publisher struct {
Node *ipfs.IPFS
HeaderPutter *eth_block_header.BlockHeaderDagPutter
TransactionPutter *eth_block_transactions.BlockTransactionsDagPutter
@ -46,27 +47,15 @@ type IPLDPublisher struct {
StoragePutter *eth_storage_trie.StorageTrieDagPutter
}
// CID payload is a struct to hold all the CIDs and their meta data
type CIDPayload struct {
BlockNumber string
BlockHash string
HeaderCID string
TransactionCIDs map[common.Hash]string
ReceiptCIDs map[common.Hash]string
StateLeafCIDs map[common.Hash]string
StorageLeafCIDs map[common.Hash]map[common.Hash]string
}
// NewIPLDPublisher creates a pointer to a new IPLDPublisher which satisfies the Publisher interface
func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
node, err := ipfs.InitIPFSNode(ipfsPath)
if err != nil {
return nil, err
}
decoder := rlp.RlpDecoder{}
return &IPLDPublisher{
return &Publisher{
Node: node,
HeaderPutter: eth_block_header.NewBlockHeaderDagPutter(node, decoder),
HeaderPutter: eth_block_header.NewBlockHeaderDagPutter(node, rlp.RlpDecoder{}),
TransactionPutter: eth_block_transactions.NewBlockTransactionsDagPutter(node),
ReceiptPutter: eth_block_receipts.NewEthBlockReceiptDagPutter(node),
StatePutter: eth_state_trie.NewStateTrieDagPutter(node),
@ -75,21 +64,35 @@ func NewIPLDPublisher(ipfsPath string) (*IPLDPublisher, error) {
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
// Process and publish headers
headerCid, err := pub.publishHeaders(payload.HeaderRLP)
if err != nil {
return nil, err
}
// Process and publish uncles
uncleCids := make(map[common.Hash]string)
for _, uncle := range payload.BlockBody.Uncles {
uncleRlp, err := rlp2.EncodeToBytes(uncle)
if err != nil {
return nil, err
}
cid, err := pub.publishHeaders(uncleRlp)
if err != nil {
return nil, err
}
uncleCids[uncle.Hash()] = cid
}
// Process and publish transactions
transactionCids, err := pub.publishTransactions(payload.BlockBody)
transactionCids, err := pub.publishTransactions(payload.BlockBody, payload.TrxMetaData)
if err != nil {
return nil, err
}
// Process and publish receipts
receiptsCids, err := pub.publishReceipts(payload.Receipts)
receiptsCids, err := pub.publishReceipts(payload.Receipts, payload.ReceiptMetaData)
if err != nil {
return nil, err
}
@ -111,6 +114,7 @@ func (pub *IPLDPublisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
BlockHash: payload.BlockHash.Hex(),
BlockNumber: payload.BlockNumber.String(),
HeaderCID: headerCid,
UncleCIDS: uncleCids,
TransactionCIDs: transactionCids,
ReceiptCIDs: receiptsCids,
StateLeafCIDs: stateLeafCids,
@ -118,7 +122,7 @@ func (pub *IPLDPublisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
}, nil
}
func (pub *IPLDPublisher) publishHeaders(headerRLP []byte) (string, error) {
func (pub *Publisher) publishHeaders(headerRLP []byte) (string, error) {
headerCids, err := pub.HeaderPutter.DagPut(headerRLP)
if err != nil {
return "", err
@ -129,7 +133,7 @@ func (pub *IPLDPublisher) publishHeaders(headerRLP []byte) (string, error) {
return headerCids[0], nil
}
func (pub *IPLDPublisher) publishTransactions(blockBody *types.Body) (map[common.Hash]string, error) {
func (pub *Publisher) publishTransactions(blockBody *types.Body, trxMeta []*TrxMetaData) (map[common.Hash]*TrxMetaData, error) {
transactionCids, err := pub.TransactionPutter.DagPut(blockBody)
if err != nil {
return nil, err
@ -137,14 +141,15 @@ func (pub *IPLDPublisher) publishTransactions(blockBody *types.Body) (map[common
if len(transactionCids) != len(blockBody.Transactions) {
return nil, errors.New("expected one CID for each transaction")
}
mappedTrxCids := make(map[common.Hash]string, len(transactionCids))
mappedTrxCids := make(map[common.Hash]*TrxMetaData, len(transactionCids))
for i, trx := range blockBody.Transactions {
mappedTrxCids[trx.Hash()] = transactionCids[i]
mappedTrxCids[trx.Hash()] = trxMeta[i]
mappedTrxCids[trx.Hash()].CID = transactionCids[i]
}
return mappedTrxCids, nil
}
func (pub *IPLDPublisher) publishReceipts(receipts types.Receipts) (map[common.Hash]string, error) {
func (pub *Publisher) publishReceipts(receipts types.Receipts, receiptMeta []*ReceiptMetaData) (map[common.Hash]*ReceiptMetaData, error) {
receiptsCids, err := pub.ReceiptPutter.DagPut(receipts)
if err != nil {
return nil, err
@ -152,14 +157,15 @@ func (pub *IPLDPublisher) publishReceipts(receipts types.Receipts) (map[common.H
if len(receiptsCids) != len(receipts) {
return nil, errors.New("expected one CID for each receipt")
}
mappedRctCids := make(map[common.Hash]string, len(receiptsCids))
mappedRctCids := make(map[common.Hash]*ReceiptMetaData, len(receiptsCids))
for i, rct := range receipts {
mappedRctCids[rct.TxHash] = receiptsCids[i]
mappedRctCids[rct.TxHash] = receiptMeta[i]
mappedRctCids[rct.TxHash].CID = receiptsCids[i]
}
return mappedRctCids, nil
}
func (pub *IPLDPublisher) publishStateLeafs(stateLeafs map[common.Hash][]byte) (map[common.Hash]string, error) {
func (pub *Publisher) publishStateLeafs(stateLeafs map[common.Hash][]byte) (map[common.Hash]string, error) {
stateLeafCids := make(map[common.Hash]string)
for addr, leaf := range stateLeafs {
stateLeafCid, err := pub.StatePutter.DagPut(leaf)
@ -174,7 +180,7 @@ func (pub *IPLDPublisher) publishStateLeafs(stateLeafs map[common.Hash][]byte) (
return stateLeafCids, nil
}
func (pub *IPLDPublisher) publishStorageLeafs(storageLeafs map[common.Hash]map[common.Hash][]byte) (map[common.Hash]map[common.Hash]string, error) {
func (pub *Publisher) publishStorageLeafs(storageLeafs map[common.Hash]map[common.Hash][]byte) (map[common.Hash]map[common.Hash]string, error) {
storageLeafCids := make(map[common.Hash]map[common.Hash]string)
for addr, storageTrie := range storageLeafs {
storageLeafCids[addr] = make(map[common.Hash]string)

View File

@ -15,3 +15,28 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers/mocks"
)
var _ = Describe("Publisher", func() {
Describe("Publish", func() {
It("Publishes IPLDPayload to IPFS", func() {
mockPublisher := mocks.IPLDPublisher{}
mockPublisher.ReturnCIDPayload = &test_helpers.MockCIDPayload
cidPayload, err := mockPublisher.Publish(&test_helpers.MockIPLDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(cidPayload).To(Equal(&test_helpers.MockCIDPayload))
Expect(mockPublisher.PassedIPLDPayload).To(Equal(&test_helpers.MockIPLDPayload))
})
It("Fails if", func() {
})
})
})

View File

@ -18,29 +18,30 @@ package ipfs
import (
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)
// Repository is an interface for indexing CIDPayloads
type Repository interface {
// CIDRepository is an interface for indexing CIDPayloads
type CIDRepository interface {
Index(cidPayload *CIDPayload) error
}
// CIDRepository is the underlying struct for the Repository interface
type CIDRepository struct {
// Repository is the underlying struct for the CIDRepository interface
type Repository struct {
db *postgres.DB
}
// NewCIDRepository creates a new pointer to a CIDRepository which satisfies the Repository interface
func NewCIDRepository(db *postgres.DB) *CIDRepository {
return &CIDRepository{
// NewCIDRepository creates a new pointer to a Repository which satisfies the CIDRepository interface
func NewCIDRepository(db *postgres.DB) *Repository {
return &Repository{
db: db,
}
}
// IndexCIDs indexes a cidPayload in Postgres
func (repo *CIDRepository) Index(cidPayload *CIDPayload) error {
// Index indexes a cidPayload in Postgres
func (repo *Repository) Index(cidPayload *CIDPayload) error {
tx, _ := repo.db.Beginx()
headerID, err := repo.indexHeaderCID(tx, cidPayload.HeaderCID, cidPayload.BlockNumber, cidPayload.BlockHash)
if err != nil {
@ -60,7 +61,7 @@ func (repo *CIDRepository) Index(cidPayload *CIDPayload) error {
return tx.Commit()
}
func (repo *CIDRepository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash string) (int64, error) {
func (repo *Repository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash string) (int64, error) {
var headerID int64
err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, cid) VALUES ($1, $2, $3)
ON CONFLICT DO UPDATE SET cid = $3
@ -69,19 +70,19 @@ func (repo *CIDRepository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash st
return headerID, err
}
func (repo *CIDRepository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for hash, trxCid := range payload.TransactionCIDs {
func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for hash, trxCidMeta := range payload.TransactionCIDs {
var txID int64
err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid) VALUES ($1, $2, $3)
ON CONFLICT DO UPDATE SET cid = $3
RETURNING id`,
headerID, hash.Hex(), trxCid).Scan(&txID)
err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT DO UPDATE SET (cid, dst, src) = ($3, $4, $5)
RETURNING id`,
headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.To, trxCidMeta.From).Scan(&txID)
if err != nil {
return err
}
receiptCid, ok := payload.ReceiptCIDs[hash]
receiptCidMeta, ok := payload.ReceiptCIDs[hash]
if ok {
err = repo.indexReceiptCID(tx, receiptCid, txID)
err = repo.indexReceiptCID(tx, receiptCidMeta, txID)
if err != nil {
return err
}
@ -90,18 +91,18 @@ func (repo *CIDRepository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *
return nil
}
func (repo *CIDRepository) indexReceiptCID(tx *sqlx.Tx, cid string, txId int64) error {
_, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid) VALUES ($1, $2)
ON CONFLICT DO UPDATE SET cid = $2`, txId, cid)
func (repo *Repository) indexReceiptCID(tx *sqlx.Tx, cidMeta *ReceiptMetaData, txID int64) error {
_, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, topic0s) VALUES ($1, $2, $3)
ON CONFLICT DO UPDATE SET (cid, topic0s) = ($2, $3)`, txID, cidMeta.CID, pq.Array(cidMeta.Topic0s))
return err
}
func (repo *CIDRepository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for accountKey, stateCID := range payload.StateLeafCIDs {
var stateID int64
err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, account_key, cid) VALUES ($1, $2, $3)
ON CONFLICT DO UPDATE SET cid = $3
RETURNING id`,
err := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, account_key, cid) VALUES ($1, $2, $3)
ON CONFLICT DO UPDATE SET cid = $3
RETURNING id`,
headerID, accountKey.Hex(), stateCID).Scan(&stateID)
if err != nil {
return err
@ -116,8 +117,8 @@ func (repo *CIDRepository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPay
return nil
}
func (repo *CIDRepository) indexStorageCID(tx *sqlx.Tx, key, cid string, stateId int64) error {
func (repo *Repository) indexStorageCID(tx *sqlx.Tx, key, cid string, stateID int64) error {
_, err := repo.db.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid) VALUES ($1, $2, $3)
ON CONFLICT DO UPDATE SET cid = $3`, stateId, key, cid)
ON CONFLICT DO UPDATE SET cid = $3`, stateID, key, cid)
return err
}
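Note that Postgres expects a conflict target for ON CONFLICT DO UPDATE. A hypothetical variant of the transaction upsert that names the UNIQUE (header_id, tx_hash) constraint from the migration above; this is an assumption about intent, not code from this commit:

package example // illustrative only

import "github.com/jmoiron/sqlx"

// upsertTransactionCID sketches the insert with an explicit conflict target.
func upsertTransactionCID(tx *sqlx.Tx, headerID int64, hash, cid, dst, src string) (int64, error) {
	var txID int64
	err := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
			ON CONFLICT (header_id, tx_hash) DO UPDATE SET cid = $3, dst = $4, src = $5
			RETURNING id`,
		headerID, hash, cid, dst, src).Scan(&txID)
	return txID, err
}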

View File

@ -15,3 +15,26 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers/mocks"
)
var _ = Describe("Repository", func() {
Describe("Index", func() {
It("Indexes CIDs against their metadata", func() {
mockRepo := mocks.CIDRepository{}
err := mockRepo.Index(&test_helpers.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
Expect(mockRepo.PassedCIDPayload).To(Equal(&test_helpers.MockCIDPayload))
})
It("Fails if", func() {
})
})
})

View File

@ -23,25 +23,25 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/core"
)
// Streamer is the interface for streaming a statediff subscription
type Streamer interface {
// StateDiffStreamer is the interface for streaming a statediff subscription
type StateDiffStreamer interface {
Stream(payloadChan chan statediff.Payload) (*rpc.ClientSubscription, error)
}
// StateDiffStreamer is the underlying struct for the Streamer interface
type StateDiffStreamer struct {
// Streamer is the underlying struct for the StateDiffStreamer interface
type Streamer struct {
Client core.RpcClient
PayloadChan chan statediff.Payload
}
// NewStateDiffStreamer creates a pointer to a new StateDiffStreamer which satisfies the Streamer interface
func NewStateDiffSyncer(client core.RpcClient) *StateDiffStreamer {
return &StateDiffStreamer{
// NewStateDiffStreamer creates a pointer to a new Streamer which satisfies the StateDiffStreamer interface
func NewStateDiffStreamer(client core.RpcClient) *Streamer {
return &Streamer{
Client: client,
}
}
// Stream is the main loop for subscribing to data from the Geth state diff process
func (i *StateDiffStreamer) Stream(payloadChan chan statediff.Payload) (*rpc.ClientSubscription, error) {
return i.Client.Subscribe("statediff", i.PayloadChan)
func (sds *Streamer) Stream(payloadChan chan statediff.Payload) (*rpc.ClientSubscription, error) {
return sds.Client.Subscribe("statediff", sds.PayloadChan)
}

View File

@ -15,3 +15,41 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs_test
import (
"sync"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/test_helpers/mocks"
)
var _ = Describe("Streamer", func() {
Describe("Stream", func() {
It("Streams StatediffPayloads from a Geth RPC subscription", func() {
wg := new(sync.WaitGroup)
mockStreamer := mocks.StateDiffStreamer{}
mockStreamer.ReturnSub = &rpc.ClientSubscription{}
mockStreamer.WaitGroup = wg
mockStreamer.StreamPayloads = []statediff.Payload{
test_helpers.MockStatediffPayload,
}
payloadChan := make(chan statediff.Payload, 1)
sub, err := mockStreamer.Stream(payloadChan)
wg.Wait()
Expect(err).ToNot(HaveOccurred())
Expect(sub).To(Equal(&rpc.ClientSubscription{}))
Expect(mockStreamer.PassedPayloadChan).To(Equal(payloadChan))
streamedPayload := <-payloadChan
Expect(streamedPayload).To(Equal(test_helpers.MockStatediffPayload))
})
It("Fails if", func() {
})
})
})

View File

@ -1,17 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test_helpers

View File

@ -15,3 +15,22 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload statediff.Payload
ReturnIPLDPayload *ipfs.IPLDPayload
ReturnErr error
}
// Convert method is used to convert a geth statediff.Payload to a IPLDPayload
func (pc *PayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
pc.PassedStatediffPayload = payload
return pc.ReturnIPLDPayload, pc.ReturnErr
}

View File

@ -15,3 +15,20 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
PassedIPLDPayload *ipfs.IPLDPayload
ReturnCIDPayload *ipfs.CIDPayload
ReturnErr error
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
pub.PassedIPLDPayload = payload
return pub.ReturnCIDPayload, pub.ReturnErr
}

View File

@ -15,3 +15,17 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import "github.com/vulcanize/vulcanizedb/pkg/ipfs"
// CIDRepository is the underlying struct for the Repository interface
type CIDRepository struct {
PassedCIDPayload *ipfs.CIDPayload
ReturnErr error
}
// Index indexes a cidPayload in Postgres
func (repo *CIDRepository) Index(cidPayload *ipfs.CIDPayload) error {
repo.PassedCIDPayload = cidPayload
return repo.ReturnErr
}

View File

@ -15,3 +15,34 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"sync"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
)
// StateDiffStreamer is the underlying struct for the Streamer interface
type StateDiffStreamer struct {
PassedPayloadChan chan statediff.Payload
ReturnSub *rpc.ClientSubscription
ReturnErr error
StreamPayloads []statediff.Payload
WaitGroup *sync.WaitGroup
}
// Stream is the main loop for subscribing to data from the Geth state diff process
func (sds *StateDiffStreamer) Stream(payloadChan chan statediff.Payload) (*rpc.ClientSubscription, error) {
sds.PassedPayloadChan = payloadChan
// Register with the WaitGroup before launching the goroutine so a caller's Wait cannot return early
sds.WaitGroup.Add(1)
go func() {
for _, payload := range sds.StreamPayloads {
sds.PassedPayloadChan <- payload
}
sds.WaitGroup.Done()
}()
return sds.ReturnSub, sds.ReturnErr
}

View File

@ -0,0 +1,179 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test_helpers
import (
"errors"
"math/big"
"math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)
// AddressToLeafKey hashes an address and returns the hash
func AddressToLeafKey(address common.Address) common.Hash {
return common.BytesToHash(crypto.Keccak256(address[:]))
}
// Test variables
var (
BlockNumber = rand.Int63()
BlockHash = "0xfa40fbe2d98d98b3363a778d52f2bcd29d6790b9b3f3cab2b167fd12d3550f73"
CodeHash = common.Hex2Bytes("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
NewNonceValue = rand.Uint64()
NewBalanceValue = rand.Int63()
ContractRoot = common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
StoragePath = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes()
StorageKey = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()
StorageValue = common.Hex2Bytes("0x03")
storage = []statediff.StorageDiff{{
Key: StorageKey,
Value: StorageValue,
Path: StoragePath,
Proof: [][]byte{},
}}
emptyStorage = make([]statediff.StorageDiff, 0)
address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
ContractLeafKey = AddressToLeafKey(address)
anotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherContractLeafKey = AddressToLeafKey(anotherAddress)
testAccount = state.Account{
Nonce: NewNonceValue,
Balance: big.NewInt(NewBalanceValue),
Root: ContractRoot,
CodeHash: CodeHash,
}
valueBytes, _ = rlp.EncodeToBytes(testAccount)
CreatedAccountDiffs = statediff.AccountDiffsMap{
ContractLeafKey: {
Key: ContractLeafKey.Bytes(),
Value: valueBytes,
Storage: storage,
},
AnotherContractLeafKey: {
Key: AnotherContractLeafKey.Bytes(),
Value: valueBytes,
Storage: emptyStorage,
},
}
UpdatedAccountDiffs = statediff.AccountDiffsMap{ContractLeafKey: {
Key: ContractLeafKey.Bytes(),
Value: valueBytes,
Storage: storage,
}}
DeletedAccountDiffs = statediff.AccountDiffsMap{ContractLeafKey: {
Key: ContractLeafKey.Bytes(),
Value: valueBytes,
Storage: storage,
}}
MockStateDiff = statediff.StateDiff{
BlockNumber: BlockNumber,
BlockHash: common.HexToHash(BlockHash),
CreatedAccounts: CreatedAccountDiffs,
DeletedAccounts: DeletedAccountDiffs,
UpdatedAccounts: UpdatedAccountDiffs,
}
MockStateDiffRlp, _ = rlp.EncodeToBytes(MockStateDiff)
mockTransaction1 = types.NewTransaction(0, common.HexToAddress("0x0"), big.NewInt(1000), 50, big.NewInt(100), nil)
mockTransaction2 = types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(2000), 100, big.NewInt(200), nil)
MockTransactions = types.Transactions{mockTransaction1, mockTransaction2}
mockReceipt1 = types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
mockReceipt2 = types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
MockReceipts = types.Receipts{mockReceipt1, mockReceipt2}
MockHeader = types.Header{
Time: 0,
Number: big.NewInt(1),
Root: common.HexToHash("0x0"),
TxHash: common.HexToHash("0x0"),
ReceiptHash: common.HexToHash("0x0"),
}
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts)
MockBlockRlp, _ = rlp.EncodeToBytes(MockBlock)
MockStatediffPayload = statediff.Payload{
BlockRlp: MockBlockRlp,
StateDiffRlp: MockStateDiffRlp,
Err: nil,
}
EmptyStatediffPayload = statediff.Payload{
BlockRlp: []byte{},
StateDiffRlp: []byte{},
Err: nil,
}
ErrStatediffPayload = statediff.Payload{
BlockRlp: []byte{},
StateDiffRlp: []byte{},
Err: errors.New("mock error"),
}
MockIPLDPayload = ipfs.IPLDPayload{}
MockCIDPayload = ipfs.CIDPayload{
BlockNumber: "1",
BlockHash: "0x0",
HeaderCID: "mockHeaderCID",
TransactionCIDs: map[common.Hash]*ipfs.TrxMetaData{
common.HexToHash("0x0"): {
CID: "mockTrxCID1",
To: "mockTo1",
From: "mockFrom1",
},
common.HexToHash("0x1"): {
CID: "mockTrxCID2",
To: "mockTo2",
From: "mockFrom2",
},
},
ReceiptCIDs: map[common.Hash]*ipfs.ReceiptMetaData{
common.HexToHash("0x0"): {
CID: "mockReceiptCID1",
Topic0s: []string{"mockTopic1"},
},
common.HexToHash("0x1"): {
CID: "mockReceiptCID2",
Topic0s: []string{"mockTopic1", "mockTopic2"},
},
},
StateLeafCIDs: map[common.Hash]string{
common.HexToHash("0x0"): "mockStateCID1",
common.HexToHash("0x1"): "mockStateCID2",
},
StorageLeafCIDs: map[common.Hash]map[common.Hash]string{
common.HexToHash("0x0"): {
common.HexToHash("0x0"): "mockStorageCID1",
},
common.HexToHash("0x1"): {
common.HexToHash("0x1"): "mockStorageCID2",
},
},
}
)

pkg/ipfs/types.go (new file)
View File

@ -0,0 +1,61 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"math/big"
)
// IPLDPayload is a custom type which packages ETH data for the IPFS publisher
type IPLDPayload struct {
HeaderRLP []byte
BlockNumber *big.Int
BlockHash common.Hash
BlockBody *types.Body
TrxMetaData []*TrxMetaData
Receipts types.Receipts
ReceiptMetaData []*ReceiptMetaData
StateLeafs map[common.Hash][]byte
StorageLeafs map[common.Hash]map[common.Hash][]byte
}
// CIDPayload is a struct to hold all the CIDs and their meta data
type CIDPayload struct {
BlockNumber string
BlockHash string
HeaderCID string
UncleCIDS map[common.Hash]string
TransactionCIDs map[common.Hash]*TrxMetaData
ReceiptCIDs map[common.Hash]*ReceiptMetaData
StateLeafCIDs map[common.Hash]string
StorageLeafCIDs map[common.Hash]map[common.Hash]string
}
// ReceiptMetaData wraps some additional data around our receipt CIDs for indexing
type ReceiptMetaData struct {
CID string
Topic0s []string
}
// TrxMetaData wraps some additional data around our transaction CID for indexing
type TrxMetaData struct {
CID string
To string
From string
}

View File

@ -0,0 +1,18 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
accounts/scwallet @gballet
accounts/abi @gballet
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
graphql/ @gballet
les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
p2p/simulations @zelig @nonsense @janos @justelad
p2p/protocols @zelig @nonsense @janos @justelad
p2p/testing @zelig @nonsense @janos @justelad
whisper/ @gballet @gluk256

View File

@ -0,0 +1,40 @@
# Contributing
Thank you for considering to help out with the source code! We welcome
contributions from anyone on the internet, and are grateful for even the
smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
pull request for the maintainers to review and merge into the main code base. If
you wish to submit more complex changes though, please check up with the core
devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
ensure those changes are in line with the general philosophy of the project
and/or get some early feedback which can make both your efforts much lighter as
well as our review and merge procedures quick and simple.
## Coding guidelines
Please make sure your contributions adhere to our coding guidelines:
* Code must adhere to the official Go
[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go
[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"
## Can I have feature X
Before you submit a feature request, please check and make sure that it isn't
possible through some other means. The JavaScript-enabled console is a powerful
feature in the right hands. Please check our
[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help.
## Configuration, dependencies, and tests
Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, managing project dependencies
and testing procedures.

View File

@ -0,0 +1,26 @@
Hi there,
please note that this is an issue tracker reserved for bug reports and feature requests.
For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.
#### System information
Geth version: `geth version`
OS & Version: Windows/Linux/OSX
Commit hash : (if `develop`)
#### Expected behaviour
#### Actual behaviour
#### Steps to reproduce the behaviour
#### Backtrace
````
[backtrace]
````

View File

@ -0,0 +1,11 @@
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
responseRequiredLabel: "need:more-information"
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
to our request for more information from the original author. With only the
information that is currently in the issue, we don't have enough information
to take action. Please reach out if you have more relevant information or
answers to our questions so that we can investigate further.

View File

@ -0,0 +1,17 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 366
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 42
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: "status:inactive"
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

View File

@ -0,0 +1,505 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package backends
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend.
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
var errBlockNumberUnsupported = errors.New("SimulatedBackend cannot access blocks other than the latest block")
var errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
// the background. Its main purpose is to allow easily testing contract bindings.
type SimulatedBackend struct {
database ethdb.Database // In memory database to store our testing data
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
mu sync.Mutex
pendingBlock *types.Block // Currently pending block that will be imported on request
pendingState *state.StateDB // Currently pending state that will be the active one on request
events *filters.EventSystem // Event system for filtering log events live
config *params.ChainConfig
}
// NewSimulatedBackend creates a new binding backend using a simulated blockchain
// for testing purposes.
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
database := rawdb.NewMemoryDatabase()
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil)
backend := &SimulatedBackend{
database: database,
blockchain: blockchain,
config: genesis.Config,
events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
}
backend.rollback()
return backend
}
// Commit imports all the pending transactions as a single block and starts a
// fresh new state.
func (b *SimulatedBackend) Commit() {
b.mu.Lock()
defer b.mu.Unlock()
if _, err := b.blockchain.InsertChain([]*types.Block{b.pendingBlock}); err != nil {
panic(err) // This cannot happen unless the simulator is wrong, fail in that case
}
b.rollback()
}
// Rollback aborts all pending transactions, reverting to the last committed state.
func (b *SimulatedBackend) Rollback() {
b.mu.Lock()
defer b.mu.Unlock()
b.rollback()
}
func (b *SimulatedBackend) rollback() {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
statedb, _ := b.blockchain.State()
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
}
// CodeAt returns the code associated with a certain account in the blockchain.
func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return nil, errBlockNumberUnsupported
}
statedb, _ := b.blockchain.State()
return statedb.GetCode(contract), nil
}
// BalanceAt returns the wei balance of a certain account in the blockchain.
func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (*big.Int, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return nil, errBlockNumberUnsupported
}
statedb, _ := b.blockchain.State()
return statedb.GetBalance(contract), nil
}
// NonceAt returns the nonce of a certain account in the blockchain.
func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address, blockNumber *big.Int) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return 0, errBlockNumberUnsupported
}
statedb, _ := b.blockchain.State()
return statedb.GetNonce(contract), nil
}
// StorageAt returns the value of key in the storage of an account in the blockchain.
func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return nil, errBlockNumberUnsupported
}
statedb, _ := b.blockchain.State()
val := statedb.GetState(contract, key)
return val[:], nil
}
// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash)
return receipt, nil
}
// TransactionByHash checks the pool of pending transactions in addition to the
// blockchain. The isPending return value indicates whether the transaction has been
// mined yet. Note that the transaction may not be part of the canonical chain even if
// it's not pending.
func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) {
b.mu.Lock()
defer b.mu.Unlock()
tx := b.pendingBlock.Transaction(txHash)
if tx != nil {
return tx, true, nil
}
tx, _, _, _ = rawdb.ReadTransaction(b.database, txHash)
if tx != nil {
return tx, false, nil
}
return nil, false, ethereum.NotFound
}
// PendingCodeAt returns the code associated with an account in the pending state.
func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.pendingState.GetCode(contract), nil
}
// CallContract executes a contract call.
func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
return nil, errBlockNumberUnsupported
}
state, err := b.blockchain.State()
if err != nil {
return nil, err
}
rval, _, _, err := b.callContract(ctx, call, b.blockchain.CurrentBlock(), state)
return rval, err
}
// PendingCallContract executes a contract call on the pending state.
func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
rval, _, _, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
return rval, err
}
// PendingNonceAt implements PendingStateReader.PendingNonceAt, retrieving
// the nonce currently pending for the account.
func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()
return b.pendingState.GetOrNewStateObject(account).Nonce(), nil
}
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
return big.NewInt(1), nil
}
// EstimateGas executes the requested code against the currently pending block/state and
// returns the used amount of gas.
func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
b.mu.Lock()
defer b.mu.Unlock()
// Determine the lowest and highest possible gas limits to binary search in between
var (
lo uint64 = params.TxGas - 1
hi uint64
cap uint64
)
if call.Gas >= params.TxGas {
hi = call.Gas
} else {
hi = b.pendingBlock.GasLimit()
}
cap = hi
// Create a helper to check if a gas allowance results in an executable transaction
executable := func(gas uint64) bool {
call.Gas = gas
snapshot := b.pendingState.Snapshot()
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
b.pendingState.RevertToSnapshot(snapshot)
if err != nil || failed {
return false
}
return true
}
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
if !executable(mid) {
lo = mid
} else {
hi = mid
}
}
// Reject the transaction as invalid if it still fails at the highest allowance
if hi == cap {
if !executable(hi) {
return 0, errGasEstimationFailed
}
}
return hi, nil
}
// callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb *state.StateDB) ([]byte, uint64, bool, error) {
// Ensure message is initialized properly.
if call.GasPrice == nil {
call.GasPrice = big.NewInt(1)
}
if call.Gas == 0 {
call.Gas = 50000000
}
if call.Value == nil {
call.Value = new(big.Int)
}
// Set infinite balance to the fake caller account.
from := statedb.GetOrNewStateObject(call.From)
from.SetBalance(math.MaxBig256)
// Execute the call.
msg := callmsg{call}
evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil)
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(evmContext, statedb, b.config, vm.Config{})
gaspool := new(core.GasPool).AddGas(math.MaxUint64)
return core.NewStateTransition(vmenv, msg, gaspool).TransitionDb()
}
// SendTransaction updates the pending block to include the given transaction.
// It panics if the transaction is invalid.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
b.mu.Lock()
defer b.mu.Unlock()
sender, err := types.Sender(types.HomesteadSigner{}, tx)
if err != nil {
panic(fmt.Errorf("invalid transaction: %v", err))
}
nonce := b.pendingState.GetNonce(sender)
if tx.Nonce() != nonce {
panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
}
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTxWithChain(b.blockchain, tx)
}
block.AddTxWithChain(b.blockchain, tx)
})
statedb, _ := b.blockchain.State()
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
return nil
}
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaries to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
}
// Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
}
res := make([]types.Log, len(logs))
for i, log := range logs {
res[i] = *log
}
return res, nil
}
// SubscribeFilterLogs creates a background log filtering operation, returning a
// subscription immediately, which can be used to stream the found events.
func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {
// Subscribe to contract events
sink := make(chan []*types.Log)
sub, err := b.events.SubscribeLogs(query, sink)
if err != nil {
return nil, err
}
// Since we're getting logs in batches, we need to flatten them into a plain stream
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case logs := <-sink:
for _, log := range logs {
select {
case ch <- *log:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// AdjustTime adds a time shift to the simulated clock.
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
b.mu.Lock()
defer b.mu.Unlock()
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
}
block.OffsetTime(int64(adjustment.Seconds()))
})
statedb, _ := b.blockchain.State()
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
return nil
}
// callmsg implements core.Message to allow passing it as a transaction simulator.
type callmsg struct {
ethereum.CallMsg
}
func (m callmsg) From() common.Address { return m.CallMsg.From }
func (m callmsg) Nonce() uint64 { return 0 }
func (m callmsg) CheckNonce() bool { return false }
func (m callmsg) To() *common.Address { return m.CallMsg.To }
func (m callmsg) GasPrice() *big.Int { return m.CallMsg.GasPrice }
func (m callmsg) Gas() uint64 { return m.CallMsg.Gas }
func (m callmsg) Value() *big.Int { return m.CallMsg.Value }
func (m callmsg) Data() []byte { return m.CallMsg.Data }
// filterBackend implements filters.Backend to support filtering for logs without
// taking bloom-bits acceleration structures into account.
type filterBackend struct {
db ethdb.Database
bc *core.BlockChain
}
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
if block == rpc.LatestBlockNumber {
return fb.bc.CurrentHeader(), nil
}
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}
func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return fb.bc.GetHeaderByHash(hash), nil
}
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
return rawdb.ReadReceipts(fb.db, hash, *number), nil
}
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
return nil, nil
}
receipts := rawdb.ReadReceipts(fb.db, hash, *number)
if receipts == nil {
return nil, nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
}
return logs, nil
}
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
})
}
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return fb.bc.SubscribeChainEvent(ch)
}
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return fb.bc.SubscribeRemovedLogsEvent(ch)
}
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return fb.bc.SubscribeLogsEvent(ch)
}
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
panic("not supported")
}

View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -0,0 +1 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}

View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -0,0 +1,21 @@
This directory contains accounts for testing.
The passphrase that unlocks them is "foobar".
The "good" key files which are supposed to be loadable are:
- File: UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Address: 0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8
- File: aaa
Address: 0xf466859ead1932d743d622cb74fc058882e8648a
- File: zzz
Address: 0x289d485d9771714cce91d3393d764e1311907acc
The other files (including this README) are broken in various ways
and should not be picked up by package accounts:
- File: no-address (missing address field, otherwise same as "aaa")
- File: garbage (file with random data)
- File: empty (file with no content)
- File: swapfile~ (should be skipped)
- File: .hiddenfile (should be skipped)
- File: foo/... (should be skipped because it is a directory)
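As an aside for this write-up (not part of the upstream testdata), a minimal Go sketch of how one of the "good" key files above could be unlocked with the stated passphrase; the file path is illustrative:
package main
import (
    "fmt"
    "io/ioutil"
    "github.com/ethereum/go-ethereum/accounts/keystore"
)
func main() {
    // Read one of the "good" key files and unlock it with the passphrase "foobar".
    keyjson, err := ioutil.ReadFile("aaa") // illustrative path to a testdata key file
    if err != nil {
        panic(err)
    }
    key, err := keystore.DecryptKey(keyjson, "foobar")
    if err != nil {
        panic(err)
    }
    fmt.Println("unlocked address:", key.Address.Hex())
}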

View File

@ -0,0 +1 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}

View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -0,0 +1 @@
{"address":"fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e","crypto":{"cipher":"aes-128-ctr","ciphertext":"8124d5134aa4a927c79fd852989e4b5419397566f04b0936a1eb1d168c7c68a5","cipherparams":{"iv":"e2febe17176414dd2cda28287947eb2f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"44b415ede89f3bdd6830390a21b78965f571b347a589d1d943029f016c5e8bd5"},"mac":"5e149ff25bfd9dd45746a84bb2bcd2f015f2cbca2b6d25c5de8c29617f71fe5b"},"id":"d6ac5452-2b2c-4d3c-ad80-4bf0327d971c","version":3}

Binary file not shown.

View File

@ -0,0 +1 @@
{"crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -0,0 +1 @@
{"address":"0000000000000000000000000000000000000000","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

View File

@ -0,0 +1 @@
{"address":"289d485d9771714cce91d3393d764e1311907acc","crypto":{"cipher":"aes-128-ctr","ciphertext":"faf32ca89d286b107f5e6d842802e05263c49b78d46eac74e6109e9a963378ab","cipherparams":{"iv":"558833eec4a665a8c55608d7d503407d"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"d571fff447ffb24314f9513f5160246f09997b857ac71348b73e785aab40dc04"},"mac":"21edb85ff7d0dab1767b9bf498f2c3cb7be7609490756bd32300bb213b59effe"},"id":"3279afcf-55ba-43ff-8997-02dcc46a6525","version":3}

View File

@ -0,0 +1 @@
{"address":"cb61d5a9c4896fb9658090b597ef0e7be6f7b67e","Crypto":{"cipher":"aes-128-cbc","ciphertext":"6143d3192db8b66eabd693d9c4e414dcfaee52abda451af79ccf474dafb35f1bfc7ea013aa9d2ee35969a1a2e8d752d0","cipherparams":{"iv":"35337770fc2117994ecdcad026bccff4"},"kdf":"scrypt","kdfparams":{"n":262144,"r":8,"p":1,"dklen":32,"salt":"9afcddebca541253a2f4053391c673ff9fe23097cd8555d149d929e4ccf1257f"},"mac":"3f3d5af884b17a100b0b3232c0636c230a54dc2ac8d986227219b0dd89197644","version":"1"},"id":"e25f7c1f-d318-4f29-b62c-687190d4d299","version":"1"}

View File

@ -0,0 +1,28 @@
{
"test1": {
"json": {
"Crypto": {
"cipher": "aes-128-cbc",
"cipherparams": {
"iv": "35337770fc2117994ecdcad026bccff4"
},
"ciphertext": "6143d3192db8b66eabd693d9c4e414dcfaee52abda451af79ccf474dafb35f1bfc7ea013aa9d2ee35969a1a2e8d752d0",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 262144,
"p": 1,
"r": 8,
"salt": "9afcddebca541253a2f4053391c673ff9fe23097cd8555d149d929e4ccf1257f"
},
"mac": "3f3d5af884b17a100b0b3232c0636c230a54dc2ac8d986227219b0dd89197644",
"version": "1"
},
"address": "cb61d5a9c4896fb9658090b597ef0e7be6f7b67e",
"id": "e25f7c1f-d318-4f29-b62c-687190d4d299",
"version": "1"
},
"password": "g",
"priv": "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d"
}
}

View File

@ -0,0 +1,97 @@
{
"wikipage_test_vector_scrypt": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "83dbcc02d8ccb40e466191a123791e0e"
},
"ciphertext" : "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 262144,
"r" : 1,
"p" : 8,
"salt" : "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19"
},
"mac" : "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097"
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version" : 3
},
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
},
"wikipage_test_vector_pbkdf2": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "6087dab2f9fdbbfaddc31a909735c1e6"
},
"ciphertext" : "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf" : "pbkdf2",
"kdfparams" : {
"c" : 262144,
"dklen" : 32,
"prf" : "hmac-sha256",
"salt" : "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd"
},
"mac" : "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2"
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version" : 3
},
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
},
"31_byte_key": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "e0c41130a323adc1446fc82f724bca2f"
},
"ciphertext" : "9517cd5bdbe69076f9bf5057248c6c050141e970efa36ce53692d5d59a3984",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 2,
"r" : 8,
"p" : 1,
"salt" : "711f816911c92d649fb4c84b047915679933555030b3552c1212609b38208c63"
},
"mac" : "d5e116151c6aa71470e67a7d42c9620c75c4d23229847dcc127794f0732b0db5"
},
"id" : "fecfc4ce-e956-48fd-953b-30f8b52ed66c",
"version" : 3
},
"password": "foo",
"priv": "fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35"
},
"30_byte_key": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "3ca92af36ad7c2cd92454c59cea5ef00"
},
"ciphertext" : "108b7d34f3442fc26ab1ab90ca91476ba6bfa8c00975a49ef9051dc675aa",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 2,
"r" : 8,
"p" : 1,
"salt" : "d0769e608fb86cda848065642a9c6fa046845c928175662b8e356c77f914cd3b"
},
"mac" : "75d0e6759f7b3cefa319c3be41680ab6beea7d8328653474bd06706d4cc67420"
},
"id" : "a37e1559-5955-450d-8075-7b8931b392b2",
"version" : 3
},
"password": "foo",
"priv": "81c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018"
}
}
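For this write-up only (not part of the upstream test data), a hedged Go sketch of how such a vector could be checked: it decrypts the scrypt wiki vector with its stated password and compares the recovered private key against the expected hex; the filename is illustrative.
package main
import (
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "github.com/ethereum/go-ethereum/accounts/keystore"
    "github.com/ethereum/go-ethereum/crypto"
)
// vector mirrors one entry of the test-vector file above: the keystore JSON,
// the password that unlocks it, and the expected private key in hex.
type vector struct {
    JSON     json.RawMessage `json:"json"`
    Password string          `json:"password"`
    Priv     string          `json:"priv"`
}
func main() {
    raw, err := ioutil.ReadFile("v3_test_vector.json") // illustrative filename
    if err != nil {
        panic(err)
    }
    vectors := make(map[string]vector)
    if err := json.Unmarshal(raw, &vectors); err != nil {
        panic(err)
    }
    v := vectors["wikipage_test_vector_scrypt"]
    key, err := keystore.DecryptKey(v.JSON, v.Password)
    if err != nil {
        panic(err)
    }
    got := hex.EncodeToString(crypto.FromECDSA(key.PrivateKey))
    fmt.Println("private key matches expectation:", got == v.Priv)
}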

View File

@ -0,0 +1 @@
{"address":"45dea0fb0bba44f4fcf290bba71fd57d7117cbb8","crypto":{"cipher":"aes-128-ctr","ciphertext":"b87781948a1befd247bff51ef4063f716cf6c2d3481163e9a8f42e1f9bb74145","cipherparams":{"iv":"dc4926b48a105133d2f16b96833abf1e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"004244bbdc51cadda545b1cfa43cff9ed2ae88e08c61f1479dbb45410722f8f0"},"mac":"39990c1684557447940d4c69e06b1b82b2aceacb43f284df65c956daf3046b85"},"id":"ce541d8d-c79b-40f8-9f8c-20f59616faba","version":3}

View File

@ -0,0 +1,51 @@
# Debian Packaging
Tagged releases and develop branch commits are available as installable Debian packages
for Ubuntu. Packages are built for all Ubuntu versions that are supported by
Canonical.
Packages of develop branch commits have the suffix -unstable and cannot be installed alongside
the stable version. Switching between release streams requires user intervention.
## Launchpad
The packages are built and served by launchpad.net. We generate a Debian source package
for each distribution and upload it. Their builder picks up the source package, builds it
and installs the new version into the PPA repository. Launchpad requires a valid signature
by a team member for source package uploads.
The signing key is stored in an environment variable which Travis CI makes available to
certain builds. Since Travis CI doesn't support FTP, SFTP is used to transfer the
packages. To set this up yourself, you need to create a Launchpad user and add a GPG key
and SSH key to it. Then encode both keys as base64 and configure 'secret' environment
variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis.
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
version that is available in the main Ubuntu repository. In order to make this possible,
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
golang-1.11, which is co-installable alongside the regular golang package. PPA dependencies
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
## Building Packages Locally (for testing)
You need to run Ubuntu to do test packaging.
Add the gophers PPA and install Go 1.11 and Debian packaging tools:
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
$ sudo apt-get update
$ sudo apt-get install build-essential golang-1.11 devscripts debhelper python-bzrlib python-paramiko
Create the source packages:
$ go run build/ci.go debsrc -workdir dist
Then go into the source package directory for your running distribution and build the package:
$ cd dist/ethereum-unstable-1.6.0+xenial
$ dpkg-buildpackage
Built packages are placed in the dist/ directory.
$ cd ..
$ dpkg-deb -c geth-unstable_1.6.0+xenial_amd64.deb

1107
vendor/github.com/ethereum/go-ethereum/build/ci.go generated vendored Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,19 @@
#!/bin/sh
# Cleaning the Go cache only makes sense if we actually have Go installed... or
# if Go is actually callable. This does not hold true during deb packaging, so
# we need an explicit check to avoid build failures.
if ! command -v go > /dev/null; then
exit
fi
version_gt() {
test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
}
golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')
# Clean go build cache when go version is greater than or equal to 1.10
if !(version_gt 1.10 $golang_version); then
go clean -cache
fi

View File

@ -0,0 +1,5 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
* git build of {{.Env.Commit}}
-- {{.Author}} {{.Time}}

View File

@ -0,0 +1,19 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.11
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum
{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
{{.Description}}
{{end}}

View File

@ -0,0 +1,14 @@
Copyright 2018 The go-ethereum Authors
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1 @@
AUTHORS

View File

@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin

View File

@ -0,0 +1,16 @@
#!/usr/bin/make -f
# -*- makefile -*-
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
# Launchpad rejects Go's access to $HOME/.cache, use custom folder
export GOCACHE=/tmp/go-build
override_dh_auto_build:
build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:
%:
dh $@

View File

@ -0,0 +1,5 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
* git build of {{.Env.Commit}}
-- {{.Author}} {{.Time}}

View File

@ -0,0 +1,25 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.11
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum
Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth, swarm, and other tools
Meta-package to install geth, swarm and other tools
{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
{{.Description}}
{{end}}

View File

@ -0,0 +1,14 @@
Copyright 2018 The go-ethereum Authors
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1 @@
AUTHORS

View File

@ -0,0 +1 @@
build/bin/{{.BinaryName}} usr/bin

View File

@ -0,0 +1,16 @@
#!/usr/bin/make -f
# -*- makefile -*-
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
# Launchpad rejects Go's access to $HOME/.cache, use custom folder
export GOCACHE=/tmp/go-build
override_dh_auto_build:
build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:
%:
dh $@

30
vendor/github.com/ethereum/go-ethereum/build/env.sh generated vendored Executable file
View File

@ -0,0 +1,30 @@
#!/bin/sh
set -e
if [ ! -f "build/env.sh" ]; then
echo "$0 must be run from the root of the repository."
exit 2
fi
# Create fake Go workspace if it doesn't exist yet.
workspace="$PWD/build/_workspace"
root="$PWD"
ethdir="$workspace/src/github.com/ethereum"
if [ ! -L "$ethdir/go-ethereum" ]; then
mkdir -p "$ethdir"
cd "$ethdir"
ln -s ../../../../../. go-ethereum
cd "$root"
fi
# Set up the environment to use the workspace.
GOPATH="$workspace"
export GOPATH
# Run the command inside the workspace.
cd "$ethdir/go-ethereum"
PWD="$ethdir/go-ethereum"
# Launch the arguments with the configured environment.
exec "$@"

18
vendor/github.com/ethereum/go-ethereum/build/goimports.sh generated vendored Executable file
View File

@ -0,0 +1,18 @@
#!/bin/sh
find_files() {
find . ! \( \
\( \
-path '.github' \
-o -path './build/_workspace' \
-o -path './build/bin' \
-o -path './crypto/bn256' \
-o -path '*/vendor/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s -w"
GOIMPORTS="goimports -w"
find_files | xargs $GOFMT
find_files | xargs $GOIMPORTS

57
vendor/github.com/ethereum/go-ethereum/build/mvn.pom generated vendored Normal file
View File

@ -0,0 +1,57 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.ethereum</groupId>
<artifactId>geth</artifactId>
<version>{{.Version}}</version>
<packaging>aar</packaging>
<name>Android Ethereum Client</name>
<description>Android port of the go-ethereum libraries and node</description>
<url>https://github.com/ethereum/go-ethereum</url>
<inceptionYear>2015</inceptionYear>
<licenses>
<license>
<name>GNU Lesser General Public License, Version 3.0</name>
<url>https://www.gnu.org/licenses/lgpl-3.0.en.html</url>
<distribution>repo</distribution>
</license>
</licenses>
<organization>
<name>Ethereum</name>
<url>https://ethereum.org</url>
</organization>
<developers>
<developer>
<id>karalabe</id>
<name>Péter Szilágyi</name>
<email>peterke@gmail.com</email>
<url>https://github.com/karalabe</url>
<properties>
<picUrl>https://www.gravatar.com/avatar/2ecbf0f5b4b79eebf8c193e5d324357f?s=256</picUrl>
</properties>
</developer>
</developers>
<contributors>{{range .Contributors}}
<contributor>
<name>{{.Name}}</name>
<email>{{.Email}}</email>
</contributor>{{end}}
</contributors>
<issueManagement>
<system>GitHub Issues</system>
<url>https://github.com/ethereum/go-ethereum/issues/</url>
</issueManagement>
<scm>
<url>https://github.com/ethereum/go-ethereum</url>
</scm>
</project>

View File

@ -0,0 +1,24 @@
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
http://maven.apache.org/xsd/settings-1.0.0.xsd">
<servers>
<server>
<id>ossrh</id>
<username>${env.ANDROID_SONATYPE_USERNAME}</username>
<password>${env.ANDROID_SONATYPE_PASSWORD}</password>
</server>
</servers>
<profiles>
<profile>
<id>ossrh</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<properties>
<gpg.executable>gpg</gpg.executable>
<gpg.passphrase></gpg.passphrase>
</properties>
</profile>
</profiles>
</settings>

View File

@ -0,0 +1,327 @@
/**
* EnvVarUpdate.nsh
* : Environmental Variables: append, prepend, and remove entries
*
* WARNING: If you use StrFunc.nsh header then include it before this file
* with all required definitions. This is to avoid conflicts
*
* Usage:
* ${EnvVarUpdate} "ResultVar" "EnvVarName" "Action" "RegLoc" "PathString"
*
* Credits:
* Version 1.0
* * Cal Turney (turnec2)
* * Amir Szekely (KiCHiK) and e-circ for developing the forerunners of this
* function: AddToPath, un.RemoveFromPath, AddToEnvVar, un.RemoveFromEnvVar,
* WriteEnvStr, and un.DeleteEnvStr
* * Diego Pedroso (deguix) for StrTok
* * Kevin English (kenglish_hi) for StrContains
* * Hendri Adriaens (Smile2Me), Diego Pedroso (deguix), and Dan Fuhry
* (dandaman32) for StrReplace
*
* Version 1.1 (compatibility with StrFunc.nsh)
* * techtonik
*
* http://nsis.sourceforge.net/Environmental_Variables:_append%2C_prepend%2C_and_remove_entries
*
*/
!ifndef ENVVARUPDATE_FUNCTION
!define ENVVARUPDATE_FUNCTION
!verbose push
!verbose 3
!include "LogicLib.nsh"
!include "WinMessages.NSH"
!include "StrFunc.nsh"
; ---- Fix for conflict if StrFunc.nsh is already included in main file -----------------------
!macro _IncludeStrFunction StrFuncName
!ifndef ${StrFuncName}_INCLUDED
${${StrFuncName}}
!endif
!ifndef Un${StrFuncName}_INCLUDED
${Un${StrFuncName}}
!endif
!define un.${StrFuncName} "${Un${StrFuncName}}"
!macroend
!insertmacro _IncludeStrFunction StrTok
!insertmacro _IncludeStrFunction StrStr
!insertmacro _IncludeStrFunction StrRep
; ---------------------------------- Macro Definitions ----------------------------------------
!macro _EnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString
Push "${EnvVarName}"
Push "${Action}"
Push "${RegLoc}"
Push "${PathString}"
Call EnvVarUpdate
Pop "${ResultVar}"
!macroend
!define EnvVarUpdate '!insertmacro "_EnvVarUpdateConstructor"'
!macro _unEnvVarUpdateConstructor ResultVar EnvVarName Action Regloc PathString
Push "${EnvVarName}"
Push "${Action}"
Push "${RegLoc}"
Push "${PathString}"
Call un.EnvVarUpdate
Pop "${ResultVar}"
!macroend
!define un.EnvVarUpdate '!insertmacro "_unEnvVarUpdateConstructor"'
; ---------------------------------- Macro Definitions end-------------------------------------
;----------------------------------- EnvVarUpdate start----------------------------------------
!define hklm_all_users 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
!define hkcu_current_user 'HKCU "Environment"'
!macro EnvVarUpdate UN
Function ${UN}EnvVarUpdate
Push $0
Exch 4
Exch $1
Exch 3
Exch $2
Exch 2
Exch $3
Exch
Exch $4
Push $5
Push $6
Push $7
Push $8
Push $9
Push $R0
/* After this point:
-------------------------
$0 = ResultVar (returned)
$1 = EnvVarName (input)
$2 = Action (input)
$3 = RegLoc (input)
$4 = PathString (input)
$5 = Orig EnvVar (read from registry)
$6 = Len of $0 (temp)
$7 = tempstr1 (temp)
$8 = Entry counter (temp)
$9 = tempstr2 (temp)
$R0 = tempChar (temp) */
; Step 1: Read contents of EnvVarName from RegLoc
;
; Check for empty EnvVarName
${If} $1 == ""
SetErrors
DetailPrint "ERROR: EnvVarName is blank"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Check for valid Action
${If} $2 != "A"
${AndIf} $2 != "P"
${AndIf} $2 != "R"
SetErrors
DetailPrint "ERROR: Invalid Action - must be A, P, or R"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
${If} $3 == HKLM
ReadRegStr $5 ${hklm_all_users} $1 ; Get EnvVarName from all users into $5
${ElseIf} $3 == HKCU
ReadRegStr $5 ${hkcu_current_user} $1 ; Read EnvVarName from current user into $5
${Else}
SetErrors
DetailPrint 'ERROR: RegLoc is [$3] but must be "HKLM" or "HKCU"'
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Check for empty PathString
${If} $4 == ""
SetErrors
DetailPrint "ERROR: PathString is blank"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Make sure we've got some work to do
${If} $5 == ""
${AndIf} $2 == "R"
SetErrors
DetailPrint "$1 is empty - Nothing to remove"
Goto EnvVarUpdate_Restore_Vars
${EndIf}
; Step 2: Scrub EnvVar
;
StrCpy $0 $5 ; Copy the contents to $0
; Remove spaces around semicolons (NOTE: spaces before the 1st entry or
; after the last one are not removed here but instead in Step 3)
${If} $0 != "" ; If EnvVar is not empty ...
${Do}
${${UN}StrStr} $7 $0 " ;"
${If} $7 == ""
${ExitDo}
${EndIf}
${${UN}StrRep} $0 $0 " ;" ";" ; Remove '<space>;'
${Loop}
${Do}
${${UN}StrStr} $7 $0 "; "
${If} $7 == ""
${ExitDo}
${EndIf}
${${UN}StrRep} $0 $0 "; " ";" ; Remove ';<space>'
${Loop}
${Do}
${${UN}StrStr} $7 $0 ";;"
${If} $7 == ""
${ExitDo}
${EndIf}
${${UN}StrRep} $0 $0 ";;" ";"
${Loop}
; Remove a leading or trailing semicolon from EnvVar
StrCpy $7 $0 1 0
${If} $7 == ";"
StrCpy $0 $0 "" 1 ; Change ';<EnvVar>' to '<EnvVar>'
${EndIf}
StrLen $6 $0
IntOp $6 $6 - 1
StrCpy $7 $0 1 $6
${If} $7 == ";"
StrCpy $0 $0 $6 ; Change ';<EnvVar>' to '<EnvVar>'
${EndIf}
; DetailPrint "Scrubbed $1: [$0]" ; Uncomment to debug
${EndIf}
/* Step 3. Remove all instances of the target path/string (even if "A" or "P")
$6 = bool flag (1 = found and removed PathString)
$7 = a string (e.g. path) delimited by semicolon(s)
$8 = entry counter starting at 0
$9 = copy of $0
$R0 = tempChar */
${If} $5 != "" ; If EnvVar is not empty ...
StrCpy $9 $0
StrCpy $0 ""
StrCpy $8 0
StrCpy $6 0
${Do}
${${UN}StrTok} $7 $9 ";" $8 "0" ; $7 = next entry, $8 = entry counter
${If} $7 == "" ; If we've run out of entries,
${ExitDo} ; we're done
${EndIf} ;
; Remove leading and trailing spaces from this entry (critical step for Action=Remove)
${Do}
StrCpy $R0 $7 1
${If} $R0 != " "
${ExitDo}
${EndIf}
StrCpy $7 $7 "" 1 ; Remove leading space
${Loop}
${Do}
StrCpy $R0 $7 1 -1
${If} $R0 != " "
${ExitDo}
${EndIf}
StrCpy $7 $7 -1 ; Remove trailing space
${Loop}
${If} $7 == $4 ; If string matches, remove it by not appending it
StrCpy $6 1 ; Set 'found' flag
${ElseIf} $7 != $4 ; If string does NOT match
${AndIf} $0 == "" ; and the 1st string being added to $0,
StrCpy $0 $7 ; copy it to $0 without a prepended semicolon
${ElseIf} $7 != $4 ; If string does NOT match
${AndIf} $0 != "" ; and this is NOT the 1st string to be added to $0,
StrCpy $0 $0;$7 ; append path to $0 with a prepended semicolon
${EndIf} ;
IntOp $8 $8 + 1 ; Bump counter
${Loop} ; Check for duplicates until we run out of paths
${EndIf}
; Step 4: Perform the requested Action
;
${If} $2 != "R" ; If Append or Prepend
${If} $6 == 1 ; And if we found the target
DetailPrint "Target is already present in $1. It will be removed and"
${EndIf}
${If} $0 == "" ; If EnvVar is (now) empty
StrCpy $0 $4 ; just copy PathString to EnvVar
${If} $6 == 0 ; If found flag is either 0
${OrIf} $6 == "" ; or blank (if EnvVarName is empty)
DetailPrint "$1 was empty and has been updated with the target"
${EndIf}
${ElseIf} $2 == "A" ; If Append (and EnvVar is not empty),
StrCpy $0 $0;$4 ; append PathString
${If} $6 == 1
DetailPrint "appended to $1"
${Else}
DetailPrint "Target was appended to $1"
${EndIf}
${Else} ; If Prepend (and EnvVar is not empty),
StrCpy $0 $4;$0 ; prepend PathString
${If} $6 == 1
DetailPrint "prepended to $1"
${Else}
DetailPrint "Target was prepended to $1"
${EndIf}
${EndIf}
${Else} ; If Action = Remove
${If} $6 == 1 ; and we found the target
DetailPrint "Target was found and removed from $1"
${Else}
DetailPrint "Target was NOT found in $1 (nothing to remove)"
${EndIf}
${If} $0 == ""
DetailPrint "$1 is now empty"
${EndIf}
${EndIf}
; Step 5: Update the registry at RegLoc with the updated EnvVar and announce the change
;
ClearErrors
${If} $3 == HKLM
WriteRegExpandStr ${hklm_all_users} $1 $0 ; Write it in all users section
${ElseIf} $3 == HKCU
WriteRegExpandStr ${hkcu_current_user} $1 $0 ; Write it to current user section
${EndIf}
IfErrors 0 +4
MessageBox MB_OK|MB_ICONEXCLAMATION "Could not write updated $1 to $3"
DetailPrint "Could not write updated $1 to $3"
Goto EnvVarUpdate_Restore_Vars
; "Export" our change
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
EnvVarUpdate_Restore_Vars:
;
; Restore the user's variables and return ResultVar
Pop $R0
Pop $9
Pop $8
Pop $7
Pop $6
Pop $5
Pop $4
Pop $3
Pop $2
Pop $1
Push $0 ; Push my $0 (ResultVar)
Exch
Pop $0 ; Restore his $0
FunctionEnd
!macroend ; EnvVarUpdate UN
!insertmacro EnvVarUpdate ""
!insertmacro EnvVarUpdate "un."
;----------------------------------- EnvVarUpdate end----------------------------------------
!verbose pop
!endif

View File

@ -0,0 +1,70 @@
# Builds a Windows installer with NSIS.
# It expects the following command line arguments:
# - OUTPUTFILE, filename of the installer (without extension)
# - MAJORVERSION, major build version
# - MINORVERSION, minor build version
# - BUILDVERSION, build id version
#
# The created installer executes the following steps:
# 1. install geth for all users
# 2. install optional development tools such as abigen
# 3. create an uninstaller
# 4. configure the Windows firewall for geth
# 5. create geth, attach and uninstall start menu entries
# 6. configure the registry so that Windows can manage the package through its platform tools
# 7. add the system-wide environment variable ETHEREUM_SOCKET
# 8. add the install directory to %PATH%
#
# Requirements:
# - NSIS, http://nsis.sourceforge.net/Main_Page
# - NSIS Large Strings build, http://nsis.sourceforge.net/Special_Builds
# - SFP, http://nsis.sourceforge.net/NSIS_Simple_Firewall_Plugin (put dll in NSIS\Plugins\x86-ansi)
#
# After installing NSIS, extract the NSIS Large Strings build zip and replace the makensis.exe and the
# files found in Stub.
#
# based on: http://nsis.sourceforge.net/A_simple_installer_with_start_menu_shortcut_and_uninstaller
#
# TODO:
# - sign installer
CRCCheck on
!define GROUPNAME "Ethereum"
!define APPNAME "Geth"
!define DESCRIPTION "Official Go implementation of the Ethereum protocol"
!addplugindir .\
# Require admin rights on NT6+ (When UAC is turned on)
RequestExecutionLevel admin
# Use LZMA compression
SetCompressor /SOLID lzma
!include LogicLib.nsh
!include PathUpdate.nsh
!include EnvVarUpdate.nsh
!macro VerifyUserIsAdmin
UserInfo::GetAccountType
pop $0
${If} $0 != "admin" # Require admin rights on NT4+
messageBox mb_iconstop "Administrator rights required!"
setErrorLevel 740 # ERROR_ELEVATION_REQUIRED
quit
${EndIf}
!macroend
function .onInit
# make vars global for all users since geth is installed globally
setShellVarContext all
!insertmacro VerifyUserIsAdmin
${If} ${ARCH} == "amd64"
StrCpy $InstDir "$PROGRAMFILES64\${APPNAME}"
${Else}
StrCpy $InstDir "$PROGRAMFILES32\${APPNAME}"
${Endif}
functionEnd
!include install.nsh
!include uninstall.nsh

View File

@ -0,0 +1,103 @@
Name "geth ${MAJORVERSION}.${MINORVERSION}.${BUILDVERSION}" # VERSION variables set through command line arguments
InstallDir "$InstDir"
OutFile "${OUTPUTFILE}" # set through command line arguments
# Links for "Add/Remove Programs"
!define HELPURL "https://github.com/ethereum/go-ethereum/issues"
!define UPDATEURL "https://github.com/ethereum/go-ethereum/releases"
!define ABOUTURL "https://github.com/ethereum/go-ethereum#ethereum-go"
!define /date NOW "%Y%m%d"
PageEx license
LicenseData {{.License}}
PageExEnd
# Install geth binary
Section "Geth" GETH_IDX
SetOutPath $INSTDIR
file {{.Geth}}
# Create start menu launcher
createDirectory "$SMPROGRAMS\${APPNAME}"
createShortCut "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk" "$INSTDIR\geth.exe" "--fast" "--cache=512"
createShortCut "$SMPROGRAMS\${APPNAME}\Attach.lnk" "$INSTDIR\geth.exe" "attach" "" ""
createShortCut "$SMPROGRAMS\${APPNAME}\Uninstall.lnk" "$INSTDIR\uninstall.exe" "" "" ""
# Firewall - remove rules (if exists)
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
SimpleFC::AdvRemoveRule "Geth outgoing peers (TCP:30303)"
SimpleFC::AdvRemoveRule "Geth UDP discovery (UDP:30303)"
# Firewall - add rules
SimpleFC::AdvAddRule "Geth incoming peers (TCP:30303)" "" 6 1 1 2147483647 1 "$INSTDIR\geth.exe" "" "" "Ethereum" 30303 "" "" ""
SimpleFC::AdvAddRule "Geth outgoing peers (TCP:30303)" "" 6 2 1 2147483647 1 "$INSTDIR\geth.exe" "" "" "Ethereum" "" 30303 "" ""
SimpleFC::AdvAddRule "Geth UDP discovery (UDP:30303)" "" 17 2 1 2147483647 1 "$INSTDIR\geth.exe" "" "" "Ethereum" "" 30303 "" ""
# Set default IPC endpoint (https://github.com/ethereum/EIPs/issues/147)
${EnvVarUpdate} $0 "ETHEREUM_SOCKET" "R" "HKLM" "\\.\pipe\geth.ipc"
${EnvVarUpdate} $0 "ETHEREUM_SOCKET" "A" "HKLM" "\\.\pipe\geth.ipc"
# Add instdir to PATH
Push "$INSTDIR"
Call AddToPath
SectionEnd
# Install optional develop tools.
Section /o "Development tools" DEV_TOOLS_IDX
SetOutPath $INSTDIR
{{range .DevTools}}file {{.}}
{{end}}
SectionEnd
# Return on top of stack the total size (as DWORD) of the selected/installed sections.
Var GetInstalledSize.total
Function GetInstalledSize
StrCpy $GetInstalledSize.total 0
${if} ${SectionIsSelected} ${GETH_IDX}
SectionGetSize ${GETH_IDX} $0
IntOp $GetInstalledSize.total $GetInstalledSize.total + $0
${endif}
${if} ${SectionIsSelected} ${DEV_TOOLS_IDX}
SectionGetSize ${DEV_TOOLS_IDX} $0
IntOp $GetInstalledSize.total $GetInstalledSize.total + $0
${endif}
IntFmt $GetInstalledSize.total "0x%08X" $GetInstalledSize.total
Push $GetInstalledSize.total
FunctionEnd
# Write registry, Windows uses these values in various tools such as add/remove program.
# PowerShell: Get-ItemProperty HKLM:\Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\* | Select-Object DisplayName, InstallLocation, InstallDate | Format-Table AutoSize
function .onInstSuccess
# Save information in registry in HKEY_LOCAL_MACHINE branch, Windows add/remove functionality depends on this
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "DisplayName" "${GROUPNAME} - ${APPNAME} - ${DESCRIPTION}"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "UninstallString" "$\"$INSTDIR\uninstall.exe$\""
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "QuietUninstallString" "$\"$INSTDIR\uninstall.exe$\" /S"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "InstallLocation" "$INSTDIR"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "InstallDate" "${NOW}"
# Wait for Alex
#WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "DisplayIcon" "$\"$INSTDIR\logo.ico$\""
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "Publisher" "${GROUPNAME}"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "HelpLink" "${HELPURL}"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "URLUpdateInfo" "${UPDATEURL}"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "URLInfoAbout" "${ABOUTURL}"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "DisplayVersion" "${MAJORVERSION}.${MINORVERSION}.${BUILDVERSION}"
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "VersionMajor" ${MAJORVERSION}
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "VersionMinor" ${MINORVERSION}
# There is no option for modifying or repairing the install
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "NoModify" 1
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "NoRepair" 1
Call GetInstalledSize
Pop $0
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}" "EstimatedSize" "$0"
# Create uninstaller
writeUninstaller "$INSTDIR\uninstall.exe"
functionEnd
Page components
Page directory
Page instfiles

View File

@ -0,0 +1,153 @@
!include "WinMessages.nsh"
; see https://support.microsoft.com/en-us/kb/104011
!define Environ 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
; HKEY_LOCAL_MACHINE = 0x80000002
; AddToPath - Appends dir to PATH
; (does not work on Win9x/ME)
;
; Usage:
; Push "dir"
; Call AddToPath
Function AddToPath
Exch $0
Push $1
Push $2
Push $3
Push $4
; NSIS ReadRegStr returns empty string on string overflow
; Native calls are used here to check actual length of PATH
; $4 = RegOpenKey(HKEY_LOCAL_MACHINE, "SYSTEM\CurrentControlSet\Control\Session Manager\Environment", &$3)
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
IntCmp $4 0 0 done done
; $4 = RegQueryValueEx($3, "PATH", (DWORD*)0, (DWORD*)0, &$1, ($2=NSIS_MAX_STRLEN, &$2))
; RegCloseKey($3)
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
System::Call "advapi32::RegCloseKey(i $3)"
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
DetailPrint "AddToPath: original length $2 > ${NSIS_MAX_STRLEN}"
MessageBox MB_OK "PATH not updated, original length $2 > ${NSIS_MAX_STRLEN}"
Goto done
IntCmp $4 0 +5 ; $4 != NO_ERROR
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
DetailPrint "AddToPath: unexpected error code $4"
Goto done
StrCpy $1 ""
; Check if already in PATH
Push "$1;"
Push "$0;"
Call StrStr
Pop $2
StrCmp $2 "" 0 done
Push "$1;"
Push "$0\;"
Call StrStr
Pop $2
StrCmp $2 "" 0 done
; Prevent NSIS string overflow
StrLen $2 $0
StrLen $3 $1
IntOp $2 $2 + $3
IntOp $2 $2 + 2 ; $2 = strlen(dir) + strlen(PATH) + sizeof(";")
IntCmp $2 ${NSIS_MAX_STRLEN} +4 +4 0
DetailPrint "AddToPath: new length $2 > ${NSIS_MAX_STRLEN}"
MessageBox MB_OK "PATH not updated, new length $2 > ${NSIS_MAX_STRLEN}."
Goto done
; Append dir to PATH
DetailPrint "Add to PATH: $0"
StrCpy $2 $1 1 -1
StrCmp $2 ";" 0 +2
StrCpy $1 $1 -1 ; remove trailing ';'
StrCmp $1 "" +2 ; no leading ';'
StrCpy $0 "$1;$0"
WriteRegExpandStr ${Environ} "PATH" $0
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
done:
Pop $4
Pop $3
Pop $2
Pop $1
Pop $0
FunctionEnd
; RemoveFromPath - Removes dir from PATH
;
; Usage:
; Push "dir"
; Call RemoveFromPath
Function un.RemoveFromPath
Exch $0
Push $1
Push $2
Push $3
Push $4
Push $5
Push $6
; NSIS ReadRegStr returns empty string on string overflow
; Native calls are used here to check actual length of PATH
; $4 = RegOpenKey(HKEY_LOCAL_MACHINE, "SYSTEM\CurrentControlSet\Control\Session Manager\Environment", &$3)
System::Call "advapi32::RegOpenKey(i 0x80000002, t'SYSTEM\CurrentControlSet\Control\Session Manager\Environment', *i.r3) i.r4"
IntCmp $4 0 0 done done
; $4 = RegQueryValueEx($3, "PATH", (DWORD*)0, (DWORD*)0, &$1, ($2=NSIS_MAX_STRLEN, &$2))
; RegCloseKey($3)
System::Call "advapi32::RegQueryValueEx(i $3, t'PATH', i 0, i 0, t.r1, *i ${NSIS_MAX_STRLEN} r2) i.r4"
System::Call "advapi32::RegCloseKey(i $3)"
IntCmp $4 234 0 +4 +4 ; $4 == ERROR_MORE_DATA
DetailPrint "RemoveFromPath: original length $2 > ${NSIS_MAX_STRLEN}"
MessageBox MB_OK "PATH not updated, original length $2 > ${NSIS_MAX_STRLEN}"
Goto done
IntCmp $4 0 +5 ; $4 != NO_ERROR
IntCmp $4 2 +3 ; $4 != ERROR_FILE_NOT_FOUND
DetailPrint "RemoveFromPath: unexpected error code $4"
Goto done
StrCpy $1 ""
; length < ${NSIS_MAX_STRLEN} -> ReadRegStr can be used
ReadRegStr $1 ${Environ} "PATH"
StrCpy $5 $1 1 -1
StrCmp $5 ";" +2
StrCpy $1 "$1;" ; ensure trailing ';'
Push $1
Push "$0;"
Call un.StrStr
Pop $2 ; pos of our dir
StrCmp $2 "" done
DetailPrint "Remove from PATH: $0"
StrLen $3 "$0;"
StrLen $4 $2
StrCpy $5 $1 -$4 ; $5 is now the part before the path to remove
StrCpy $6 $2 "" $3 ; $6 is now the part after the path to remove
StrCpy $3 "$5$6"
StrCpy $5 $3 1 -1
StrCmp $5 ";" 0 +2
StrCpy $3 $3 -1 ; remove trailing ';'
WriteRegExpandStr ${Environ} "PATH" $3
SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
done:
Pop $6
Pop $5
Pop $4
Pop $3
Pop $2
Pop $1
Pop $0
FunctionEnd

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,33 @@
Section "Uninstall"
# uninstall for all users
setShellVarContext all
# Delete (optionally) installed files
{{range $}}Delete $INSTDIR\{{.}}
{{end}}
Delete $INSTDIR\uninstall.exe
# Delete install directory
rmDir $INSTDIR
# Delete start menu launcher
Delete "$SMPROGRAMS\${APPNAME}\${APPNAME}.lnk"
Delete "$SMPROGRAMS\${APPNAME}\Attach.lnk"
Delete "$SMPROGRAMS\${APPNAME}\Uninstall.lnk"
rmDir "$SMPROGRAMS\${APPNAME}"
# Firewall - remove rules if exists
SimpleFC::AdvRemoveRule "Geth incoming peers (TCP:30303)"
SimpleFC::AdvRemoveRule "Geth outgoing peers (TCP:30303)"
SimpleFC::AdvRemoveRule "Geth UDP discovery (UDP:30303)"
# Remove IPC endpoint (https://github.com/ethereum/EIPs/issues/147)
${un.EnvVarUpdate} $0 "ETHEREUM_SOCKET" "R" "HKLM" "\\.\pipe\geth.ipc"
# Remove install directory from PATH
Push "$INSTDIR"
Call un.RemoveFromPath
# Cleanup registry (deletes all sub keys)
DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\${GROUPNAME} ${APPNAME}"
SectionEnd

View File

@ -0,0 +1,22 @@
Pod::Spec.new do |spec|
spec.name = 'Geth'
spec.version = '{{.Version}}'
spec.license = { :type => 'GNU Lesser General Public License, Version 3.0' }
spec.homepage = 'https://github.com/ethereum/go-ethereum'
spec.authors = { {{range .Contributors}}
'{{.Name}}' => '{{.Email}}',{{end}}
}
spec.summary = 'iOS Ethereum Client'
spec.source = { :git => 'https://github.com/ethereum/go-ethereum.git', :commit => '{{.Commit}}' }
spec.platform = :ios
spec.ios.deployment_target = '9.0'
spec.ios.vendored_frameworks = 'Frameworks/Geth.framework'
spec.prepare_command = <<-CMD
curl https://gethstore.blob.core.windows.net/builds/{{.Archive}}.tar.gz | tar -xvz
mkdir Frameworks
mv {{.Archive}}/Geth.framework Frameworks
rm -rf {{.Archive}}
CMD
end

View File

@ -0,0 +1,46 @@
#!/usr/bin/env bash
# travis_keepalive runs the given command and preserves its return value,
# while it forks a child process what periodically produces a log line,
# so that Travis won't abort the build after 10 minutes.
# Why?
# `t.Log()` in Go buffers its output until the test passes or fails,
# and `-race` can increase the execution time by 2-20x.
set -euo pipefail
readonly KEEPALIVE_INTERVAL=300 # seconds => 5m
main() {
keepalive
"$@"
}
# keepalive produces a log line once per KEEPALIVE_INTERVAL.
keepalive() {
local child_pid
# Note: We fork here!
repeat "keepalive" &
child_pid=$!
ensureChildOnEXIT "${child_pid}"
}
repeat() {
local this="$1"
while true; do
echo "${this}"
sleep "${KEEPALIVE_INTERVAL}"
done
}
# Ensures that the child gets killed on normal program exit.
ensureChildOnEXIT() {
# Note: SIGINT and SIGTERM are forwarded to the child process by Bash
# automatically, so we don't have to deal with signals.
local child_pid="$1"
trap "kill ${child_pid}" EXIT
}
main "$@"

View File

@ -0,0 +1,411 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build none
/*
This command generates GPL license headers on top of all source files.
You can run it once per month, before cutting a release or just
whenever you feel like it.
go run update-license.go
All authors (people who have contributed code) are listed in the
AUTHORS file. The author names are mapped and deduplicated using the
.mailmap file. You can use .mailmap to set the canonical name and
address for each author. See git-shortlog(1) for an explanation of the
.mailmap format.
Please review the resulting diff to check whether the correct
copyright assignments are performed.
*/
package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
)
var (
// only files with these extensions will be considered
extensions = []string{".go", ".js", ".qml"}
// paths with any of these prefixes will be skipped
skipPrefixes = []string{
// boring stuff
"vendor/", "tests/testdata/", "build/",
// don't relicense vendored sources
"cmd/internal/browser",
"consensus/ethash/xor.go",
"crypto/bn256/",
"crypto/ecies/",
"crypto/secp256k1/curve.go",
"crypto/sha3/",
"internal/jsre/deps",
"log/",
"common/bitutil/bitutil",
// don't license generated files
"contracts/chequebook/contract/code.go",
}
// paths with this prefix are licensed as GPL. all other files are LGPL.
gplPrefixes = []string{"cmd/"}
// this regexp must match the entire license comment at the
// beginning of each file.
licenseCommentRE = regexp.MustCompile(`^//\s*(Copyright|This file is part of).*?\n(?://.*?\n)*\n*`)
// this text appears at the start of AUTHORS
authorsFileHeader = "# This is the official list of go-ethereum authors for copyright purposes.\n\n"
)
// this template generates the license comment.
// its input is an info structure.
var licenseT = template.Must(template.New("").Parse(`
// Copyright {{.Year}} The go-ethereum Authors
// This file is part of {{.Whole false}}.
//
// {{.Whole true}} is free software: you can redistribute it and/or modify
// it under the terms of the GNU {{.License}} as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// {{.Whole true}} is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU {{.License}} for more details.
//
// You should have received a copy of the GNU {{.License}}
// along with {{.Whole false}}. If not, see <http://www.gnu.org/licenses/>.
`[1:]))
type info struct {
file string
Year int64
}
func (i info) License() string {
if i.gpl() {
return "General Public License"
}
return "Lesser General Public License"
}
func (i info) ShortLicense() string {
if i.gpl() {
return "GPL"
}
return "LGPL"
}
func (i info) Whole(startOfSentence bool) string {
if i.gpl() {
return "go-ethereum"
}
if startOfSentence {
return "The go-ethereum library"
}
return "the go-ethereum library"
}
func (i info) gpl() bool {
for _, p := range gplPrefixes {
if strings.HasPrefix(i.file, p) {
return true
}
}
return false
}
func main() {
var (
files = getFiles()
filec = make(chan string)
infoc = make(chan *info, 20)
wg sync.WaitGroup
)
writeAuthors(files)
go func() {
for _, f := range files {
filec <- f
}
close(filec)
}()
for i := runtime.NumCPU(); i >= 0; i-- {
// getting file info is slow and needs to be parallel.
// it traverses git history for each file.
wg.Add(1)
go getInfo(filec, infoc, &wg)
}
go func() {
wg.Wait()
close(infoc)
}()
writeLicenses(infoc)
}
func skipFile(path string) bool {
if strings.Contains(path, "/testdata/") {
return true
}
for _, p := range skipPrefixes {
if strings.HasPrefix(path, p) {
return true
}
}
return false
}
func getFiles() []string {
cmd := exec.Command("git", "ls-tree", "-r", "--name-only", "HEAD")
var files []string
err := doLines(cmd, func(line string) {
if skipFile(line) {
return
}
ext := filepath.Ext(line)
for _, wantExt := range extensions {
if ext == wantExt {
goto keep
}
}
return
keep:
files = append(files, line)
})
if err != nil {
log.Fatal("error getting files:", err)
}
return files
}
var authorRegexp = regexp.MustCompile(`\s*[0-9]+\s*(.*)`)
func gitAuthors(files []string) []string {
cmds := []string{"shortlog", "-s", "-n", "-e", "HEAD", "--"}
cmds = append(cmds, files...)
cmd := exec.Command("git", cmds...)
var authors []string
err := doLines(cmd, func(line string) {
m := authorRegexp.FindStringSubmatch(line)
if len(m) > 1 {
authors = append(authors, m[1])
}
})
if err != nil {
log.Fatalln("error getting authors:", err)
}
return authors
}
func readAuthors() []string {
content, err := ioutil.ReadFile("AUTHORS")
if err != nil && !os.IsNotExist(err) {
log.Fatalln("error reading AUTHORS:", err)
}
var authors []string
for _, a := range bytes.Split(content, []byte("\n")) {
if len(a) > 0 && a[0] != '#' {
authors = append(authors, string(a))
}
}
// Retranslate existing authors through .mailmap.
// This should catch email address changes.
authors = mailmapLookup(authors)
return authors
}
func mailmapLookup(authors []string) []string {
if len(authors) == 0 {
return nil
}
cmds := []string{"check-mailmap", "--"}
cmds = append(cmds, authors...)
cmd := exec.Command("git", cmds...)
var translated []string
err := doLines(cmd, func(line string) {
translated = append(translated, line)
})
if err != nil {
log.Fatalln("error translating authors:", err)
}
return translated
}
func writeAuthors(files []string) {
merge := make(map[string]bool)
// Add authors that Git reports as contributors.
// This is the primary source of author information.
for _, a := range gitAuthors(files) {
merge[a] = true
}
// Add existing authors from the file. This should ensure that we
// never lose authors, even if Git stops listing them. We can also
// add authors manually this way.
for _, a := range readAuthors() {
merge[a] = true
}
// Write sorted list of authors back to the file.
var result []string
for a := range merge {
result = append(result, a)
}
sort.Strings(result)
content := new(bytes.Buffer)
content.WriteString(authorsFileHeader)
for _, a := range result {
content.WriteString(a)
content.WriteString("\n")
}
fmt.Println("writing AUTHORS")
if err := ioutil.WriteFile("AUTHORS", content.Bytes(), 0644); err != nil {
log.Fatalln(err)
}
}
func getInfo(files <-chan string, out chan<- *info, wg *sync.WaitGroup) {
for file := range files {
stat, err := os.Lstat(file)
if err != nil {
fmt.Printf("ERROR %s: %v\n", file, err)
continue
}
if !stat.Mode().IsRegular() {
continue
}
if isGenerated(file) {
continue
}
info, err := fileInfo(file)
if err != nil {
fmt.Printf("ERROR %s: %v\n", file, err)
continue
}
out <- info
}
wg.Done()
}
func isGenerated(file string) bool {
fd, err := os.Open(file)
if err != nil {
return false
}
defer fd.Close()
buf := make([]byte, 2048)
n, _ := fd.Read(buf)
buf = buf[:n]
for _, l := range bytes.Split(buf, []byte("\n")) {
if bytes.HasPrefix(l, []byte("// Code generated")) {
return true
}
}
return false
}
// fileInfo finds the lowest year in which the given file was committed.
func fileInfo(file string) (*info, error) {
info := &info{file: file, Year: int64(time.Now().Year())}
cmd := exec.Command("git", "log", "--follow", "--find-renames=80", "--find-copies=80", "--pretty=format:%ai", "--", file)
err := doLines(cmd, func(line string) {
y, err := strconv.ParseInt(line[:4], 10, 64)
if err != nil {
fmt.Printf("cannot parse year: %q", line[:4])
}
if y < info.Year {
info.Year = y
}
})
return info, err
}
func writeLicenses(infos <-chan *info) {
for i := range infos {
writeLicense(i)
}
}
func writeLicense(info *info) {
fi, err := os.Stat(info.file)
if os.IsNotExist(err) {
fmt.Println("skipping (does not exist)", info.file)
return
}
if err != nil {
log.Fatalf("error stat'ing %s: %v\n", info.file, err)
}
content, err := ioutil.ReadFile(info.file)
if err != nil {
log.Fatalf("error reading %s: %v\n", info.file, err)
}
// Construct new file content.
buf := new(bytes.Buffer)
licenseT.Execute(buf, info)
if m := licenseCommentRE.FindIndex(content); m != nil && m[0] == 0 {
buf.Write(content[:m[0]])
buf.Write(content[m[1]:])
} else {
buf.Write(content)
}
// Write it to the file.
if bytes.Equal(content, buf.Bytes()) {
fmt.Println("skipping (no changes)", info.file)
return
}
fmt.Println("writing", info.ShortLicense(), info.file)
if err := ioutil.WriteFile(info.file, buf.Bytes(), fi.Mode()); err != nil {
log.Fatalf("error writing %s: %v", info.file, err)
}
}
func doLines(cmd *exec.Cmd, f func(string)) error {
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
if err := cmd.Start(); err != nil {
return err
}
s := bufio.NewScanner(stdout)
for s.Scan() {
f(s.Text())
}
if s.Err() != nil {
return s.Err()
}
if err := cmd.Wait(); err != nil {
return fmt.Errorf("%v (for %s)", err, strings.Join(cmd.Args, " "))
}
return nil
}

View File

@ -0,0 +1,183 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common/compiler"
)
var (
abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN")
binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)")
typFlag = flag.String("type", "", "Struct name for the binding (default = package name)")
solFlag = flag.String("sol", "", "Path to the Ethereum contract Solidity source to build and bind")
solcFlag = flag.String("solc", "solc", "Solidity compiler to use if source builds are requested")
excFlag = flag.String("exc", "", "Comma separated types to exclude from binding")
vyFlag = flag.String("vy", "", "Path to the Ethereum contract Vyper source to build and bind")
vyperFlag = flag.String("vyper", "vyper", "Vyper compiler to use if source builds are requested")
pkgFlag = flag.String("pkg", "", "Package name to generate the binding into")
outFlag = flag.String("out", "", "Output file for the generated binding (default = stdout)")
langFlag = flag.String("lang", "go", "Destination language for the bindings (go, java, objc)")
)
func main() {
// Parse and ensure all needed inputs are specified
flag.Parse()
if *abiFlag == "" && *solFlag == "" && *vyFlag == "" {
fmt.Printf("No contract ABI (--abi), Solidity source (--sol), or Vyper source (--vy) specified\n")
os.Exit(-1)
} else if (*abiFlag != "" || *binFlag != "" || *typFlag != "") && (*solFlag != "" || *vyFlag != "") {
fmt.Printf("Contract ABI (--abi), bytecode (--bin) and type (--type) flags are mutually exclusive with the Solidity (--sol) and Vyper (--vy) flags\n")
os.Exit(-1)
} else if *solFlag != "" && *vyFlag != "" {
fmt.Printf("Solidity (--sol) and Vyper (--vy) flags are mutually exclusive\n")
os.Exit(-1)
}
if *pkgFlag == "" {
fmt.Printf("No destination package specified (--pkg)\n")
os.Exit(-1)
}
var lang bind.Lang
switch *langFlag {
case "go":
lang = bind.LangGo
case "java":
lang = bind.LangJava
case "objc":
lang = bind.LangObjC
default:
fmt.Printf("Unsupported destination language \"%s\" (--lang)\n", *langFlag)
os.Exit(-1)
}
// If the entire solidity code was specified, build and bind based on that
var (
abis []string
bins []string
types []string
)
if *solFlag != "" || *vyFlag != "" || (*abiFlag == "-" && *pkgFlag == "") {
// Generate the list of types to exclude from binding
exclude := make(map[string]bool)
for _, kind := range strings.Split(*excFlag, ",") {
exclude[strings.ToLower(kind)] = true
}
var contracts map[string]*compiler.Contract
var err error
switch {
case *solFlag != "":
contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag)
if err != nil {
fmt.Printf("Failed to build Solidity contract: %v\n", err)
os.Exit(-1)
}
case *vyFlag != "":
contracts, err = compiler.CompileVyper(*vyperFlag, *vyFlag)
if err != nil {
fmt.Printf("Failed to build Vyper contract: %v\n", err)
os.Exit(-1)
}
default:
contracts, err = contractsFromStdin()
if err != nil {
fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err)
os.Exit(-1)
}
}
// Gather all non-excluded contracts for binding
for name, contract := range contracts {
if exclude[strings.ToLower(name)] {
continue
}
abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse
if err != nil {
fmt.Printf("Failed to parse ABIs from compiler output: %v\n", err)
os.Exit(-1)
}
abis = append(abis, string(abi))
bins = append(bins, contract.Code)
nameParts := strings.Split(name, ":")
types = append(types, nameParts[len(nameParts)-1])
}
} else {
// Otherwise load up the ABI, optional bytecode and type name from the parameters
var abi []byte
var err error
if *abiFlag == "-" {
abi, err = ioutil.ReadAll(os.Stdin)
} else {
abi, err = ioutil.ReadFile(*abiFlag)
}
if err != nil {
fmt.Printf("Failed to read input ABI: %v\n", err)
os.Exit(-1)
}
abis = append(abis, string(abi))
var bin []byte
if *binFlag != "" {
if bin, err = ioutil.ReadFile(*binFlag); err != nil {
fmt.Printf("Failed to read input bytecode: %v\n", err)
os.Exit(-1)
}
}
bins = append(bins, string(bin))
kind := *typFlag
if kind == "" {
kind = *pkgFlag
}
types = append(types, kind)
}
// Generate the contract binding
code, err := bind.Bind(types, abis, bins, *pkgFlag, lang)
if err != nil {
fmt.Printf("Failed to generate ABI binding: %v\n", err)
os.Exit(-1)
}
// Either flush it out to a file or display on the standard output
if *outFlag == "" {
fmt.Printf("%s\n", code)
return
}
if err := ioutil.WriteFile(*outFlag, []byte(code), 0600); err != nil {
fmt.Printf("Failed to write ABI binding: %v\n", err)
os.Exit(-1)
}
}
func contractsFromStdin() (map[string]*compiler.Contract, error) {
bytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return nil, err
}
return compiler.ParseCombinedJSON(bytes, "", "", "", "")
}

View File

@ -0,0 +1,149 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// bootnode runs a bootstrap node for the Ethereum Discovery Protocol.
package main
import (
"crypto/ecdsa"
"flag"
"fmt"
"net"
"os"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
func main() {
var (
listenAddr = flag.String("addr", ":30301", "listen address")
genKey = flag.String("genkey", "", "generate a node key")
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
nodeKeyFile = flag.String("nodekey", "", "private key filename")
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)")
vmodule = flag.String("vmodule", "", "log verbosity pattern")
nodeKey *ecdsa.PrivateKey
err error
)
flag.Parse()
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(*verbosity))
glogger.Vmodule(*vmodule)
log.Root().SetHandler(glogger)
natm, err := nat.Parse(*natdesc)
if err != nil {
utils.Fatalf("-nat: %v", err)
}
switch {
case *genKey != "":
nodeKey, err = crypto.GenerateKey()
if err != nil {
utils.Fatalf("could not generate key: %v", err)
}
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
utils.Fatalf("%v", err)
}
return
case *nodeKeyFile == "" && *nodeKeyHex == "":
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
case *nodeKeyFile != "" && *nodeKeyHex != "":
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive")
case *nodeKeyFile != "":
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
utils.Fatalf("-nodekey: %v", err)
}
case *nodeKeyHex != "":
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
utils.Fatalf("-nodekeyhex: %v", err)
}
}
if *writeAddr {
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
os.Exit(0)
}
var restrictList *netutil.Netlist
if *netrestrict != "" {
restrictList, err = netutil.ParseNetlist(*netrestrict)
if err != nil {
utils.Fatalf("-netrestrict: %v", err)
}
}
addr, err := net.ResolveUDPAddr("udp", *listenAddr)
if err != nil {
utils.Fatalf("-ResolveUDPAddr: %v", err)
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
utils.Fatalf("-ListenUDP: %v", err)
}
realaddr := conn.LocalAddr().(*net.UDPAddr)
if natm != nil {
if !realaddr.IP.IsLoopback() {
go nat.Map(natm, nil, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
}
if ext, err := natm.ExternalIP(); err == nil {
realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
}
}
printNotice(&nodeKey.PublicKey, *realaddr)
if *runv5 {
if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil {
utils.Fatalf("%v", err)
}
} else {
db, _ := enode.OpenDB("")
ln := enode.NewLocalNode(db, nodeKey)
cfg := discover.Config{
PrivateKey: nodeKey,
NetRestrict: restrictList,
}
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
}
select {}
}
func printNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) {
if addr.IP.IsUnspecified() {
addr.IP = net.IP{127, 0, 0, 1}
}
n := enode.NewV4(nodeKey, addr.IP, 0, addr.Port)
fmt.Println(n.String())
fmt.Println("Note: you're using cmd/bootnode, a developer tool.")
fmt.Println("We recommend using a regular node as bootstrap node for production deployments.")
}

View File

@ -0,0 +1,965 @@
Clef
----
Clef can be used to sign transactions and data and is meant as a replacement for geth's account management.
This allows DApps not to depend on geth's account management. When a DApp wants to sign data, it can send the data to
the signer; the signer then provides the user with context and asks for permission to sign the data. If
the user grants the signing request, the signer sends the signature back to the DApp.
This setup allows a DApp to connect to a remote Ethereum node and send transactions that are locally signed. This can
help in situations where a DApp is connected to a remote node because a local Ethereum node is not available, is not
synchronised with the chain, or has no built-in (or only limited) account management.
Clef can run as a daemon on the same machine, off a USB stick like the [usb armory](https://inversepath.com/usbarmory),
or in a separate VM in a [QubesOS](https://www.qubes-os.org/)-type setup.
Check out
* the [tutorial](tutorial.md) for some concrete examples on how the signer works.
* the [setup docs](docs/setup.md) for some information on how to configure it to work on QubesOS or USBArmory.
* the [data types](datatypes.md) for detailed information on the json types used in the communication between
clef and an external UI
## Command line flags
Clef accepts the following command line options:
```
COMMANDS:
init Initialize the signer, generate secret storage
attest Attest that a js-file is to be used
setpw Store a credential for a keystore file
gendoc Generate documentation about json-rpc format
help Shows a list of commands or help for one command
GLOBAL OPTIONS:
--loglevel value log level to emit to the screen (default: 4)
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
--configdir value Directory for Clef configuration (default: "$HOME/.clef")
--chainid value Chain id to use for signing (1=mainnet, 3=ropsten, 4=rinkeby, 5=Goerli) (default: 1)
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
--nousb Disables monitoring for and managing USB hardware wallets
--rpcaddr value HTTP-RPC server listening interface (default: "localhost")
--rpcvhosts value Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (default: "localhost")
--ipcdisable Disable the IPC-RPC server
--ipcpath Filename for IPC socket/pipe within the datadir (explicit paths escape it)
--rpc Enable the HTTP-RPC server
--rpcport value HTTP-RPC server listening port (default: 8550)
--signersecret value A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash
--4bytedb value File containing 4byte-identifiers (default: "./4byte.json")
--4bytedb-custom value File used for writing new 4byte-identifiers submitted via API (default: "./4byte-custom.json")
--auditlog value File used to emit audit logs. Set to "" to disable (default: "audit.log")
--rules value Enable rule-engine (default: "rules.json")
--stdio-ui Use STDIN/STDOUT as a channel for an external UI. This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user interface, and can be used when Clef is started by an external process.
--stdio-ui-test Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.
--advanced If enabled, issues warnings instead of rejections for suspicious requests. Default off
--help, -h show help
--version, -v print the version
```
Example:
```
signer -keystore /my/keystore -chainid 4
```
## Security model
The security model of the signer is as follows:
* One critical component (the signer binary / daemon) is responsible for handling cryptographic operations: signing, private keys, encryption/decryption of keystore files.
* The signer binary has a well-defined 'external' API.
* The 'external' API is considered UNTRUSTED.
* The signer binary also communicates with whatever process invoked it, via stdin/stdout.
* This channel is considered 'trusted'. Over this channel, approvals and passwords are communicated.
The general flow for signing a transaction using e.g. geth is as follows:
![image](sign_flow.png)
In this case, `geth` would be started with `--externalsigner=http://localhost:8550` and would relay requests to `eth.sendTransaction`.
## TODOs
Some snags and todos
* [ ] The signer should take a startup param "--no-change", for UIs that do not contain the capability
to perform changes to things, only approve/deny. Such a UI should be able to start the signer in
a more secure mode by telling it that it only wants approve/deny capabilities.
* [x] It would be nice if the signer could collect new 4byte-id:s/method selectors, and have a
secondary database for those (`4byte_custom.json`). Users could then (optionally) submit their collections for
inclusion upstream.
* It should be possible to configure the signer to check if an account is indeed known to it, before
passing on to the UI. The reason it currently does not, is that it would make it possible to enumerate
accounts if it immediately returned "unknown account".
* [x] It should be possible to configure the signer to auto-allow listing (certain) accounts, instead of asking every time.
* [x] Done Upon startup, the signer should spit out some info to the caller (particularly important when executed in `stdio-ui`-mode),
invoking methods with the following info:
* [x] Version info about the signer
* [x] Address of API (http/ipc)
* [ ] List of known accounts
* [ ] Have a default timeout on signing operations, so that if the user has not answered within e.g. 60 seconds, the request is rejected.
* [ ] `account_signRawTransaction`
* [ ] `account_bulkSignTransactions([] transactions)` should
* only exist if enabled via config/flag
* only allow non-data-sending transactions
* all txs must use the same `from`-account
* let the user confirm, showing
* the total amount
* the number of unique recipients
* Geth todos
- The signer should pass the `Origin` header as call-info to the UI. As of right now, the way that info about the request is
put together is a bit of a hack into the http server. This could probably be greatly improved
- Relay: Geth should be started in `geth --external_signer localhost:8550`.
- Currently, the Geth APIs use `common.Address` in the arguments to transaction submission (e.g `to` field). This
type is 20 `bytes`, and is incapable of carrying checksum information. The signer uses `common.MixedcaseAddress`, which
retains the original input.
- The Geth api should switch to use the same type, and relay `to`-account verbatim to the external api.
* [x] Storage
* [x] An encrypted key-value storage should be implemented
* See [rules.md](rules.md) for more info about this.
* Another potential thing to introduce is pairing.
* To prevent spurious requests which users just accept, implement a way to "pair" the caller with the signer (external API).
* Thus geth/mist/cpp would cryptographically handshake and afterwards the caller would be allowed to make signing requests.
* This feature would make the addition of rules less dangerous.
* Wallets / accounts. Add API methods for wallets.
## Communication
### External API
The signer listens for HTTP requests on `rpcaddr`:`rpcport`, using the same JSON-RPC standard as Geth. The messages are
expected to follow the [JSON-RPC 2.0 specification](http://www.jsonrpc.org/specification).
Some of these calls may require user interaction. Clients must be aware that responses
may be delayed significantly, or may never arrive, if a user decides to ignore the confirmation request.
The External API is **untrusted**: it does not accept credentials over this API, nor does it expect
that requests have any authority.
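As a minimal Go sketch, assuming Clef is running with the default `rpcaddr`/`rpcport` (`localhost:8550`), a client can post the documented `account_list` request over plain HTTP; only the standard library is used:
```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// JSON-RPC 2.0 request for the documented account_list method.
	body := []byte(`{"id":1,"jsonrpc":"2.0","method":"account_list","params":[]}`)

	// The external API is plain HTTP JSON-RPC; localhost:8550 is the default rpcaddr:rpcport.
	resp, err := http.Post("http://localhost:8550", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response may be delayed until the user approves or denies the request in the UI.
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```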
### UI API
The signer has one native console-based UI, for operation without any standalone tools.
However, there is also an API to communicate with an external UI. To enable that UI,
the signer needs to be executed with the `--stdio-ui` option, which allocates the
`stdin`/`stdout` for the UI-api.
An example (insecure) proof-of-concept has been implemented in `pythonsigner.py`.
The model is as follows:
* The user starts the UI app (`pythonsigner.py`).
* The UI app starts the `signer` with `--stdio-ui`, and listens to the
process output for confirmation-requests.
* The `signer` opens the external http api.
* When the `signer` receives requests, it sends a `jsonrpc` request via `stdout`.
* The UI app prompts the user accordingly, and responds to the `signer`
* The `signer` signs (or not), and responds to the original request.
## External API
See the [external api changelog](extapi_changelog.md) for information about changes to this API.
### Encoding
- number: positive integers that are hex encoded
- data: hex encoded data
- string: ASCII string
All hex encoded values must be prefixed with `0x`.
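As a minimal Go sketch of these rules (the helper names are illustrative, not part of any API):
```go
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// encodeNumber hex-encodes a positive integer with the mandatory 0x prefix.
func encodeNumber(n *big.Int) string {
	return "0x" + n.Text(16)
}

// encodeData hex-encodes raw bytes with the mandatory 0x prefix.
func encodeData(b []byte) string {
	return "0x" + hex.EncodeToString(b)
}

func main() {
	fmt.Println(encodeNumber(big.NewInt(4660))) // 0x1234
	fmt.Println(encodeData([]byte{0xab, 0xcd})) // 0xabcd
}
```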
## Methods
### account_new
#### Create new password protected account
The signer will generate a new private key, encrypt it according to the [web3 keystore spec](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) and store it in the keystore directory.
The client is responsible for creating a backup of the keystore. If the keystore is lost, there is no way to retrieve lost accounts.
#### Arguments
None
#### Result
- address [string]: account address that is derived from the generated key
- url [string]: location of the keyfile
#### Sample call
```json
{
"id": 0,
"jsonrpc": "2.0",
"method": "account_new",
"params": []
}
```
Response
```
{
"id": 0,
"jsonrpc": "2.0",
"result": {
"address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133",
"url": "keystore:///my/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133"
}
}
```
### account_list
#### List available accounts
List all accounts that this signer currently manages
#### Arguments
None
#### Result
- array with account records:
- account.address [string]: account address that is derived from the generated key
- account.type [string]: type of the account
- account.url [string]: location of the account
#### Sample call
```json
{
"id": 1,
"jsonrpc": "2.0",
"method": "account_list"
}
```
Response
```
{
"id": 1,
"jsonrpc": "2.0",
"result": [
{
"address": "0xafb2f771f58513609765698f65d3f2f0224a956f",
"type": "account",
"url": "keystore:///tmp/keystore/UTC--2017-08-24T07-26-47.162109726Z--afb2f771f58513609765698f65d3f2f0224a956f"
},
{
"address": "0xbea9183f8f4f03d427f6bcea17388bdff1cab133",
"type": "account",
"url": "keystore:///tmp/keystore/UTC--2017-08-24T08-40-15.419655028Z--bea9183f8f4f03d427f6bcea17388bdff1cab133"
}
]
}
```
### account_signTransaction
#### Sign transactions
Signs a transaction and responds with the signed transaction in RLP-encoded form.
#### Arguments
1. transaction object:
- `from` [address]: account to send the transaction from
- `to` [address]: receiver account. If omitted or `0x`, will cause contract creation.
- `gas` [number]: maximum amount of gas to burn
- `gasPrice` [number]: gas price
- `value` [number:optional]: amount of Wei to send with the transaction
- `data` [data:optional]: input data
- `nonce` [number]: account nonce
2. method signature [string:optional]
- The method signature, if present, is meant to aid decoding of the calldata. It should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata and show it to the user. The data, however, is considered totally untrusted, and reliability is not expected.
#### Result
- signed transaction in RLP encoded form [data]
#### Sample call
```json
{
"id": 2,
"jsonrpc": "2.0",
"method": "account_signTransaction",
"params": [
{
"from": "0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db",
"gas": "0x55555",
"gasPrice": "0x1234",
"input": "0xabcd",
"nonce": "0x0",
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"value": "0x1234"
}
]
}
```
Response
```json
{
"id": 2,
"jsonrpc": "2.0",
"error": {
"code": -32000,
"message": "Request denied"
}
}
```
#### Sample call with ABI-data
```json
{
"id": 67,
"jsonrpc": "2.0",
"method": "account_signTransaction",
"params": [
{
"from": "0x694267f14675d7e1b9494fd8d72fefe1755710fa",
"gas": "0x333",
"gasPrice": "0x1",
"nonce": "0x0",
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"value": "0x0",
"data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"
},
"safeSend(address)"
]
}
```
Response
```json
{
"jsonrpc": "2.0",
"id": 67,
"result": {
"raw": "0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
"tx": {
"nonce": "0x0",
"gasPrice": "0x1",
"gas": "0x333",
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"value": "0x0",
"input": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
"v": "0x26",
"r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e",
"s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663",
"hash": "0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"
}
}
}
```
Bash example:
```bash
#curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
{"jsonrpc":"2.0","id":67,"result":{"raw":"0xf88380018203339407a565b7ed7d7a678680a4c162885bedbb695fe080a44401a6e4000000000000000000000000000000000000000000000000000000000000001226a0223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20ea02aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","tx":{"nonce":"0x0","gasPrice":"0x1","gas":"0x333","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0","value":"0x0","input":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012","v":"0x26","r":"0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e","s":"0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663","hash":"0xeba2df809e7a612a0a0d444ccfa5c839624bdc00dd29e3340d46df3870f8a30e"}}}
```
### account_signData
#### Sign data
Signs a chunk of data and returns the calculated signature.
#### Arguments
- content type [string]: type of signed data
- `text/validator`: hex data with custom validator defined in a contract
- `application/clique`: [clique](https://github.com/ethereum/EIPs/issues/225) headers
- `text/plain`: simple hex data validated by `account_ecRecover`
- account [address]: account to sign with
- data [object]: data to sign
#### Result
- calculated signature [data]
#### Sample call
```json
{
"id": 3,
"jsonrpc": "2.0",
"method": "account_signData",
"params": [
"data/plain",
"0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db",
"0xaabbccdd"
]
}
```
Response
```json
{
"id": 3,
"jsonrpc": "2.0",
"result": "0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c"
}
```
### account_signTypedData
#### Sign data
Signs a chunk of structured data conformant to [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md) and returns the calculated signature.
#### Arguments
- account [address]: account to sign with
- data [object]: data to sign
#### Result
- calculated signature [data]
#### Sample call
```json
{
"id": 68,
"jsonrpc": "2.0",
"method": "account_signTypedData",
"params": [
"0xcd2a3d9f938e13cd947ec05abc7fe734df8dd826",
{
"types": {
"EIP712Domain": [
{
"name": "name",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "chainId",
"type": "uint256"
},
{
"name": "verifyingContract",
"type": "address"
}
],
"Person": [
{
"name": "name",
"type": "string"
},
{
"name": "wallet",
"type": "address"
}
],
"Mail": [
{
"name": "from",
"type": "Person"
},
{
"name": "to",
"type": "Person"
},
{
"name": "contents",
"type": "string"
}
]
},
"primaryType": "Mail",
"domain": {
"name": "Ether Mail",
"version": "1",
"chainId": 1,
"verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
},
"message": {
"from": {
"name": "Cow",
"wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
},
"to": {
"name": "Bob",
"wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
},
"contents": "Hello, Bob!"
}
}
]
}
```
Response
```json
{
"id": 1,
"jsonrpc": "2.0",
"result": "0x4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c"
}
```
### account_ecRecover
#### Recover address
Derive the address from the account that was used to sign data with content type `text/plain` and the signature.
#### Arguments
- data [data]: data that was signed
- signature [data]: the signature to verify
#### Result
- derived account [address]
#### Sample call
```json
{
"id": 4,
"jsonrpc": "2.0",
"method": "account_ecRecover",
"params": [
"data/plain",
"0xaabbccdd",
"0x5b6693f153b48ec1c706ba4169960386dbaa6903e249cc79a8e6ddc434451d417e1e57327872c7f538beeb323c300afa9999a3d4a5de6caf3be0d5ef832b67ef1c"
]
}
```
Response
```json
{
"id": 4,
"jsonrpc": "2.0",
"result": "0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db"
}
```
### account_import
#### Import account
Import a private key into the keystore. The imported key is expected to be encrypted according to the web3 keystore
format.
#### Arguments
- account [object]: key in [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) (retrieved with account_export)
#### Result
- imported key [object]:
- key.address [address]: address of the imported key
- key.type [string]: type of the account
- key.url [string]: key URL
#### Sample call
```json
{
"id": 6,
"jsonrpc": "2.0",
"method": "account_import",
"params": [
{
"address": "c7412fc59930fd90099c917a50e5f11d0934b2f5",
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {
"iv": "401c39a7c7af0388491c3d3ecb39f532"
},
"ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 262144,
"p": 1,
"r": 8,
"salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a"
},
"mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806"
},
"id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9",
"version": 3
}
]
}
```
Response
```json
{
"id": 6,
"jsonrpc": "2.0",
"result": {
"address": "0xc7412fc59930fd90099c917a50e5f11d0934b2f5",
"type": "account",
"url": "keystore:///tmp/keystore/UTC--2017-08-24T11-00-42.032024108Z--c7412fc59930fd90099c917a50e5f11d0934b2f5"
}
}
```
### account_export
#### Export account from keystore
Export a private key from the keystore. The exported private key is encrypted with the original passphrase. When the
key is imported later this passphrase is required.
#### Arguments
- account [address]: export private key that is associated with this account
#### Result
- exported key, see [web3 keystore format](https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition) for
more information
#### Sample call
```json
{
"id": 5,
"jsonrpc": "2.0",
"method": "account_export",
"params": [
"0xc7412fc59930fd90099c917a50e5f11d0934b2f5"
]
}
```
Response
```json
{
"id": 5,
"jsonrpc": "2.0",
"result": {
"address": "c7412fc59930fd90099c917a50e5f11d0934b2f5",
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {
"iv": "401c39a7c7af0388491c3d3ecb39f532"
},
"ciphertext": "eb045260b18dd35cd0e6d99ead52f8fa1e63a6b0af2d52a8de198e59ad783204",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 262144,
"p": 1,
"r": 8,
"salt": "9a657e3618527c9b5580ded60c12092e5038922667b7b76b906496f021bb841a"
},
"mac": "880dc10bc06e9cec78eb9830aeb1e7a4a26b4c2c19615c94acb632992b952806"
},
"id": "09bccb61-b8d3-4e93-bf4f-205a8194f0b9",
"version": 3
}
}
```
## UI API
These methods need to be implemented by a UI listener.
When the signer is started with the switch `--stdio-ui-test`, it will invoke all known methods and expect the UI to respond with
denials. This can be used during development to ensure that the API is (at least somewhat) correctly implemented.
See `pythonsigner`, which can be invoked via `python3 pythonsigner.py test` to perform the 'denial-handshake-test'.
All methods in this API use object-based parameters, so that there can be no mixups of parameters: each piece of data is accessed by key.
See the [ui api changelog](intapi_changelog.md) for information about changes to this API.
OBS! A slight deviation from the `json` standard is in place: every request and response should be confined to a single line.
Whereas the `json` specification allows for line breaks, line breaks __should not__ be used in this communication channel, to make
things simpler for both parties.
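As an illustration of the one-line convention, a minimal Go sketch (the `request` type is hypothetical) that emits a message on `stdout`; `json.Marshal` never produces line breaks, so each message occupies exactly one line:
```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// request is a minimal JSON-RPC envelope for the stdin/stdout channel.
type request struct {
	JSONRPC string        `json:"jsonrpc"`
	ID      int           `json:"id"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
}

func main() {
	req := request{
		JSONRPC: "2.0",
		ID:      1,
		Method:  "ui_showInfo",
		Params:  []interface{}{map[string]string{"text": "Tests completed"}},
	}
	// json.Marshal produces no line breaks; the trailing newline terminates the message.
	line, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stdout, "%s\n", line)
}
```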
### ApproveTx / `ui_approveTx`
Invoked when there's a transaction for approval.
#### Sample call
Here's a method invocation:
```bash
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
```
Results in the following invocation on the UI:
```json
{
"jsonrpc": "2.0",
"id": 1,
"method": "ui_approveTx",
"params": [
{
"transaction": {
"from": "0x0x694267f14675d7e1b9494fd8d72fefe1755710fa",
"to": "0x0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"gas": "0x333",
"gasPrice": "0x1",
"value": "0x0",
"nonce": "0x0",
"data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
"input": null
},
"call_info": [
{
"type": "WARNING",
"message": "Invalid checksum on to-address"
},
{
"type": "Info",
"message": "safeSend(address: 0x0000000000000000000000000000000000000012)"
}
],
"meta": {
"remote": "127.0.0.1:48486",
"local": "localhost:8550",
"scheme": "HTTP/1.1"
}
}
]
}
```
The same method invocation, but with invalid data:
```bash
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x694267f14675d7e1b9494fd8d72fefe1755710fa","gas":"0x333","gasPrice":"0x1","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x0", "data":"0x4401a6e40000000000000002000000000000000000000000000000000000000000000012"},"safeSend(address)"],"id":67}' http://localhost:8550/
```
```json
{
"jsonrpc": "2.0",
"id": 1,
"method": "ui_approveTx",
"params": [
{
"transaction": {
"from": "0x0x694267f14675d7e1b9494fd8d72fefe1755710fa",
"to": "0x0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"gas": "0x333",
"gasPrice": "0x1",
"value": "0x0",
"nonce": "0x0",
"data": "0x4401a6e40000000000000002000000000000000000000000000000000000000000000012",
"input": null
},
"call_info": [
{
"type": "WARNING",
"message": "Invalid checksum on to-address"
},
{
"type": "WARNING",
"message": "Transaction data did not match ABI-interface: WARNING: Supplied data is stuffed with extra data. \nWant 0000000000000002000000000000000000000000000000000000000000000012\nHave 0000000000000000000000000000000000000000000000000000000000000012\nfor method safeSend(address)"
}
],
"meta": {
"remote": "127.0.0.1:48492",
"local": "localhost:8550",
"scheme": "HTTP/1.1"
}
}
]
}
```
One which has missing `to`, but with no `data`:
```json
{
"jsonrpc": "2.0",
"id": 3,
"method": "ui_approveTx",
"params": [
{
"transaction": {
"from": "",
"to": null,
"gas": "0x0",
"gasPrice": "0x0",
"value": "0x0",
"nonce": "0x0",
"data": null,
"input": null
},
"call_info": [
{
"type": "CRITICAL",
"message": "Tx will create contract with empty code!"
}
],
"meta": {
"remote": "signer binary",
"local": "main",
"scheme": "in-proc"
}
}
]
}
```
### ApproveListing / `ui_approveListing`
Invoked when a request for account listing has been made.
#### Sample call
```json
{
"jsonrpc": "2.0",
"id": 5,
"method": "ui_approveListing",
"params": [
{
"accounts": [
{
"type": "Account",
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-20T14-44-54.089682944Z--123409812340981234098123409812deadbeef42",
"address": "0x123409812340981234098123409812deadbeef42"
},
{
"type": "Account",
"url": "keystore:///home/bazonk/.ethereum/keystore/UTC--2017-11-23T21-59-03.199240693Z--cafebabedeadbeef34098123409812deadbeef42",
"address": "0xcafebabedeadbeef34098123409812deadbeef42"
}
],
"meta": {
"remote": "signer binary",
"local": "main",
"scheme": "in-proc"
}
}
]
}
```
### ApproveSignData / `ui_approveSignData`
#### Sample call
```json
{
"jsonrpc": "2.0",
"id": 4,
"method": "ui_approveSignData",
"params": [
{
"address": "0x123409812340981234098123409812deadbeef42",
"raw_data": "0x01020304",
"message": "\u0019Ethereum Signed Message:\n4\u0001\u0002\u0003\u0004",
"hash": "0x7e3a4e7a9d1744bc5c675c25e1234ca8ed9162bd17f78b9085e48047c15ac310",
"meta": {
"remote": "signer binary",
"local": "main",
"scheme": "in-proc"
}
}
]
}
```
### ShowInfo / `ui_showInfo`
The UI should show the info to the user. Does not expect a response.
#### Sample call
```json
{
"jsonrpc": "2.0",
"id": 9,
"method": "ui_showInfo",
"params": [
{
"text": "Tests completed"
}
]
}
```
### ShowError / `ui_showError`
The UI should show the error message to the user. Does not expect a response.
```json
{
"jsonrpc": "2.0",
"id": 2,
"method": "ShowError",
"params": [
{
"text": "Testing 'ShowError'"
}
]
}
```
### OnApprovedTx / `ui_onApprovedTx`
`OnApprovedTx` is called when a transaction has been approved and signed. The call contains the return value that will be sent to the external caller. The return value from this method is ignored - the reason for having this callback is to allow the ruleset to keep track of approved transactions.
When implementing rate-limited rules, this callback should be used.
TLDR; Use this method to keep track of signed transactions, instead of using the data in `ApproveTx`.
### OnSignerStartup / `ui_onSignerStartup`
This method provides the UI with information about the API versions the signer uses (both internal and external) as well as build info and the external API endpoints,
in key/value form.
Example call:
```json
{
"jsonrpc": "2.0",
"id": 1,
"method": "ui_onSignerStartup",
"params": [
{
"info": {
"extapi_http": "http://localhost:8550",
"extapi_ipc": null,
"extapi_version": "2.0.0",
"intapi_version": "1.2.0"
}
}
]
}
```
### Rules for UI apis
A UI should conform to the following rules.
* A UI MUST NOT load any external resources that were not embedded/part of the UI package.
* For example, not load icons, stylesheets from the internet
* Not load files from the filesystem, unless they reside in the same local directory (e.g. config files)
* A Graphical UI MUST show the blocky-identicon for ethereum addresses.
* A UI MUST display an appropriate warning if the destination account is formatted with an invalid checksum.
* A UI MUST NOT open any ports or services
* The signer opens the public port
* A UI SHOULD verify the permissions on the signer binary, and refuse to execute or warn if permissions allow non-user write.
* A UI SHOULD inform the user about the `SHA256` or `MD5` hash of the binary being executed (a rough sketch of both checks follows this list)
* A UI SHOULD NOT maintain a secondary storage of data, e.g. list of accounts
* The signer provides accounts
* A UI SHOULD, to the best extent possible, use static linking / bundling, so that required libraries are bundled
along with the UI.
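As referenced above, here is a rough Go sketch of the two SHOULD rules about the signer binary (verify permissions, report the hash); the binary path is an assumption and only the standard library is used:
```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Assumed location of the signer binary; adjust for your setup.
	path := "/home/user/tools/clef/clef"

	fi, err := os.Stat(path)
	if err != nil {
		log.Fatal(err)
	}
	// Warn if group or others can write to the binary (non-user write).
	if fi.Mode().Perm()&0022 != 0 {
		fmt.Println("WARNING: signer binary is writable by group/others")
	}

	// Report the SHA256 hash of the binary being executed.
	f, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signer binary SHA256: %x\n", h.Sum(nil))
}
```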
### UI Implementations
There are a couple of implementations of a UI. We'll try to keep this list up to date.
| Name | Repo | UI type| No external resources| Blocky support| Verifies permissions | Hash information | No secondary storage | Statically linked| Can modify parameters|
| ---- | ---- | -------| ---- | ---- | ---- |---- | ---- | ---- | ---- |
| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
| Clef UI| https://github.com/kyokan/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|

View File

@ -0,0 +1,224 @@
## UI Client interface
These data types are defined in the channel between clef and the UI
### SignDataRequest
SignDataRequest contains information about a pending request to sign some data. The data to be signed can be of various types, defined by content-type. Clef has done most of the work in canonicalizing and making sense of the data, and it's up to the UI to present the user with the contents of the `message`.
Example:
```json
{
"content_type": "text/plain",
"address": "0xDEADbEeF000000000000000000000000DeaDbeEf",
"raw_data": "GUV0aGVyZXVtIFNpZ25lZCBNZXNzYWdlOgoxMWhlbGxvIHdvcmxk",
"message": [
{
"name": "message",
"value": "\u0019Ethereum Signed Message:\n11hello world",
"type": "text/plain"
}
],
"hash": "0xd9eba16ed0ecae432b71fe008c98cc872bb4cc214d3220a36f365326cf807d68",
"meta": {
"remote": "localhost:9999",
"local": "localhost:8545",
"scheme": "http",
"User-Agent": "Firefox 3.2",
"Origin": "www.malicious.ru"
}
}
```
### SignDataResponse - approve
Response to SignDataRequest
Example:
```json
{
"approved": true
}
```
### SignDataResponse - deny
Response to SignDataRequest
Example:
```json
{
"approved": false
}
```
### SignTxRequest
SignTxRequest contains information about a pending request to sign a transaction. Aside from the transaction itself, there is also a `call_info` struct. That struct contains messages of various types that the user should be informed of.
As in any request, it's important to consider that the `meta` info also contains untrusted data.
The `transaction` (on input into clef) can have either `data` or `input` -- if both are set, they must be identical, otherwise an error is generated. However, Clef will always use `data` when passing this struct on (if Clef does otherwise, please file a ticket). A small sketch of this check follows the example below.
Example:
```json
{
"transaction": {
"from": "0xDEADbEeF000000000000000000000000DeaDbeEf",
"to": null,
"gas": "0x3e8",
"gasPrice": "0x5",
"value": "0x6",
"nonce": "0x1",
"data": "0x01020304"
},
"call_info": [
{
"type": "Warning",
"message": "Something looks odd, show this message as a warning"
},
{
"type": "Info",
"message": "User should see this aswell"
}
],
"meta": {
"remote": "localhost:9999",
"local": "localhost:8545",
"scheme": "http",
"User-Agent": "Firefox 3.2",
"Origin": "www.malicious.ru"
}
}
```
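As referenced above, a minimal Go sketch of the `data`/`input` consistency rule (the `normalizeData` helper is hypothetical, not Clef's actual code):
```go
package main

import (
	"errors"
	"fmt"
)

// normalizeData applies the documented rule: data and input may not conflict,
// and data is what gets passed on.
func normalizeData(data, input *string) (*string, error) {
	switch {
	case data == nil && input == nil:
		return nil, nil
	case data != nil && input != nil && *data != *input:
		return nil, errors.New("both data and input set, but not identical")
	case data != nil:
		return data, nil
	default:
		return input, nil
	}
}

func main() {
	d, in := "0x01020304", "0x01020304"
	out, err := normalizeData(&d, &in)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("using data:", *out)
}
```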
### SignTxResponse - approve
Response to request to sign a transaction. This response needs to contain the `transaction`, because the UI is free to make modifications to the transaction.
Example:
```json
{
"transaction": {
"from": "0xDEADbEeF000000000000000000000000DeaDbeEf",
"to": null,
"gas": "0x3e8",
"gasPrice": "0x5",
"value": "0x6",
"nonce": "0x4",
"data": "0x04030201"
},
"approved": true
}
```
### SignTxResponse - deny
Response to SignTxRequest. When denying a request, there's no need to provide the transaction in return.
Example:
```json
{
"transaction": {
"from": "0x",
"to": null,
"gas": "0x0",
"gasPrice": "0x0",
"value": "0x0",
"nonce": "0x0",
"data": null
},
"approved": false
}
```
### OnApproved - SignTransactionResult
SignTransactionResult is used in the call `clef` -> `OnApprovedTx(result)`
This occurs _after_ successful completion of the entire signing procedure, but right before the signed transaction is passed to the external caller. This method (and data) can be used by the UI to signal to the user that the transaction was signed, but it is primarily useful for ruleset implementations.
A ruleset that implements a rate limitation needs to know what transactions are sent out to the external interface. By hooking into this method, the ruleset can keep track of that count.
**OBS:** Note that if an attacker can restore your `clef` data to a previous point in time (e.g through a backup), the attacker can reset such windows, even if he/she is unable to decrypt the content.
The `OnApproved` method cannot be responded to, it's purely informative
Example:
```json
{
"raw": "0xf85d640101948a8eafb1cf62bfbeb1741769dae1a9dd47996192018026a0716bd90515acb1e68e5ac5867aa11a1e65399c3349d479f5fb698554ebc6f293a04e8a4ebfff434e971e0ef12c5bf3a881b06fd04fc3f8b8a7291fb67a26a1d4ed",
"tx": {
"nonce": "0x64",
"gasPrice": "0x1",
"gas": "0x1",
"to": "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192",
"value": "0x1",
"input": "0x",
"v": "0x26",
"r": "0x716bd90515acb1e68e5ac5867aa11a1e65399c3349d479f5fb698554ebc6f293",
"s": "0x4e8a4ebfff434e971e0ef12c5bf3a881b06fd04fc3f8b8a7291fb67a26a1d4ed",
"hash": "0x662f6d772692dd692f1b5e8baa77a9ff95bbd909362df3fc3d301aafebde5441"
}
}
```
### UserInputRequest
Sent when clef needs the user to provide data. If `isPassword` is true, the input field should be treated accordingly (echo-free).
Example:
```json
{
"prompt": "The question to ask the user",
"title": "The title here",
"isPassword": true
}
```
### UserInputResponse
Response to UserInputRequest
Example:
```json
{
"text": "The textual response from user"
}
```
### ListRequest
Sent when a request has been made to list addresses. The UI is provided with the full `account`s, including local directory names. Note: this information is not passed back to the external caller, who only sees the `address`es.
Example:
```json
{
"accounts": [
{
"address": "0xdeadbeef000000000000000000000000deadbeef",
"url": "keystore:///path/to/keyfile/a"
},
{
"address": "0x1111111122222222222233333333334444444444",
"url": "keystore:///path/to/keyfile/b"
}
],
"meta": {
"remote": "localhost:9999",
"local": "localhost:8545",
"scheme": "http",
"User-Agent": "Firefox 3.2",
"Origin": "www.malicious.ru"
}
}
```
### ListResponse
Response to a list request. The response contains a list of all addresses to show to the caller. Note: the UI is free to respond with any addresses to the caller, regardless of whether they exist or not.
Example:
```json
{
"accounts": [
{
"address": "0x0000000000000000000000000000000000000000",
"url": ".. ignored .."
},
{
"address": "0xffffffffffffffffffffffffffffffffffffffff",
"url": ""
}
]
}
```

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 115 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

View File

@ -0,0 +1,23 @@
"""
This implements a dispatcher which listens to localhost:8550, and proxies
requests via qrexec to the service qubes.EthSign on a target domain
"""
import http.server
import socketserver,subprocess
PORT=8550
TARGET_DOMAIN= 'debian-work'
class Dispatcher(http.server.BaseHTTPRequestHandler):
def do_POST(self):
post_data = self.rfile.read(int(self.headers['Content-Length']))
p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = p.communicate(post_data)[0]
self.wfile.write(output)
with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
print("Serving at port", PORT)
httpd.serve_forever()

View File

@ -0,0 +1,16 @@
#!/bin/bash
SIGNER_BIN="/home/user/tools/clef/clef"
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"
# Start clef if not already started
if [ ! -S /home/user/.clef/clef.ipc ]; then
$SIGNER_CMD &
sleep 1
fi
# Should be started by now
if [ -S /home/user/.clef/clef.ipc ]; then
# Post incoming request to HTTP channel
curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
fi

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

View File

@ -0,0 +1,198 @@
# Setting up Clef
This document describes how Clef can be used in a more secure manner than executing it from your everyday laptop,
in order to ensure that the keys remain safe in the event that your computer should get compromised.
## Qubes OS
### Background
The Qubes operating system is based around virtual machines (qubes), where a set of virtual machines are configured, typically for
different purposes such as:
- personal
- Your personal email, browsing etc
- work
- Work email etc
- vault
- a VM without network access, where gpg-keys and/or keepass credentials are stored.
A couple of dedicated virtual machines handle externalities:
- sys-net provides networking to all other (network-enabled) machines
- sys-firewall handles firewall rules
- sys-usb handles USB devices, and can map usb-devices to certain qubes.
The goal of this document is to describe how we can set up clef to provide secure transaction
signing from a `vault` vm, to another networked qube which runs Dapps.
### Setup
There are two ways that this can be achieved: integrated via Qubes or integrated via networking.
#### 1. Qubes Integrated
Qubes provides a facility for inter-qube communication via `qrexec`. A qube can request to make a cross-qube RPC request
to another qube. The OS then asks the user if the call is permitted.
![Example](qubes/qrexec-example.png)
A policy-file can be created to allow such interaction. On the `target` domain, a service is invoked which can read the
`stdin` from the `client` qube.
This is how [Split GPG](https://www.qubes-os.org/doc/split-gpg/) is implemented. We can set up Clef the same way:
##### Server
![Clef via qrexec](qubes/clef_qubes_qrexec.png)
On the `target` qubes, we need to define the rpc service.
[qubes.Clefsign](qubes/qubes.Clefsign):
```bash
#!/bin/bash
SIGNER_BIN="/home/user/tools/clef/clef"
SIGNER_CMD="/home/user/tools/gtksigner/gtkui.py -s $SIGNER_BIN"
# Start clef if not already started
if [ ! -S /home/user/.clef/clef.ipc ]; then
$SIGNER_CMD &
sleep 1
fi
# Should be started by now
if [ -S /home/user/.clef/clef.ipc ]; then
# Post incoming request to HTTP channel
curl -H "Content-Type: application/json" -X POST -d @- http://localhost:8550 2>/dev/null
fi
```
This RPC service is not complete (see notes about HTTP headers below), but works as a proof-of-concept.
It will forward the data received on `stdin` (forwarded by the OS) to Clef's HTTP channel.
It would have been possible to send data directly to the `/home/user/.clef/clef.ipc`
socket via e.g. `nc -U /home/user/.clef/clef.ipc`, but the reason for sending the request
data over `HTTP` instead of `IPC` is that we want the ability to forward `HTTP` headers.
To enable the service:
``` bash
sudo cp qubes.Clefsign /etc/qubes-rpc/
sudo chmod +x /etc/qubes-rpc/qubes.Clefsign
```
This setup uses [gtksigner](https://github.com/holiman/gtksigner), which is a very minimal GTK-based UI that works well
with minimal requirements.
##### Client
On the `client` qube, we need to create a listener which will receive the request from the Dapp, and proxy it.
[qubes-client.py](qubes/client/qubes-client.py):
```python
"""
This implements a dispatcher which listens to localhost:8550, and proxies
requests via qrexec to the service qubes.EthSign on a target domain
"""
import http.server
import socketserver,subprocess
PORT=8550
TARGET_DOMAIN= 'debian-work'
class Dispatcher(http.server.BaseHTTPRequestHandler):
def do_POST(self):
post_data = self.rfile.read(int(self.headers['Content-Length']))
p = subprocess.Popen(['/usr/bin/qrexec-client-vm',TARGET_DOMAIN,'qubes.Clefsign'],stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output = p.communicate(post_data)[0]
self.wfile.write(output)
with socketserver.TCPServer(("",PORT), Dispatcher) as httpd:
print("Serving at port", PORT)
httpd.serve_forever()
```
#### Testing
To test the flow, if we have set up `debian-work` as the `target`, we can do
```bash
$ cat newaccnt.json
{ "id": 0, "jsonrpc": "2.0","method": "account_new","params": []}
$ cat newaccnt.json| qrexec-client-vm debian-work qubes.Clefsign
```
This should pop up first a dialog to allow the IPC call:
![one](qubes/qubes_newaccount-1.png)
Followed by a GTK-dialog to approve the operation
![two](qubes/qubes_newaccount-2.png)
To test the full flow, we use the client wrapper. Start it on the `client` qube:
```
[user@work qubes]$ python3 qubes-client.py
```
Make the request over http (`client` qube):
```
[user@work clef]$ cat newaccnt.json | curl -X POST -d @- http://localhost:8550
```
And it should show the same popups again.
##### Pros and cons
The benefits of this setup are:
- This is the qubes-os intended model for inter-qube communication,
- and thus benefits from qubes-os dialogs and policies for user approval
However, it comes with a couple of drawbacks:
- The `qubes-gpg-client` must forward the http request via RPC to the `target` qube. When doing so, the proxy
will either drop important headers, or replace them.
- The `Host` header is most likely `localhost`
- The `Origin` header must be forwarded
- Information about the remote ip must be added as a `X-Forwarded-For`. However, Clef cannot always trust an `XFF` header,
since malicious clients may lie about `XFF` in order to fool the http server into believing it comes from another address.
- Even with a policy in place to allow rpc-calls between `caller` and `target`, there will be several popups:
- One qubes-specific where the user specifies the `target` vm
- One clef-specific to approve the transaction
#### 2. Network integrated
The second way to set up Clef on a qubes system is to allow networking, and have Clef listen to a port which is accessible
from other qubes.
![Clef via http](qubes/clef_qubes_http.png)
## USBArmory
The [USB armory](https://inversepath.com/usbarmory) is an open source hardware design with an 800 MHz ARM processor. It is a pocket-size
computer. When inserted into a laptop, it identifies itself as a USB network interface, basically adding another network
to your computer. Over this new network interface, you can SSH into the device.
Running Clef off a USB armory means that you can use the armory as a very versatile offline computer, which only
ever connects to a local network between your computer and the device itself.
Needless to say, while this model should be fairly secure against remote attacks, an attacker with physical access
to the USB Armory would trivially be able to extract the contents of the device filesystem.

View File

@ -0,0 +1,47 @@
### Changelog for external API
#### 6.0.0
* `New` was changed to deliver only an address, not the full `Account` data
* `Export` was moved from External API to the UI Server API
#### 5.0.0
* The external `account_EcRecover`-method was reimplemented.
* The external method `account_sign(address, data)` was replaced with `account_signData(contentType, address, data)`.
The addition of `contentType` makes it possible to use the method for different types of objects, such as:
* signing data with an intended validator (not yet implemented)
* signing clique headers,
* signing plain personal messages,
* The external method `account_signTypedData` implements [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md) and makes it possible to sign typed data.
#### 4.0.0
* The external `account_Ecrecover`-method was removed.
* The external `account_Import`-method was removed.
#### 3.0.0
* The external `account_List`-method was changed to not expose `url`, which contained info about the local filesystem. It now returns only a list of addresses.
#### 2.0.0
* Commit `73abaf04b1372fa4c43201fb1b8019fe6b0a6f8d`, move `from` into `transaction` object in `signTransaction`. This
makes the `accounts_signTransaction` identical to the old `eth_signTransaction`.
#### 1.0.0
Initial release.
### Versioning
The API uses [semantic versioning](https://semver.org/).
TLDR; Given a version number MAJOR.MINOR.PATCH, increment the:
* MAJOR version when you make incompatible API changes,
* MINOR version when you add functionality in a backwards-compatible manner, and
* PATCH version when you make backwards-compatible bug fixes.
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.

View File

@ -0,0 +1,175 @@
### Changelog for internal API (ui-api)
### 6.0.0
Removed `password` from responses to operations which require them. This is for two reasons,
- Consistency between how rulesets operate and how manual processing works. A rule can `Approve` but require the actual password to be stored in the clef storage.
With this change, the same stored password can be used even if rulesets are not enabled, but storage is.
- It also removes the usability-shortcut that a UI might otherwise want to implement; remembering passwords. Since we now will not require the
password on every `Approve`, there's no need for the UI to cache it locally.
- In a future update, we'll likely add `clef_storePassword` to the internal API, so the user can store it via his UI (currently only CLI works).
Affected datatypes:
- `SignTxResponse`
- `SignDataResponse`
- `NewAccountResponse`
If `clef` requires a password, the `OnInputRequired` will be used to collect it.
### 5.0.0
Changed the namespace format to adhere to the legacy ethereum format: `name_methodName`. Changes:
* `ApproveTx` -> `ui_approveTx`
* `ApproveSignData` -> `ui_approveSignData`
* `ApproveExport` -> `removed`
* `ApproveImport` -> `removed`
* `ApproveListing` -> `ui_approveListing`
* `ApproveNewAccount` -> `ui_approveNewAccount`
* `ShowError` -> `ui_showError`
* `ShowInfo` -> `ui_showInfo`
* `OnApprovedTx` -> `ui_onApprovedTx`
* `OnSignerStartup` -> `ui_onSignerStartup`
* `OnInputRequired` -> `ui_onInputRequired`
### 4.0.0
* Bidirectional communication implemented, so the UI can query `clef` via the stdin/stdout RPC channel. Methods implemented are:
- `clef_listWallets`
- `clef_listAccounts`
- `clef_listWallets`
- `clef_deriveAccount`
- `clef_importRawKey`
- `clef_openWallet`
- `clef_chainId`
- `clef_setChainId`
- `clef_export`
- `clef_import`
* The type `Account` was modified (the json-field `type` was removed), to consist of
```golang
type Account struct {
Address common.Address `json:"address"` // Ethereum account address derived from the key
URL URL `json:"url"` // Optional resource locator within a backend
}
```
### 3.2.0
* Make `ShowError`, `OnApprovedTx`, `OnSignerStartup` be json-rpc [notifications](https://www.jsonrpc.org/specification#notification):
> A Notification is a Request object without an "id" member. A Request object that is a Notification signifies the Client's lack of interest in the corresponding Response object, and as such no Response object needs to be returned to the client. The Server MUST NOT reply to a Notification, including those that are within a batch request.
>
> Notifications are not confirmable by definition, since they do not have a Response object to be returned. As such, the Client would not be aware of any errors (like e.g. "Invalid params", "Internal error").
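For illustration (not clef's actual implementation), a notification in Go is simply a request envelope whose `id` member is omitted:
```go
package main

import (
	"encoding/json"
	"fmt"
)

// notification mirrors a JSON-RPC request but omits the "id" member entirely.
type notification struct {
	JSONRPC string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
}

func main() {
	n := notification{
		JSONRPC: "2.0",
		Method:  "ui_showInfo",
		Params:  []interface{}{map[string]string{"text": "no response expected"}},
	}
	b, err := json.Marshal(n)
	if err != nil {
		panic(err)
	}
	// {"jsonrpc":"2.0","method":"ui_showInfo","params":[{"text":"no response expected"}]}
	fmt.Println(string(b))
}
```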
### 3.1.0
* Add `ContentType` `string` to `SignDataRequest` to accommodate the latest EIP-191 and EIP-712 implementations.
### 3.0.0
* Make use of `OnInputRequired(info UserInputRequest)` for obtaining master password during startup
### 2.1.0
* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
The following structures are used:
```golang
UserInputRequest struct {
Prompt string `json:"prompt"`
Title string `json:"title"`
IsPassword bool `json:"isPassword"`
}
UserInputResponse struct {
Text string `json:"text"`
}
```
### 2.0.0
* Modify how `call_info` on a transaction is conveyed. New format:
```
{
"jsonrpc": "2.0",
"id": 2,
"method": "ApproveTx",
"params": [
{
"transaction": {
"from": "0x82A2A876D39022B3019932D30Cd9c97ad5616813",
"to": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"gas": "0x333",
"gasPrice": "0x123",
"value": "0x10",
"nonce": "0x0",
"data": "0x4401a6e40000000000000000000000000000000000000000000000000000000000000012",
"input": null
},
"call_info": [
{
"type": "WARNING",
"message": "Invalid checksum on to-address"
},
{
"type": "WARNING",
"message": "Tx contains data, but provided ABI signature could not be matched: Did not match: test (0 matches)"
}
],
"meta": {
"remote": "127.0.0.1:54286",
"local": "localhost:8550",
"scheme": "HTTP/1.1"
}
}
]
}
```
### 1.2.0
* Add `OnStartup` method, to provide the UI with information about what API version
the signer uses (both internal and external) as well as build info and the external API endpoints.
Example call:
```json
{
"jsonrpc": "2.0",
"id": 1,
"method": "OnSignerStartup",
"params": [
{
"info": {
"extapi_http": "http://localhost:8550",
"extapi_ipc": null,
"extapi_version": "2.0.0",
"intapi_version": "1.2.0"
}
}
]
}
```
### 1.1.0
* Add `OnApproved` method
### 1.0.0
Initial release.
### Versioning
The API uses [semantic versioning](https://semver.org/).
TLDR; Given a version number MAJOR.MINOR.PATCH, increment the:
* MAJOR version when you make incompatible API changes,
* MINOR version when you add functionality in a backwards-compatible manner, and
* PATCH version when you make backwards-compatible bug fixes.
Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.

983
vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go generated vendored Normal file
View File

@ -0,0 +1,983 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"context"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"os"
"os/signal"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core"
"github.com/ethereum/go-ethereum/signer/fourbyte"
"github.com/ethereum/go-ethereum/signer/rules"
"github.com/ethereum/go-ethereum/signer/storage"
"gopkg.in/urfave/cli.v1"
)
const legalWarning = `
WARNING!
Clef is an account management tool. It may, like any software, contain bugs.
Please take care to
- backup your keystore files,
- verify that the keystore(s) can be opened with your password.
Clef is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
`
var (
logLevelFlag = cli.IntFlag{
Name: "loglevel",
Value: 4,
Usage: "log level to emit to the screen",
}
advancedMode = cli.BoolFlag{
Name: "advanced",
Usage: "If enabled, issues warnings instead of rejections for suspicious requests. Default off",
}
keystoreFlag = cli.StringFlag{
Name: "keystore",
Value: filepath.Join(node.DefaultDataDir(), "keystore"),
Usage: "Directory for the keystore",
}
configdirFlag = cli.StringFlag{
Name: "configdir",
Value: DefaultConfigDir(),
Usage: "Directory for Clef configuration",
}
chainIdFlag = cli.Int64Flag{
Name: "chainid",
Value: params.MainnetChainConfig.ChainID.Int64(),
Usage: "Chain id to use for signing (1=mainnet, 3=ropsten, 4=rinkeby, 5=Goerli)",
}
rpcPortFlag = cli.IntFlag{
Name: "rpcport",
Usage: "HTTP-RPC server listening port",
Value: node.DefaultHTTPPort + 5,
}
signerSecretFlag = cli.StringFlag{
Name: "signersecret",
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
}
customDBFlag = cli.StringFlag{
Name: "4bytedb-custom",
Usage: "File used for writing new 4byte-identifiers submitted via API",
Value: "./4byte-custom.json",
}
auditLogFlag = cli.StringFlag{
Name: "auditlog",
Usage: "File used to emit audit logs. Set to \"\" to disable",
Value: "audit.log",
}
ruleFlag = cli.StringFlag{
Name: "rules",
Usage: "Enable rule-engine",
Value: "",
}
stdiouiFlag = cli.BoolFlag{
Name: "stdio-ui",
Usage: "Use STDIN/STDOUT as a channel for an external UI. " +
"This means that an STDIN/STDOUT is used for RPC-communication with a e.g. a graphical user " +
"interface, and can be used when Clef is started by an external process.",
}
testFlag = cli.BoolFlag{
Name: "stdio-ui-test",
Usage: "Mechanism to test interface between Clef and UI. Requires 'stdio-ui'.",
}
app = cli.NewApp()
initCommand = cli.Command{
Action: utils.MigrateFlags(initializeSecrets),
Name: "init",
Usage: "Initialize the signer, generate secret storage",
ArgsUsage: "",
Flags: []cli.Flag{
logLevelFlag,
configdirFlag,
},
Description: `
The init command generates a master seed which Clef can use to store credentials and data needed for
the rule-engine to work.`,
}
attestCommand = cli.Command{
Action: utils.MigrateFlags(attestFile),
Name: "attest",
Usage: "Attest that a js-file is to be used",
ArgsUsage: "<sha256sum>",
Flags: []cli.Flag{
logLevelFlag,
configdirFlag,
signerSecretFlag,
},
Description: `
The attest command stores the sha256 of the rule.js-file that you want to use for automatic processing of
incoming requests.
Whenever you make an edit to the rule file, you need to use attestation to tell
Clef that the file is 'safe' to execute.`,
}
setCredentialCommand = cli.Command{
Action: utils.MigrateFlags(setCredential),
Name: "setpw",
Usage: "Store a credential for a keystore file",
ArgsUsage: "<address>",
Flags: []cli.Flag{
logLevelFlag,
configdirFlag,
signerSecretFlag,
},
Description: `
The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will
remove any stored credential for that address (keyfile)
`}
gendocCommand = cli.Command{
Action: GenDoc,
Name: "gendoc",
Usage: "Generate documentation about json-rpc format",
Description: `
The gendoc generates example structures of the json-rpc communication types.
`}
)
func init() {
app.Name = "Clef"
app.Usage = "Manage Ethereum account operations"
app.Flags = []cli.Flag{
logLevelFlag,
keystoreFlag,
configdirFlag,
chainIdFlag,
utils.LightKDFFlag,
utils.NoUSBFlag,
utils.RPCListenAddrFlag,
utils.RPCVirtualHostsFlag,
utils.IPCDisabledFlag,
utils.IPCPathFlag,
utils.RPCEnabledFlag,
rpcPortFlag,
signerSecretFlag,
customDBFlag,
auditLogFlag,
ruleFlag,
stdiouiFlag,
testFlag,
advancedMode,
}
app.Action = signer
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, gendocCommand}
}
func main() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
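// initializeSecrets generates a new random 256-byte master seed, encrypts it under a
// user-chosen password and writes it to masterseed.json in the configuration directory.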
func initializeSecrets(c *cli.Context) error {
if err := initialize(c); err != nil {
return err
}
configDir := c.GlobalString(configdirFlag.Name)
masterSeed := make([]byte, 256)
num, err := io.ReadFull(rand.Reader, masterSeed)
if err != nil {
return err
}
if num != len(masterSeed) {
return fmt.Errorf("failed to read enough random")
}
n, p := keystore.StandardScryptN, keystore.StandardScryptP
if c.GlobalBool(utils.LightKDFFlag.Name) {
n, p = keystore.LightScryptN, keystore.LightScryptP
}
text := "The master seed of clef is locked with a password. Please give a password. Do not forget this password."
var password string
for {
password = getPassPhrase(text, true)
if err := core.ValidatePasswordFormat(password); err != nil {
fmt.Printf("invalid password: %v\n", err)
} else {
break
}
}
cipherSeed, err := encryptSeed(masterSeed, []byte(password), n, p)
if err != nil {
return fmt.Errorf("failed to encrypt master seed: %v", err)
}
err = os.Mkdir(configDir, 0700)
if err != nil && !os.IsExist(err) {
return err
}
location := filepath.Join(configDir, "masterseed.json")
if _, err := os.Stat(location); err == nil {
return fmt.Errorf("file %v already exists, will not overwrite", location)
}
err = ioutil.WriteFile(location, cipherSeed, 0400)
if err != nil {
return err
}
fmt.Printf("A master seed has been generated into %s\n", location)
fmt.Printf(`
This is required to be able to store credentials, such as :
* Passwords for keystores (used by rule engine)
* Storage for javascript rules
* Hash of rule-file
You should treat that file with utmost secrecy, and make a backup of it.
NOTE: This file does not contain your accounts. Those need to be backed up separately!
`)
return nil
}
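// attestFile stores the sha256 hash of a ruleset file in the encrypted config storage,
// marking that ruleset as approved for automatic processing.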
func attestFile(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
if err := initialize(ctx); err != nil {
return err
}
stretchedKey, err := readMasterKey(ctx, nil)
if err != nil {
utils.Fatalf(err.Error())
}
configDir := ctx.GlobalString(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
confKey := crypto.Keccak256([]byte("config"), stretchedKey)
// Initialize the encrypted storages
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confKey)
val := ctx.Args().First()
configStorage.Put("ruleset_sha256", val)
log.Info("Ruleset attestation updated", "sha256", val)
return nil
}
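// setCredential stores a keystore password for the given address in clef's encrypted
// credential storage, so that rules can unlock the key without prompting the user.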
func setCredential(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an address to be passed as an argument.")
}
if err := initialize(ctx); err != nil {
return err
}
address := ctx.Args().First()
password := getPassPhrase("Enter a passphrase to store with this address.", true)
stretchedKey, err := readMasterKey(ctx, nil)
if err != nil {
utils.Fatalf(err.Error())
}
configDir := ctx.GlobalString(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
// Initialize the encrypted storages
pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
pwStorage.Put(address, password)
log.Info("Credential store updated", "key", address)
return nil
}
func initialize(c *cli.Context) error {
// Set up the logger to print everything
logOutput := os.Stdout
if c.GlobalBool(stdiouiFlag.Name) {
logOutput = os.Stderr
// If using the stdioui, we can't do the 'confirm'-flow
fmt.Fprintf(logOutput, legalWarning)
} else {
if !confirm(legalWarning) {
return fmt.Errorf("aborted by user")
}
}
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int(logLevelFlag.Name)), log.StreamHandler(logOutput, log.TerminalFormat(true))))
return nil
}
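// signer is the main entry point of clef: it selects the UI channel, loads the 4byte
// database, optionally enables the rule engine, and starts the external HTTP/IPC API.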
func signer(c *cli.Context) error {
if err := initialize(c); err != nil {
return err
}
var (
ui core.UIClientAPI
)
if c.GlobalBool(stdiouiFlag.Name) {
log.Info("Using stdin/stdout as UI-channel")
ui = core.NewStdIOUI()
} else {
log.Info("Using CLI as UI-channel")
ui = core.NewCommandlineUI()
}
// 4bytedb data
fourByteLocal := c.GlobalString(customDBFlag.Name)
db, err := fourbyte.NewWithFile(fourByteLocal)
if err != nil {
utils.Fatalf(err.Error())
}
embeds, locals := db.Size()
log.Info("Loaded 4byte database", "embeds", embeds, "locals", locals, "local", fourByteLocal)
var (
api core.ExternalAPI
pwStorage storage.Storage = &storage.NoStorage{}
)
configDir := c.GlobalString(configdirFlag.Name)
if stretchedKey, err := readMasterKey(c, ui); err != nil {
log.Info("No master seed provided, rules disabled", "error", err)
} else {
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
// Generate domain specific keys
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
jskey := crypto.Keccak256([]byte("jsstorage"), stretchedKey)
confkey := crypto.Keccak256([]byte("config"), stretchedKey)
// Initialize the encrypted storages
pwStorage = storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
jsStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "jsstorage.json"), jskey)
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey)
//Do we have a rule-file?
if ruleFile := c.GlobalString(ruleFlag.Name); ruleFile != "" {
ruleJS, err := ioutil.ReadFile(ruleFile)
if err != nil {
log.Info("Could not load rulefile, rules not enabled", "file", "rulefile")
} else {
shasum := sha256.Sum256(ruleJS)
foundShaSum := hex.EncodeToString(shasum[:])
storedShasum := configStorage.Get("ruleset_sha256")
if storedShasum != foundShaSum {
log.Info("Could not validate ruleset hash, rules not enabled", "got", foundShaSum, "expected", storedShasum)
} else {
// Initialize rules
ruleEngine, err := rules.NewRuleEvaluator(ui, jsStorage)
if err != nil {
utils.Fatalf(err.Error())
}
ruleEngine.Init(string(ruleJS))
ui = ruleEngine
log.Info("Rule engine configured", "file", c.String(ruleFlag.Name))
}
}
}
}
var (
chainId = c.GlobalInt64(chainIdFlag.Name)
ksLoc = c.GlobalString(keystoreFlag.Name)
lightKdf = c.GlobalBool(utils.LightKDFFlag.Name)
advanced = c.GlobalBool(advancedMode.Name)
nousb = c.GlobalBool(utils.NoUSBFlag.Name)
)
log.Info("Starting signer", "chainid", chainId, "keystore", ksLoc,
"light-kdf", lightKdf, "advanced", advanced)
am := core.StartClefAccountManager(ksLoc, nousb, lightKdf)
apiImpl := core.NewSignerAPI(am, chainId, nousb, ui, db, advanced, pwStorage)
// Establish the bidirectional communication, by creating a new UI backend and registering
// it with the UI.
ui.RegisterUIServer(core.NewUIServerAPI(apiImpl))
api = apiImpl
// Audit logging
if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" {
api, err = core.NewAuditLogger(logfile, api)
if err != nil {
utils.Fatalf(err.Error())
}
log.Info("Audit logs configured", "file", logfile)
}
// register signer API with server
var (
extapiURL = "n/a"
ipcapiURL = "n/a"
)
rpcAPI := []rpc.API{
{
Namespace: "account",
Public: true,
Service: api,
Version: "1.0"},
}
if c.GlobalBool(utils.RPCEnabledFlag.Name) {
vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
// start http server
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
extapiURL = fmt.Sprintf("http://%s", httpEndpoint)
log.Info("HTTP endpoint opened", "url", extapiURL)
defer func() {
listener.Close()
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()
}
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
if c.IsSet(utils.IPCPathFlag.Name) {
ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
} else {
ipcapiURL = filepath.Join(configDir, "clef.ipc")
}
listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI)
if err != nil {
utils.Fatalf("Could not start IPC api: %v", err)
}
log.Info("IPC endpoint opened", "url", ipcapiURL)
defer func() {
listener.Close()
log.Info("IPC endpoint closed", "url", ipcapiURL)
}()
}
if c.GlobalBool(testFlag.Name) {
log.Info("Performing UI test")
go testExternalUI(apiImpl)
}
ui.OnSignerStartup(core.StartupInfo{
Info: map[string]interface{}{
"extapi_version": core.ExternalAPIVersion,
"intapi_version": core.InternalAPIVersion,
"extapi_http": extapiURL,
"extapi_ipc": ipcapiURL,
},
})
abortChan := make(chan os.Signal, 1)
signal.Notify(abortChan, os.Interrupt)
sig := <-abortChan
log.Info("Exiting...", "signal", sig)
return nil
}
// splitAndTrim splits input separated by a comma
// and trims excessive white space from the substrings.
func splitAndTrim(input string) []string {
result := strings.Split(input, ",")
for i, r := range result {
result[i] = strings.TrimSpace(r)
}
return result
}
// DefaultConfigDir is the default config directory to use for the vaults and other
// persistence requirements.
func DefaultConfigDir() string {
// Try to place the data folder in the user's home dir
home := homeDir()
if home != "" {
if runtime.GOOS == "darwin" {
return filepath.Join(home, "Library", "Signer")
} else if runtime.GOOS == "windows" {
appdata := os.Getenv("APPDATA")
if appdata != "" {
return filepath.Join(appdata, "Signer")
} else {
return filepath.Join(home, "AppData", "Roaming", "Signer")
}
} else {
return filepath.Join(home, ".clef")
}
}
// As we cannot guess a stable location, return empty and handle later
return ""
}
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
}
if usr, err := user.Current(); err == nil {
return usr.HomeDir
}
return ""
}
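// readMasterKey loads the encrypted master seed (from --signersecret or the default
// masterseed.json), decrypts it with a password obtained via the UI or the terminal,
// and makes sure the derived vault directory exists.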
func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) {
var (
file string
configDir = ctx.GlobalString(configdirFlag.Name)
)
if ctx.GlobalIsSet(signerSecretFlag.Name) {
file = ctx.GlobalString(signerSecretFlag.Name)
} else {
file = filepath.Join(configDir, "masterseed.json")
}
if err := checkFile(file); err != nil {
return nil, err
}
cipherKey, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
var password string
// If ui is not nil, get the password from ui.
if ui != nil {
resp, err := ui.OnInputRequired(core.UserInputRequest{
Title: "Master Password",
Prompt: "Please enter the password to decrypt the master seed",
IsPassword: true})
if err != nil {
return nil, err
}
password = resp.Text
} else {
password = getPassPhrase("Decrypt master seed of clef", false)
}
masterSeed, err := decryptSeed(cipherKey, password)
if err != nil {
return nil, fmt.Errorf("failed to decrypt the master seed of clef")
}
if len(masterSeed) < 256 {
return nil, fmt.Errorf("master seed of insufficient length, expected >255 bytes, got %d", len(masterSeed))
}
// Create vault location
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))
err = os.Mkdir(vaultLocation, 0700)
if err != nil && !os.IsExist(err) {
return nil, err
}
return masterSeed, nil
}
// checkFile is a convenience function to check if a file
// * exists
// * is mode 0400
func checkFile(filename string) error {
info, err := os.Stat(filename)
if err != nil {
return fmt.Errorf("failed stat on %s: %v", filename, err)
}
// Check the unix permission bits
if info.Mode().Perm()&0377 != 0 {
return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String())
}
return nil
}
// confirm displays a text and asks for user confirmation
func confirm(text string) bool {
fmt.Printf(text)
fmt.Printf("\nEnter 'ok' to proceed:\n>")
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text := strings.TrimSpace(text); text == "ok" {
return true
}
return false
}
func testExternalUI(api *core.SignerAPI) {
ctx := context.WithValue(context.Background(), "remote", "clef binary")
ctx = context.WithValue(ctx, "scheme", "in-proc")
ctx = context.WithValue(ctx, "local", "main")
errs := make([]string, 0)
a := common.HexToAddress("0xdeadbeef000000000000000000000000deadbeef")
queryUser := func(q string) string {
resp, err := api.UI.OnInputRequired(core.UserInputRequest{
Title: "Testing",
Prompt: q,
})
if err != nil {
errs = append(errs, err.Error())
}
return resp.Text
}
expectResponse := func(testcase, question, expect string) {
if got := queryUser(question); got != expect {
errs = append(errs, fmt.Sprintf("%s: got %v, expected %v", testcase, got, expect))
}
}
expectApprove := func(testcase string, err error) {
if err == nil || err == accounts.ErrUnknownAccount {
return
}
errs = append(errs, fmt.Sprintf("%v: expected no error, got %v", testcase, err.Error()))
}
expectDeny := func(testcase string, err error) {
if err == nil || err != core.ErrRequestDenied {
errs = append(errs, fmt.Sprintf("%v: expected ErrRequestDenied, got %v", testcase, err))
}
}
// Test display of info and error
{
api.UI.ShowInfo("If you see this message, enter 'yes' to next question")
expectResponse("showinfo", "Did you see the message? [yes/no]", "yes")
api.UI.ShowError("If you see this message, enter 'yes' to the next question")
expectResponse("showerror", "Did you see the message? [yes/no]", "yes")
}
{ // Sign data test - clique header
api.UI.ShowInfo("Please approve the next request for signing a clique header")
cliqueHeader := types.Header{
common.HexToHash("0000H45H"),
common.HexToHash("0000H45H"),
common.HexToAddress("0000H45H"),
common.HexToHash("0000H00H"),
common.HexToHash("0000H45H"),
common.HexToHash("0000H45H"),
types.Bloom{},
big.NewInt(1337),
big.NewInt(1337),
1338,
1338,
1338,
[]byte("Extra data Extra data Extra data Extra data Extra data Extra data Extra data Extra data"),
common.HexToHash("0x0000H45H"),
types.BlockNonce{},
}
cliqueRlp, err := rlp.EncodeToBytes(cliqueHeader)
if err != nil {
utils.Fatalf("Should not error: %v", err)
}
addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899")
_, err = api.SignData(ctx, accounts.MimetypeClique, *addr, hexutil.Encode(cliqueRlp))
expectApprove("signdata - clique header", err)
}
{ // Sign data test - plain text
api.UI.ShowInfo("Please approve the next request for signing text")
addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899")
_, err := api.SignData(ctx, accounts.MimetypeTextPlain, *addr, hexutil.Encode([]byte("hello world")))
expectApprove("signdata - text", err)
}
{ // Sign data test - plain text reject
api.UI.ShowInfo("Please deny the next request for signing text")
addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899")
_, err := api.SignData(ctx, accounts.MimetypeTextPlain, *addr, hexutil.Encode([]byte("hello world")))
expectDeny("signdata - text", err)
}
{ // Sign transaction
api.UI.ShowInfo("Please reject next transaction")
data := hexutil.Bytes([]byte{})
to := common.NewMixedcaseAddress(a)
tx := core.SendTxArgs{
Data: &data,
Nonce: 0x1,
Value: hexutil.Big(*big.NewInt(6)),
From: common.NewMixedcaseAddress(a),
To: &to,
GasPrice: hexutil.Big(*big.NewInt(5)),
Gas: 1000,
Input: nil,
}
_, err := api.SignTransaction(ctx, tx, nil)
expectDeny("signtransaction [1]", err)
expectResponse("signtransaction [2]", "Did you see any warnings for the last transaction? (yes/no)", "no")
}
{ // Listing
api.UI.ShowInfo("Please reject listing-request")
_, err := api.List(ctx)
expectDeny("list", err)
}
{ // New account
api.UI.ShowInfo("Please reject new account-request")
_, err := api.New(ctx)
expectDeny("newaccount", err)
}
{ // Metadata
api.UI.ShowInfo("Please check if you see the Origin in next listing (approve or deny)")
api.List(context.WithValue(ctx, "Origin", "origin.com"))
expectResponse("metadata - origin", "Did you see origin (origin.com)? [yes/no] ", "yes")
}
for _, e := range errs {
log.Error(e)
}
result := fmt.Sprintf("Tests completed. %d errors:\n%s\n", len(errs), strings.Join(errs, "\n"))
api.UI.ShowInfo(result)
}
// getPassPhrase retrieves the password associated with clef, either fetched
// from a list of preloaded passphrases, or requested interactively from the user.
// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one.
func getPassPhrase(prompt string, confirmation bool) string {
fmt.Println(prompt)
password, err := console.Stdin.PromptPassword("Passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err)
}
if confirmation {
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
}
if password != confirm {
utils.Fatalf("Passphrases do not match")
}
}
return password
}
type encryptedSeedStorage struct {
Description string `json:"description"`
Version int `json:"version"`
Params keystore.CryptoJSON `json:"params"`
}
// encryptSeed uses a similar scheme as the keystore uses, but with a different wrapping,
// to encrypt the master seed
func encryptSeed(seed []byte, auth []byte, scryptN, scryptP int) ([]byte, error) {
cryptoStruct, err := keystore.EncryptDataV3(seed, auth, scryptN, scryptP)
if err != nil {
return nil, err
}
return json.Marshal(&encryptedSeedStorage{"Clef seed", 1, cryptoStruct})
}
// decryptSeed decrypts the master seed
func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
var encSeed encryptedSeedStorage
if err := json.Unmarshal(keyjson, &encSeed); err != nil {
return nil, err
}
if encSeed.Version != 1 {
log.Warn(fmt.Sprintf("unsupported encryption format of seed: %d, operation will likely fail", encSeed.Version))
}
seed, err := keystore.DecryptDataV3(encSeed.Params, auth)
if err != nil {
return nil, err
}
return seed, err
}
// GenDoc outputs examples of all structures used in json-rpc communication
func GenDoc(ctx *cli.Context) {
var (
a = common.HexToAddress("0xdeadbeef000000000000000000000000deadbeef")
b = common.HexToAddress("0x1111111122222222222233333333334444444444")
meta = core.Metadata{
Scheme: "http",
Local: "localhost:8545",
Origin: "www.malicious.ru",
Remote: "localhost:9999",
UserAgent: "Firefox 3.2",
}
output []string
add = func(name, desc string, v interface{}) {
if data, err := json.MarshalIndent(v, "", " "); err == nil {
output = append(output, fmt.Sprintf("### %s\n\n%s\n\nExample:\n```json\n%s\n```", name, desc, data))
} else {
log.Error("Error generating output", err)
}
}
)
{ // Sign plain text request
desc := "SignDataRequest contains information about a pending request to sign some data. " +
"The data to be signed can be of various types, defined by content-type. Clef has done most " +
"of the work in canonicalizing and making sense of the data, and it's up to the UI to present" +
"the user with the contents of the `message`"
sighash, msg := accounts.TextAndHash([]byte("hello world"))
message := []*core.NameValueType{{"message", msg, accounts.MimetypeTextPlain}}
add("SignDataRequest", desc, &core.SignDataRequest{
Address: common.NewMixedcaseAddress(a),
Meta: meta,
ContentType: accounts.MimetypeTextPlain,
Rawdata: []byte(msg),
Message: message,
Hash: sighash})
}
{ // Sign plain text response
add("SignDataResponse - approve", "Response to SignDataRequest",
&core.SignDataResponse{Approved: true})
add("SignDataResponse - deny", "Response to SignDataRequest",
&core.SignDataResponse{})
}
{ // Sign transaction request
desc := "SignTxRequest contains information about a pending request to sign a transaction. " +
"Aside from the transaction itself, there is also a `call_info`-struct. That struct contains " +
"messages of various types, that the user should be informed of." +
"\n\n" +
"As in any request, it's important to consider that the `meta` info also contains untrusted data." +
"\n\n" +
"The `transaction` (on input into clef) can have either `data` or `input` -- if both are set, " +
"they must be identical, otherwise an error is generated. " +
"However, Clef will always use `data` when passing this struct on (if Clef does otherwise, please file a ticket)"
data := hexutil.Bytes([]byte{0x01, 0x02, 0x03, 0x04})
add("SignTxRequest", desc, &core.SignTxRequest{
Meta: meta,
Callinfo: []core.ValidationInfo{
{"Warning", "Something looks odd, show this message as a warning"},
{"Info", "User should see this aswell"},
},
Transaction: core.SendTxArgs{
Data: &data,
Nonce: 0x1,
Value: hexutil.Big(*big.NewInt(6)),
From: common.NewMixedcaseAddress(a),
To: nil,
GasPrice: hexutil.Big(*big.NewInt(5)),
Gas: 1000,
Input: nil,
}})
}
{ // Sign tx response
data := hexutil.Bytes([]byte{0x04, 0x03, 0x02, 0x01})
add("SignTxResponse - approve", "Response to request to sign a transaction. This response needs to contain the `transaction`"+
", because the UI is free to make modifications to the transaction.",
&core.SignTxResponse{Approved: true,
Transaction: core.SendTxArgs{
Data: &data,
Nonce: 0x4,
Value: hexutil.Big(*big.NewInt(6)),
From: common.NewMixedcaseAddress(a),
To: nil,
GasPrice: hexutil.Big(*big.NewInt(5)),
Gas: 1000,
Input: nil,
}})
add("SignTxResponse - deny", "Response to SignTxRequest. When denying a request, there's no need to "+
"provide the transaction in return",
&core.SignTxResponse{})
}
{ // When a signed tx is ready to go out
desc := "SignTransactionResult is used in the call `clef` -> `OnApprovedTx(result)`" +
"\n\n" +
"This occurs _after_ successful completion of the entire signing procedure, but right before the signed " +
"transaction is passed to the external caller. This method (and data) can be used by the UI to signal " +
"to the user that the transaction was signed, but it is primarily useful for ruleset implementations." +
"\n\n" +
"A ruleset that implements a rate limitation needs to know what transactions are sent out to the external " +
"interface. By hooking into this methods, the ruleset can maintain track of that count." +
"\n\n" +
"**OBS:** Note that if an attacker can restore your `clef` data to a previous point in time" +
" (e.g through a backup), the attacker can reset such windows, even if he/she is unable to decrypt the content. " +
"\n\n" +
"The `OnApproved` method cannot be responded to, it's purely informative"
rlpdata := common.FromHex("0xf85d640101948a8eafb1cf62bfbeb1741769dae1a9dd47996192018026a0716bd90515acb1e68e5ac5867aa11a1e65399c3349d479f5fb698554ebc6f293a04e8a4ebfff434e971e0ef12c5bf3a881b06fd04fc3f8b8a7291fb67a26a1d4ed")
var tx types.Transaction
rlp.DecodeBytes(rlpdata, &tx)
add("OnApproved - SignTransactionResult", desc, &ethapi.SignTransactionResult{Raw: rlpdata, Tx: &tx})
}
{ // User input
add("UserInputRequest", "Sent when clef needs the user to provide data. If 'password' is true, the input field should be treated accordingly (echo-free)",
&core.UserInputRequest{IsPassword: true, Title: "The title here", Prompt: "The question to ask the user"})
add("UserInputResponse", "Response to UserInputRequest",
&core.UserInputResponse{Text: "The textual response from user"})
}
{ // List request
add("ListRequest", "Sent when a request has been made to list addresses. The UI is provided with the "+
"full `account`s, including local directory names. Note: this information is not passed back to the external caller, "+
"who only sees the `address`es. ",
&core.ListRequest{
Meta: meta,
Accounts: []accounts.Account{
{a, accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/a"}},
{b, accounts.URL{Scheme: "keystore", Path: "/path/to/keyfile/b"}}},
})
add("ListResponse", "Response to list request. The response contains a list of all addresses to show to the caller. "+
"Note: the UI is free to respond with any address the caller, regardless of whether it exists or not",
&core.ListResponse{
Accounts: []accounts.Account{
{common.HexToAddress("0xcowbeef000000cowbeef00000000000000000c0w"), accounts.URL{Path: ".. ignored .."}},
{common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"), accounts.URL{}},
}})
}
fmt.Println(`## UI Client interface
These data types are defined in the channel between clef and the UI`)
for _, elem := range output {
fmt.Println(elem)
}
}
/**
//Create Account
curl -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_new","params":["test"],"id":67}' localhost:8550
// List accounts
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_list","params":[""],"id":67}' http://localhost:8550/
// Make Transaction
// safeSend(0x12)
// 4401a6e40000000000000000000000000000000000000000000000000000000000000012
// supplied abi
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x82A2A876D39022B3019932D30Cd9c97ad5616813","gas":"0x333","gasPrice":"0x123","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x10", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"},"test"],"id":67}' http://localhost:8550/
// Not supplied
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_signTransaction","params":[{"from":"0x82A2A876D39022B3019932D30Cd9c97ad5616813","gas":"0x333","gasPrice":"0x123","nonce":"0x0","to":"0x07a565b7ed7d7a678680a4c162885bedbb695fe0", "value":"0x10", "data":"0x4401a6e40000000000000000000000000000000000000000000000000000000000000012"}],"id":67}' http://localhost:8550/
// Sign data
curl -i -H "Content-Type: application/json" -X POST --data '{"jsonrpc":"2.0","method":"account_sign","params":["0x694267f14675d7e1b9494fd8d72fefe1755710fa","bazonk gaz baz"],"id":67}' http://localhost:8550/
**/

View File

@ -0,0 +1,179 @@
import os,sys, subprocess
from tinyrpc.transports import ServerTransport
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.dispatch import public,RPCDispatcher
from tinyrpc.server import RPCServer
""" This is a POC example of how to write a custom UI for Clef. The UI starts the
clef process with the '--stdio-ui' option, and communicates with clef using standard input / output.
The standard input/output is a relatively secure way to communicate, as it does not require opening any ports
or IPC files. Needless to say, it does not protect against memory inspection mechanisms where an attacker
can access process memory."""
try:
import urllib.parse as urlparse
except ImportError:
import urllib as urlparse
class StdIOTransport(ServerTransport):
""" Uses std input/output for RPC """
def receive_message(self):
return None, urlparse.unquote(sys.stdin.readline())
def send_reply(self, context, reply):
print(reply)
class PipeTransport(ServerTransport):
""" Uses std a pipe for RPC """
def __init__(self,input, output):
self.input = input
self.output = output
def receive_message(self):
data = self.input.readline()
print(">> {}".format( data))
return None, urlparse.unquote(data)
def send_reply(self, context, reply):
print("<< {}".format( reply))
self.output.write(reply)
self.output.write("\n")
class StdIOHandler():
def __init__(self):
pass
@public
def ApproveTx(self,req):
"""
Example request:
{
"jsonrpc": "2.0",
"method": "ApproveTx",
"params": [{
"transaction": {
"to": "0xae967917c465db8578ca9024c205720b1a3651A9",
"gas": "0x333",
"gasPrice": "0x123",
"value": "0x10",
"data": "0xd7a5865800000000000000000000000000000000000000000000000000000000000000ff",
"nonce": "0x0"
},
"from": "0xAe967917c465db8578ca9024c205720b1a3651A9",
"call_info": "Warning! Could not validate ABI-data against calldata\nSupplied ABI spec does not contain method signature in data: 0xd7a58658",
"meta": {
"remote": "127.0.0.1:34572",
"local": "localhost:8550",
"scheme": "HTTP/1.1"
}
}],
"id": 1
}
:param transaction: transaction info
:param call_info: info about the call, e.g. if ABI info could not be validated
:param meta: metadata about the request, e.g. where the call comes from
:return:
"""
transaction = req.get('transaction')
_from = req.get('from')
call_info = req.get('call_info')
meta = req.get('meta')
return {
"approved" : False,
#"transaction" : transaction,
# "from" : _from,
# "password" : None,
}
@public
def ApproveSignData(self, req):
""" Example request
"""
return {"approved": False, "password" : None}
@public
def ApproveExport(self, req):
""" Example request
"""
return {"approved" : False}
@public
def ApproveImport(self, req):
""" Example request
"""
return { "approved" : False, "old_password": "", "new_password": ""}
@public
def ApproveListing(self, req):
""" Example request
"""
return {'accounts': []}
@public
def ApproveNewAccount(self, req):
"""
Example request
:return:
"""
return {"approved": False,
#"password": ""
}
@public
def ShowError(self,message = {}):
"""
Example request:
{"jsonrpc":"2.0","method":"ShowInfo","params":{"message":"Testing 'ShowError'"},"id":1}
:param message: to show
:return: nothing
"""
if 'text' in message.keys():
sys.stderr.write("Error: {}\n".format( message['text']))
return
@public
def ShowInfo(self,message = {}):
"""
Example request
{"jsonrpc":"2.0","method":"ShowInfo","params":{"message":"Testing 'ShowInfo'"},"id":0}
:param message: to display
:return:nothing
"""
if 'text' in message.keys():
sys.stdout.write("Error: {}\n".format( message['text']))
return
def main(args):
cmd = ["./clef", "--stdio-ui"]
if len(args) > 0 and args[0] == "test":
cmd.extend(["--stdio-ui-test"])
print("cmd: {}".format(" ".join(cmd)))
dispatcher = RPCDispatcher()
dispatcher.register_instance(StdIOHandler(), '')
# line buffered
p = subprocess.Popen(cmd, bufsize=1, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
rpc_server = RPCServer(
PipeTransport(p.stdout, p.stdin),
JSONRPCProtocol(),
dispatcher
)
rpc_server.serve_forever()
if __name__ == '__main__':
main(sys.argv[1:])

View File

@ -0,0 +1,236 @@
# Rules
The `signer` binary contains a ruleset engine, implemented with [OttoVM](https://github.com/robertkrimen/otto).
It enables use cases like the following:
* I want to auto-approve transactions with contract `CasinoDapp`, with up to `0.05 ether` in value, to a maximum of `1 ether` per 24h period
* I want to auto-approve transactions to contract `EthAlarmClock` with `data`=`0xdeadbeef`, if `value=0`, `gas < 44k` and `gasPrice < 40Gwei`
The two main features that are required for this to work well are:
1. Rule implementation: how to create, manage and interpret rules in a flexible but secure manner
2. Credential management: how to provide auto-unlock without exposing keys unnecessarily
The sections below deal with both of them.
## Rule Implementation
A ruleset file is implemented as a `js` file. Under the hood, the ruleset-engine is a `SignerUI`, implementing the same methods as the `json-rpc` methods
defined in the UI protocol. Example:
```javascript
function asBig(str){
if(str.slice(0,2) == "0x"){ return new BigNumber(str.slice(2),16)}
return new BigNumber(str)
}
// Approve transactions to a certain contract if value is below a certain limit
function ApproveTx(req){
var limit = asBig("0xb1a2bc2ec50000")
var value = asBig(req.transaction.value);
if(req.transaction.to.toLowerCase()=="0xae967917c465db8578ca9024c205720b1a3651a9"
&& value.lt(limit)){
return "Approve"
}
// If we return "Reject", it will be rejected.
// By not returning anything, it will be passed to the next UI, for manual processing
}
//Approve listings if request made from IPC
function ApproveListing(req){
if (req.metadata.scheme == "ipc"){ return "Approve"}
}
```
Whenever the external API is called (and the ruleset is enabled), the `signer` calls the UI, which is an instance of a ruleset-engine. The ruleset-engine
invokes the corresponding method. In doing so, there are three possible outcomes (a Go sketch of this dispatch follows the list):
1. JS returns "Approve"
* Auto-approve request
2. JS returns "Reject"
* Auto-reject request
3. Error occurs, or something else is returned
* Pass on to `next` ui: the regular UI channel.
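For illustration only — this is not the actual rule-engine code — the dispatch described above can be pictured as the following Go sketch, where the `manual` callback is a hypothetical stand-in for handing the request over to the next (interactive) UI:
```golang
package rulesketch

import "errors"

var errRequestDenied = errors.New("request denied")

// dispatch maps a rule's return value onto the three outcomes described above.
func dispatch(result string, evalErr error, manual func() (bool, error)) (bool, error) {
	if evalErr == nil {
		switch result {
		case "Approve":
			return true, nil // 1. auto-approve the request
		case "Reject":
			return false, errRequestDenied // 2. auto-reject the request
		}
	}
	// 3. An error occurred, or something else was returned: defer to manual processing.
	return manual()
}
```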
A more advanced example can be found below, "Example 1: ruleset for a rate-limited window", using `storage` to `Put` and `Get` `string`s by key.
* At the time of writing, storage only exists as an ephemeral unencrypted implementation, to be used during testing.
### Things to note
The Otto vm has a few [caveats](https://github.com/robertkrimen/otto):
* "use strict" will parse, but does nothing.
* The regular expression engine (re2/regexp) is not fully compatible with the ECMA5 specification.
* Otto targets ES5. ES6 features (eg: Typed Arrays) are not supported.
Additionally, a few more have been added
* The rule execution cannot load external javascript files.
* The only preloaded library is [`bignumber.js`](https://github.com/MikeMcl/bignumber.js) version `2.0.3`. This one is fairly old, and is not aligned with the documentation at the github repository.
* Each invocation is made in a fresh virtual machine. This means that you cannot store data in global variables between invocations. This is a deliberate choice -- if you want to store data, use the disk-backed `storage`, since rules should not rely on ephemeral data.
* Javascript API parameters are _always_ an object. This is also a design choice, to ensure that parameters are accessed by _key_ and not by order. This is to prevent mistakes due to missing parameters or parameter changes.
* The JS engine has access to `storage` and `console`.
#### Security considerations
##### Security of ruleset
Some security precautions can be made, such as:
* Never load `ruleset.js` unless the file is `readonly` (`r-??-??-?`). If the user wishes to modify the ruleset, they must make it writeable and then set it back to readonly.
* This is to prevent attacks where files are dropped on the users disk.
* Since we're going to have to have some form of secure storage (not defined in this section), we could also store the `sha3` of the `ruleset.js` file in there.
* If the user wishes to modify the ruleset, he'd then have to perform e.g. `signer --attest /path/to/ruleset --credential <creds>`
##### Security of implementation
The drawbacks of this very flexible solution is that the `signer` needs to contain a javascript engine. This is pretty simple to implement, since it's already
implemented for `geth`. There are no known security vulnerabilities in it, nor have we had any security problems with it so far.
The javascript engine would be an added attack surface; but if the validation of `rulesets` is made good (with hash-based attestation), the actual javascript cannot be considered
an attack surface -- if an attacker can control the ruleset, a much simpler attack would be to implement an "always-approve" rule instead of exploiting the js vm. The only benefit
to be gained from attacking the actual `signer` process from the `js` side would be if it could somehow extract cryptographic keys from memory.
##### Security in usability
Javascript is flexible, but also easy to get wrong, especially when users assume that `js` can handle large integers natively. Typical errors
include trying to multiply `gasCost` with `gas` without using `bignumber`s.
It's unclear whether any other DSL could be more secure; since there's always the possibility of erroneously implementing a rule.
## Credential management
The ability to auto-approve transactions means that the signer needs to have the necessary credentials to decrypt keyfiles. These passwords are hereafter called `ksp` (keystore pass).
### Example implementation
Upon startup of the signer, the signer is given a switch: `--seed <path/to/masterseed>`
The `seed` contains a blob of bytes, which is the master seed for the `signer`.
The `signer` uses the `seed` to:
* Generate the `path` where the settings are stored.
* `./settings/1df094eb-c2b1-4689-90dd-790046d38025/vault.dat`
* `./settings/1df094eb-c2b1-4689-90dd-790046d38025/rules.js`
* Generate the encryption password for `vault.dat`.
The `vault.dat` would be an encrypted container storing the following information:
* `ksp` entries
* `sha256` hash of `rules.js`
* Information about pair:ed callers (not yet specified)
### Security considerations
This would leave it up to the user to ensure that the `path/to/masterseed` is handled in a secure way. It's difficult to get around this, although one could
imagine leveraging OS-level keychains where supported. The setup is however in general similar to how ssh-keys are stored in `.ssh/`.
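To make the derivation concrete, the vendored `cmd/clef/main.go` earlier in this diff derives both the vault location and the per-purpose encryption keys from the master seed with `Keccak256`; the sketch below condenses that pattern and is not a drop-in replacement for the actual code:
```golang
package vaultsketch

import (
	"path/filepath"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// vaultPaths mirrors the derivation used in cmd/clef/main.go: the vault directory name
// and the per-purpose encryption keys are all Keccak256 hashes keyed on the master seed.
func vaultPaths(configDir string, masterSeed []byte) (vaultLocation string, pwKey, jsKey, confKey []byte) {
	vaultLocation = filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))
	pwKey = crypto.Keccak256([]byte("credentials"), masterSeed) // encrypts the ksp entries (credentials.json)
	jsKey = crypto.Keccak256([]byte("jsstorage"), masterSeed)   // encrypts the rule storage (jsstorage.json)
	confKey = crypto.Keccak256([]byte("config"), masterSeed)    // encrypts config data, e.g. ruleset_sha256 (config.json)
	return vaultLocation, pwKey, jsKey, confKey
}
```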
# Implementation status
This is now implemented (with ephemeral non-encrypted storage for now, so not yet enabled).
## Example 1: ruleset for a rate-limited window
```javascript
function big(str){
if(str.slice(0,2) == "0x"){ return new BigNumber(str.slice(2),16)}
return new BigNumber(str)
}
// Time window: 1 week
var window = 1000* 3600*24*7;
// Limit : 1 ether
var limit = new BigNumber("1e18");
function isLimitOk(transaction){
var value = big(transaction.value)
// Start of our window function
var windowstart = new Date().getTime() - window;
var txs = [];
var stored = storage.Get('txs');
if(stored != ""){
txs = JSON.parse(stored)
}
// First, remove all that have passed out of the time-window
var newtxs = txs.filter(function(tx){return tx.tstamp > windowstart});
console.log(txs, newtxs.length);
// Secondly, aggregate the current sum
sum = new BigNumber(0)
sum = newtxs.reduce(function(agg, tx){ return big(tx.value).plus(agg)}, sum);
console.log("ApproveTx > Sum so far", sum);
console.log("ApproveTx > Requested", value.toNumber());
// Would we exceed weekly limit ?
return sum.plus(value).lt(limit)
}
function ApproveTx(r){
if (isLimitOk(r.transaction)){
return "Approve"
}
return "Nope"
}
/**
* OnApprovedTx(str) is called when a transaction has been approved and signed. The parameter
* 'response_str' contains the return value that will be sent to the external caller.
* The return value from this method is ignored - the reason for having this callback is to allow the
* ruleset to keep track of approved transactions.
*
* When implementing rate-limited rules, this callback should be used.
* If a rule responds with neither 'Approve' nor 'Reject' - the tx goes to manual processing. If the user
* then accepts the transaction, this method will be called.
*
* TLDR; Use this method to keep track of signed transactions, instead of using the data in ApproveTx.
*/
function OnApprovedTx(resp){
var value = big(resp.tx.value)
var txs = []
// Load stored transactions
var stored = storage.Get('txs');
if(stored != ""){
txs = JSON.parse(stored)
}
// Add this to the storage
txs.push({tstamp: new Date().getTime(), value: value});
storage.Put("txs", JSON.stringify(txs));
}
```
## Example 2: allow destination
```javascript
function ApproveTx(r){
if(r.transaction.from.toLowerCase()=="0x0000000000000000000000000000000000001337"){ return "Approve"}
if(r.transaction.from.toLowerCase()=="0x000000000000000000000000000000000000dead"){ return "Reject"}
// Otherwise goes to manual processing
}
```
## Example 3: Allow listing
```javascript
function ApproveListing(){
return "Approve"
}
```

Binary file not shown (image, 20 KiB).

View File

@ -0,0 +1,73 @@
// This file is a test-utility for testing clef-functionality
//
// Start clef with
//
// build/bin/clef --4bytedb=./cmd/clef/4byte.json --rpc
//
// Start geth with
//
// build/bin/geth --nodiscover --maxpeers 0 --signer http://localhost:8550 console --preload=cmd/clef/tests/testsigner.js
//
// and in the console simply invoke
//
// > test()
//
// You can reload the file via `reload()`
function reload(){
loadScript("./cmd/clef/tests/testsigner.js");
}
function init(){
if (typeof accts == 'undefined' || accts.length == 0){
accts = eth.accounts
console.log("Got accounts ", accts);
}
}
init()
function testTx(){
if( accts && accts.length > 0) {
var a = accts[0]
var txdata = eth.signTransaction({from: a, to: a, value: 1, nonce: 1, gas: 1, gasPrice: 1})
var v = parseInt(txdata.tx.v)
console.log("V value: ", v)
if (v == 37 || v == 38){
console.log("Mainnet 155-protected chainid was used")
}
if (v == 27 || v == 28){
throw new Error("Mainnet chainid was used, but without replay protection!")
}
}
}
function testSignText(){
if( accts && accts.length > 0){
var a = accts[0]
var r = eth.sign(a, "0x68656c6c6f20776f726c64"); //hello world
console.log("signing response", r)
}
}
function testClique(){
if( accts && accts.length > 0){
var a = accts[0]
var r = debug.testSignCliqueBlock(a, 0); // Sign genesis
console.log("signing response", r)
if( a != r){
throw new Error("Requested signing by "+a+ " but got sealer "+r)
}
}
}
function test(){
var tests = [
testTx,
testSignText,
testClique,
]
for( i in tests){
try{
tests[i]()
}catch(err){
console.log(err)
}
}
}

View File

@ -0,0 +1,211 @@
## Initializing the signer
First, initialize the master seed.
```text
#./signer init
WARNING!
The signer is alpha software, and not yet publically released. This software has _not_ been audited, and there
are no guarantees about the workings of this software. It may contain severe flaws. You should not use this software
unless you agree to take full responsibility for doing so, and know what you are doing.
TLDR; THIS IS NOT PRODUCTION-READY SOFTWARE!
Enter 'ok' to proceed:
>ok
A master seed has been generated into /home/martin/.signer/secrets.dat
This is required to be able to store credentials, such as :
* Passwords for keystores (used by rule engine)
* Storage for javascript rules
* Hash of rule-file
You should treat that file with utmost secrecy, and make a backup of it.
NOTE: This file does not contain your accounts. Those need to be backed up separately!
```
(for readability purposes, we'll remove the WARNING printout in the rest of this document)
## Creating rules
Now, you can create a rule-file. Note that it is not mandatory to use predefined rules, but it's really handy.
```javascript
function ApproveListing(){
return "Approve"
}
```
Get the `sha256` hash. If you have openssl, you can do `openssl sha256 rules.js`...
```text
#sha256sum rules.js
6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72 rules.js
```
...now `attest` the file...
```text
#./signer attest 6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
INFO [02-21|12:14:38] Ruleset attestation updated sha256=6c21d1737429d6d4f2e55146da0797782f3c0a0355227f19d702df377c165d72
```
...and (this is required only for non-production versions) load a mock-up `4byte.json` by copying the file from the source to your current working directory:
```text
#cp $GOPATH/src/github.com/ethereum/go-ethereum/cmd/clef/4byte.json $PWD
```
At this point, we can start the signer with the rule-file:
```text
#./signer --rules rules.js --rpc
INFO [09-25|20:28:11.866] Using CLI as UI-channel
INFO [09-25|20:28:11.876] Loaded 4byte db signatures=5509 file=./4byte.json
INFO [09-25|20:28:11.877] Rule engine configured file=./rules.js
DEBUG[09-25|20:28:11.877] FS scan times list=100.781µs set=13.253µs diff=5.761µs
DEBUG[09-25|20:28:11.884] Ledger support enabled
DEBUG[09-25|20:28:11.888] Trezor support enabled
INFO [09-25|20:28:11.888] Audit logs configured file=audit.log
DEBUG[09-25|20:28:11.888] HTTP registered namespace=account
INFO [09-25|20:28:11.890] HTTP endpoint opened url=http://localhost:8550
DEBUG[09-25|20:28:11.890] IPC registered namespace=account
INFO [09-25|20:28:11.890] IPC endpoint opened url=<nil>
------- Signer info -------
* extapi_version : 2.0.0
* intapi_version : 2.0.0
* extapi_http : http://localhost:8550
* extapi_ipc : <nil>
```
Any list-requests will now be auto-approved by our rule-file.
## Under the hood
While doing the operations above, these files have been created:
```text
#ls -laR ~/.signer/
/home/martin/.signer/:
total 16
drwx------ 3 martin martin 4096 feb 21 12:14 .
drwxr-xr-x 71 martin martin 4096 feb 21 12:12 ..
drwx------ 2 martin martin 4096 feb 21 12:14 43f73718397aa54d1b22
-rwx------ 1 martin martin 256 feb 21 12:12 secrets.dat
/home/martin/.signer/43f73718397aa54d1b22:
total 12
drwx------ 2 martin martin 4096 feb 21 12:14 .
drwx------ 3 martin martin 4096 feb 21 12:14 ..
-rw------- 1 martin martin 159 feb 21 12:14 config.json
#cat /home/martin/.signer/43f73718397aa54d1b22/config.json
{"ruleset_sha256":{"iv":"6v4W4tfJxj3zZFbl","c":"6dt5RTDiTq93yh1qDEjpsat/tsKG7cb+vr3sza26IPL2fvsQ6ZoqFx++CPUa8yy6fD9Bbq41L01ehkKHTG3pOAeqTW6zc/+t0wv3AB6xPmU="}}
```
In `~/.signer`, the `secrets.dat` file was created, containing the `master_seed`.
The `master_seed` was then used to derive a few other things:
- `vault_location` : in this case `43f73718397aa54d1b22` .
- Thus, if you use a different `master_seed`, a different `vault_location` will be used, so the two do not conflict with each other.
- Example: `signer --signersecret /path/to/afile ...`
- `config.json` which is the encrypted key/value storage for configuration data, containing the key `ruleset_sha256`.
## Adding credentials
In order to make more useful rules like signing transactions, the signer needs access to the passwords needed to unlock keystores.
```text
#./signer setpw "0x694267f14675d7e1b9494fd8d72fefe1755710fa" "test_password"
INFO [02-21|13:43:21] Credential store updated key=0x694267f14675d7e1b9494fd8d72fefe1755710fa
```
## More advanced rules
Now let's update the rules to make use of credentials:
```javascript
function ApproveListing(){
return "Approve"
}
function ApproveSignData(r){
if( r.address.toLowerCase() == "0x694267f14675d7e1b9494fd8d72fefe1755710fa")
{
if(r.message.indexOf("bazonk") >= 0){
return "Approve"
}
return "Reject"
}
// Otherwise goes to manual processing
}
```
In this example:
* Any requests to sign data with the account `0x694...` will be
* auto-approved if the message contains `bazonk`
* auto-rejected if it does not.
* Any other signing-requests will be passed along for manual approve/reject.
_Note: make sure that `0x694...` is an account you have access to. You can create it either via clef or the traditional account CLI tool. If the latter was chosen, make sure both clef and geth use the same keystore by specifying `--keystore path/to/your/keystore` when running clef._
Attest the new file...
```text
#sha256sum rules.js
2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f rules.js
#./signer attest 2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f
INFO [02-21|14:36:30] Ruleset attestation updated sha256=2a0cb661dacfc804b6e95d935d813fd17c0997a7170e4092ffbc34ca976acd9f
```
And start the signer:
```
#./signer --rules rules.js --rpc
INFO [09-25|21:02:16.450] Using CLI as UI-channel
INFO [09-25|21:02:16.466] Loaded 4byte db signatures=5509 file=./4byte.json
INFO [09-25|21:02:16.467] Rule engine configured file=./rules.js
DEBUG[09-25|21:02:16.468] FS scan times list=1.45262ms set=21.926µs diff=6.944µs
DEBUG[09-25|21:02:16.473] Ledger support enabled
DEBUG[09-25|21:02:16.475] Trezor support enabled
INFO [09-25|21:02:16.476] Audit logs configured file=audit.log
DEBUG[09-25|21:02:16.476] HTTP registered namespace=account
INFO [09-25|21:02:16.478] HTTP endpoint opened url=http://localhost:8550
DEBUG[09-25|21:02:16.478] IPC registered namespace=account
INFO [09-25|21:02:16.478] IPC endpoint opened url=<nil>
------- Signer info -------
* extapi_version : 2.0.0
* intapi_version : 2.0.0
* extapi_http : http://localhost:8550
* extapi_ipc : <nil>
```
And then test signing, once with `bazonk` and once without:
```
#curl -H "Content-Type: application/json" -X POST --data "{\"jsonrpc\":\"2.0\",\"method\":\"account_sign\",\"params\":[\"0x694267f14675d7e1b9494fd8d72fefe1755710fa\",\"0x$(xxd -pu <<< ' bazonk baz gaz')\"],\"id\":67}" http://localhost:8550/
{"jsonrpc":"2.0","id":67,"result":"0x93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c"}
#curl -H "Content-Type: application/json" -X POST --data "{\"jsonrpc\":\"2.0\",\"method\":\"account_sign\",\"params\":[\"0x694267f14675d7e1b9494fd8d72fefe1755710fa\",\"0x$(xxd -pu <<< ' bonk baz gaz')\"],\"id\":67}" http://localhost:8550/
{"jsonrpc":"2.0","id":67,"error":{"code":-32000,"message":"Request denied"}}
```
Meanwhile, in the signer output:
```text
INFO [02-21|14:42:41] Op approved
INFO [02-21|14:42:56] Op rejected
```
The signer also stores all traffic over the external API in a log file. The last 4 lines shows the two requests and their responses:
```text
#tail -n 4 audit.log
t=2018-02-21T14:42:41+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49706\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=202062617a6f6e6b2062617a2067617a0a
t=2018-02-21T14:42:42+0100 lvl=info msg=Sign api=signer type=response data=93e6161840c3ae1efc26dc68dedab6e8fc233bb3fefa1b4645dbf6609b93dace160572ea4ab33240256bb6d3dadb60dcd9c515d6374d3cf614ee897408d41d541c error=nil
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=request metadata="{\"remote\":\"127.0.0.1:49708\",\"local\":\"localhost:8550\",\"scheme\":\"HTTP/1.1\"}" addr="0x694267f14675d7e1b9494fd8d72fefe1755710fa [chksum INVALID]" data=2020626f6e6b2062617a2067617a0a
t=2018-02-21T14:42:56+0100 lvl=info msg=Sign api=signer type=response data= error="Request denied"
```

View File

@ -0,0 +1,53 @@
ethkey
======
ethkey is a simple command-line tool for working with Ethereum keyfiles.
# Usage
### `ethkey generate`
Generate a new keyfile.
If you want to use an existing private key to use in the keyfile, it can be
specified by setting `--privatekey` with the location of the file containing the
private key.
### `ethkey inspect <keyfile>`
Print various information about the keyfile.
Private key information can be printed by using the `--private` flag;
make sure to use this feature with great caution!
### `ethkey signmessage <keyfile> <message/file>`
Sign the message with a keyfile.
It is possible to refer to a file containing the message.
To sign a message contained in a file, use the `--msgfile` flag.
### `ethkey verifymessage <address> <signature> <message/file>`
Verify the signature of the message.
It is possible to refer to a file containing the message.
To verify a message contained in a file, use the `--msgfile` flag.
### `ethkey changepassphrase <keyfile>`
Change the passphrase of a keyfile.
Use the `--newpasswordfile` flag to point to the new password file.
## Passphrases
For every command that uses a keyfile, you will be prompted to provide the
passphrase for decrypting the keyfile. To avoid this prompt, it is possible
to pass the passphrase by using the `--passwordfile` flag pointing to a file that
contains the passphrase.
## JSON
If you need the output in JSON format, use the `--json` flag.

View File

@ -0,0 +1,72 @@
package main
import (
"fmt"
"io/ioutil"
"strings"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"gopkg.in/urfave/cli.v1"
)
var newPassphraseFlag = cli.StringFlag{
Name: "newpasswordfile",
Usage: "the file that contains the new passphrase for the keyfile",
}
var commandChangePassphrase = cli.Command{
Name: "changepassphrase",
Usage: "change the passphrase on a keyfile",
ArgsUsage: "<keyfile>",
Description: `
Change the passphrase of a keyfile.`,
Flags: []cli.Flag{
passphraseFlag,
newPassphraseFlag,
},
Action: func(ctx *cli.Context) error {
keyfilepath := ctx.Args().First()
// Read key from file.
keyjson, err := ioutil.ReadFile(keyfilepath)
if err != nil {
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
}
// Decrypt key with passphrase.
passphrase := getPassphrase(ctx)
key, err := keystore.DecryptKey(keyjson, passphrase)
if err != nil {
utils.Fatalf("Error decrypting key: %v", err)
}
// Get a new passphrase.
fmt.Println("Please provide a new passphrase")
var newPhrase string
if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" {
content, err := ioutil.ReadFile(passFile)
if err != nil {
utils.Fatalf("Failed to read new passphrase file '%s': %v", passFile, err)
}
newPhrase = strings.TrimRight(string(content), "\r\n")
} else {
newPhrase = promptPassphrase(true)
}
// Encrypt the key with the new passphrase.
newJson, err := keystore.EncryptKey(key, newPhrase, keystore.StandardScryptN, keystore.StandardScryptP)
if err != nil {
utils.Fatalf("Error encrypting with new passphrase: %v", err)
}
// Then write the new keyfile in place of the old one.
if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
utils.Fatalf("Error writing new keyfile to disk: %v", err)
}
// Don't print anything. Just return successfully,
// producing a positive exit code.
return nil
},
}

Some files were not shown because too many files have changed in this diff.