forked from cerc-io/plugeth
rlp, trie, contracts, compression, consensus: improve comments (#14580)
parent e3dfd55820
commit 061889d4ea
@@ -33,102 +33,18 @@ func (s *CompressionRleSuite) TestDecompressSimple(c *checker.C) {
 res, err := Decompress([]byte{token, 0xfd})
 c.Assert(err, checker.IsNil)
 c.Assert(res, checker.DeepEquals, exp)
-// if bytes.Compare(res, exp) != 0 {
-// t.Error("empty sha3", res)
-// }

 exp = []byte{0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x1, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21}
 res, err = Decompress([]byte{token, 0xfe})
 c.Assert(err, checker.IsNil)
 c.Assert(res, checker.DeepEquals, exp)
-// if bytes.Compare(res, exp) != 0 {
-// t.Error("0x80 sha3", res)
-// }

 res, err = Decompress([]byte{token, 0xff})
 c.Assert(err, checker.IsNil)
 c.Assert(res, checker.DeepEquals, []byte{token})
-// if bytes.Compare(res, []byte{token}) != 0 {
-// t.Error("token", res)
-// }

 res, err = Decompress([]byte{token, 12})
 c.Assert(err, checker.IsNil)
 c.Assert(res, checker.DeepEquals, make([]byte, 10))
-// if bytes.Compare(res, make([]byte, 10)) != 0 {
-// t.Error("10 * zero", res)
-// }
 }

-// func TestDecompressMulti(t *testing.T) {
-// res, err := Decompress([]byte{token, 0xfd, token, 0xfe, token, 12})
-// if err != nil {
-// t.Error(err)
-// }
-
-// var exp []byte
-// exp = append(exp, crypto.Keccak256([]byte(""))...)
-// exp = append(exp, crypto.Keccak256([]byte{0x80})...)
-// exp = append(exp, make([]byte, 10)...)
-
-// if bytes.Compare(res, res) != 0 {
-// t.Error("Expected", exp, "result", res)
-// }
-// }
-
-// func TestCompressSimple(t *testing.T) {
-// res := Compress([]byte{0, 0, 0, 0, 0})
-// if bytes.Compare(res, []byte{token, 7}) != 0 {
-// t.Error("5 * zero", res)
-// }
-
-// res = Compress(crypto.Keccak256([]byte("")))
-// if bytes.Compare(res, []byte{token, emptyShaToken}) != 0 {
-// t.Error("empty sha", res)
-// }
-
-// res = Compress(crypto.Keccak256([]byte{0x80}))
-// if bytes.Compare(res, []byte{token, emptyListShaToken}) != 0 {
-// t.Error("empty list sha", res)
-// }
-
-// res = Compress([]byte{token})
-// if bytes.Compare(res, []byte{token, tokenToken}) != 0 {
-// t.Error("token", res)
-// }
-// }
-
-// func TestCompressMulti(t *testing.T) {
-// in := []byte{0, 0, 0, 0, 0}
-// in = append(in, crypto.Keccak256([]byte(""))...)
-// in = append(in, crypto.Keccak256([]byte{0x80})...)
-// in = append(in, token)
-// res := Compress(in)
-
-// exp := []byte{token, 7, token, emptyShaToken, token, emptyListShaToken, token, tokenToken}
-// if bytes.Compare(res, exp) != 0 {
-// t.Error("expected", exp, "got", res)
-// }
-// }
-
-// func TestCompressDecompress(t *testing.T) {
-// var in []byte
-
-// for i := 0; i < 20; i++ {
-// in = append(in, []byte{0, 0, 0, 0, 0}...)
-// in = append(in, crypto.Keccak256([]byte(""))...)
-// in = append(in, crypto.Keccak256([]byte{0x80})...)
-// in = append(in, []byte{123, 2, 19, 89, 245, 254, 255, token, 98, 233}...)
-// in = append(in, token)
-// }
-
-// c := Compress(in)
-// d, err := Decompress(c)
-// if err != nil {
-// t.Error(err)
-// }
-
-// if bytes.Compare(d, in) != 0 {
-// t.Error("multi failed\n", d, "\n", in)
-// }
-// }
@@ -76,7 +76,7 @@ var (
 errUnknownBlock = errors.New("unknown block")

 // errInvalidCheckpointBeneficiary is returned if a checkpoint/epoch transition
-// block has a beneficiary set to non zeroes.
+// block has a beneficiary set to non-zeroes.
 errInvalidCheckpointBeneficiary = errors.New("beneficiary in checkpoint block non-zero")

 // errInvalidVote is returned if a nonce value is something else that the two
@@ -84,7 +84,7 @@ var (
 errInvalidVote = errors.New("vote nonce not 0x00..0 or 0xff..f")

 // errInvalidCheckpointVote is returned if a checkpoint/epoch transition block
-// has a vote nonce set to non zeroes.
+// has a vote nonce set to non-zeroes.
 errInvalidCheckpointVote = errors.New("vote nonce in checkpoint block non-zero")

 // errMissingVanity is returned if a block's extra-data section is shorter than
@@ -104,7 +104,7 @@ var (
 // ones).
 drrInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block")

-// errInvalidMixDigest is returned if a block's mix digest is non zero.
+// errInvalidMixDigest is returned if a block's mix digest is non-zero.
 errInvalidMixDigest = errors.New("non-zero mix digest")

 // errInvalidUncleHash is returned if a block contains an non-empty uncle list.
@@ -122,7 +122,7 @@ var (
 // be modified via out-of-range or non-contiguous headers.
 errInvalidVotingChain = errors.New("invalid voting chain")

-// errUnauthorized is returned if a header is signed by a non authorized entity.
+// errUnauthorized is returned if a header is signed by a non-authorized entity.
 errUnauthorized = errors.New("unauthorized")
 )

@@ -499,7 +499,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
 // Prepare implements consensus.Engine, preparing all the consensus fields of the
 // header for running the transactions on top.
 func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
-// If the block isn't a checkpoint, cast a random vote (good enough fror now)
+// If the block isn't a checkpoint, cast a random vote (good enough for now)
 header.Coinbase = common.Address{}
 header.Nonce = types.BlockNonce{}

@@ -601,7 +601,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
 if _, authorized := snap.Signers[signer]; !authorized {
 return nil, errUnauthorized
 }
-// If we're amongs the recent signers, wait for the next block
+// If we're amongst the recent signers, wait for the next block
 for seen, recent := range snap.Recents {
 if recent == signer {
 // Signer is among recents, only wait if the current block doens't shift it out
@@ -39,7 +39,7 @@ type Vote struct {
 // Tally is a simple vote tally to keep the current score of votes. Votes that
 // go against the proposal aren't counted since it's equivalent to not voting.
 type Tally struct {
-Authorize bool `json:"authorize"` // Whether the vote it about authorizing or kicking someone
+Authorize bool `json:"authorize"` // Whether the vote is about authorizing or kicking someone
 Votes int `json:"votes"` // Number of votes until now wanting to pass the proposal
 }

@@ -56,7 +56,7 @@ type Snapshot struct {
 Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating
 }

-// newSnapshot create a new snapshot with the specified startup parameters. This
+// newSnapshot creates a new snapshot with the specified startup parameters. This
 // method does not initialize the set of recent signers, so only ever use if for
 // the genesis block.
 func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
@@ -243,7 +243,7 @@ func TestVoting(t *testing.T) {
 },
 results: []string{"A", "B"},
 }, {
-// Cascading changes are not allowed, only the the account being voted on may change
+// Cascading changes are not allowed, only the account being voted on may change
 signers: []string{"A", "B", "C", "D"},
 votes: []testerVote{
 {signer: "A", voted: "C", auth: false},
@@ -293,7 +293,7 @@ func TestVoting(t *testing.T) {
 results: []string{"A", "B", "C"},
 }, {
 // Ensure that pending votes don't survive authorization status changes. This
-// corner case can only appear if a signer is quickly added, remove and then
+// corner case can only appear if a signer is quickly added, removed and then
 // readded (or the inverse), while one of the original voters dropped. If a
 // past vote is left cached in the system somewhere, this will interfere with
 // the final signer outcome.
@@ -79,8 +79,7 @@ type Engine interface {

 // Finalize runs any post-transaction state modifications (e.g. block rewards)
 // and assembles the final block.
-//
-// Note, the block header and state database might be updated to reflect any
+// Note: The block header and state database might be updated to reflect any
 // consensus rules that happen at finalization (e.g. block rewards).
 Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
 uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
@@ -53,7 +53,6 @@ type hasher func(dest []byte, data []byte)

 // makeHasher creates a repetitive hasher, allowing the same hash data structures
 // to be reused between hash runs instead of requiring new ones to be created.
-//
 // The returned function is not thread safe!
 func makeHasher(h hash.Hash) hasher {
 return func(dest []byte, data []byte) {
@@ -82,7 +81,6 @@ func seedHash(block uint64) []byte {
 // memory, then performing two passes of Sergio Demian Lerner's RandMemoHash
 // algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
 // set of 524288 64-byte values.
-//
 // This method places the result into dest in machine byte order.
 func generateCache(dest []uint32, epoch uint64, seed []byte) {
 // Print some debug logs to allow analysis on low end devices
@@ -220,7 +218,6 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte
 }

 // generateDataset generates the entire ethash dataset for mining.
-//
 // This method places the result into dest in machine byte order.
 func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 // Print some debug logs to allow analysis on low end devices
@@ -20,7 +20,7 @@ package ethash

 import "testing"

-// Tests whether the dataset size calculator work correctly by cross checking the
+// Tests whether the dataset size calculator works correctly by cross checking the
 // hard coded lookup table with the value generated by it.
 func TestSizeCalculations(t *testing.T) {
 var tests []uint64
@@ -218,7 +218,6 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo

 // verifyHeader checks whether a header conforms to the consensus rules of the
 // stock Ethereum ethash engine.
-//
 // See YP section 4.3.4. "Block Header Validity"
 func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
 // Ensure that the header's extra-data section is of a reasonable size
@@ -286,7 +285,6 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
 // CalcDifficulty is the difficulty adjustment algorithm. It returns
 // the difficulty that a new block should have when created at time
 // given the parent block's time and difficulty.
-//
 // TODO (karalabe): Move the chain maker into this package and make this private!
 func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
 next := new(big.Int).Add(parent.Number, common.Big1)
@@ -462,7 +460,6 @@ var (
 // AccumulateRewards credits the coinbase of the given block with the mining
 // reward. The total reward consists of the static block reward and rewards for
 // included uncles. The coinbase of each uncle block is also rewarded.
-//
 // TODO (karalabe): Move the chain maker into this package and make this private!
 func AccumulateRewards(state *state.StateDB, header *types.Header, uncles []*types.Header) {
 reward := new(big.Int).Set(blockReward)
@@ -355,7 +355,7 @@ type Ethash struct {
 // New creates a full sized ethash PoW scheme.
 func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
 if cachesinmem <= 0 {
-log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
+log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
 cachesinmem = 1
 }
 if cachedir != "" && cachesondisk > 0 {
@@ -412,7 +412,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
 return &Ethash{fakeMode: true, fakeDelay: delay}
 }

-// NewFullFaker creates a ethash consensus engine with a full fake scheme that
+// NewFullFaker creates an ethash consensus engine with a full fake scheme that
 // accepts all blocks as valid, without checking any consensus rules whatsoever.
 func NewFullFaker() *Ethash {
 return &Ethash{fakeMode: true, fakeFull: true}
@@ -54,7 +54,7 @@ func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header)
 if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
 return nil
 }
-// Depending whether we support or oppose the fork, validate the extra-data contents
+// Depending on whether we support or oppose the fork, validate the extra-data contents
 if config.DAOForkSupport {
 if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
 return ErrBadProDAOExtra
@@ -49,7 +49,7 @@ import (
 // TODO(zelig): watch peer solvency and notify of bouncing cheques
 // TODO(zelig): enable paying with cheque by signing off

-// Some functionality require interacting with the blockchain:
+// Some functionality requires interacting with the blockchain:
 // * setting current balance on peer's chequebook
 // * sending the transaction to cash the cheque
 // * depositing ether to the chequebook
@@ -100,13 +100,13 @@ type Chequebook struct {
 // persisted fields
 balance *big.Int // not synced with blockchain
 contractAddr common.Address // contract address
-sent map[common.Address]*big.Int //tallies for beneficiarys
+sent map[common.Address]*big.Int //tallies for beneficiaries

 txhash string // tx hash of last deposit tx
 threshold *big.Int // threshold that triggers autodeposit if not nil
 buffer *big.Int // buffer to keep on top of balance for fork protection

-log log.Logger // contextual logger with the contrac address embedded
+log log.Logger // contextual logger with the contract address embedded
 }

 func (self *Chequebook) String() string {
@@ -442,7 +442,7 @@ type Inbox struct {
 maxUncashed *big.Int // threshold that triggers autocashing
 cashed *big.Int // cumulative amount cashed
 cheque *Cheque // last cheque, nil if none yet received
-log log.Logger // contextual logger with the contrac address embedded
+log log.Logger // contextual logger with the contract address embedded
 }

 // NewInbox creates an Inbox. An Inboxes is not persisted, the cumulative sum is updated
@@ -509,9 +509,8 @@ func (self *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) {
 self.autoCash(cashInterval)
 }

-// autoCash starts a loop that periodically clears the last check
+// autoCash starts a loop that periodically clears the last cheque
 // if the peer is trusted. Clearing period could be 24h or a week.
-//
 // The caller must hold self.lock.
 func (self *Inbox) autoCash(cashInterval time.Duration) {
 if self.quit != nil {
@@ -557,7 +556,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {

 var sum *big.Int
 if self.cheque == nil {
-// the sum is checked against the blockchain once a check is received
+// the sum is checked against the blockchain once a cheque is received
 tally, err := self.session.Sent(self.beneficiary)
 if err != nil {
 return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err)
@@ -414,21 +414,10 @@ func TestCash(t *testing.T) {
 t.Fatalf("expected no error, got %v", err)
 }
 backend.Commit()
-// expBalance := big.NewInt(2)
-// gotBalance := backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }
 // after 3x interval time and 2 cheques received, exactly one cashing tx is sent
 time.Sleep(4 * interval)
 backend.Commit()

-// expBalance = big.NewInt(4)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }
-
 // after stopping autocash no more tx are sent
 ch2, err := chbook.Issue(addr1, amount)
 if err != nil {
@@ -441,11 +430,6 @@ func TestCash(t *testing.T) {
 }
 time.Sleep(2 * interval)
 backend.Commit()
-// expBalance = big.NewInt(4)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }

 // autocash below 1
 chbook.balance = big.NewInt(2)
@@ -456,11 +440,6 @@ func TestCash(t *testing.T) {
 t.Fatalf("expected no error, got %v", err)
 }
 backend.Commit()
-// expBalance = big.NewInt(4)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }

 ch4, err := chbook.Issue(addr1, amount)
 if err != nil {
@@ -479,13 +458,6 @@ func TestCash(t *testing.T) {
 }
 backend.Commit()

-// 2 checks of amount 1 received, exactly 1 tx is sent
-// expBalance = big.NewInt(6)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }
-
 // autochash on receipt when maxUncashed is 0
 chbook.balance = new(big.Int).Set(common.Big2)
 chbox.AutoCash(0, common.Big0)
@@ -495,11 +467,6 @@ func TestCash(t *testing.T) {
 t.Fatalf("expected no error, got %v", err)
 }
 backend.Commit()
-// expBalance = big.NewInt(5)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }

 ch6, err := chbook.Issue(addr1, amount)
 if err != nil {
@@ -511,21 +478,11 @@ func TestCash(t *testing.T) {
 t.Fatalf("expected no error, got %v", err)
 }
 backend.Commit()
-// expBalance = big.NewInt(4)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }

 _, err = chbox.Receive(ch6)
 if err != nil {
 t.Fatalf("expected no error, got %v", err)
 }
 backend.Commit()
-// expBalance = big.NewInt(6)
-// gotBalance = backend.BalanceAt(addr1)
-// if gotBalance.Cmp(expBalance) != 0 {
-// t.Fatalf("expected beneficiary balance %v, got %v", expBalance, gotBalance)
-// }

 }
@@ -3,12 +3,12 @@
 ## Usage

 Full documentation for the Ethereum Name Service [can be found as EIP 137](https://github.com/ethereum/EIPs/issues/137).
-This package offers a simple binding that streamlines the registration arbitrary utf8 domain names to swarm content hashes.
+This package offers a simple binding that streamlines the registration of arbitrary UTF8 domain names to swarm content hashes.

 ## Development

 The SOL file in contract subdirectory implements the ENS root registry, a simple
-first-in-first-served registrar for the root namespace, and a simple resolver contract;
+first-in, first-served registrar for the root namespace, and a simple resolver contract;
 they're used in tests, and can be used to deploy these contracts for your own purposes.

 The solidity source code can be found at [github.com/arachnid/ens/](https://github.com/arachnid/ens/).
@@ -52,7 +52,7 @@ func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contra
 }, nil
 }

-// DeployENS deploys an instance of the ENS nameservice, with a 'first in first served' root registrar.
+// DeployENS deploys an instance of the ENS nameservice, with a 'first-in, first-served' root registrar.
 func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (*ENS, error) {
 // Deploy the ENS registry
 ensAddr, _, _, err := contract.DeployENS(transactOpts, contractBackend, transactOpts.From)
@@ -79,7 +79,7 @@ func TestSignerPromotion(t *testing.T) {
 // Gradually promote the keys, until all are authorized
 keys = append([]*ecdsa.PrivateKey{key}, keys...)
 for i := 1; i < len(keys); i++ {
-// Check that no votes are accepted from the not yet authed user
+// Check that no votes are accepted from the not yet authorized user
 if _, err := oracle.Promote(bind.NewKeyedTransactor(keys[i]), common.Address{}); err != nil {
 t.Fatalf("Iter #%d: failed invalid promotion attempt: %v", i, err)
 }
@@ -216,7 +216,7 @@ func TestVersionRelease(t *testing.T) {
 // Gradually push releases, always requiring more signers than previously
 keys = append([]*ecdsa.PrivateKey{key}, keys...)
 for i := 1; i < len(keys); i++ {
-// Check that no votes are accepted from the not yet authed user
+// Check that no votes are accepted from the not yet authorized user
 if _, err := oracle.Release(bind.NewKeyedTransactor(keys[i]), 0, 0, 0, [20]byte{0}); err != nil {
 t.Fatalf("Iter #%d: failed invalid release attempt: %v", i, err)
 }
@@ -55,7 +55,7 @@ type Decoder interface {
 // To decode into a pointer, Decode will decode into the value pointed
 // to. If the pointer is nil, a new value of the pointer's element
 // type is allocated. If the pointer is non-nil, the existing value
-// will reused.
+// will be reused.
 //
 // To decode into a struct, Decode expects the input to be an RLP
 // list. The decoded elements of the list are assigned to each public
@@ -290,7 +290,7 @@ func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
 }
 case tag.tail:
 // A slice with "tail" tag can occur as the last field
-// of a struct and is upposed to swallow all remaining
+// of a struct and is supposed to swallow all remaining
 // list elements. The struct decoder already called s.List,
 // proceed directly to decoding the elements.
 dec = func(s *Stream, val reflect.Value) error {
@@ -741,7 +741,7 @@ func (s *Stream) uint(maxbits int) (uint64, error) {
 }

 // Bool reads an RLP string of up to 1 byte and returns its contents
-// as an boolean. If the input does not contain an RLP string, the
+// as a boolean. If the input does not contain an RLP string, the
 // returned error will be ErrExpectedString.
 func (s *Stream) Bool() (bool, error) {
 num, err := s.uint(8)
@@ -17,13 +17,13 @@
 /*
 Package rlp implements the RLP serialization format.

-The purpose of RLP (Recursive Linear Prefix) qis to encode arbitrarily
+The purpose of RLP (Recursive Linear Prefix) is to encode arbitrarily
 nested arrays of binary data, and RLP is the main encoding method used
 to serialize objects in Ethereum. The only purpose of RLP is to encode
 structure; encoding specific atomic data types (eg. strings, ints,
 floats) is left up to higher-order protocols; in Ethereum integers
 must be represented in big endian binary form with no leading zeroes
-(thus making the integer value zero be equivalent to the empty byte
+(thus making the integer value zero equivalent to the empty byte
 array).

 RLP values are distinguished by a type tag. The type tag precedes the
@@ -478,7 +478,7 @@ func writeEncoder(val reflect.Value, w *encbuf) error {
 // with a pointer receiver.
 func writeEncoderNoPtr(val reflect.Value, w *encbuf) error {
 if !val.CanAddr() {
-// We can't get the address. It would be possible make the
+// We can't get the address. It would be possible to make the
 // value addressable by creating a shallow copy, but this
 // creates other problems so we're not doing it (yet).
 //
@@ -583,7 +583,7 @@ func makePtrWriter(typ reflect.Type) (writer, error) {
 return writer, err
 }

-// putint writes i to the beginning of b in with big endian byte
+// putint writes i to the beginning of b in big endian byte
 // order, using the least number of bytes needed to represent i.
 func putint(b []byte, i uint64) (size int) {
 switch {
@@ -22,7 +22,7 @@ import (
 )

 // RawValue represents an encoded RLP value and can be used to delay
-// RLP decoding or precompute an encoding. Note that the decoder does
+// RLP decoding or to precompute an encoding. Note that the decoder does
 // not verify whether the content of RawValues is valid RLP.
 type RawValue []byte

@@ -50,7 +50,7 @@ func returnHasherToPool(h *hasher) {
 }

 // hash collapses a node down into a hash node, also returning a copy of the
-// original node initialzied with the computed hash to replace the original one.
+// original node initialized with the computed hash to replace the original one.
 func (h *hasher) hash(n node, db DatabaseWriter, force bool) (node, node, error) {
 // If we're not storing the node, just hashing, use available cached data
 if hash, dirty := n.cache(); hash != nil {
@@ -51,7 +51,7 @@ func makeTestSecureTrie() (ethdb.Database, *SecureTrie, map[string][]byte) {
 content[string(key)] = val
 trie.Update(key, val)

-// Add some other data to inflate th trie
+// Add some other data to inflate the trie
 for j := byte(3); j < 13; j++ {
 key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
 content[string(key)] = val
@@ -42,7 +42,7 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
 content[string(key)] = val
 trie.Update(key, val)

-// Add some other data to inflate th trie
+// Add some other data to inflate the trie
 for j := byte(3); j < 13; j++ {
 key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
 content[string(key)] = val
@@ -78,7 +78,7 @@ func checkTrieConsistency(db Database, root common.Hash) error {
 // Create and iterate a trie rooted in a subnode
 trie, err := New(root, db)
 if err != nil {
-return nil // // Consider a non existent state consistent
+return nil // Consider a non existent state consistent
 }
 it := trie.NodeIterator(nil)
 for it.Next(true) {
@@ -310,7 +310,7 @@ func TestIncompleteTrieSync(t *testing.T) {
 for _, result := range results {
 added = append(added, result.Hash)
 }
-// Check that all known sub-tries in the synced trie is complete
+// Check that all known sub-tries in the synced trie are complete
 for _, root := range added {
 if err := checkTrieConsistency(dstDb, root); err != nil {
 t.Fatalf("trie inconsistent: %v", err)
@@ -40,7 +40,7 @@ var (
 )

 // CacheMisses retrieves a global counter measuring the number of cache misses
-// the trie did since process startup. This isn't useful for anything apart from
+// the trie had since process startup. This isn't useful for anything apart from
 // trie debugging purposes.
 func CacheMisses() int64 {
 return cacheMissCounter.Count()
@@ -87,14 +87,14 @@ type Trie struct {
 originalRoot common.Hash

 // Cache generation values.
-// cachegen increase by one with each commit operation.
+// cachegen increases by one with each commit operation.
 // new nodes are tagged with the current generation and unloaded
 // when their generation is older than than cachegen-cachelimit.
 cachegen, cachelimit uint16
 }

 // SetCacheLimit sets the number of 'cache generations' to keep.
-// A cache generations is created by a call to Commit.
+// A cache generation is created by a call to Commit.
 func (t *Trie) SetCacheLimit(l uint16) {
 t.cachelimit = l
 }