Merge pull request #23 from openrelayxyz/feature/consensus-engine
Feature/consensus engine
commit 0deaa33408
@@ -114,6 +114,12 @@ type StateDB interface {

	AddressInAccessList(addr Address) bool
	SlotInAccessList(addr Address, slot Hash) (addressOk bool, slotOk bool)

	IntermediateRoot(deleteEmptyObjects bool) Hash
}

type RWStateDB interface {
	StateDB
}

type ScopeContext interface {
102	restricted/consensus/consensus.go	Normal file
@@ -0,0 +1,102 @@
package consensus

import (
	"math/big"
	"github.com/openrelayxyz/plugeth-utils/restricted/params"
	"github.com/openrelayxyz/plugeth-utils/restricted/types"
	"github.com/openrelayxyz/plugeth-utils/core"
)

// ChainHeaderReader defines a small collection of methods needed to access the local
// blockchain during header verification.
type ChainHeaderReader interface {
	// Config retrieves the blockchain's chain configuration.
	Config() *params.ChainConfig

	// CurrentHeader retrieves the current header from the local chain.
	CurrentHeader() *types.Header

	// GetHeader retrieves a block header from the database by hash and number.
	GetHeader(hash core.Hash, number uint64) *types.Header

	// GetHeaderByNumber retrieves a block header from the database by number.
	GetHeaderByNumber(number uint64) *types.Header

	// GetHeaderByHash retrieves a block header from the database by its hash.
	GetHeaderByHash(hash core.Hash) *types.Header

	// GetTd retrieves the total difficulty from the database by hash and number.
	GetTd(hash core.Hash, number uint64) *big.Int
}

// ChainReader defines a small collection of methods needed to access the local
// blockchain during header and/or uncle verification.
type ChainReader interface {
	ChainHeaderReader

	// GetBlock retrieves a block from the database by hash and number.
	GetBlock(hash core.Hash, number uint64) *types.Block
}

// Engine is an algorithm agnostic consensus engine.
type Engine interface {
	// Author retrieves the Ethereum address of the account that minted the given
	// block, which may be different from the header's coinbase if a consensus
	// engine is based on signatures.
	Author(header *types.Header) (core.Address, error)

	// VerifyHeader checks whether a header conforms to the consensus rules of a
	// given engine. Verifying the seal may be done optionally here, or explicitly
	// via the VerifySeal method.
	VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error

	// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
	// concurrently. The method returns a quit channel to abort the operations and
	// a results channel to retrieve the async verifications (the order is that of
	// the input slice).
	VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)

	// VerifyUncles verifies that the given block's uncles conform to the consensus
	// rules of a given engine.
	VerifyUncles(chain ChainReader, block *types.Block) error

	// Prepare initializes the consensus fields of a block header according to the
	// rules of a particular engine. The changes are executed inline.
	Prepare(chain ChainHeaderReader, header *types.Header) error

	// Finalize runs any post-transaction state modifications (e.g. block rewards
	// or process withdrawals) but does not assemble the block.
	//
	// Note: The state database might be updated to reflect any consensus rules
	// that happen at finalization (e.g. block rewards).
	Finalize(chain ChainHeaderReader, header *types.Header, state core.RWStateDB, txs []*types.Transaction,
		uncles []*types.Header, withdrawals []*types.Withdrawal)

	// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
	// rewards or process withdrawals) and assembles the final block.
	//
	// Note: The block header and state database might be updated to reflect any
	// consensus rules that happen at finalization (e.g. block rewards).
	FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state core.RWStateDB, txs []*types.Transaction,
		uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, error)

	// Seal generates a new sealing request for the given input block and pushes
	// the result into the given channel.
	//
	// Note, the method returns immediately and will send the result async. More
	// than one result may also be returned depending on the consensus algorithm.
	Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error

	// SealHash returns the hash of a block prior to it being sealed.
	SealHash(header *types.Header) core.Hash

	// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
	// that a new block should have.
	CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int

	// APIs returns the RPC APIs this consensus engine provides.
	APIs(chain ChainHeaderReader) []core.API

	// Close terminates any background threads maintained by the consensus engine.
	Close() error
}
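
For orientation, here is a minimal standalone sketch (not part of this commit) of how plugin code might consume the new ChainHeaderReader interface. PrintRecentTDs is a hypothetical helper, and types.Header is assumed to expose Number and Hash() as in upstream go-ethereum:

package example

import (
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/restricted/consensus"
)

// PrintRecentTDs walks the most recent n headers via ChainHeaderReader,
// printing each block number and its total difficulty.
func PrintRecentTDs(chain consensus.ChainHeaderReader, n uint64) {
	head := chain.CurrentHeader()
	if head == nil {
		return
	}
	last := head.Number.Uint64()
	for num := last; num > 0 && last-num < n; num-- {
		h := chain.GetHeaderByNumber(num)
		if h == nil {
			break
		}
		fmt.Println(num, chain.GetTd(h.Hash(), num))
	}
}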
@@ -14,8 +14,6 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build nacl js !cgo gofuzz

package crypto

import (
@@ -23,37 +21,48 @@ import (
	"crypto/elliptic"
	"errors"
	"fmt"
	"math/big"

	"github.com/btcsuite/btcd/btcec/v2"
	btc_ecdsa "github.com/btcsuite/btcd/btcec/v2/ecdsa"
)

// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
	pub, err := SigToPub(hash, sig)
	pub, err := sigToPub(hash, sig)
	if err != nil {
		return nil, err
	}
	bytes := (*btcec.PublicKey)(pub).SerializeUncompressed()
	bytes := pub.SerializeUncompressed()
	return bytes, err
}

func sigToPub(hash, sig []byte) (*btcec.PublicKey, error) {
	if len(sig) != SignatureLength {
		return nil, errors.New("invalid signature")
	}
	// Convert to btcec input format with 'recovery id' v at the beginning.
	btcsig := make([]byte, SignatureLength)
	btcsig[0] = sig[RecoveryIDOffset] + 27
	copy(btcsig[1:], sig)

	pub, _, err := btc_ecdsa.RecoverCompact(btcsig, hash)
	return pub, err
}

// SigToPub returns the public key that created the given signature.
func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
	// Convert to btcec input format with 'recovery id' v at the beginning.
	btcsig := make([]byte, SignatureLength)
	btcsig[0] = sig[64] + 27
	copy(btcsig[1:], sig)

	pub, _, err := btcec.RecoverCompact(btcec.S256(), btcsig, hash)
	return (*ecdsa.PublicKey)(pub), err
	pub, err := sigToPub(hash, sig)
	if err != nil {
		return nil, err
	}
	return pub.ToECDSA(), nil
}

// Sign calculates an ECDSA signature.
//
// This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must
// be aware that the given hash cannot be chosen by an adversery. Common
// be aware that the given hash cannot be chosen by an adversary. Common
// solution is to hash any input before calculating the signature.
//
// The produced signature is in the [R || S || V] format where V is 0 or 1.
@@ -64,14 +73,20 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
	if prv.Curve != btcec.S256() {
		return nil, fmt.Errorf("private key curve is not secp256k1")
	}
	sig, err := btcec.SignCompact(btcec.S256(), (*btcec.PrivateKey)(prv), hash, false)
	// ecdsa.PrivateKey -> btcec.PrivateKey
	var priv btcec.PrivateKey
	if overflow := priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() {
		return nil, fmt.Errorf("invalid private key")
	}
	defer priv.Zero()
	sig, err := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
	if err != nil {
		return nil, err
	}
	// Convert to Ethereum signature format with 'recovery id' v at the end.
	v := sig[0] - 27
	copy(sig, sig[1:])
	sig[64] = v
	sig[RecoveryIDOffset] = v
	return sig, nil
}

@@ -82,13 +97,20 @@ func VerifySignature(pubkey, hash, signature []byte) bool {
	if len(signature) != 64 {
		return false
	}
	sig := &btcec.Signature{R: new(big.Int).SetBytes(signature[:32]), S: new(big.Int).SetBytes(signature[32:])}
	key, err := btcec.ParsePubKey(pubkey, btcec.S256())
	var r, s btcec.ModNScalar
	if r.SetByteSlice(signature[:32]) {
		return false // overflow
	}
	if s.SetByteSlice(signature[32:]) {
		return false
	}
	sig := btc_ecdsa.NewSignature(&r, &s)
	key, err := btcec.ParsePubKey(pubkey)
	if err != nil {
		return false
	}
	// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
	if sig.S.Cmp(secp256k1halfN) > 0 {
	if s.IsOverHalfOrder() {
		return false
	}
	return sig.Verify(hash, key)
@@ -99,16 +121,26 @@ func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
	if len(pubkey) != 33 {
		return nil, errors.New("invalid compressed public key length")
	}
	key, err := btcec.ParsePubKey(pubkey, btcec.S256())
	key, err := btcec.ParsePubKey(pubkey)
	if err != nil {
		return nil, err
	}
	return key.ToECDSA(), nil
}

// CompressPubkey encodes a public key to the 33-byte compressed format.
// CompressPubkey encodes a public key to the 33-byte compressed format. The
// provided PublicKey must be valid. Namely, the coordinates must not be larger
// than 32 bytes each, they must be less than the field prime, and it must be a
// point on the secp256k1 curve. This is the case for a PublicKey constructed by
// elliptic.Unmarshal (see UnmarshalPubkey), or by ToECDSA and ecdsa.GenerateKey
// when constructing a PrivateKey.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
	return (*btcec.PublicKey)(pubkey).SerializeCompressed()
	// NOTE: the coordinates may be validated with
	// btcec.ParsePubKey(FromECDSAPub(pubkey))
	var x, y btcec.FieldVal
	x.SetByteSlice(pubkey.X.Bytes())
	y.SetByteSlice(pubkey.Y.Bytes())
	return btcec.NewPublicKey(&x, &y).SerializeCompressed()
}

// S256 returns an instance of the secp256k1 curve.
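
For reference, a standalone round trip over the exported API changed above (a sketch, not part of this commit; GenerateKey and Keccak256 are assumed to be exported by this package as they are in upstream go-ethereum):

package main

import (
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/restricted/crypto"
)

func main() {
	// Sign a 32-byte digest, then recover the public key and verify.
	key, _ := crypto.GenerateKey()                // assumed helper
	digest := crypto.Keccak256([]byte("payload")) // assumed helper

	sig, err := crypto.Sign(digest, key) // [R || S || V], 65 bytes
	if err != nil {
		panic(err)
	}
	pub, _ := crypto.Ecrecover(digest, sig)             // 65-byte uncompressed key
	ok := crypto.VerifySignature(pub, digest, sig[:64]) // drop the recovery id V
	fmt.Println(len(pub), ok)                           // 65 true
}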
@@ -1,86 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !nacl,!js,cgo,!gofuzz

package crypto

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/restricted/crypto/secp256k1"
)

// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
	return secp256k1.RecoverPubkey(hash, sig)
}

// SigToPub returns the public key that created the given signature.
func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
	s, err := Ecrecover(hash, sig)
	if err != nil {
		return nil, err
	}

	x, y := elliptic.Unmarshal(S256(), s)
	return &ecdsa.PublicKey{Curve: S256(), X: x, Y: y}, nil
}

// Sign calculates an ECDSA signature.
//
// This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must
// be aware that the given digest cannot be chosen by an adversery. Common
// solution is to hash any input before calculating the signature.
//
// The produced signature is in the [R || S || V] format where V is 0 or 1.
func Sign(digestHash []byte, prv *ecdsa.PrivateKey) (sig []byte, err error) {
	if len(digestHash) != DigestLength {
		return nil, fmt.Errorf("hash is required to be exactly %d bytes (%d)", DigestLength, len(digestHash))
	}
	seckey := PaddedBigBytes(prv.D, prv.Params().BitSize/8)
	defer zeroBytes(seckey)
	return secp256k1.Sign(digestHash, seckey)
}

// VerifySignature checks that the given public key created signature over digest.
// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) format.
// The signature should have the 64 byte [R || S] format.
func VerifySignature(pubkey, digestHash, signature []byte) bool {
	return secp256k1.VerifySignature(pubkey, digestHash, signature)
}

// DecompressPubkey parses a public key in the 33-byte compressed format.
func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
	x, y := secp256k1.DecompressPubkey(pubkey)
	if x == nil {
		return nil, fmt.Errorf("invalid public key")
	}
	return &ecdsa.PublicKey{X: x, Y: y, Curve: S256()}, nil
}

// CompressPubkey encodes a public key to the 33-byte compressed format.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
	return secp256k1.CompressPubkey(pubkey.X, pubkey.Y)
}

// S256 returns an instance of the secp256k1 curve.
func S256() elliptic.Curve {
	return secp256k1.S256()
}
145	restricted/hasher/encoding.go	Normal file
@@ -0,0 +1,145 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hasher

// Trie keys are dealt with in three distinct encodings:
//
// KEYBYTES encoding contains the actual key and nothing else. This encoding is the
// input to most API functions.
//
// HEX encoding contains one byte for each nibble of the key and an optional trailing
// 'terminator' byte of value 0x10 which indicates whether or not the node at the key
// contains a value. Hex key encoding is used for nodes loaded in memory because it's
// convenient to access.
//
// COMPACT encoding is defined by the Ethereum Yellow Paper (it's called "hex prefix
// encoding" there) and contains the bytes of the key and a flag. The high nibble of the
// first byte contains the flag; the lowest bit encoding the oddness of the length and
// the second-lowest encoding whether the node at the key is a value node. The low nibble
// of the first byte is zero in the case of an even number of nibbles and the first nibble
// in the case of an odd number. All remaining nibbles (now an even number) fit properly
// into the remaining bytes. Compact encoding is used for nodes stored on disk.

func hexToCompact(hex []byte) []byte {
	terminator := byte(0)
	if hasTerm(hex) {
		terminator = 1
		hex = hex[:len(hex)-1]
	}
	buf := make([]byte, len(hex)/2+1)
	buf[0] = terminator << 5 // the flag byte
	if len(hex)&1 == 1 {
		buf[0] |= 1 << 4 // odd flag
		buf[0] |= hex[0] // first nibble is contained in the first byte
		hex = hex[1:]
	}
	decodeNibbles(hex, buf[1:])
	return buf
}

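To make the COMPACT flag rules concrete, here is a small standalone sketch (not part of this commit) that mirrors the hexToCompact logic above on two vectors:

package main

import "fmt"

// compact mirrors hexToCompact from the diff above.
func compact(hex []byte) []byte {
	terminator := byte(0)
	if len(hex) > 0 && hex[len(hex)-1] == 16 {
		terminator = 1
		hex = hex[:len(hex)-1]
	}
	buf := make([]byte, len(hex)/2+1)
	buf[0] = terminator << 5
	if len(hex)&1 == 1 {
		buf[0] |= 1<<4 | hex[0] // odd flag plus leading nibble
		hex = hex[1:]
	}
	for bi, ni := 1, 0; ni < len(hex); bi, ni = bi+1, ni+2 {
		buf[bi] = hex[ni]<<4 | hex[ni+1]
	}
	return buf
}

func main() {
	// Even-length key {1,2,3,4} with terminator: flag byte 0x20.
	fmt.Printf("%x\n", compact([]byte{1, 2, 3, 4, 16})) // 201234
	// Odd-length key {1,2,3} without terminator: flag nibble 0x1 plus first nibble.
	fmt.Printf("%x\n", compact([]byte{1, 2, 3})) // 1123
}
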
// hexToCompactInPlace places the compact key in input buffer, returning the length
// needed for the representation
func hexToCompactInPlace(hex []byte) int {
	var (
		hexLen    = len(hex) // length of the hex input
		firstByte = byte(0)
	)
	// Check if we have a terminator there
	if hexLen > 0 && hex[hexLen-1] == 16 {
		firstByte = 1 << 5
		hexLen-- // last part was the terminator, ignore that
	}
	var (
		binLen = hexLen/2 + 1
		ni     = 0 // index in hex
		bi     = 1 // index in bin (compact)
	)
	if hexLen&1 == 1 {
		firstByte |= 1 << 4 // odd flag
		firstByte |= hex[0] // first nibble is contained in the first byte
		ni++
	}
	for ; ni < hexLen; bi, ni = bi+1, ni+2 {
		hex[bi] = hex[ni]<<4 | hex[ni+1]
	}
	hex[0] = firstByte
	return binLen
}

func compactToHex(compact []byte) []byte {
	if len(compact) == 0 {
		return compact
	}
	base := keybytesToHex(compact)
	// delete terminator flag
	if base[0] < 2 {
		base = base[:len(base)-1]
	}
	// apply odd flag
	chop := 2 - base[0]&1
	return base[chop:]
}

func keybytesToHex(str []byte) []byte {
	l := len(str)*2 + 1
	var nibbles = make([]byte, l)
	for i, b := range str {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[l-1] = 16
	return nibbles
}

// hexToKeybytes turns hex nibbles into key bytes.
// This can only be used for keys of even length.
func hexToKeybytes(hex []byte) []byte {
	if hasTerm(hex) {
		hex = hex[:len(hex)-1]
	}
	if len(hex)&1 != 0 {
		panic("can't convert hex key of odd length")
	}
	key := make([]byte, len(hex)/2)
	decodeNibbles(hex, key)
	return key
}

func decodeNibbles(nibbles []byte, bytes []byte) {
	for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
		bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
	}
}

// prefixLen returns the length of the common prefix of a and b.
func prefixLen(a, b []byte) int {
	var i, length = 0, len(a)
	if len(b) < length {
		length = len(b)
	}
	for ; i < length; i++ {
		if a[i] != b[i] {
			break
		}
	}
	return i
}

// hasTerm returns whether a hex key has the terminator flag.
func hasTerm(s []byte) bool {
	return len(s) > 0 && s[len(s)-1] == 16
}
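
A quick standalone illustration (not part of this commit) of the KEYBYTES <-> HEX round trip described in the header comment:

package main

import "fmt"

func main() {
	key := []byte{0xab, 0xcd}
	// keybytesToHex: one byte per nibble, plus the 0x10 terminator.
	nibbles := make([]byte, len(key)*2+1)
	for i, b := range key {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[len(nibbles)-1] = 16
	fmt.Println(nibbles) // [10 11 12 13 16]

	// hexToKeybytes: strip the terminator and repack nibble pairs.
	hex := nibbles[:len(nibbles)-1]
	back := make([]byte, len(hex)/2)
	for bi, ni := 0, 0; ni < len(hex); bi, ni = bi+1, ni+2 {
		back[bi] = hex[ni]<<4 | hex[ni+1]
	}
	fmt.Printf("%x\n", back) // abcd
}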
209	restricted/hasher/hasher.go	Normal file
@@ -0,0 +1,209 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hasher

import (
	"sync"

	"github.com/openrelayxyz/plugeth-utils/restricted/crypto"
	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	"golang.org/x/crypto/sha3"
)

// Hasher is a type used for the trie Hash operation. A Hasher has some
// internal preallocated temp space.
type Hasher struct {
	sha      crypto.KeccakState
	tmp      []byte
	encbuf   rlp.EncoderBuffer
	parallel bool // Whether to use parallel threads when hashing
}

// HasherPool holds pureHashers
var HasherPool = sync.Pool{
	New: func() interface{} {
		return &Hasher{
			tmp:    make([]byte, 0, 550), // cap is as large as a full fullNode.
			sha:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
			encbuf: rlp.NewEncoderBuffer(nil),
		}
	},
}

func newHasher(parallel bool) *Hasher {
	h := HasherPool.Get().(*Hasher)
	h.parallel = parallel
	return h
}

func returnHasherToPool(h *Hasher) {
	HasherPool.Put(h)
}

// hash collapses a node down into a hash node, also returning a copy of the
// original node initialized with the computed hash to replace the original one.
func (h *Hasher) hash(n node, force bool) (hashed node, cached node) {
	// Return the cached hash if it's available
	if hash, _ := n.cache(); hash != nil {
		return hash, n
	}
	// Trie not processed yet, walk the children
	switch n := n.(type) {
	case *shortNode:
		collapsed, cached := h.hashShortNodeChildren(n)
		hashed := h.shortnodeToHash(collapsed, force)
		// We need to retain the possibly _not_ hashed node, in case it was too
		// small to be hashed
		if hn, ok := hashed.(hashNode); ok {
			cached.flags.hash = hn
		} else {
			cached.flags.hash = nil
		}
		return hashed, cached
	case *fullNode:
		collapsed, cached := h.hashFullNodeChildren(n)
		hashed = h.fullnodeToHash(collapsed, force)
		if hn, ok := hashed.(hashNode); ok {
			cached.flags.hash = hn
		} else {
			cached.flags.hash = nil
		}
		return hashed, cached
	default:
		// Value and hash nodes don't have children so they're left as were
		return n, n
	}
}

// hashShortNodeChildren collapses the short node. The returned collapsed node
// holds a live reference to the Key, and must not be modified.
func (h *Hasher) hashShortNodeChildren(n *shortNode) (collapsed, cached *shortNode) {
	// Hash the short node's child, caching the newly hashed subtree
	collapsed, cached = n.copy(), n.copy()
	// Previously, we did copy this one. We don't seem to need to actually
	// do that, since we don't overwrite/reuse keys
	//cached.Key = common.CopyBytes(n.Key)
	collapsed.Key = hexToCompact(n.Key)
	// Unless the child is a valuenode or hashnode, hash it
	switch n.Val.(type) {
	case *fullNode, *shortNode:
		collapsed.Val, cached.Val = h.hash(n.Val, false)
	}
	return collapsed, cached
}

func (h *Hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached *fullNode) {
	// Hash the full node's children, caching the newly hashed subtrees
	cached = n.copy()
	collapsed = n.copy()
	if h.parallel {
		var wg sync.WaitGroup
		wg.Add(16)
		for i := 0; i < 16; i++ {
			go func(i int) {
				hasher := newHasher(false)
				if child := n.Children[i]; child != nil {
					collapsed.Children[i], cached.Children[i] = hasher.hash(child, false)
				} else {
					collapsed.Children[i] = nilValueNode
				}
				returnHasherToPool(hasher)
				wg.Done()
			}(i)
		}
		wg.Wait()
	} else {
		for i := 0; i < 16; i++ {
			if child := n.Children[i]; child != nil {
				collapsed.Children[i], cached.Children[i] = h.hash(child, false)
			} else {
				collapsed.Children[i] = nilValueNode
			}
		}
	}
	return collapsed, cached
}

// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode
// should have hex-type Key, which will be converted (without modification)
// into compact form for RLP encoding.
// If the rlp data is smaller than 32 bytes, `nil` is returned.
func (h *Hasher) shortnodeToHash(n *shortNode, force bool) node {
	n.encode(h.encbuf)
	enc := h.encodedBytes()

	if len(enc) < 32 && !force {
		return n // Nodes smaller than 32 bytes are stored inside their parent
	}
	return h.hashData(enc)
}

// fullnodeToHash is used to create a hashNode from a set of hashNodes (which
// may contain nil values)
func (h *Hasher) fullnodeToHash(n *fullNode, force bool) node {
	n.encode(h.encbuf)
	enc := h.encodedBytes()

	if len(enc) < 32 && !force {
		return n // Nodes smaller than 32 bytes are stored inside their parent
	}
	return h.hashData(enc)
}

// encodedBytes returns the result of the last encoding operation on h.encbuf.
// This also resets the encoder buffer.
//
// All node encoding must be done like this:
//
//	node.encode(h.encbuf)
//	enc := h.encodedBytes()
//
// This convention exists because node.encode can only be inlined/escape-analyzed when
// called on a concrete receiver type.
func (h *Hasher) encodedBytes() []byte {
	h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
	h.encbuf.Reset(nil)
	return h.tmp
}

// hashData hashes the provided data
func (h *Hasher) hashData(data []byte) hashNode {
	n := make(hashNode, 32)
	h.sha.Reset()
	h.sha.Write(data)
	h.sha.Read(n)
	return n
}

// proofHash is used to construct trie proofs, and returns the 'collapsed'
// node (for later RLP encoding) as well as the hashed node -- unless the
// node is smaller than 32 bytes, in which case it will be returned as is.
// This method does not do anything on value- or hash-nodes.
func (h *Hasher) proofHash(original node) (collapsed, hashed node) {
	switch n := original.(type) {
	case *shortNode:
		sn, _ := h.hashShortNodeChildren(n)
		return sn, h.shortnodeToHash(sn, false)
	case *fullNode:
		fn, _ := h.hashFullNodeChildren(n)
		return fn, h.fullnodeToHash(fn, false)
	default:
		// Value and hash nodes don't have children so they're left as were
		return n, n
	}
}
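
hashData above reads the digest through the Keccak sponge's Read method, which squeezes output without the extra allocation that Sum makes; the type assertion in HasherPool relies on the same property. A standalone sketch of that pattern (not part of this commit):

package main

import (
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

// keccakState mirrors crypto.KeccakState: the legacy Keccak-256 state
// from x/crypto implements Read in addition to hash.Hash.
type keccakState interface {
	hash.Hash
	Read([]byte) (int, error)
}

func main() {
	d := sha3.NewLegacyKeccak256().(keccakState)
	d.Write([]byte("hello"))
	out := make([]byte, 32)
	d.Read(out) // squeeze 32 bytes directly, no Sum copy
	fmt.Printf("%x\n", out)
}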
280	restricted/hasher/node.go	Normal file
@@ -0,0 +1,280 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hasher

import (
	"fmt"
	"io"
	"strings"

	"github.com/openrelayxyz/plugeth-utils/core"
	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
)

var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}

type node interface {
	cache() (hashNode, bool)
	encode(w rlp.EncoderBuffer)
	fstring(string) string
}

type (
	fullNode struct {
		Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
		flags    nodeFlag
	}
	shortNode struct {
		Key   []byte
		Val   node
		flags nodeFlag
	}
	hashNode  []byte
	valueNode []byte
)

// nilValueNode is used when collapsing internal trie nodes for hashing, since
// unset children need to serialize correctly.
var nilValueNode = valueNode(nil)

// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

func (n *fullNode) copy() *fullNode   { copy := *n; return &copy }
func (n *shortNode) copy() *shortNode { copy := *n; return &copy }

// nodeFlag contains caching-related metadata about a node.
type nodeFlag struct {
	hash  hashNode // cached hash of the node (may be nil)
	dirty bool     // whether the node has changes that must be written to the database
}

func (n *fullNode) cache() (hashNode, bool)  { return n.flags.hash, n.flags.dirty }
func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n hashNode) cache() (hashNode, bool)   { return nil, true }
func (n valueNode) cache() (hashNode, bool)  { return nil, true }

// Pretty printing.
func (n *fullNode) String() string  { return n.fstring("") }
func (n *shortNode) String() string { return n.fstring("") }
func (n hashNode) String() string   { return n.fstring("") }
func (n valueNode) String() string  { return n.fstring("") }

func (n *fullNode) fstring(ind string) string {
	resp := fmt.Sprintf("[\n%s  ", ind)
	for i, node := range &n.Children {
		if node == nil {
			resp += fmt.Sprintf("%s: <nil> ", indices[i])
		} else {
			resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+"  "))
		}
	}
	return resp + fmt.Sprintf("\n%s] ", ind)
}
func (n *shortNode) fstring(ind string) string {
	return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+"  "))
}
func (n hashNode) fstring(ind string) string {
	return fmt.Sprintf("<%x> ", []byte(n))
}
func (n valueNode) fstring(ind string) string {
	return fmt.Sprintf("%x ", []byte(n))
}

// mustDecodeNode is a wrapper of decodeNode and panics if any error is encountered.
func mustDecodeNode(hash, buf []byte) node {
	n, err := decodeNode(hash, buf)
	if err != nil {
		panic(fmt.Sprintf("node %x: %v", hash, err))
	}
	return n
}

// mustDecodeNodeUnsafe is a wrapper of decodeNodeUnsafe and panics if any error is
// encountered.
func mustDecodeNodeUnsafe(hash, buf []byte) node {
	n, err := decodeNodeUnsafe(hash, buf)
	if err != nil {
		panic(fmt.Sprintf("node %x: %v", hash, err))
	}
	return n
}

// decodeNode parses the RLP encoding of a trie node. It will deep-copy the passed
// byte slice for decoding, so it's safe to modify the byte slice afterwards. The
// decode performance of this function is not optimal, but it is suitable for most
// scenarios with low performance requirements, where it is hard to determine whether
// the byte slice will be modified or not.
func decodeNode(hash, buf []byte) (node, error) {
	return decodeNodeUnsafe(hash, core.CopyBytes(buf))
}

// decodeNodeUnsafe parses the RLP encoding of a trie node. The passed byte slice
// will be directly referenced by node without bytes deep copy, so the input MUST
// not be changed after.
func decodeNodeUnsafe(hash, buf []byte) (node, error) {
	if len(buf) == 0 {
		return nil, io.ErrUnexpectedEOF
	}
	elems, _, err := rlp.SplitList(buf)
	if err != nil {
		return nil, fmt.Errorf("decode error: %v", err)
	}
	switch c, _ := rlp.CountValues(elems); c {
	case 2:
		n, err := decodeShort(hash, elems)
		return n, wrapError(err, "short")
	case 17:
		n, err := decodeFull(hash, elems)
		return n, wrapError(err, "full")
	default:
		return nil, fmt.Errorf("invalid number of list elements: %v", c)
	}
}

func decodeShort(hash, elems []byte) (node, error) {
	kbuf, rest, err := rlp.SplitString(elems)
	if err != nil {
		return nil, err
	}
	flag := nodeFlag{hash: hash}
	key := compactToHex(kbuf)
	if hasTerm(key) {
		// value node
		val, _, err := rlp.SplitString(rest)
		if err != nil {
			return nil, fmt.Errorf("invalid value node: %v", err)
		}
		return &shortNode{key, valueNode(val), flag}, nil
	}
	r, _, err := decodeRef(rest)
	if err != nil {
		return nil, wrapError(err, "val")
	}
	return &shortNode{key, r, flag}, nil
}

func decodeFull(hash, elems []byte) (*fullNode, error) {
	n := &fullNode{flags: nodeFlag{hash: hash}}
	for i := 0; i < 16; i++ {
		cld, rest, err := decodeRef(elems)
		if err != nil {
			return n, wrapError(err, fmt.Sprintf("[%d]", i))
		}
		n.Children[i], elems = cld, rest
	}
	val, _, err := rlp.SplitString(elems)
	if err != nil {
		return n, err
	}
	if len(val) > 0 {
		n.Children[16] = valueNode(val)
	}
	return n, nil
}

const hashLen = len(core.Hash{})

func decodeRef(buf []byte) (node, []byte, error) {
	kind, val, rest, err := rlp.Split(buf)
	if err != nil {
		return nil, buf, err
	}
	switch {
	case kind == rlp.List:
		// 'embedded' node reference. The encoding must be smaller
		// than a hash in order to be valid.
		if size := len(buf) - len(rest); size > hashLen {
			err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
			return nil, buf, err
		}
		n, err := decodeNode(nil, buf)
		return n, rest, err
	case kind == rlp.String && len(val) == 0:
		// empty node
		return nil, rest, nil
	case kind == rlp.String && len(val) == 32:
		return hashNode(val), rest, nil
	default:
		return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val))
	}
}

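A standalone sketch (not part of this commit) of the SplitList/CountValues dispatch that decodeNodeUnsafe performs, using go-ethereum's rlp package as an assumed stand-in for restricted/rlp, which exposes the same API:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp" // assumed stand-in for restricted/rlp
)

func main() {
	// A two-element RLP list, the same shape decodeNodeUnsafe routes to
	// decodeShort (a 17-element list would route to decodeFull).
	blob, _ := rlp.EncodeToBytes([][]byte{{0x20, 0x01}, {0x61}})

	elems, _, err := rlp.SplitList(blob)
	if err != nil {
		panic(err)
	}
	count, _ := rlp.CountValues(elems)
	fmt.Println("list elements:", count) // 2
}
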
// wraps a decoding error with information about the path to the
// invalid child node (for debugging encoding issues).
type decodeError struct {
	what  error
	stack []string
}

func wrapError(err error, ctx string) error {
	if err == nil {
		return nil
	}
	if decErr, ok := err.(*decodeError); ok {
		decErr.stack = append(decErr.stack, ctx)
		return decErr
	}
	return &decodeError{err, []string{ctx}}
}

func (err *decodeError) Error() string {
	return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-"))
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }
87	restricted/hasher/node_enc.go	Normal file
@@ -0,0 +1,87 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hasher

import (
	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
)

func nodeToBytes(n node) []byte {
	w := rlp.NewEncoderBuffer(nil)
	n.encode(w)
	result := w.ToBytes()
	w.Flush()
	return result
}

func (n *fullNode) encode(w rlp.EncoderBuffer) {
	offset := w.List()
	for _, c := range n.Children {
		if c != nil {
			c.encode(w)
		} else {
			w.Write(rlp.EmptyString)
		}
	}
	w.ListEnd(offset)
}

func (n *shortNode) encode(w rlp.EncoderBuffer) {
	offset := w.List()
	w.WriteBytes(n.Key)
	if n.Val != nil {
		n.Val.encode(w)
	} else {
		w.Write(rlp.EmptyString)
	}
	w.ListEnd(offset)
}

func (n hashNode) encode(w rlp.EncoderBuffer) {
	w.WriteBytes(n)
}

func (n valueNode) encode(w rlp.EncoderBuffer) {
	w.WriteBytes(n)
}

func (n rawFullNode) encode(w rlp.EncoderBuffer) {
	offset := w.List()
	for _, c := range n {
		if c != nil {
			c.encode(w)
		} else {
			w.Write(rlp.EmptyString)
		}
	}
	w.ListEnd(offset)
}

func (n *rawShortNode) encode(w rlp.EncoderBuffer) {
	offset := w.List()
	w.WriteBytes(n.Key)
	if n.Val != nil {
		n.Val.encode(w)
	} else {
		w.Write(rlp.EmptyString)
	}
	w.ListEnd(offset)
}

func (n rawNode) encode(w rlp.EncoderBuffer) {
	w.Write(n)
}
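
All of the encoders above follow the same open-list/write/close-list pattern. A standalone sketch of that pattern (not part of this commit), again borrowing go-ethereum's rlp package as an assumed stand-in for restricted/rlp:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp" // assumed stand-in for restricted/rlp
)

func main() {
	// Encode ["key", "val"] the same way shortNode.encode does.
	w := rlp.NewEncoderBuffer(nil)
	offset := w.List()
	w.WriteBytes([]byte("key"))
	w.WriteBytes([]byte("val"))
	w.ListEnd(offset)
	fmt.Printf("%x\n", w.ToBytes()) // c8836b65798376616c
}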
532	restricted/hasher/statetrie.go	Normal file
@@ -0,0 +1,532 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hasher

import (
	"bufio"
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/openrelayxyz/plugeth-utils/core"
	"github.com/openrelayxyz/plugeth-utils/restricted/types"
)

var ErrCommitDisabled = errors.New("no database for committing")

var stPool = sync.Pool{
	New: func() interface{} {
		return NewStackTrie(nil)
	},
}

// NodeWriteFunc is used to provide all information of a dirty node for committing
// so that callers can flush nodes into database with desired scheme.
type NodeWriteFunc = func(owner core.Hash, path []byte, hash core.Hash, blob []byte)

func stackTrieFromPool(writeFn NodeWriteFunc, owner core.Hash) *StackTrie {
	st := stPool.Get().(*StackTrie)
	st.owner = owner
	st.writeFn = writeFn
	return st
}

func returnToPool(st *StackTrie) {
	st.Reset()
	stPool.Put(st)
}

// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
	owner    core.Hash      // the owner of the trie
	nodeType uint8          // node type (as in branch, ext, leaf)
	val      []byte         // value contained by this node if it's a leaf
	key      []byte         // key chunk covered by this (leaf|ext) node
	children [16]*StackTrie // list of children (for branch and exts)
	writeFn  NodeWriteFunc  // function for committing nodes, can be nil
}

// NewStackTrie allocates and initializes an empty trie.
func NewStackTrie(writeFn NodeWriteFunc) *StackTrie {
	return &StackTrie{
		nodeType: emptyNode,
		writeFn:  writeFn,
	}
}

// NewStackTrieWithOwner allocates and initializes an empty trie, but with
// the additional owner field.
func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner core.Hash) *StackTrie {
	return &StackTrie{
		owner:    owner,
		nodeType: emptyNode,
		writeFn:  writeFn,
	}
}

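A usage sketch (not part of this commit) of the ordered-insert contract described above; keys must arrive in ascending order and values must be non-empty:

package main

import (
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/restricted/hasher"
)

func main() {
	// With a nil NodeWriteFunc the trie can be hashed but not committed.
	st := hasher.NewStackTrie(nil)
	st.Update([]byte{0x01}, []byte("a"))
	st.Update([]byte{0x02}, []byte("b"))
	st.Update([]byte{0x03}, []byte("c"))
	fmt.Printf("root: %x\n", st.Hash())
}
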
// NewFromBinary initialises a serialized stacktrie with the given db.
func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) {
	var st StackTrie
	if err := st.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	// If a database is used, we need to recursively add it to every child
	if writeFn != nil {
		st.setWriter(writeFn)
	}
	return &st, nil
}

// MarshalBinary implements encoding.BinaryMarshaler
func (st *StackTrie) MarshalBinary() (data []byte, err error) {
	var (
		b bytes.Buffer
		w = bufio.NewWriter(&b)
	)
	if err := gob.NewEncoder(w).Encode(struct {
		Owner    core.Hash
		NodeType uint8
		Val      []byte
		Key      []byte
	}{
		st.owner,
		st.nodeType,
		st.val,
		st.key,
	}); err != nil {
		return nil, err
	}
	for _, child := range st.children {
		if child == nil {
			w.WriteByte(0)
			continue
		}
		w.WriteByte(1)
		if childData, err := child.MarshalBinary(); err != nil {
			return nil, err
		} else {
			w.Write(childData)
		}
	}
	w.Flush()
	return b.Bytes(), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
func (st *StackTrie) UnmarshalBinary(data []byte) error {
	r := bytes.NewReader(data)
	return st.unmarshalBinary(r)
}

func (st *StackTrie) unmarshalBinary(r io.Reader) error {
	var dec struct {
		Owner    core.Hash
		NodeType uint8
		Val      []byte
		Key      []byte
	}
	if err := gob.NewDecoder(r).Decode(&dec); err != nil {
		return err
	}
	st.owner = dec.Owner
	st.nodeType = dec.NodeType
	st.val = dec.Val
	st.key = dec.Key

	var hasChild = make([]byte, 1)
	for i := range st.children {
		if _, err := r.Read(hasChild); err != nil {
			return err
		} else if hasChild[0] == 0 {
			continue
		}
		var child StackTrie
		if err := child.unmarshalBinary(r); err != nil {
			return err
		}
		st.children[i] = &child
	}
	return nil
}

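A sketch (not part of this commit) of the MarshalBinary/NewFromBinary round trip; the node and its children are gob-encoded pre-order, as shown above:

package main

import (
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/restricted/hasher"
)

func main() {
	st := hasher.NewStackTrie(nil)
	st.Update([]byte{0x01}, []byte("a"))

	blob, err := st.MarshalBinary()
	if err != nil {
		panic(err)
	}
	restored, err := hasher.NewFromBinary(blob, nil) // pass a NodeWriteFunc to re-enable committing
	if err != nil {
		panic(err)
	}
	fmt.Println(st.Hash() == restored.Hash()) // true
}
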
func (st *StackTrie) setWriter(writeFn NodeWriteFunc) {
|
||||
st.writeFn = writeFn
|
||||
for _, child := range st.children {
|
||||
if child != nil {
|
||||
child.setWriter(writeFn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newLeaf(owner core.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
|
||||
st := stackTrieFromPool(writeFn, owner)
|
||||
st.nodeType = leafNode
|
||||
st.key = append(st.key, key...)
|
||||
st.val = val
|
||||
return st
|
||||
}
|
||||
|
||||
func newExt(owner core.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
|
||||
st := stackTrieFromPool(writeFn, owner)
|
||||
st.nodeType = extNode
|
||||
st.key = append(st.key, key...)
|
||||
st.children[0] = child
|
||||
return st
|
||||
}
|
||||
|
||||
// List all values that StackTrie#nodeType can hold
|
||||
const (
|
||||
emptyNode = iota
|
||||
branchNode
|
||||
extNode
|
||||
leafNode
|
||||
hashedNode
|
||||
)
|
||||
|
||||
// TryUpdate inserts a (key, value) pair into the stack trie
|
||||
func (st *StackTrie) TryUpdate(key, value []byte) error {
|
||||
k := keybytesToHex(key)
|
||||
if len(value) == 0 {
|
||||
panic("deletion not supported")
|
||||
}
|
||||
st.insert(k[:len(k)-1], value, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *StackTrie) Update(key, value []byte) {
|
||||
if err := st.TryUpdate(key, value); err != nil {
|
||||
fmt.Errorf("Unhandled trie error in StackTrie.Update", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (st *StackTrie) Reset() {
|
||||
st.owner = core.Hash{}
|
||||
st.writeFn = nil
|
||||
st.key = st.key[:0]
|
||||
st.val = nil
|
||||
for i := range st.children {
|
||||
st.children[i] = nil
|
||||
}
|
||||
st.nodeType = emptyNode
|
||||
}
|
||||
|
||||
// Helper function that, given a full key, determines the index
|
||||
// at which the chunk pointed by st.keyOffset is different from
|
||||
// the same chunk in the full key.
|
||||
func (st *StackTrie) getDiffIndex(key []byte) int {
|
||||
for idx, nibble := range st.key {
|
||||
if nibble != key[idx] {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
return len(st.key)
|
||||
}
|
||||
|
||||
// Helper function to that inserts a (key, value) pair into
|
||||
// the trie.
|
||||
func (st *StackTrie) insert(key, value []byte, prefix []byte) {
|
||||
switch st.nodeType {
|
||||
case branchNode: /* Branch */
|
||||
idx := int(key[0])
|
||||
|
||||
// Unresolve elder siblings
|
||||
for i := idx - 1; i >= 0; i-- {
|
||||
if st.children[i] != nil {
|
||||
if st.children[i].nodeType != hashedNode {
|
||||
st.children[i].hash(append(prefix, byte(i)))
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Add new child
|
||||
if st.children[idx] == nil {
|
||||
st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn)
|
||||
} else {
|
||||
st.children[idx].insert(key[1:], value, append(prefix, key[0]))
|
||||
}
|
||||
|
||||
case extNode: /* Ext */
|
||||
// Compare both key chunks and see where they differ
|
||||
diffidx := st.getDiffIndex(key)
|
||||
|
||||
// Check if chunks are identical. If so, recurse into
|
||||
// the child node. Otherwise, the key has to be split
|
||||
// into 1) an optional common prefix, 2) the fullnode
|
||||
// representing the two differing path, and 3) a leaf
|
||||
// for each of the differentiated subtrees.
|
||||
if diffidx == len(st.key) {
|
||||
// Ext key and key segment are identical, recurse into
|
||||
// the child node.
|
||||
st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...))
|
||||
return
|
||||
}
|
||||
// Save the original part. Depending if the break is
|
||||
// at the extension's last byte or not, create an
|
||||
// intermediate extension or use the extension's child
|
||||
// node directly.
|
||||
var n *StackTrie
|
||||
if diffidx < len(st.key)-1 {
|
||||
// Break on the non-last byte, insert an intermediate
|
||||
// extension. The path prefix of the newly-inserted
|
||||
// extension should also contain the different byte.
|
||||
n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn)
|
||||
n.hash(append(prefix, st.key[:diffidx+1]...))
|
||||
} else {
|
||||
// Break on the last byte, no need to insert
|
||||
// an extension node: reuse the current node.
|
||||
// The path prefix of the original part should
|
||||
// still be same.
|
||||
n = st.children[0]
|
||||
n.hash(append(prefix, st.key...))
|
||||
}
|
||||
var p *StackTrie
|
||||
if diffidx == 0 {
|
||||
// the break is on the first byte, so
|
||||
// the current node is converted into
|
||||
// a branch node.
|
||||
st.children[0] = nil
|
||||
p = st
|
||||
st.nodeType = branchNode
|
||||
} else {
|
||||
// the common prefix is at least one byte
|
||||
// long, insert a new intermediate branch
|
||||
// node.
|
||||
st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
|
||||
st.children[0].nodeType = branchNode
|
||||
p = st.children[0]
|
||||
}
|
||||
// Create a leaf for the inserted part
|
||||
o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
|
||||
|
||||
// Insert both child leaves where they belong:
|
||||
origIdx := st.key[diffidx]
|
||||
newIdx := key[diffidx]
|
||||
p.children[origIdx] = n
|
||||
p.children[newIdx] = o
|
||||
st.key = st.key[:diffidx]
|
||||
|
||||
case leafNode: /* Leaf */
|
||||
// Compare both key chunks and see where they differ
|
||||
diffidx := st.getDiffIndex(key)
|
||||
|
||||
// Overwriting a key isn't supported, which means that
|
||||
// the current leaf is expected to be split into 1) an
|
||||
// optional extension for the common prefix of these 2
|
||||
// keys, 2) a fullnode selecting the path on which the
|
||||
// keys differ, and 3) one leaf for the differentiated
|
||||
// component of each key.
|
||||
if diffidx >= len(st.key) {
|
||||
panic("Trying to insert into existing key")
|
||||
}
|
||||
|
||||
// Check if the split occurs at the first nibble of the
|
||||
// chunk. In that case, no prefix extnode is necessary.
|
||||
// Otherwise, create that
|
||||
var p *StackTrie
|
||||
if diffidx == 0 {
|
||||
// Convert current leaf into a branch
|
||||
st.nodeType = branchNode
|
||||
p = st
|
||||
st.children[0] = nil
|
||||
} else {
|
||||
// Convert current node into an ext,
|
||||
// and insert a child branch node.
|
||||
st.nodeType = extNode
|
||||
st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
|
||||
st.children[0].nodeType = branchNode
|
||||
p = st.children[0]
|
||||
}
|
||||
|
||||
// Create the two child leaves: one containing the original
|
||||
// value and another containing the new value. The child leaf
|
||||
// is hashed directly in order to free up some memory.
|
||||
origIdx := st.key[diffidx]
|
||||
p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn)
|
||||
p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
|
||||
|
||||
newIdx := key[diffidx]
|
||||
p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
|
||||
|
||||
// Finally, cut off the key part that has been passed
|
||||
// over to the children.
|
||||
st.key = st.key[:diffidx]
|
||||
st.val = nil
|
||||
|
||||
case emptyNode: /* Empty */
|
||||
st.nodeType = leafNode
|
||||
st.key = key
|
||||
st.val = value
|
||||
|
||||
case hashedNode:
|
||||
panic("trying to insert into hash")
|
||||
|
||||
default:
|
||||
panic("invalid type")
|
||||
}
|
||||
}
|
||||
|
||||
// hash converts st into a 'hashedNode', if possible. Possible outcomes:
|
||||
//
|
||||
// 1. The rlp-encoded value was >= 32 bytes:
|
||||
// - Then the 32-byte `hash` will be accessible in `st.val`.
|
||||
// - And the 'st.type' will be 'hashedNode'
|
||||
//
|
||||
// 2. The rlp-encoded value was < 32 bytes
|
||||
// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
|
||||
// - And the 'st.type' will be 'hashedNode' AGAIN
|
||||
//
|
||||
// This method also sets 'st.type' to hashedNode, and clears 'st.key'.
|
||||
func (st *StackTrie) hash(path []byte) {
|
||||
h := newHasher(false)
|
||||
defer returnHasherToPool(h)
|
||||
|
||||
st.hashRec(h, path)
|
||||
}
|
||||
|
||||
func (st *StackTrie) hashRec(hasher *Hasher, path []byte) {
	// The switch below sets this to the RLP-encoding of this node.
	var encodedNode []byte

	switch st.nodeType {
	case hashedNode:
		return

	case emptyNode:
		st.val = types.EmptyRootHash.Bytes()
		st.key = st.key[:0]
		st.nodeType = hashedNode
		return

	case branchNode:
		var nodes rawFullNode
		for i, child := range st.children {
			if child == nil {
				nodes[i] = nilValueNode
				continue
			}
			child.hashRec(hasher, append(path, byte(i)))
			if len(child.val) < 32 {
				nodes[i] = rawNode(child.val)
			} else {
				nodes[i] = hashNode(child.val)
			}

			// Release child back to pool.
			st.children[i] = nil
			returnToPool(child)
		}

		nodes.encode(hasher.encbuf)
		encodedNode = hasher.encodedBytes()

	case extNode:
		st.children[0].hashRec(hasher, append(path, st.key...))

		n := rawShortNode{Key: hexToCompact(st.key)}
		if len(st.children[0].val) < 32 {
			n.Val = rawNode(st.children[0].val)
		} else {
			n.Val = hashNode(st.children[0].val)
		}

		n.encode(hasher.encbuf)
		encodedNode = hasher.encodedBytes()

		// Release child back to pool.
		returnToPool(st.children[0])
		st.children[0] = nil

	case leafNode:
		st.key = append(st.key, byte(16))
		n := rawShortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)}

		n.encode(hasher.encbuf)
		encodedNode = hasher.encodedBytes()

	default:
		panic("invalid node type")
	}

	st.nodeType = hashedNode
	st.key = st.key[:0]
	if len(encodedNode) < 32 {
		st.val = core.CopyBytes(encodedNode)
		return
	}

	// Write the hash to the 'val'. We allocate a new val here to not mutate
	// input values.
	st.val = hasher.hashData(encodedNode)
	if st.writeFn != nil {
		st.writeFn(st.owner, path, core.BytesToHash(st.val), encodedNode)
	}
}

// Hash returns the hash of the current node.
func (st *StackTrie) Hash() (h core.Hash) {
	hasher := newHasher(false)
	defer returnHasherToPool(hasher)

	st.hashRec(hasher, nil)
	if len(st.val) == 32 {
		copy(h[:], st.val)
		return h
	}
	// If the node's RLP isn't 32 bytes long, the node will not
	// be hashed, and instead contain the rlp-encoding of the
	// node. For the top level node, we need to force the hashing.
	hasher.sha.Reset()
	hasher.sha.Write(st.val)
	hasher.sha.Read(h[:])
	return h
}

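// A minimal usage sketch (illustrative, not part of this file). It assumes
// the trie exposes an Update method for inserting key/value pairs, mirroring
// upstream go-ethereum's StackTrie; the exported name may differ in this
// package, and core.Hash{} is assumed to be a valid zero owner. Keys must be
// inserted in ascending order, since a stack trie hashes finished subtries
// eagerly.
//
//	st := NewStackTrieWithOwner(nil, core.Hash{}) // nil writeFn: hash-only mode
//	st.Update([]byte("key-1"), []byte("value-1")) // Update assumed, see above
//	st.Update([]byte("key-2"), []byte("value-2"))
//	root := st.Hash() // 32-byte root, forced even for a small top-level node
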
// Commit will first hash the entire trie if it's not hashed yet, and then
// commit all nodes to the associated database. In practice, most of the
// trie nodes may have been committed already; the main purpose here is to
// commit the root node.
//
// An associated database is required; otherwise the whole commit
// functionality should be disabled.
func (st *StackTrie) Commit() (h core.Hash, err error) {
	if st.writeFn == nil {
		return core.Hash{}, ErrCommitDisabled
	}
	hasher := newHasher(false)
	defer returnHasherToPool(hasher)

	st.hashRec(hasher, nil)
	if len(st.val) == 32 {
		copy(h[:], st.val)
		return h, nil
	}
	// If the node's RLP isn't 32 bytes long, the node will not
	// be hashed (and committed), and instead contain the rlp-encoding of the
	// node. For the top level node, we need to force the hashing+commit.
	hasher.sha.Reset()
	hasher.sha.Write(st.val)
	hasher.sha.Read(h[:])

	st.writeFn(st.owner, nil, h, st.val)
	return h, nil
}

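// A hedged sketch of committing through a write callback (illustrative, not
// part of this file). The writeFn shape is inferred from the call in hashRec
// above — owner, path, node hash, RLP blob — with the parameter types assumed;
// an in-memory map stands in for a real database.
//
//	nodes := make(map[core.Hash][]byte)
//	writeFn := func(owner core.Hash, path []byte, hash core.Hash, blob []byte) {
//		nodes[hash] = append([]byte{}, blob...) // copy: the blob may be reused
//	}
//	st := NewStackTrieWithOwner(writeFn, core.Hash{})
//	// ... insert keys in ascending order ...
//	root, err := st.Commit() // ErrCommitDisabled is returned only when writeFn is nil
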
@ -34,231 +34,6 @@ var (
	CalaverasGenesisHash = core.HexToHash("0xeb9233d066c275efcdfed8037f4fc082770176aefdbcb7691c71da412a5670f2")
)

// TrustedCheckpoints associates each known checkpoint with the genesis hash of
// the chain it belongs to.
var TrustedCheckpoints = map[core.Hash]*TrustedCheckpoint{
	MainnetGenesisHash: MainnetTrustedCheckpoint,
	RopstenGenesisHash: RopstenTrustedCheckpoint,
	RinkebyGenesisHash: RinkebyTrustedCheckpoint,
	GoerliGenesisHash:  GoerliTrustedCheckpoint,
}

// CheckpointOracles associates each known checkpoint oracle with the genesis hash of
// the chain it belongs to.
var CheckpointOracles = map[core.Hash]*CheckpointOracleConfig{
	MainnetGenesisHash: MainnetCheckpointOracle,
	RopstenGenesisHash: RopstenCheckpointOracle,
	RinkebyGenesisHash: RinkebyCheckpointOracle,
	GoerliGenesisHash:  GoerliCheckpointOracle,
}

var (
	// MainnetChainConfig is the chain parameters to run a node on the main network.
	MainnetChainConfig = &ChainConfig{
		ChainID:             big.NewInt(1),
		HomesteadBlock:      big.NewInt(1_150_000),
		DAOForkBlock:        big.NewInt(1_920_000),
		DAOForkSupport:      true,
		EIP150Block:         big.NewInt(2_463_000),
		EIP150Hash:          core.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
		EIP155Block:         big.NewInt(2_675_000),
		EIP158Block:         big.NewInt(2_675_000),
		ByzantiumBlock:      big.NewInt(4_370_000),
		ConstantinopleBlock: big.NewInt(7_280_000),
		PetersburgBlock:     big.NewInt(7_280_000),
		IstanbulBlock:       big.NewInt(9_069_000),
		MuirGlacierBlock:    big.NewInt(9_200_000),
		BerlinBlock:         big.NewInt(12_244_000),
		LondonBlock:         big.NewInt(12_965_000),
		Ethash:              new(EthashConfig),
	}

	// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
	MainnetTrustedCheckpoint = &TrustedCheckpoint{
		SectionIndex: 389,
		SectionHead:  core.HexToHash("0x8f96e510cf64abf34095c5aa3937acdf5316de5540945b9688f4a2e083cddc73"),
		CHTRoot:      core.HexToHash("0xa2362493848d6dbc50dcbbf74c017ea808b8938bfb129217d507bd276950d7ac"),
		BloomRoot:    core.HexToHash("0x72fc78a841bde7e08e1fb7c187b622c49dc8271db12db748ff5d0f27bdb41413"),
	}

	// MainnetCheckpointOracle contains a set of configs for the main network oracle.
	MainnetCheckpointOracle = &CheckpointOracleConfig{
		Address: core.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"),
		Signers: []core.Address{
			core.HexToAddress("0x1b2C260efc720BE89101890E4Db589b44E950527"), // Peter
			core.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
			core.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
			core.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
			core.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume
		},
		Threshold: 2,
	}

	// RopstenChainConfig contains the chain parameters to run a node on the Ropsten test network.
	RopstenChainConfig = &ChainConfig{
		ChainID:             big.NewInt(3),
		HomesteadBlock:      big.NewInt(0),
		DAOForkBlock:        nil,
		DAOForkSupport:      true,
		EIP150Block:         big.NewInt(0),
		EIP150Hash:          core.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"),
		EIP155Block:         big.NewInt(10),
		EIP158Block:         big.NewInt(10),
		ByzantiumBlock:      big.NewInt(1_700_000),
		ConstantinopleBlock: big.NewInt(4_230_000),
		PetersburgBlock:     big.NewInt(4_939_394),
		IstanbulBlock:       big.NewInt(6_485_846),
		MuirGlacierBlock:    big.NewInt(7_117_117),
		BerlinBlock:         big.NewInt(9_812_189),
		LondonBlock:         big.NewInt(10_499_401),
		Ethash:              new(EthashConfig),
	}

	// RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
	RopstenTrustedCheckpoint = &TrustedCheckpoint{
		SectionIndex: 322,
		SectionHead:  core.HexToHash("0xe3f2fb70acd752bbcac06b67688db8430815c788a31213011ed51b966108a5f4"),
		CHTRoot:      core.HexToHash("0xb2993a6bc28b23b84159cb477c38c0ec5607434faae6b3657ad44cbcf116f288"),
		BloomRoot:    core.HexToHash("0x871841e5c2ada9dab2011a550d38e9fe0a30047cfc81f1ffc7ebc09f4f230732"),
	}

	// RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle.
	RopstenCheckpointOracle = &CheckpointOracleConfig{
		Address: core.HexToAddress("0xEF79475013f154E6A65b54cB2742867791bf0B84"),
		Signers: []core.Address{
			core.HexToAddress("0x32162F3581E88a5f62e8A61892B42C46E2c18f7b"), // Peter
			core.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
			core.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
			core.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
			core.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume
		},
		Threshold: 2,
	}

	// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
	RinkebyChainConfig = &ChainConfig{
		ChainID:             big.NewInt(4),
		HomesteadBlock:      big.NewInt(1),
		DAOForkBlock:        nil,
		DAOForkSupport:      true,
		EIP150Block:         big.NewInt(2),
		EIP150Hash:          core.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"),
		EIP155Block:         big.NewInt(3),
		EIP158Block:         big.NewInt(3),
		ByzantiumBlock:      big.NewInt(1_035_301),
		ConstantinopleBlock: big.NewInt(3_660_663),
		PetersburgBlock:     big.NewInt(4_321_234),
		IstanbulBlock:       big.NewInt(5_435_345),
		MuirGlacierBlock:    nil,
		BerlinBlock:         big.NewInt(8_290_928),
		LondonBlock:         big.NewInt(8_897_988),
		Clique: &CliqueConfig{
			Period: 15,
			Epoch:  30000,
		},
	}

	// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
	RinkebyTrustedCheckpoint = &TrustedCheckpoint{
		SectionIndex: 270,
		SectionHead:  core.HexToHash("0x03ef8982c93bbf18c859bc1b20ae05b439f04cf1ff592656e941d2c3fcff5d68"),
		CHTRoot:      core.HexToHash("0x9eb80685e8ece479e105b170439779bc0f89997ab7f4dee425f85c4234e8a6b5"),
		BloomRoot:    core.HexToHash("0xc3673721c5697efe5fe4cb825d178f4a335dbfeda6a197fb75c9256a767379dc"),
	}

	// RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle.
	RinkebyCheckpointOracle = &CheckpointOracleConfig{
		Address: core.HexToAddress("0xebe8eFA441B9302A0d7eaECc277c09d20D684540"),
		Signers: []core.Address{
			core.HexToAddress("0xd9c9cd5f6779558b6e0ed4e6acf6b1947e7fa1f3"), // Peter
			core.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
			core.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
			core.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
		},
		Threshold: 2,
	}

	// GoerliChainConfig contains the chain parameters to run a node on the Görli test network.
	GoerliChainConfig = &ChainConfig{
		ChainID:             big.NewInt(5),
		HomesteadBlock:      big.NewInt(0),
		DAOForkBlock:        nil,
		DAOForkSupport:      true,
		EIP150Block:         big.NewInt(0),
		EIP155Block:         big.NewInt(0),
		EIP158Block:         big.NewInt(0),
		ByzantiumBlock:      big.NewInt(0),
		ConstantinopleBlock: big.NewInt(0),
		PetersburgBlock:     big.NewInt(0),
		IstanbulBlock:       big.NewInt(1_561_651),
		MuirGlacierBlock:    nil,
		BerlinBlock:         big.NewInt(4_460_644),
		LondonBlock:         big.NewInt(5_062_605),
		Clique: &CliqueConfig{
			Period: 15,
			Epoch:  30000,
		},
	}

	// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
	GoerliTrustedCheckpoint = &TrustedCheckpoint{
		SectionIndex: 154,
		SectionHead:  core.HexToHash("0xf4cb74cc0e3683589f4992902184241fb892d7c3859d0044c16ec864605ff80d"),
		CHTRoot:      core.HexToHash("0xead95f9f2504b2c7c6d82c51d30e50b40631c3ea2f590cddcc9721cfc0ae79de"),
		BloomRoot:    core.HexToHash("0xc6dd6cfe88ac9c4a6d19c9a8651944fa9d941a2340a8f5ddaf673d4d39779d81"),
	}

	// GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle.
	GoerliCheckpointOracle = &CheckpointOracleConfig{
		Address: core.HexToAddress("0x18CA0E045F0D772a851BC7e48357Bcaab0a0795D"),
		Signers: []core.Address{
			core.HexToAddress("0x4769bcaD07e3b938B7f43EB7D278Bc7Cb9efFb38"), // Peter
			core.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin
			core.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt
			core.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary
			core.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume
		},
		Threshold: 2,
	}

	CalaverasChainConfig = &ChainConfig{
		ChainID:             big.NewInt(123),
		HomesteadBlock:      big.NewInt(0),
		DAOForkBlock:        nil,
		DAOForkSupport:      true,
		EIP150Block:         big.NewInt(0),
		EIP155Block:         big.NewInt(0),
		EIP158Block:         big.NewInt(0),
		ByzantiumBlock:      big.NewInt(0),
		ConstantinopleBlock: big.NewInt(0),
		PetersburgBlock:     big.NewInt(0),
		IstanbulBlock:       big.NewInt(0),
		MuirGlacierBlock:    nil,
		BerlinBlock:         big.NewInt(0),
		LondonBlock:         big.NewInt(500),
		Clique: &CliqueConfig{
			Period: 30,
			Epoch:  30000,
		},
	}

	// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
	// and accepted by the Ethereum core developers into the Ethash consensus.
	//
	// This configuration is intentionally not using keyed fields to force anyone
	// adding flags to the config to also have to set these fields.
	AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), core.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}

	// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
	// and accepted by the Ethereum core developers into the Clique consensus.
	//
	// This configuration is intentionally not using keyed fields to force anyone
	// adding flags to the config to also have to set these fields.
	AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), core.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}

	TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), core.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
	TestRules       = TestChainConfig.Rules(new(big.Int))
)

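// The fork predicates derived from the configs above can be checked directly;
// a small usage sketch (illustrative only):
//
//	london := MainnetChainConfig.IsLondon(big.NewInt(12_965_000)) // true: at the fork block
//	berlin := MainnetChainConfig.IsBerlin(big.NewInt(12_000_000)) // false: before Berlin
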
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
// BloomTrie) associated with the appropriate section index and head hash. It is
@ -336,7 +111,24 @@ type ChainConfig struct {
	BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin)
	LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london)

	CatalystBlock      *big.Int `json:"catalystBlock,omitempty"`      // Catalyst switch block (nil = no fork, 0 = already on catalyst)
	ArrowGlacierBlock  *big.Int `json:"arrowGlacierBlock,omitempty"`  // EIP-4345 (bomb delay) switch block (nil = no fork, 0 = already activated)
	GrayGlacierBlock   *big.Int `json:"grayGlacierBlock,omitempty"`   // EIP-5133 (bomb delay) switch block (nil = no fork, 0 = already activated)
	MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter

	// Fork scheduling was switched from blocks to timestamps here

	ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
	CancunTime   *uint64 `json:"cancunTime,omitempty"`   // Cancun switch time (nil = no fork, 0 = already on cancun)
	PragueTime   *uint64 `json:"pragueTime,omitempty"`   // Prague switch time (nil = no fork, 0 = already on prague)

	// TerminalTotalDifficulty is the amount of total difficulty reached by
	// the network that triggers the consensus upgrade.
	TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"`

	// TerminalTotalDifficultyPassed is a flag specifying that the network already
	// passed the terminal total difficulty. Its purpose is to disable legacy sync
	// even without having seen the TTD locally (safer long term).
	TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"`

	// Various consensus engines
	Ethash *EthashConfig `json:"ethash,omitempty"`
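// A minimal sketch of how a timestamp-scheduled fork check would look,
// mirroring the block-number helper isForked used elsewhere in this file
// (illustrative only; the real helper name may differ):
//
//	func isTimestampForked(s *uint64, time uint64) bool {
//		if s == nil {
//			return false // nil = no fork scheduled
//		}
//		return *s <= time // 0 = already active
//	}
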
@ -454,11 +246,6 @@ func (c *ChainConfig) IsLondon(num *big.Int) bool {
	return isForked(c.LondonBlock, num)
}

// IsCatalyst returns whether num is either equal to the Merge fork block or greater.
func (c *ChainConfig) IsCatalyst(num *big.Int) bool {
	return isForked(c.CatalystBlock, num)
}

// CheckCompatible checks whether scheduled fork transitions have been imported
// with a mismatching chain configuration.
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError {
@ -656,6 +443,5 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
		IsIstanbul: c.IsIstanbul(num),
		IsBerlin:   c.IsBerlin(num),
		IsLondon:   c.IsLondon(num),
		IsCatalyst: c.IsCatalyst(num),
	}
}
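// Rules bundles the fork predicates for a given block number so callers can
// evaluate them once and reuse the result; a usage sketch (illustrative only):
//
//	rules := TestChainConfig.Rules(big.NewInt(0))
//	if rules.IsLondon {
//		// apply London-era (EIP-1559 style) handling
//	}
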
423
restricted/rlp/encbuffer.go
Normal file
@ -0,0 +1,423 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rlp

import (
	"encoding/binary"
	"io"
	"math/big"
	// "reflect"
	"sync"

	"github.com/holiman/uint256"
)

type encBuffer struct {
	str     []byte     // string data, contains everything except list headers
	lheads  []listhead // all list headers
	lhsize  int        // sum of sizes of all encoded list headers
	sizebuf [9]byte    // auxiliary buffer for uint encoding
}

// The global encBuffer pool.
var encBufferPool = sync.Pool{
	New: func() interface{} { return new(encBuffer) },
}

func getEncBuffer() *encBuffer {
	buf := encBufferPool.Get().(*encBuffer)
	buf.reset()
	return buf
}

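// Encoding paths borrow a buffer from the pool and return it when done,
// avoiding an allocation per call. A sketch of the internal pattern
// (illustrative only):
//
//	buf := getEncBuffer()
//	defer encBufferPool.Put(buf)
//	buf.writeUint64(42)
//	out := buf.makeBytes() // 0x2a: a single byte below 128 needs no header
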
func (buf *encBuffer) reset() {
	buf.lhsize = 0
	buf.str = buf.str[:0]
	buf.lheads = buf.lheads[:0]
}

// size returns the length of the encoded data.
func (buf *encBuffer) size() int {
	return len(buf.str) + buf.lhsize
}

// makeBytes creates the encoder output.
func (w *encBuffer) makeBytes() []byte {
	out := make([]byte, w.size())
	w.copyTo(out)
	return out
}

func (w *encBuffer) copyTo(dst []byte) {
	strpos := 0
	pos := 0
	for _, head := range w.lheads {
		// write string data before header
		n := copy(dst[pos:], w.str[strpos:head.offset])
		pos += n
		strpos += n
		// write the header
		enc := head.encode(dst[pos:])
		pos += len(enc)
	}
	// copy string data after the last list header
	copy(dst[pos:], w.str[strpos:])
}

// writeTo writes the encoder output to w.
func (buf *encBuffer) writeTo(w io.Writer) (err error) {
	strpos := 0
	for _, head := range buf.lheads {
		// write string data before header
		if head.offset-strpos > 0 {
			n, err := w.Write(buf.str[strpos:head.offset])
			strpos += n
			if err != nil {
				return err
			}
		}
		// write the header
		enc := head.encode(buf.sizebuf[:])
		if _, err = w.Write(enc); err != nil {
			return err
		}
	}
	if strpos < len(buf.str) {
		// write string data after the last list header
		_, err = w.Write(buf.str[strpos:])
	}
	return err
}

// Write implements io.Writer and appends b directly to the output.
func (buf *encBuffer) Write(b []byte) (int, error) {
	buf.str = append(buf.str, b...)
	return len(b), nil
}

// writeBool writes b as the integer 0 (false) or 1 (true).
func (buf *encBuffer) writeBool(b bool) {
	if b {
		buf.str = append(buf.str, 0x01)
	} else {
		buf.str = append(buf.str, 0x80)
	}
}

func (buf *encBuffer) writeUint64(i uint64) {
	if i == 0 {
		buf.str = append(buf.str, 0x80)
	} else if i < 128 {
		// fits single byte
		buf.str = append(buf.str, byte(i))
	} else {
		s := putint(buf.sizebuf[1:], i)
		buf.sizebuf[0] = 0x80 + byte(s)
		buf.str = append(buf.str, buf.sizebuf[:s+1]...)
	}
}

func (buf *encBuffer) writeBytes(b []byte) {
	if len(b) == 1 && b[0] <= 0x7F {
		// fits single byte, no string header
		buf.str = append(buf.str, b[0])
	} else {
		buf.encodeStringHeader(len(b))
		buf.str = append(buf.str, b...)
	}
}

func (buf *encBuffer) writeString(s string) {
	buf.writeBytes([]byte(s))
}

// // wordBytes is the number of bytes in a big.Word
// const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8

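// Worked examples of the integer rules implemented above (these byte
// sequences follow from the RLP spec):
//
//	writeUint64(0)    -> 0x80           (zero encodes as the empty string)
//	writeUint64(127)  -> 0x7f           (single byte below 128, no header)
//	writeUint64(128)  -> 0x81 0x80      (1-byte string header, then the value)
//	writeUint64(1024) -> 0x82 0x04 0x00 (2-byte big-endian value)
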
// writeBigInt writes i as an integer.
func (w *encBuffer) writeBigInt(i *big.Int) {
	bitlen := i.BitLen()
	if bitlen <= 64 {
		w.writeUint64(i.Uint64())
		return
	}
	// Integer is larger than 64 bits, encode from i.Bits().
	// The minimal byte length is bitlen rounded up to the next
	// multiple of 8, divided by 8.
	length := ((bitlen + 7) & -8) >> 3
	w.encodeStringHeader(length)
	w.str = append(w.str, make([]byte, length)...)
	index := length
	buf := w.str[len(w.str)-length:]
	for _, d := range i.Bits() {
		for j := 0; j < wordBytes && index > 0; j++ {
			index--
			buf[index] = byte(d)
			d >>= 8
		}
	}
}

// writeUint256 writes z as an integer.
func (w *encBuffer) writeUint256(z *uint256.Int) {
	bitlen := z.BitLen()
	if bitlen <= 64 {
		w.writeUint64(z.Uint64())
		return
	}
	nBytes := byte((bitlen + 7) / 8)
	var b [33]byte
	binary.BigEndian.PutUint64(b[1:9], z[3])
	binary.BigEndian.PutUint64(b[9:17], z[2])
	binary.BigEndian.PutUint64(b[17:25], z[1])
	binary.BigEndian.PutUint64(b[25:33], z[0])
	b[32-nBytes] = 0x80 + nBytes
	w.str = append(w.str, b[32-nBytes:]...)
}

// list adds a new list header to the header stack. It returns the index of the header.
// Call listEnd with this index after encoding the content of the list.
func (buf *encBuffer) list() int {
	buf.lheads = append(buf.lheads, listhead{offset: len(buf.str), size: buf.lhsize})
	return len(buf.lheads) - 1
}

func (buf *encBuffer) listEnd(index int) {
	lh := &buf.lheads[index]
	lh.size = buf.size() - lh.offset - lh.size
	if lh.size < 56 {
		buf.lhsize++ // length encoded into kind tag
	} else {
		buf.lhsize += 1 + intsize(uint64(lh.size))
	}
}

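// List headers are deferred: the payload is written first, and listEnd fixes
// up the header size once the payload length is known. A worked sketch
// (illustrative only):
//
//	buf := getEncBuffer()
//	idx := buf.list()   // open a list
//	buf.writeUint64(1)  // payload: 0x01
//	buf.writeUint64(2)  // payload: 0x02
//	buf.listEnd(idx)    // payload size 2 -> kind tag 0xc2
//	_ = buf.makeBytes() // 0xc2 0x01 0x02
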
// func (buf *encBuffer) encode(val interface{}) error {
// 	rval := reflect.ValueOf(val)
// 	writer, err := cachedWriter(rval.Type())
// 	if err != nil {
// 		return err
// 	}
// 	return writer(rval, buf)
// }

func (buf *encBuffer) encodeStringHeader(size int) {
	if size < 56 {
		buf.str = append(buf.str, 0x80+byte(size))
	} else {
		sizesize := putint(buf.sizebuf[1:], uint64(size))
		buf.sizebuf[0] = 0xB7 + byte(sizesize)
		buf.str = append(buf.str, buf.sizebuf[:sizesize+1]...)
	}
}

// // encReader is the io.Reader returned by EncodeToReader.
// // It releases its encbuf at EOF.
// type encReader struct {
// 	buf    *encBuffer // the buffer we're reading from. this is nil when we're at EOF.
// 	lhpos  int        // index of list header that we're reading
// 	strpos int        // current position in string buffer
// 	piece  []byte     // next piece to be read
// }

// func (r *encReader) Read(b []byte) (n int, err error) {
// 	for {
// 		if r.piece = r.next(); r.piece == nil {
// 			// Put the encode buffer back into the pool at EOF when it
// 			// is first encountered. Subsequent calls still return EOF
// 			// as the error but the buffer is no longer valid.
// 			if r.buf != nil {
// 				encBufferPool.Put(r.buf)
// 				r.buf = nil
// 			}
// 			return n, io.EOF
// 		}
// 		nn := copy(b[n:], r.piece)
// 		n += nn
// 		if nn < len(r.piece) {
// 			// piece didn't fit, see you next time.
// 			r.piece = r.piece[nn:]
// 			return n, nil
// 		}
// 		r.piece = nil
// 	}
// }

// // next returns the next piece of data to be read.
// // it returns nil at EOF.
// func (r *encReader) next() []byte {
// 	switch {
// 	case r.buf == nil:
// 		return nil

// 	case r.piece != nil:
// 		// There is still data available for reading.
// 		return r.piece

// 	case r.lhpos < len(r.buf.lheads):
// 		// We're before the last list header.
// 		head := r.buf.lheads[r.lhpos]
// 		sizebefore := head.offset - r.strpos
// 		if sizebefore > 0 {
// 			// String data before header.
// 			p := r.buf.str[r.strpos:head.offset]
// 			r.strpos += sizebefore
// 			return p
// 		}
// 		r.lhpos++
// 		return head.encode(r.buf.sizebuf[:])

// 	case r.strpos < len(r.buf.str):
// 		// String data at the end, after all list headers.
// 		p := r.buf.str[r.strpos:]
// 		r.strpos = len(r.buf.str)
// 		return p

// 	default:
// 		return nil
// 	}
// }

func encBufferFromWriter(w io.Writer) *encBuffer {
	switch w := w.(type) {
	case EncoderBuffer:
		return w.buf
	case *EncoderBuffer:
		return w.buf
	case *encBuffer:
		return w
	default:
		return nil
	}
}

// EncoderBuffer is a buffer for incremental encoding.
//
// The zero value is NOT ready for use. To get a usable buffer,
// create it using NewEncoderBuffer or call Reset.
type EncoderBuffer struct {
	buf *encBuffer
	dst io.Writer

	ownBuffer bool
}

// NewEncoderBuffer creates an encoder buffer.
func NewEncoderBuffer(dst io.Writer) EncoderBuffer {
	var w EncoderBuffer
	w.Reset(dst)
	return w
}

// Reset truncates the buffer and sets the output destination.
func (w *EncoderBuffer) Reset(dst io.Writer) {
	if w.buf != nil && !w.ownBuffer {
		panic("can't Reset derived EncoderBuffer")
	}

	// If the destination writer has an *encBuffer, use it.
	// Note that w.ownBuffer is left false here.
	if dst != nil {
		if outer := encBufferFromWriter(dst); outer != nil {
			*w = EncoderBuffer{outer, nil, false}
			return
		}
	}

	// Get a fresh buffer.
	if w.buf == nil {
		w.buf = encBufferPool.Get().(*encBuffer)
		w.ownBuffer = true
	}
	w.buf.reset()
	w.dst = dst
}

// Flush writes encoded RLP data to the output writer. This can only be called once.
// If you want to re-use the buffer after Flush, you must call Reset.
func (w *EncoderBuffer) Flush() error {
	var err error
	if w.dst != nil {
		err = w.buf.writeTo(w.dst)
	}
	// Release the internal buffer.
	if w.ownBuffer {
		encBufferPool.Put(w.buf)
	}
	*w = EncoderBuffer{}
	return err
}

// ToBytes returns the encoded bytes.
func (w *EncoderBuffer) ToBytes() []byte {
	return w.buf.makeBytes()
}

// AppendToBytes appends the encoded bytes to dst.
func (w *EncoderBuffer) AppendToBytes(dst []byte) []byte {
	size := w.buf.size()
	out := append(dst, make([]byte, size)...)
	w.buf.copyTo(out[len(dst):])
	return out
}

// Write appends b directly to the encoder output.
func (w EncoderBuffer) Write(b []byte) (int, error) {
	return w.buf.Write(b)
}

// WriteBool writes b as the integer 0 (false) or 1 (true).
func (w EncoderBuffer) WriteBool(b bool) {
	w.buf.writeBool(b)
}

// WriteUint64 encodes an unsigned integer.
func (w EncoderBuffer) WriteUint64(i uint64) {
	w.buf.writeUint64(i)
}

// WriteBigInt encodes a big.Int as an RLP string.
// Note: Unlike with Encode, the sign of i is ignored.
func (w EncoderBuffer) WriteBigInt(i *big.Int) {
	w.buf.writeBigInt(i)
}

// WriteUint256 encodes uint256.Int as an RLP string.
func (w EncoderBuffer) WriteUint256(i *uint256.Int) {
	w.buf.writeUint256(i)
}

// WriteBytes encodes b as an RLP string.
func (w EncoderBuffer) WriteBytes(b []byte) {
	w.buf.writeBytes(b)
}

// WriteString encodes s as an RLP string.
func (w EncoderBuffer) WriteString(s string) {
	w.buf.writeString(s)
}

// List starts a list. It returns an internal index. Call ListEnd with
// this index after encoding the content to finish the list.
func (w EncoderBuffer) List() int {
	return w.buf.list()
}

// ListEnd finishes the given list.
func (w EncoderBuffer) ListEnd(index int) {
	w.buf.listEnd(index)
}
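// End-to-end usage of EncoderBuffer, built only from the methods above
// (illustrative only, not part of this file):
//
//	var out bytes.Buffer
//	w := NewEncoderBuffer(&out)
//	idx := w.List()
//	w.WriteUint64(7)
//	w.WriteString("abc")
//	w.ListEnd(idx)
//	if err := w.Flush(); err != nil {
//		// handle the write error
//	}
//	// out now holds the RLP list 0xc5 0x07 0x83 'a' 'b' 'c'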