diff --git a/.travis.yml b/.travis.yml index 925e23b1a..4f5d482c6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,7 @@ jobs: - stage: lint os: linux dist: bionic - go: 1.20.x + go: 1.21.x env: - lint git: @@ -28,7 +28,7 @@ jobs: os: linux arch: amd64 dist: bionic - go: 1.20.x + go: 1.21.x env: - docker services: @@ -45,7 +45,7 @@ jobs: os: linux arch: arm64 dist: bionic - go: 1.20.x + go: 1.21.x env: - docker services: @@ -63,10 +63,9 @@ jobs: os: linux dist: bionic sudo: required - go: 1.20.x + go: 1.21.x env: - azure-linux - - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests addons: @@ -97,10 +96,9 @@ jobs: - stage: build if: type = push os: osx - go: 1.20.x + go: 1.21.x env: - azure-osx - - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: @@ -112,41 +110,34 @@ jobs: os: linux arch: amd64 dist: bionic - go: 1.20.x - env: - - GO111MODULE=on + go: 1.21.x script: - - go run build/ci.go test $TEST_PACKAGES + - travis_wait 30 go run build/ci.go test $TEST_PACKAGES - stage: build if: type = pull_request os: linux arch: arm64 dist: bionic - go: 1.19.x - env: - - GO111MODULE=on + go: 1.20.x script: - - go run build/ci.go test $TEST_PACKAGES + - travis_wait 30 go run build/ci.go test $TEST_PACKAGES - stage: build os: linux dist: bionic - go: 1.19.x - env: - - GO111MODULE=on + go: 1.20.x script: - - go run build/ci.go test $TEST_PACKAGES + - travis_wait 30 go run build/ci.go test $TEST_PACKAGES # This builder does the Ubuntu PPA nightly uploads - stage: build if: type = cron || (type = push && tag ~= /^v[0-9]/) os: linux dist: bionic - go: 1.20.x + go: 1.21.x env: - ubuntu-ppa - - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests addons: @@ -167,10 +158,9 @@ jobs: if: type = cron os: linux dist: bionic - go: 1.20.x + go: 1.21.x env: - azure-purge - - GO111MODULE=on git: submodules: false # avoid cloning ethereum/tests script: @@ -181,9 +171,7 @@ jobs: if: type = cron os: linux dist: bionic - go: 1.20.x - env: - - GO111MODULE=on + go: 1.21.x script: - - go run build/ci.go test -race $TEST_PACKAGES + - travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES diff --git a/Dockerfile b/Dockerfile index db8be0dd9..a8ec376dc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ ARG VERSION="" ARG BUILDNUM="" # Build Geth in a stock Go builder container -FROM golang:1.20-alpine as builder +FROM golang:1.21-alpine as builder RUN apk add --no-cache gcc musl-dev binutils-gold linux-headers git diff --git a/Makefile b/Makefile index e6286da7f..d736ef61c 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ GOBIN = ./build/bin GO ?= latest -GORUN = env GO111MODULE=on go run +GORUN = go run geth: $(GORUN) build/ci.go install ./cmd/geth @@ -23,7 +23,7 @@ lint: ## Run linters. $(GORUN) build/ci.go lint clean: - env GO111MODULE=on go clean -cache + go clean -cache rm -fr build/_workspace/pkg/ $(GOBIN)/* # The devtools target installs tools required for 'go generate'. diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index 55576070a..6e1075c71 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -246,24 +247,65 @@ func (abi *ABI) HasReceive() bool { // revertSelector is a special function selector for revert reason unpacking. var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4] +// panicSelector is a special function selector for panic reason unpacking. 
+var panicSelector = crypto.Keccak256([]byte("Panic(uint256)"))[:4] + +// panicReasons maps panic codes to human-readable reasons. +// See this link for the details: +// https://docs.soliditylang.org/en/v0.8.21/control-structures.html#panic-via-assert-and-error-via-require +// The reason string list is copied from ethers.js: +// https://github.com/ethers-io/ethers.js/blob/fa3a883ff7c88611ce766f58bdd4b8ac90814470/src.ts/abi/interface.ts#L207-L218 +var panicReasons = map[uint64]string{ + 0x00: "generic panic", + 0x01: "assert(false)", + 0x11: "arithmetic underflow or overflow", + 0x12: "division or modulo by zero", + 0x21: "enum overflow", + 0x22: "invalid encoded storage byte array accessed", + 0x31: "out-of-bounds array access; popping on an empty array", + 0x32: "out-of-bounds access of an array or bytesN", + 0x41: "out of memory", + 0x51: "uninitialized function", +} + // UnpackRevert resolves the abi-encoded revert reason. According to the solidity // spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert, -// the provided revert reason is abi-encoded as if it were a call to a function -// `Error(string)`. So it's a special tool for it. +// the provided revert reason is abi-encoded as if it were a call to a function +// `Error(string)` or `Panic(uint256)`. So it's a special tool for it. func UnpackRevert(data []byte) (string, error) { if len(data) < 4 { return "", errors.New("invalid data for unpacking") } - if !bytes.Equal(data[:4], revertSelector) { + switch { + case bytes.Equal(data[:4], revertSelector): + typ, err := NewType("string", "", nil) + if err != nil { + return "", err + } + unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) + if err != nil { + return "", err + } + return unpacked[0].(string), nil + case bytes.Equal(data[:4], panicSelector): + typ, err := NewType("uint256", "", nil) + if err != nil { + return "", err + } + unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) + if err != nil { + return "", err + } + pCode := unpacked[0].(*big.Int) + // uint64 safety check for the future, + // although the current panic codes all fit in a uint64 + if pCode.IsUint64() { + if reason, ok := panicReasons[pCode.Uint64()]; ok { + return reason, nil + } + } + return fmt.Sprintf("unknown panic code: %#x", pCode), nil + default: return "", errors.New("invalid data for unpacking") } - typ, err := NewType("string", "", nil) - if err != nil { - return "", err - } - unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) - if err != nil { - return "", err - } - return unpacked[0].(string), nil } diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index 3486ffd1a..84175df4b 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -1173,6 +1173,8 @@ func TestUnpackRevert(t *testing.T) { {"", "", errors.New("invalid data for unpacking")}, {"08c379a1", "", errors.New("invalid data for unpacking")}, {"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil}, + {"4e487b710000000000000000000000000000000000000000000000000000000000000000", "generic panic", nil}, + {"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil}, } for index, c := range cases { t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) { diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index c16990f39..34530a645 100644 ---
a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -29,7 +29,7 @@ import ( var ( // ErrNoCode is returned by call and transact operations for which the requested // recipient contract to operate on does not exist in the state db or does not - // have any code associated with it (i.e. suicided). + // have any code associated with it (i.e. self-destructed). ErrNoCode = errors.New("no contract code at given address") // ErrNoPendingState is raised when attempting to perform a pending state action diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 0af520b5d..0c4342c49 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -681,7 +681,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa // Get the last block block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash()) if err != nil { - return fmt.Errorf("could not fetch parent") + return errors.New("could not fetch parent") } // Check transaction validity signer := types.MakeSigner(b.blockchain.Config(), block.Number(), block.Time()) @@ -815,7 +815,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { // Get the last block block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash()) if block == nil { - return fmt.Errorf("could not find parent") + return errors.New("could not find parent") } blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { @@ -892,7 +892,7 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ } func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { - logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config()) + logs := rawdb.ReadLogs(fb.db, hash, number) return logs, nil } diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 698bfc576..11900a6cf 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -161,6 +161,7 @@ func TestAdjustTime(t *testing.T) { func TestNewAdjustTimeFail(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) + defer sim.blockchain.Stop() // Create tx and send head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index c22eb4ae8..95dc13cc1 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -325,7 +325,7 @@ var ( if err != nil { return *outstruct, err } - {{range $i, $t := .Normalized.Outputs}} + {{range $i, $t := .Normalized.Outputs}} outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} return *outstruct, err @@ -335,7 +335,7 @@ var ( } {{range $i, $t := .Normalized.Outputs}} out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} - + return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err {{end}} } @@ -378,7 +378,7 @@ var ( } {{end}} - {{if .Fallback}} + {{if .Fallback}} // Fallback is a paid mutator transaction binding the contract fallback function. 
// // Solidity: {{.Fallback.Original.String}} @@ -392,16 +392,16 @@ var ( func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) } - + // Fallback is a paid mutator transaction binding the contract fallback function. - // + // // Solidity: {{.Fallback.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) } {{end}} - {{if .Receive}} + {{if .Receive}} // Receive is a paid mutator transaction binding the contract receive function. // // Solidity: {{.Receive.Original.String}} @@ -415,9 +415,9 @@ var ( func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) } - + // Receive is a paid mutator transaction binding the contract receive function. - // + // // Solidity: {{.Receive.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) diff --git a/accounts/abi/error.go b/accounts/abi/error.go index f65f0d2cc..218a22f1e 100644 --- a/accounts/abi/error.go +++ b/accounts/abi/error.go @@ -32,7 +32,7 @@ type Error struct { str string // Sig contains the string signature according to the ABI spec. - // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)" + // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)" // Please note that "int" is substitute for its canonical representation "int256" Sig string diff --git a/accounts/abi/method.go b/accounts/abi/method.go index f69e3ee9b..b6e1eef3c 100644 --- a/accounts/abi/method.go +++ b/accounts/abi/method.go @@ -127,11 +127,12 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str state = state + " " } identity := fmt.Sprintf("function %v", rawName) - if funType == Fallback { + switch funType { + case Fallback: identity = "fallback" - } else if funType == Receive { + case Receive: identity = "receive" - } else if funType == Constructor { + case Constructor: identity = "constructor" } str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", ")) diff --git a/accounts/abi/method_test.go b/accounts/abi/method_test.go index 395a52896..9230e307a 100644 --- a/accounts/abi/method_test.go +++ b/accounts/abi/method_test.go @@ -84,11 +84,12 @@ func TestMethodString(t *testing.T) { for _, test := range table { var got string - if test.method == "fallback" { + switch test.method { + case "fallback": got = abi.Fallback.String() - } else if test.method == "receive" { + case "receive": got = abi.Receive.String() - } else { + default: got = abi.Methods[test.method].String() } if got != test.expectation { diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 1f84b111a..48d2ef41e 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -228,7 +228,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri structFieldName := ToCamelCase(argName) if structFieldName == "" { - return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct") + return nil, errors.New("abi: purely underscored output cannot unpack to struct") } // this abi has already 
been paired, skip it... unless there exists another, yet unassigned diff --git a/accounts/abi/selector_parser.go b/accounts/abi/selector_parser.go index d5472e374..b8ddd7d65 100644 --- a/accounts/abi/selector_parser.go +++ b/accounts/abi/selector_parser.go @@ -17,6 +17,7 @@ package abi import ( + "errors" "fmt" ) @@ -40,7 +41,7 @@ func isIdentifierSymbol(c byte) bool { func parseToken(unescapedSelector string, isIdent bool) (string, string, error) { if len(unescapedSelector) == 0 { - return "", "", fmt.Errorf("empty token") + return "", "", errors.New("empty token") } firstChar := unescapedSelector[0] position := 1 @@ -110,7 +111,7 @@ func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) func parseType(unescapedSelector string) (interface{}, string, error) { if len(unescapedSelector) == 0 { - return nil, "", fmt.Errorf("empty type") + return nil, "", errors.New("empty type") } if unescapedSelector[0] == '(' { return parseCompositeType(unescapedSelector) diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 7f74907a8..2eee11787 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -70,7 +70,7 @@ var ( func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) { // check that array brackets are equal if they exist if strings.Count(t, "[") != strings.Count(t, "]") { - return Type{}, fmt.Errorf("invalid arg type in abi") + return Type{}, errors.New("invalid arg type in abi") } typ.stringKind = t @@ -109,7 +109,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } typ.stringKind = embeddedType.stringKind + sliced } else { - return Type{}, fmt.Errorf("invalid formatting of array type") + return Type{}, errors.New("invalid formatting of array type") } return typ, err } @@ -348,7 +348,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) { } } -// requireLengthPrefix returns whether the type requires any sort of length +// requiresLengthPrefix returns whether the type requires any sort of length // prefixing. func (t Type) requiresLengthPrefix() bool { return t.T == StringTy || t.T == BytesTy || t.T == SliceTy diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index 1e778a6ff..905b5ce62 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -18,6 +18,7 @@ package abi import ( "encoding/binary" + "errors" "fmt" "math" "math/big" @@ -125,7 +126,7 @@ func readBool(word []byte) (bool, error) { // readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { if t.T != FunctionTy { - return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array") + return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array") } if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 { err = fmt.Errorf("abi: got improperly encoded function type, got %v", word) @@ -138,7 +139,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { // ReadFixedBytes uses reflection to create a fixed array to be read from. 
func ReadFixedBytes(t Type, word []byte) (interface{}, error) { if t.T != FixedBytesTy { - return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array") + return nil, errors.New("abi: invalid type in call to make fixed byte array") } // convert array := reflect.New(t.GetType()).Elem() @@ -159,14 +160,15 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) // this value will become our slice or our array, depending on the type var refSlice reflect.Value - if t.T == SliceTy { + switch t.T { + case SliceTy: // declare our slice refSlice = reflect.MakeSlice(t.GetType(), size, size) - } else if t.T == ArrayTy { + case ArrayTy: // declare our array refSlice = reflect.New(t.GetType()).Elem() - } else { - return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage") + default: + return nil, errors.New("abi: invalid type in array/slice unpacking stage") } // Arrays have packed elements, resulting in longer unpack steps. diff --git a/accounts/external/backend.go b/accounts/external/backend.go index d403b7e56..6f1581f9b 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -17,6 +17,7 @@ package external import ( + "errors" "fmt" "math/big" "sync" @@ -98,11 +99,11 @@ func (api *ExternalSigner) Status() (string, error) { } func (api *ExternalSigner) Open(passphrase string) error { - return fmt.Errorf("operation not supported on external signers") + return errors.New("operation not supported on external signers") } func (api *ExternalSigner) Close() error { - return fmt.Errorf("operation not supported on external signers") + return errors.New("operation not supported on external signers") } func (api *ExternalSigner) Accounts() []accounts.Account { @@ -145,7 +146,7 @@ func (api *ExternalSigner) Contains(account accounts.Account) bool { } func (api *ExternalSigner) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) { - return accounts.Account{}, fmt.Errorf("operation not supported on external signers") + return accounts.Account{}, errors.New("operation not supported on external signers") } func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain ethereum.ChainStateReader) { @@ -242,14 +243,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio } func (api *ExternalSigner) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { - return []byte{}, fmt.Errorf("password-operations not supported on external signers") + return []byte{}, errors.New("password-operations not supported on external signers") } func (api *ExternalSigner) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - return nil, fmt.Errorf("password-operations not supported on external signers") + return nil, errors.New("password-operations not supported on external signers") } func (api *ExternalSigner) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { - return nil, fmt.Errorf("password-operations not supported on external signers") + return nil, errors.New("password-operations not supported on external signers") } func (api *ExternalSigner) listAccounts() ([]common.Address, error) { diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 12f92d261..4ed143951 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -31,6 +31,7 @@ 
import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "golang.org/x/exp/slices" ) // Minimum amount of time between cache reloads. This limit applies if the platform does @@ -38,11 +39,10 @@ import ( // exist yet, the code will attempt to create a watcher at most this often. const minReloadInterval = 2 * time.Second -type accountsByURL []accounts.Account - -func (s accountsByURL) Len() int { return len(s) } -func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 } -func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// byURL defines the sorting order for accounts. +func byURL(a, b accounts.Account) int { + return a.URL.Cmp(b.URL) +} // AmbiguousAddrError is returned when attempting to unlock // an address for which more than one file exists. @@ -67,7 +67,7 @@ type accountCache struct { keydir string watcher *watcher mu sync.Mutex - all accountsByURL + all []accounts.Account byAddr map[common.Address][]accounts.Account throttle *time.Timer notify chan struct{} @@ -194,7 +194,7 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) { default: err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))} copy(err.Matches, matches) - sort.Sort(accountsByURL(err.Matches)) + slices.SortFunc(err.Matches, byURL) return accounts.Account{}, err } } diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index aa71c1201..3847e9daf 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -17,12 +17,12 @@ package keystore import ( + "errors" "fmt" "math/rand" "os" "path/filepath" "reflect" - "sort" "testing" "time" @@ -30,6 +30,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" + "golang.org/x/exp/slices" ) var ( @@ -74,7 +75,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { select { case <-ks.changes: default: - return fmt.Errorf("wasn't notified of new accounts") + return errors.New("wasn't notified of new accounts") } return nil } @@ -202,7 +203,7 @@ func TestCacheAddDeleteOrder(t *testing.T) { // Check that the account list is sorted by filename. 
wantAccounts := make([]accounts.Account, len(accs)) copy(wantAccounts, accs) - sort.Sort(accountsByURL(wantAccounts)) + slices.SortFunc(wantAccounts, byURL) list := cache.accounts() if !reflect.DeepEqual(list, wantAccounts) { t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts)) diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go index f90d809b5..deb7cae9f 100644 --- a/accounts/keystore/keystore_test.go +++ b/accounts/keystore/keystore_test.go @@ -20,7 +20,6 @@ import ( "math/rand" "os" "runtime" - "sort" "strings" "sync" "sync/atomic" @@ -31,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" + "golang.org/x/exp/slices" ) var testSigData = make([]byte, 32) @@ -397,19 +397,19 @@ func TestImportRace(t *testing.T) { t.Fatalf("failed to export account: %v", acc) } _, ks2 := tmpKeyStore(t, true) - var atom uint32 + var atom atomic.Uint32 var wg sync.WaitGroup wg.Add(2) for i := 0; i < 2; i++ { go func() { defer wg.Done() if _, err := ks2.Import(json, "new", "new"); err != nil { - atomic.AddUint32(&atom, 1) + atom.Add(1) } }() } wg.Wait() - if atom != 1 { + if atom.Load() != 1 { t.Errorf("Import is racy") } } @@ -424,7 +424,7 @@ func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, walle for _, account := range live { liveList = append(liveList, account) } - sort.Sort(accountsByURL(liveList)) + slices.SortFunc(liveList, byURL) for j, wallet := range wallets { if accs := wallet.Accounts(); len(accs) != 1 { t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs)) diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 1701fbf53..8d6ed2b14 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -225,10 +225,13 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { if err != nil { return nil, err } - key := crypto.ToECDSAUnsafe(keyBytes) + key, err := crypto.ToECDSA(keyBytes) + if err != nil { + return nil, fmt.Errorf("invalid key: %w", err) + } id, err := uuid.FromBytes(keyId) if err != nil { - return nil, err + return nil, fmt.Errorf("invalid UUID: %w", err) } return &Key{ Id: id, diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index b1b533eb7..bbd8b2264 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -24,6 +24,7 @@ import ( "crypto/rand" "crypto/sha256" "crypto/sha512" + "errors" "fmt" "github.com/ethereum/go-ethereum/crypto" @@ -125,7 +126,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error { // Unpair disestablishes an existing pairing. func (s *SecureChannelSession) Unpair() error { if s.PairingKey == nil { - return fmt.Errorf("cannot unpair: not paired") + return errors.New("cannot unpair: not paired") } _, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{}) @@ -141,7 +142,7 @@ func (s *SecureChannelSession) Unpair() error { // Open initializes the secure channel. func (s *SecureChannelSession) Open() error { if s.iv != nil { - return fmt.Errorf("session already opened") + return errors.New("session already opened") } response, err := s.open() @@ -215,7 +216,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error // transmitEncrypted sends an encrypted message, and decrypts and returns the response. 
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) { if s.iv == nil { - return nil, fmt.Errorf("channel not open") + return nil, errors.New("channel not open") } data, err := s.encryptAPDU(data) @@ -254,7 +255,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b return nil, err } if !bytes.Equal(s.iv, rmac) { - return nil, fmt.Errorf("invalid MAC in response") + return nil, errors.New("invalid MAC in response") } rapdu := &responseAPDU{} @@ -319,7 +320,7 @@ func unpad(data []byte, terminator byte) ([]byte, error) { return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i]) } } - return nil, fmt.Errorf("expected end of padding, got 0") + return nil, errors.New("expected end of padding, got 0") } // updateIV is an internal method that updates the initialization vector after diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index 6a40e28ae..067bda83f 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -252,7 +252,7 @@ func (w *Wallet) release() error { // with the wallet. func (w *Wallet) pair(puk []byte) error { if w.session.paired() { - return fmt.Errorf("wallet already paired") + return errors.New("wallet already paired") } pairing, err := w.session.pair(puk) if err != nil { @@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) { // unpair deletes an existing pairing. func (s *Session) unpair() error { if !s.verified { - return fmt.Errorf("unpair requires that the PIN be verified") + return errors.New("unpair requires that the PIN be verified") } return s.Channel.Unpair() } @@ -907,7 +907,7 @@ func (s *Session) initialize(seed []byte) error { return err } if status == "Online" { - return fmt.Errorf("card is already initialized, cowardly refusing to proceed") + return errors.New("card is already initialized, cowardly refusing to proceed") } s.Wallet.lock.Lock() diff --git a/accounts/usbwallet/hub.go b/accounts/usbwallet/hub.go index 213996722..e67942dbc 100644 --- a/accounts/usbwallet/hub.go +++ b/accounts/usbwallet/hub.go @@ -63,9 +63,9 @@ type Hub struct { stateLock sync.RWMutex // Protects the internals of the hub from racey access // TODO(karalabe): remove if hotplug lands on Windows - commsPend int // Number of operations blocking enumeration - commsLock sync.Mutex // Lock protecting the pending counter and enumeration - enumFails uint32 // Number of times enumeration has failed + commsPend int // Number of operations blocking enumeration + commsLock sync.Mutex // Lock protecting the pending counter and enumeration + enumFails atomic.Uint32 // Number of times enumeration has failed } // NewLedgerHub creates a new hardware wallet manager for Ledger devices. @@ -151,7 +151,7 @@ func (hub *Hub) refreshWallets() { return } // If USB enumeration is continually failing, don't keep trying indefinitely - if atomic.LoadUint32(&hub.enumFails) > 2 { + if hub.enumFails.Load() > 2 { return } // Retrieve the current list of USB wallet devices @@ -172,7 +172,7 @@ func (hub *Hub) refreshWallets() { } infos, err := usb.Enumerate(hub.vendorID, 0) if err != nil { - failcount := atomic.AddUint32(&hub.enumFails, 1) + failcount := hub.enumFails.Add(1) if runtime.GOOS == "linux" { // See rationale before the enumeration why this is needed and only on Linux. 
hub.commsLock.Unlock() @@ -181,7 +181,7 @@ func (hub *Hub) refreshWallets() { "vendor", hub.vendorID, "failcount", failcount, "err", err) return } - atomic.StoreUint32(&hub.enumFails, 0) + hub.enumFails.Store(0) for _, info := range infos { for _, id := range hub.productIDs { diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index 0e399a6d0..05add081a 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -624,7 +624,7 @@ func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID return signed, nil } -// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary +// SignTextWithPassphrase implements accounts.Wallet, however signing arbitrary // data is not supported for Ledger wallets, so this method will always return // an error. func (w *wallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { diff --git a/beacon/engine/errors.go b/beacon/engine/errors.go index 769001b9e..62773a0ea 100644 --- a/beacon/engine/errors.go +++ b/beacon/engine/errors.go @@ -80,6 +80,7 @@ var ( InvalidPayloadAttributes = &EngineAPIError{code: -38003, msg: "Invalid payload attributes"} TooLargeRequest = &EngineAPIError{code: -38004, msg: "Too large request"} InvalidParams = &EngineAPIError{code: -32602, msg: "Invalid parameters"} + UnsupportedFork = &EngineAPIError{code: -38005, msg: "Unsupported fork"} STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil} STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil} diff --git a/beacon/engine/gen_blockparams.go b/beacon/engine/gen_blockparams.go index 0dd2b5259..b1f01b50f 100644 --- a/beacon/engine/gen_blockparams.go +++ b/beacon/engine/gen_blockparams.go @@ -20,12 +20,14 @@ func (p PayloadAttributes) MarshalJSON() ([]byte, error) { Random common.Hash `json:"prevRandao" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` + BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` } var enc PayloadAttributes enc.Timestamp = hexutil.Uint64(p.Timestamp) enc.Random = p.Random enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient enc.Withdrawals = p.Withdrawals + enc.BeaconRoot = p.BeaconRoot return json.Marshal(&enc) } @@ -36,6 +38,7 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error { Random *common.Hash `json:"prevRandao" gencodec:"required"` SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` + BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` } var dec PayloadAttributes if err := json.Unmarshal(input, &dec); err != nil { @@ -56,5 +59,8 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error { if dec.Withdrawals != nil { p.Withdrawals = dec.Withdrawals } + if dec.BeaconRoot != nil { + p.BeaconRoot = dec.BeaconRoot + } return nil } diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go index 336dfc6cc..6893d64a1 100644 --- a/beacon/engine/gen_ed.go +++ b/beacon/engine/gen_ed.go @@ -32,6 +32,8 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) { BlockHash common.Hash `json:"blockHash" gencodec:"required"` Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 
`json:"excessBlobGas"` } var enc ExecutableData enc.ParentHash = e.ParentHash @@ -54,6 +56,8 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) { } } enc.Withdrawals = e.Withdrawals + enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) + enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) return json.Marshal(&enc) } @@ -75,6 +79,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error { BlockHash *common.Hash `json:"blockHash" gencodec:"required"` Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` } var dec ExecutableData if err := json.Unmarshal(input, &dec); err != nil { @@ -142,5 +148,11 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error { if dec.Withdrawals != nil { e.Withdrawals = dec.Withdrawals } + if dec.BlobGasUsed != nil { + e.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.ExcessBlobGas != nil { + e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } return nil } diff --git a/beacon/engine/gen_epe.go b/beacon/engine/gen_epe.go index cc66cef6c..e69f9a595 100644 --- a/beacon/engine/gen_epe.go +++ b/beacon/engine/gen_epe.go @@ -17,10 +17,14 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) { type ExecutionPayloadEnvelope struct { ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` + BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + Override bool `json:"shouldOverrideBuilder"` } var enc ExecutionPayloadEnvelope enc.ExecutionPayload = e.ExecutionPayload enc.BlockValue = (*hexutil.Big)(e.BlockValue) + enc.BlobsBundle = e.BlobsBundle + enc.Override = e.Override return json.Marshal(&enc) } @@ -29,6 +33,8 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error { type ExecutionPayloadEnvelope struct { ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"` + BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + Override *bool `json:"shouldOverrideBuilder"` } var dec ExecutionPayloadEnvelope if err := json.Unmarshal(input, &dec); err != nil { @@ -42,5 +48,11 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'blockValue' for ExecutionPayloadEnvelope") } e.BlockValue = (*big.Int)(dec.BlockValue) + if dec.BlobsBundle != nil { + e.BlobsBundle = dec.BlobsBundle + } + if dec.Override != nil { + e.Override = *dec.Override + } return nil } diff --git a/beacon/engine/types.go b/beacon/engine/types.go index 07ebe544b..67f30d445 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -35,6 +35,7 @@ type PayloadAttributes struct { Random common.Hash `json:"prevRandao" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` + BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"` } // JSON type overrides for PayloadAttributes. @@ -61,6 +62,8 @@ type ExecutableData struct { BlockHash common.Hash `json:"blockHash" gencodec:"required"` Transactions [][]byte `json:"transactions" gencodec:"required"` Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *uint64 `json:"blobGasUsed"` + ExcessBlobGas *uint64 `json:"excessBlobGas"` } // JSON type overrides for executableData. 
@@ -73,6 +76,8 @@ type executableDataMarshaling struct { ExtraData hexutil.Bytes LogsBloom hexutil.Bytes Transactions []hexutil.Bytes + BlobGasUsed *hexutil.Uint64 + ExcessBlobGas *hexutil.Uint64 } //go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go @@ -80,6 +85,14 @@ type executableDataMarshaling struct { type ExecutionPayloadEnvelope struct { ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"` BlockValue *big.Int `json:"blockValue" gencodec:"required"` + BlobsBundle *BlobsBundleV1 `json:"blobsBundle"` + Override bool `json:"shouldOverrideBuilder"` +} + +type BlobsBundleV1 struct { + Commitments []hexutil.Bytes `json:"commitments"` + Proofs []hexutil.Bytes `json:"proofs"` + Blobs []hexutil.Bytes `json:"blobs"` } // JSON type overrides for ExecutionPayloadEnvelope. @@ -152,14 +165,15 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) { // ExecutableDataToBlock constructs a block from executable data. // It verifies that the following fields: // -// len(extraData) <= 32 -// uncleHash = emptyUncleHash -// difficulty = 0 +// len(extraData) <= 32 +// uncleHash = emptyUncleHash +// difficulty = 0 +// if versionedHashes != nil, versionedHashes match to blob transactions // // and that the blockhash of the constructed block matches the parameters. Nil // Withdrawals value will propagate through the returned block. Empty // Withdrawals value must be passed via non-nil, length 0 value in params. -func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) { +func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) { txs, err := decodeTransactions(params.Transactions) if err != nil { return nil, err @@ -174,6 +188,18 @@ func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) { if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) { return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas) } + var blobHashes []common.Hash + for _, tx := range txs { + blobHashes = append(blobHashes, tx.BlobHashes()...) + } + if len(blobHashes) != len(versionedHashes) { + return nil, fmt.Errorf("invalid number of versionedHashes: %v blobHashes: %v", versionedHashes, blobHashes) + } + for i := 0; i < len(blobHashes); i++ { + if blobHashes[i] != versionedHashes[i] { + return nil, fmt.Errorf("invalid versionedHash at %v: %v blobHashes: %v", i, versionedHashes, blobHashes) + } + } // Only set withdrawalsRoot if it is non-nil. This allows CLs to use // ExecutableData before withdrawals are enabled by marshaling // Withdrawals as the json null value. 
@@ -183,22 +209,25 @@ func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) { withdrawalsRoot = &h } header := &types.Header{ - ParentHash: params.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: params.FeeRecipient, - Root: params.StateRoot, - TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), - ReceiptHash: params.ReceiptsRoot, - Bloom: types.BytesToBloom(params.LogsBloom), - Difficulty: common.Big0, - Number: new(big.Int).SetUint64(params.Number), - GasLimit: params.GasLimit, - GasUsed: params.GasUsed, - Time: params.Timestamp, - BaseFee: params.BaseFeePerGas, - Extra: params.ExtraData, - MixDigest: params.Random, - WithdrawalsHash: withdrawalsRoot, + ParentHash: params.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: params.FeeRecipient, + Root: params.StateRoot, + TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), + ReceiptHash: params.ReceiptsRoot, + Bloom: types.BytesToBloom(params.LogsBloom), + Difficulty: common.Big0, + Number: new(big.Int).SetUint64(params.Number), + GasLimit: params.GasLimit, + GasUsed: params.GasUsed, + Time: params.Timestamp, + BaseFee: params.BaseFeePerGas, + Extra: params.ExtraData, + MixDigest: params.Random, + WithdrawalsHash: withdrawalsRoot, + ExcessBlobGas: params.ExcessBlobGas, + BlobGasUsed: params.BlobGasUsed, + ParentBeaconRoot: beaconRoot, } block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals) if block.Hash() != params.BlockHash { @@ -209,7 +238,7 @@ func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) { // BlockToExecutableData constructs the ExecutableData structure by filling the // fields from the given block. It assumes the given block is post-merge block. -func BlockToExecutableData(block *types.Block, fees *big.Int) *ExecutionPayloadEnvelope { +func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope { data := &ExecutableData{ BlockHash: block.Hash(), ParentHash: block.ParentHash(), @@ -226,8 +255,22 @@ func BlockToExecutableData(block *types.Block, fees *big.Int) *ExecutionPayloadE Random: block.MixDigest(), ExtraData: block.Extra(), Withdrawals: block.Withdrawals(), + BlobGasUsed: block.BlobGasUsed(), + ExcessBlobGas: block.ExcessBlobGas(), } - return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees} + bundle := BlobsBundleV1{ + Commitments: make([]hexutil.Bytes, 0), + Blobs: make([]hexutil.Bytes, 0), + Proofs: make([]hexutil.Bytes, 0), + } + for _, sidecar := range sidecars { + for j := range sidecar.Blobs { + bundle.Blobs = append(bundle.Blobs, hexutil.Bytes(sidecar.Blobs[j][:])) + bundle.Commitments = append(bundle.Commitments, hexutil.Bytes(sidecar.Commitments[j][:])) + bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:])) + } + } + return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false} } // ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1 diff --git a/build/checksums.txt b/build/checksums.txt index 9b038582a..adfa0bc34 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,20 +1,25 @@ # This file contains sha256 checksums of optional build dependencies. 
-e447b498cde50215c4f7619e5124b0fc4e25fb5d16ea47271c47f278e7aa763a go1.20.3.src.tar.gz -c1e1161d6d859deb576e6cfabeb40e3d042ceb1c6f444f617c3c9d76269c3565 go1.20.3.darwin-amd64.tar.gz -86b0ed0f2b2df50fa8036eea875d1cf2d76cefdacf247c44639a1464b7e36b95 go1.20.3.darwin-arm64.tar.gz -340e80abd047c597fdc0f50a6cc59617f06c297d62f7fc77f4a0164e2da6f7aa go1.20.3.freebsd-386.tar.gz -2169fcd8b6c94c5fbe07c0b470ccfb6001d343f6548ad49f3d9ab78e3b5753c7 go1.20.3.freebsd-amd64.tar.gz -e12384311403f1389d14cc1c1295bfb4e0dd5ab919403b80da429f671a223507 go1.20.3.linux-386.tar.gz -979694c2c25c735755bf26f4f45e19e64e4811d661dd07b8c010f7a8e18adfca go1.20.3.linux-amd64.tar.gz -eb186529f13f901e7a2c4438a05c2cd90d74706aaa0a888469b2a4a617b6ee54 go1.20.3.linux-arm64.tar.gz -b421e90469a83671641f81b6e20df6500f033e9523e89cbe7b7223704dd1035c go1.20.3.linux-armv6l.tar.gz -943c89aa1624ea544a022b31e3d6e16a037200e436370bdd5fd67f3fa60be282 go1.20.3.linux-ppc64le.tar.gz -126cf823a5634ef2544b866db107b9d351d3ea70d9e240b0bdcfb46f4dcae54b go1.20.3.linux-s390x.tar.gz -37e9146e1f9d681cfcaa6fee6c7b890c44c64bc50228c9588f3c4231346d33bd go1.20.3.windows-386.zip -143a2837821c7dbacf7744cbb1a8421c1f48307c6fdfaeffc5f8c2f69e1b7932 go1.20.3.windows-amd64.zip -158cb159e00bc979f473e0f5b5a561613129c5e51067967b72b8e072e5a4db81 go1.20.3.windows-arm64.zip +# https://github.com/ethereum/execution-spec-tests/releases +24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz +# https://go.dev/dl/ +818d46ede85682dd551ad378ef37a4d247006f12ec59b5b755601d2ce114369a go1.21.0.src.tar.gz +b314de9f704ab122c077d2ec8e67e3670affe8865479d1f01991e7ac55d65e70 go1.21.0.darwin-amd64.tar.gz +3aca44de55c5e098de2f406e98aba328898b05d509a2e2a356416faacf2c4566 go1.21.0.darwin-arm64.tar.gz +312a0065714a50862af714e7a5b3fbbd70fe68f905ffb9bcc56d42eadf6bffab go1.21.0.freebsd-386.tar.gz +b8eaa36654625df799654f77f4af0ea7bd9e5e760ebe86e68fe7c484748ae995 go1.21.0.freebsd-amd64.tar.gz +0e6f378d9b072fab0a3d9ff4d5e990d98487d47252dba8160015a61e6bd0bcba go1.21.0.linux-386.tar.gz +d0398903a16ba2232b389fb31032ddf57cac34efda306a0eebac34f0965a0742 go1.21.0.linux-amd64.tar.gz +f3d4548edf9b22f26bbd49720350bbfe59d75b7090a1a2bff1afad8214febaf3 go1.21.0.linux-arm64.tar.gz +e377a0004957c8c560a3ff99601bce612330a3d95ba3b0a2ae144165fc87deb1 go1.21.0.linux-armv6l.tar.gz +e938ffc81d8ebe5efc179240960ba22da6a841ff05d5cab7ce2547112b14a47f go1.21.0.linux-ppc64le.tar.gz +be7338df8e5d5472dfa307b0df2b446d85d001b0a2a3cdb1a14048d751b70481 go1.21.0.linux-s390x.tar.gz +af920fbb74fc3d173118dc3cc35f02a709c1de642700e92a91a7d16981df3fec go1.21.0.windows-386.zip +732121e64e0ecb07c77fdf6cc1bc5ce7b242c2d40d4ac29021ad4c64a08731f6 go1.21.0.windows-amd64.zip +41342f5a0f8c083b14c68bde738ddcd313a4f53a5854bfdfab47f0e88247de12 go1.21.0.windows-arm64.zip + +# https://github.com/golangci/golangci-lint/releases fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz diff --git a/build/ci.go b/build/ci.go index 7933724d7..6f77e5df6 100644 --- a/build/ci.go +++ b/build/ci.go @@ -120,15 +120,15 @@ var ( // Distros for which packages are created. // Note: vivid is unsupported because there is no golang-1.6 package for it. 
// Note: the following Ubuntu releases have been officially deprecated on Launchpad: - // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish + // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute, impish, + // kinetic debDistroGoBoots = map[string]string{ - "trusty": "golang-1.11", // EOL: 04/2024 - "xenial": "golang-go", // EOL: 04/2026 - "bionic": "golang-go", // EOL: 04/2028 - "focal": "golang-go", // EOL: 04/2030 - "jammy": "golang-go", // EOL: 04/2032 - "kinetic": "golang-go", // EOL: 07/2023 - "lunar": "golang-go", // EOL: 01/2024 + "trusty": "golang-1.11", // EOL: 04/2024 + "xenial": "golang-go", // EOL: 04/2026 + "bionic": "golang-go", // EOL: 04/2028 + "focal": "golang-go", // EOL: 04/2030 + "jammy": "golang-go", // EOL: 04/2032 + "lunar": "golang-go", // EOL: 01/2024 } debGoBootPaths = map[string]string{ @@ -139,7 +139,7 @@ var ( // This is the version of Go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.20.3" + dlgoVersion = "1.21.0" // This is the version of Go that will be used to bootstrap the PPA builder. // @@ -148,6 +148,13 @@ var ( // we need to switch over to a recursive builder to jumpt across supported // versions. gobootVersion = "1.19.6" + + // This is the version of execution-spec-tests that we are using. + // When updating, you must also update build/checksums.txt. + executionSpecTestsVersion = "1.0.2" + + // This is where the tests should be unpacked. + executionSpecTestsDir = "tests/spec-tests" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -200,6 +207,7 @@ func doInstall(cmdline []string) { staticlink = flag.Bool("static", false, "Create statically-linked executable") ) flag.CommandLine.Parse(cmdline) + env := build.Env() // Configure the toolchain. tc := build.GoToolchain{GOARCH: *arch, CC: *cc} @@ -207,12 +215,16 @@ func doInstall(cmdline []string) { csdb := build.MustLoadChecksums("build/checksums.txt") tc.Root = build.DownloadGo(csdb, dlgoVersion) } - // Disable CLI markdown doc generation in release builds and enable linking - // the CKZG library since we can make it portable here. - buildTags := []string{"urfave_cli_no_docs", "ckzg"} + + // Disable CLI markdown doc generation in release builds. + buildTags := []string{"urfave_cli_no_docs"} + + // Enable linking the CKZG library since we can make it work with additional flags. + if env.UbuntuVersion != "trusty" { + buildTags = append(buildTags, "ckzg") + } // Configure the build. - env := build.Env() gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...) // arm64 CI builders are memory-constrained and can't handle concurrent builds, @@ -289,16 +301,26 @@ func doTest(cmdline []string) { coverage = flag.Bool("coverage", false, "Whether to record code coverage") verbose = flag.Bool("v", false, "Whether to log verbosely") race = flag.Bool("race", false, "Execute the race detector") + cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads") ) flag.CommandLine.Parse(cmdline) + // Get test fixtures. + csdb := build.MustLoadChecksums("build/checksums.txt") + downloadSpecTestFixtures(csdb, *cachedir) + // Configure the toolchain. tc := build.GoToolchain{GOARCH: *arch, CC: *cc} if *dlgo { - csdb := build.MustLoadChecksums("build/checksums.txt") tc.Root = build.DownloadGo(csdb, dlgoVersion) } - gotest := tc.Go("test", "-tags=ckzg") + gotest := tc.Go("test") + + // CI needs a bit more time for the statetests (default 10m).
+ gotest.Args = append(gotest.Args, "-timeout=20m") + + // Enable CKZG backend in CI. + gotest.Args = append(gotest.Args, "-tags=ckzg") // Test a single package at a time. CI builders are slow // and some tests run into timeouts under load. @@ -321,6 +343,21 @@ func doTest(cmdline []string) { build.MustRun(gotest) } +// downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures. +func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string { + ext := ".tar.gz" + base := "fixtures" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename + url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext) + archivePath := filepath.Join(cachedir, base+ext) + if err := csdb.DownloadFile(url, archivePath); err != nil { + log.Fatal(err) + } + if err := build.ExtractArchive(archivePath, executionSpecTestsDir); err != nil { + log.Fatal(err) + } + return filepath.Join(cachedir, base) +} + // doLint runs golangci-lint on requested packages. func doLint(cmdline []string) { var ( diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 475e628b2..daca793e5 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -28,7 +28,7 @@ override_dh_auto_build: mv .mod $(GOPATH)/pkg/mod # A fresh Go was built, all dependency downloads faked, hope build works now - ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} + ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} -ubuntu {{.Distro}} override_dh_auto_test: diff --git a/build/update-license.go b/build/update-license.go index f61536470..52a54bf66 100644 --- a/build/update-license.go +++ b/build/update-license.go @@ -46,12 +46,13 @@ import ( "path/filepath" "regexp" "runtime" - "sort" "strconv" "strings" "sync" "text/template" "time" + + "golang.org/x/exp/slices" ) var ( @@ -152,13 +153,6 @@ func (i info) gpl() bool { return false } -// authors implements the sort.Interface for strings in case-insensitive mode. -type authors []string - -func (as authors) Len() int { return len(as) } -func (as authors) Less(i, j int) bool { return strings.ToLower(as[i]) < strings.ToLower(as[j]) } -func (as authors) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - func main() { var ( files = getFiles() @@ -299,7 +293,9 @@ func writeAuthors(files []string) { } } // Write sorted list of authors back to the file. 
- sort.Sort(authors(list)) + slices.SortFunc(list, func(a, b string) int { + return strings.Compare(strings.ToLower(a), strings.ToLower(b)) + }) content := new(bytes.Buffer) content.WriteString(authorsFileHeader) for _, a := range list { diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 748113aa4..5c1635de3 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -23,6 +23,7 @@ import ( "fmt" "net" "os" + "time" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/crypto" @@ -107,21 +108,20 @@ func main() { if err != nil { utils.Fatalf("-ListenUDP: %v", err) } - - realaddr := conn.LocalAddr().(*net.UDPAddr) - if natm != nil { - if !realaddr.IP.IsLoopback() { - go nat.Map(natm, nil, "udp", realaddr.Port, realaddr.Port, "ethereum discovery") - } - if ext, err := natm.ExternalIP(); err == nil { - realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port} - } - } - - printNotice(&nodeKey.PublicKey, *realaddr) + defer conn.Close() db, _ := enode.OpenDB("") ln := enode.NewLocalNode(db, nodeKey) + + listenerAddr := conn.LocalAddr().(*net.UDPAddr) + if natm != nil && !listenerAddr.IP.IsLoopback() { + natAddr := doPortMapping(natm, ln, listenerAddr) + if natAddr != nil { + listenerAddr = natAddr + } + } + + printNotice(&nodeKey.PublicKey, *listenerAddr) cfg := discover.Config{ PrivateKey: nodeKey, NetRestrict: restrictList, @@ -148,3 +148,61 @@ func printNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) { fmt.Println("Note: you're using cmd/bootnode, a developer tool.") fmt.Println("We recommend using a regular node as bootstrap node for production deployments.") } + +func doPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *net.UDPAddr { + const ( + protocol = "udp" + name = "ethereum discovery" + ) + newLogger := func(external int, internal int) log.Logger { + return log.New("proto", protocol, "extport", external, "intport", internal, "interface", natm) + } + + var ( + intport = addr.Port + extaddr = &net.UDPAddr{IP: addr.IP, Port: addr.Port} + mapTimeout = nat.DefaultMapTimeout + log = newLogger(addr.Port, intport) + ) + addMapping := func() { + // Get the external address. + var err error + extaddr.IP, err = natm.ExternalIP() + if err != nil { + log.Debug("Couldn't get external IP", "err", err) + return + } + // Create the mapping. + p, err := natm.AddMapping(protocol, extaddr.Port, intport, name, mapTimeout) + if err != nil { + log.Debug("Couldn't add port mapping", "err", err) + return + } + if p != uint16(extaddr.Port) { + extaddr.Port = int(p) + log = newLogger(extaddr.Port, intport) + log.Info("NAT mapped alternative port") + } else { + log.Info("NAT mapped port") + } + // Update IP/port information of the local node. + ln.SetStaticIP(extaddr.IP) + ln.SetFallbackUDP(extaddr.Port) + } + + // Perform mapping once, synchronously. + log.Info("Attempting port mapping") + addMapping() + + // Refresh the mapping periodically.
+ go func() { + refresh := time.NewTimer(mapTimeout) + defer refresh.Stop() + for range refresh.C { + addMapping() + refresh.Reset(mapTimeout) + } + }() + + return extaddr +} diff --git a/cmd/clef/README.md b/cmd/clef/README.md index 3891e5540..85c9c7060 100644 --- a/cmd/clef/README.md +++ b/cmd/clef/README.md @@ -29,7 +29,7 @@ GLOBAL OPTIONS: --loglevel value log level to emit to the screen (default: 4) --keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore") --configdir value Directory for Clef configuration (default: "$HOME/.clef") - --chainid value Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli) (default: 1) + --chainid value Chain id to use for signing (1=mainnet, 5=Goerli) (default: 1) --lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength --nousb Disables monitoring for and managing USB hardware wallets --pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm") diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 35f9f50f5..a8b0bf816 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -27,6 +27,7 @@ import ( "fmt" "io" "math/big" + "net" "os" "os/signal" "path/filepath" @@ -99,7 +100,7 @@ var ( chainIdFlag = &cli.Int64Flag{ Name: "chainid", Value: params.MainnetChainConfig.ChainID.Int64(), - Usage: "Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli)", + Usage: "Chain id to use for signing (1=mainnet, 5=Goerli)", } rpcPortFlag = &cli.IntFlag{ Name: "http.port", @@ -732,6 +733,7 @@ func signer(c *cli.Context) error { cors := utils.SplitAndTrim(c.String(utils.HTTPCORSDomainFlag.Name)) srv := rpc.NewServer() + srv.SetBatchLimits(node.DefaultConfig.BatchRequestLimit, node.DefaultConfig.BatchResponseMaxSize) err := node.RegisterApis(rpcAPI, []string{"account"}, srv) if err != nil { utils.Fatalf("Could not register API: %w", err) @@ -742,7 +744,7 @@ func signer(c *cli.Context) error { port := c.Int(rpcPortFlag.Name) // start http server - httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.HTTPListenAddrFlag.Name), port) + httpEndpoint := net.JoinHostPort(c.String(utils.HTTPListenAddrFlag.Name), fmt.Sprintf("%d", port)) httpServer, addr, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler) if err != nil { utils.Fatalf("Could not start RPC api: %v", err) diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md index 0a61304c2..5ca7b497a 100644 --- a/cmd/devp2p/README.md +++ b/cmd/devp2p/README.md @@ -44,7 +44,7 @@ set to standard output. 
The following filters are supported: - `-limit ` limits the output set to N entries, taking the top N nodes by score - `-ip ` filters nodes by IP subnet - `-min-age ` filters nodes by 'first seen' time -- `-eth-network ` filters nodes by "eth" ENR entry +- `-eth-network ` filters nodes by "eth" ENR entry - `-les-server` filters nodes by LES server support - `-snap` filters nodes by snap protocol support diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go index 1b964164d..8c0defff6 100644 --- a/cmd/devp2p/crawl.go +++ b/cmd/devp2p/crawl.go @@ -87,11 +87,11 @@ func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet { go c.runIterator(doneCh, it) } var ( - added uint64 - updated uint64 - skipped uint64 - recent uint64 - removed uint64 + added atomic.Uint64 + updated atomic.Uint64 + skipped atomic.Uint64 + recent atomic.Uint64 + removed atomic.Uint64 wg sync.WaitGroup ) wg.Add(nthreads) @@ -103,15 +103,15 @@ func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet { case n := <-c.ch: switch c.updateNode(n) { case nodeSkipIncompat: - atomic.AddUint64(&skipped, 1) + skipped.Add(1) case nodeSkipRecent: - atomic.AddUint64(&recent, 1) + recent.Add(1) case nodeRemoved: - atomic.AddUint64(&removed, 1) + removed.Add(1) case nodeAdded: - atomic.AddUint64(&added, 1) + added.Add(1) default: - atomic.AddUint64(&updated, 1) + updated.Add(1) } case <-c.closed: return @@ -138,11 +138,11 @@ loop: break loop case <-statusTicker.C: log.Info("Crawling in progress", - "added", atomic.LoadUint64(&added), - "updated", atomic.LoadUint64(&updated), - "removed", atomic.LoadUint64(&removed), - "ignored(recent)", atomic.LoadUint64(&recent), - "ignored(incompatible)", atomic.LoadUint64(&skipped)) + "added", added.Load(), + "updated", updated.Load(), + "removed", removed.Load(), + "ignored(recent)", recent.Load(), + "ignored(incompatible)", skipped.Load()) } } diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index 31129bce8..0117c7eb8 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -337,7 +337,7 @@ func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn { } func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) { - s := params.RinkebyBootnodes + s := params.MainnetBootnodes if ctx.IsSet(bootnodesFlag.Name) { input := ctx.String(bootnodesFlag.Name) if input == "" { diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go index 6c97fa937..21a32f941 100644 --- a/cmd/devp2p/dns_route53.go +++ b/cmd/devp2p/dns_route53.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "sort" "strconv" "strings" "time" @@ -33,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/urfave/cli/v2" + "golang.org/x/exp/slices" ) const ( @@ -288,11 +288,17 @@ func makeDeletionChanges(records map[string]recordSet, keep map[string]string) [ // sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order. 
func sortChanges(changes []types.Change) { score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3} - sort.Slice(changes, func(i, j int) bool { - if changes[i].Action == changes[j].Action { - return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name + slices.SortFunc(changes, func(a, b types.Change) int { + if a.Action == b.Action { + return strings.Compare(*a.ResourceRecordSet.Name, *b.ResourceRecordSet.Name) } - return score[string(changes[i].Action)] < score[string(changes[j].Action)] + if score[string(a.Action)] < score[string(b.Action)] { + return -1 + } + if score[string(a.Action)] > score[string(b.Action)] { + return 1 + } + return 0 }) } diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go index 5e3d9fe98..938159ec5 100644 --- a/cmd/devp2p/internal/ethtest/chain.go +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -77,7 +77,7 @@ func (c *Chain) RootAt(height int) common.Hash { // ForkID gets the fork id of the chain. func (c *Chain) ForkID() forkid.ID { - return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time()) + return forkid.NewID(c.chainConfig, c.blocks[0], uint64(c.Len()), c.blocks[0].Time()) } // Shorten returns a copy chain of a desired height from the imported diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go index 8a2b132fa..c5bcc3db1 100644 --- a/cmd/devp2p/internal/ethtest/suite_test.go +++ b/cmd/devp2p/internal/ethtest/suite_test.go @@ -109,15 +109,13 @@ func setupGeth(stack *node.Node) error { } backend, err := eth.New(stack, ðconfig.Config{ - Genesis: &chain.genesis, - NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763 - DatabaseCache: 10, - TrieCleanCache: 10, - TrieCleanCacheJournal: "", - TrieCleanCacheRejournal: 60 * time.Minute, - TrieDirtyCache: 16, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 10, + Genesis: &chain.genesis, + NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763 + DatabaseCache: 10, + TrieCleanCache: 10, + TrieDirtyCache: 16, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 10, }) if err != nil { return err diff --git a/cmd/devp2p/nodeset.go b/cmd/devp2p/nodeset.go index 33c39f4b9..7360dc5bc 100644 --- a/cmd/devp2p/nodeset.go +++ b/cmd/devp2p/nodeset.go @@ -21,11 +21,11 @@ import ( "encoding/json" "fmt" "os" - "sort" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/p2p/enode" + "golang.org/x/exp/slices" ) const jsonIndent = " " @@ -77,8 +77,8 @@ func (ns nodeSet) nodes() []*enode.Node { result = append(result, n.N) } // Sort by ID. 
- sort.Slice(result, func(i, j int) bool { - return bytes.Compare(result[i].ID().Bytes(), result[j].ID().Bytes()) < 0 + slices.SortFunc(result, func(a, b *enode.Node) int { + return bytes.Compare(a.ID().Bytes(), b.ID().Bytes()) }) return result } @@ -103,8 +103,14 @@ func (ns nodeSet) topN(n int) nodeSet { for _, v := range ns { byscore = append(byscore, v) } - sort.Slice(byscore, func(i, j int) bool { - return byscore[i].Score >= byscore[j].Score + slices.SortFunc(byscore, func(a, b nodeJSON) int { + if a.Score > b.Score { + return -1 + } + if a.Score < b.Score { + return 1 + } + return 0 }) result := make(nodeSet, n) for _, v := range byscore[:n] { diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index f7c258637..6fbc185ad 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/params" @@ -228,13 +229,13 @@ func ethFilter(args []string) (nodeFilter, error) { var filter forkid.Filter switch args[0] { case "mainnet": - filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash) - case "rinkeby": - filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash) + filter = forkid.NewStaticFilter(params.MainnetChainConfig, core.DefaultGenesisBlock().ToBlock()) case "goerli": - filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) + filter = forkid.NewStaticFilter(params.GoerliChainConfig, core.DefaultGoerliGenesisBlock().ToBlock()) case "sepolia": - filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash) + filter = forkid.NewStaticFilter(params.SepoliaChainConfig, core.DefaultSepoliaGenesisBlock().ToBlock()) + case "holesky": + filter = forkid.NewStaticFilter(params.HoleskyChainConfig, core.DefaultHoleskyGenesisBlock().ToBlock()) default: return nil, fmt.Errorf("unknown network %q", args[0]) } diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 296215861..e6c6fe06a 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -342,7 +342,7 @@ To make `t8n` apply these, the following inputs are required: - For ethash, it is `5000000000000000000` `wei`, - If this is not defined, mining rewards are not applied, - A value of `0` is valid, and causes accounts to be 'touched'. -- For each ommer, the tool needs to be given an `addres\` and a `delta`. This +- For each ommer, the tool needs to be given an `address\` and a `delta`. This is done via the `ommers` field in `env`. Note: the tool does not verify that e.g. 
the normal uncle rules apply, diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index ffd76b8fb..0be5f6971 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -22,6 +22,9 @@ import ( "fmt" "os" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" "github.com/urfave/cli/v2" @@ -42,7 +45,16 @@ func blockTestCmd(ctx *cli.Context) error { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) log.Root().SetHandler(glogger) - + var tracer vm.EVMLogger + // Configure the EVM logger + if ctx.Bool(MachineFlag.Name) { + tracer = logger.NewJSONLogger(&logger.Config{ + EnableMemory: !ctx.Bool(DisableMemoryFlag.Name), + DisableStack: ctx.Bool(DisableStackFlag.Name), + DisableStorage: ctx.Bool(DisableStorageFlag.Name), + EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name), + }, os.Stderr) + } // Load the test content from the input file src, err := os.ReadFile(ctx.Args().First()) if err != nil { @@ -53,7 +65,7 @@ func blockTestCmd(ctx *cli.Context) error { return err } for i, test := range tests { - if err := test.Run(false); err != nil { + if err := test.Run(false, rawdb.HashScheme, tracer); err != nil { return fmt.Errorf("test %v: %w", i, err) } } diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 64804a17f..09dca8984 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -306,7 +306,7 @@ func readInput(ctx *cli.Context) (*bbInput, error) { return inputData, nil } -// dispatchOutput writes the output data to either stderr or stdout, or to the specified +// dispatchBlock writes the output data to either stderr or stdout, or to the specified // files func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error { raw, _ := rlp.EncodeToBytes(block) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 5633198d3..bb14ac63c 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -19,12 +19,12 @@ package t8ntool import ( "fmt" "math/big" - "os" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" @@ -47,17 +47,19 @@ type Prestate struct { // ExecutionResult contains the execution status after running a state test, any // error that might have occurred and a dump of the final state if requested. 
type ExecutionResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptsRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected []*rejectedTx `json:"rejected,omitempty"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected []*rejectedTx `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` + CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` + CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"` } type ommer struct { @@ -67,37 +69,44 @@ type ommer struct { //go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty"` - Random *big.Int `json:"currentRandom"` - ParentDifficulty *big.Int `json:"parentDifficulty"` - ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"` - ParentGasUsed uint64 `json:"parentGasUsed,omitempty"` - ParentGasLimit uint64 `json:"parentGasLimit,omitempty"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` - BaseFee *big.Int `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty"` + Random *big.Int `json:"currentRandom"` + ParentDifficulty *big.Int `json:"parentDifficulty"` + ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"` + ParentGasUsed uint64 `json:"parentGasUsed,omitempty"` + ParentGasLimit uint64 `json:"parentGasLimit,omitempty"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` + ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` + ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` + 
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` } type stEnvMarshaling struct { - Coinbase common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - Random *math.HexOrDecimal256 - ParentDifficulty *math.HexOrDecimal256 - ParentBaseFee *math.HexOrDecimal256 - ParentGasUsed math.HexOrDecimal64 - ParentGasLimit math.HexOrDecimal64 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - ParentTimestamp math.HexOrDecimal64 - BaseFee *math.HexOrDecimal256 + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + ParentDifficulty *math.HexOrDecimal256 + ParentBaseFee *math.HexOrDecimal256 + ParentGasUsed math.HexOrDecimal64 + ParentGasLimit math.HexOrDecimal64 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ParentTimestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 + ExcessBlobGas *math.HexOrDecimal64 + ParentExcessBlobGas *math.HexOrDecimal64 + ParentBlobGasUsed *math.HexOrDecimal64 } type rejectedTx struct { @@ -154,6 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, rnd := common.BigToHash(pre.Env.Random) vmContext.Random = &rnd } + // If excessBlobGas is defined, add it to the vmContext. + if pre.Env.ExcessBlobGas != nil { + vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas + } else { + // If it is not explicitly defined, but we have the parent values, we try + // to calculate it ourselves. + parentExcessBlobGas := pre.Env.ParentExcessBlobGas + parentBlobGasUsed := pre.Env.ParentBlobGasUsed + if parentExcessBlobGas != nil && parentBlobGasUsed != nil { + excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) + vmContext.ExcessBlobGas = &excessBlobGas + } + } // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // done in StateProcessor.Process(block, ...), right before transactions are applied. if chainConfig.DAOForkSupport && @@ -161,8 +183,18 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { misc.ApplyDAOHardFork(statedb) } - + if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil { + evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig) + core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb) + } + var blobGasUsed uint64 for i, tx := range txs { + if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { + errMsg := "blob tx used but field env.ExcessBlobGas missing" + log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) + continue + } msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) if err != nil { log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) @@ -192,6 +224,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, gaspool.SetGas(prevGas) continue } + if tx.Type() == types.BlobTxType { + blobGasUsed += params.BlobTxBlobGasPerBlob + } includedTxs = append(includedTxs, tx) if hashError != nil { return nil, nil, NewError(ErrorMissingBlockhash, hashError) @@ -240,7 +275,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, if miningReward >= 0 { // Add mining reward. 
The mining reward may be `0`, which only makes a difference in the cases // where - // - the coinbase suicided, or + // - the coinbase self-destructed, or // - there are only 'bad' transactions, which aren't executed. In those cases, // the coinbase gets no txfee, so isn't created, and thus needs to be touched var ( @@ -267,9 +302,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, statedb.AddBalance(w.Address, amount) } // Commit block - root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber)) + root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber)) if err != nil { - fmt.Fprintf(os.Stderr, "Could not commit state: %v", err) return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) } execRs := &ExecutionResult{ @@ -288,6 +322,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil)) execRs.WithdrawalsRoot = &h } + if vmContext.ExcessBlobGas != nil { + execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas) + execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed) + } + // Re-create statedb instance with new root upon the updated database + // for accessing latest states. + statedb, err = state.New(root, statedb.Database(), nil) + if err != nil { + return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err)) + } return statedb, execRs, nil } @@ -303,7 +347,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB } } // Commit and re-open to start with a clean state. - root, _ := statedb.Commit(false) + root, _ := statedb.Commit(0, false) statedb, _ = state.New(root, sdb, nil) return statedb } diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index c2cc3a2c8..bb195ef64 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -17,22 +17,26 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` - ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` - ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` + ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` + ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` + ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) @@ -51,28 +55,36 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.Withdrawals = s.Withdrawals enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) enc.ParentUncleHash = s.ParentUncleHash + enc.ExcessBlobGas = (*math.HexOrDecimal64)(s.ExcessBlobGas) + enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas) + enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed) + enc.ParentBeaconBlockRoot = s.ParentBeaconBlockRoot return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` - ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` - ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash *common.Hash `json:"parentUncleHash"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` + ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` + ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *common.Hash `json:"parentUncleHash"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` + ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` + ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` + ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -130,5 +142,17 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.ParentUncleHash != nil { s.ParentUncleHash = *dec.ParentUncleHash } + if dec.ExcessBlobGas != nil { + s.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.ParentExcessBlobGas != nil { + s.ParentExcessBlobGas = (*uint64)(dec.ParentExcessBlobGas) + } + if dec.ParentBlobGasUsed != nil { + s.ParentBlobGasUsed = (*uint64)(dec.ParentBlobGasUsed) + } + if dec.ParentBeaconBlockRoot != nil { + s.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot + } return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 80ebf599c..396b341d2 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/consensus/misc" + 
"github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -192,105 +192,20 @@ func Transition(ctx *cli.Context) error { // Set the chain id chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) - var txsWithKeys []*txWithKey - if txStr != stdinSelector { - inFile, err := os.Open(txStr) - if err != nil { - return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) - } - defer inFile.Close() - decoder := json.NewDecoder(inFile) - if strings.HasSuffix(txStr, ".rlp") { - var body hexutil.Bytes - if err := decoder.Decode(&body); err != nil { - return err - } - var txs types.Transactions - if err := rlp.DecodeBytes(body, &txs); err != nil { - return err - } - for _, tx := range txs { - txsWithKeys = append(txsWithKeys, &txWithKey{ - key: nil, - tx: tx, - }) - } - } else { - if err := decoder.Decode(&txsWithKeys); err != nil { - return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) - } - } - } else { - if len(inputData.TxRlp) > 0 { - // Decode the body of already signed transactions - body := common.FromHex(inputData.TxRlp) - var txs types.Transactions - if err := rlp.DecodeBytes(body, &txs); err != nil { - return err - } - for _, tx := range txs { - txsWithKeys = append(txsWithKeys, &txWithKey{ - key: nil, - tx: tx, - }) - } - } else { - // JSON encoded transactions - txsWithKeys = inputData.Txs - } + if txs, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil { + return err } - // We may have to sign the transactions. - signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp) - - if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { - return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) + if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil { + return err } - // Sanity check, to not `panic` in state_transition - if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) { - if prestate.Env.BaseFee != nil { - // Already set, base fee has precedent over parent base fee. 
- } else if prestate.Env.ParentBaseFee != nil && prestate.Env.Number != 0 { - parent := &types.Header{ - Number: new(big.Int).SetUint64(prestate.Env.Number - 1), - BaseFee: prestate.Env.ParentBaseFee, - GasUsed: prestate.Env.ParentGasUsed, - GasLimit: prestate.Env.ParentGasLimit, - } - prestate.Env.BaseFee = misc.CalcBaseFee(chainConfig, parent) - } else { - return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) - } + if err := applyShanghaiChecks(&prestate.Env, chainConfig); err != nil { + return err } - if chainConfig.IsShanghai(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp) && prestate.Env.Withdrawals == nil { - return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section")) + if err := applyMergeChecks(&prestate.Env, chainConfig); err != nil { + return err } - isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0 - env := prestate.Env - if isMerged { - // post-merge: - // - random must be supplied - // - difficulty must be zero - switch { - case env.Random == nil: - return NewError(ErrorConfig, errors.New("post-merge requires currentRandom to be defined in env")) - case env.Difficulty != nil && env.Difficulty.BitLen() != 0: - return NewError(ErrorConfig, errors.New("post-merge difficulty must be zero (or omitted) in env")) - } - prestate.Env.Difficulty = nil - } else if env.Difficulty == nil { - // pre-merge: - // If difficulty was not provided by caller, we need to calculate it. - switch { - case env.ParentDifficulty == nil: - return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) - case env.Number == 0: - return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0")) - case env.Timestamp <= env.ParentTimestamp: - return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", - env.Timestamp, env.ParentTimestamp)) - } - prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, - env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) + if err := applyCancunChecks(&prestate.Env, chainConfig); err != nil { + return err } // Run the test and aggregate the result s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) @@ -358,33 +273,149 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { // and secondly to read them with the standard tx json format func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) { var signedTxs []*types.Transaction - for i, txWithKey := range txs { - tx := txWithKey.tx - key := txWithKey.key - v, r, s := tx.RawSignatureValues() - if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 { - // This transaction needs to be signed - var ( - signed *types.Transaction - err error - ) - if txWithKey.protected { - signed, err = types.SignTx(tx, signer, key) - } else { - signed, err = types.SignTx(tx, types.FrontierSigner{}, key) - } - if err != nil { - return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) - } - signedTxs = append(signedTxs, signed) - } else { + for i, tx := range txs { + var ( + v, r, s = tx.tx.RawSignatureValues() + signed *types.Transaction + err error + ) + if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 { // Already signed - signedTxs = 
append(signedTxs, tx) + signedTxs = append(signedTxs, tx.tx) + continue } + // This transaction needs to be signed + if tx.protected { + signed, err = types.SignTx(tx.tx, signer, tx.key) + } else { + signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key) + } + if err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err)) + } + signedTxs = append(signedTxs, signed) } return signedTxs, nil } +func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (types.Transactions, error) { + var txsWithKeys []*txWithKey + var signed types.Transactions + if txStr != stdinSelector { + data, err := os.ReadFile(txStr) + if err != nil { + return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list + var body hexutil.Bytes + if err := json.Unmarshal(data, &body); err != nil { + return nil, err + } + // Already signed transactions + if err := rlp.DecodeBytes(body, &signed); err != nil { + return nil, err + } + return signed, nil + } + if err := json.Unmarshal(data, &txsWithKeys); err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) + } + } else { + if len(inputData.TxRlp) > 0 { + // Decode the body of already signed transactions + body := common.FromHex(inputData.TxRlp) + // Already signed transactions + if err := rlp.DecodeBytes(body, &signed); err != nil { + return nil, err + } + return signed, nil + } + // JSON encoded transactions + txsWithKeys = inputData.Txs + } + // We may have to sign the transactions. + signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp) + return signUnsignedTransactions(txsWithKeys, signer) +} + +func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error { + if !chainConfig.IsLondon(big.NewInt(int64(env.Number))) { + return nil + } + // Sanity check, to not `panic` in state_transition + if env.BaseFee != nil { + // Already set, base fee has precedent over parent base fee. + return nil + } + if env.ParentBaseFee == nil || env.Number == 0 { + return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) + } + env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{ + Number: new(big.Int).SetUint64(env.Number - 1), + BaseFee: env.ParentBaseFee, + GasUsed: env.ParentGasUsed, + GasLimit: env.ParentGasLimit, + }) + return nil +} + +func applyShanghaiChecks(env *stEnv, chainConfig *params.ChainConfig) error { + if !chainConfig.IsShanghai(big.NewInt(int64(env.Number)), env.Timestamp) { + return nil + } + if env.Withdrawals == nil { + return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section")) + } + return nil +} + +func applyMergeChecks(env *stEnv, chainConfig *params.ChainConfig) error { + isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0 + if !isMerged { + // pre-merge: If difficulty was not provided by caller, we need to calculate it. 
+ if env.Difficulty != nil { + // already set + return nil + } + switch { + case env.ParentDifficulty == nil: + return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) + case env.Number == 0: + return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0")) + case env.Timestamp <= env.ParentTimestamp: + return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", + env.Timestamp, env.ParentTimestamp)) + } + env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, + env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) + return nil + } + // post-merge: + // - random must be supplied + // - difficulty must be zero + switch { + case env.Random == nil: + return NewError(ErrorConfig, errors.New("post-merge requires currentRandom to be defined in env")) + case env.Difficulty != nil && env.Difficulty.BitLen() != 0: + return NewError(ErrorConfig, errors.New("post-merge difficulty must be zero (or omitted) in env")) + } + env.Difficulty = nil + return nil +} + +func applyCancunChecks(env *stEnv, chainConfig *params.ChainConfig) error { + if !chainConfig.IsCancun(big.NewInt(int64(env.Number)), env.Timestamp) { + env.ParentBeaconBlockRoot = nil // un-set it if it has been set too early + return nil + } + // Post-cancun + // We require EIP-4788 beacon root to be set in the env + if env.ParentBeaconBlockRoot == nil { + return NewError(ErrorConfig, errors.New("post-cancun env requires parentBeaconBlockRoot to be set")) + } + return nil +} + type Alloc map[common.Address]core.GenesisAccount func (g Alloc) OnRoot(common.Hash) {} diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 52d7c3fa3..ac8432bad 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -42,6 +42,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/urfave/cli/v2" ) @@ -128,6 +129,7 @@ func runCmd(ctx *cli.Context) error { receiver = common.BytesToAddress([]byte("receiver")) genesisConfig *core.Genesis preimages = ctx.Bool(DumpFlag.Name) + blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests ) if ctx.Bool(MachineFlag.Name) { tracer = logger.NewJSONLogger(logconfig, os.Stdout) @@ -141,12 +143,23 @@ func runCmd(ctx *cli.Context) error { gen := readGenesis(ctx.String(GenesisFlag.Name)) genesisConfig = gen db := rawdb.NewMemoryDatabase() - genesis := gen.MustCommit(db) - sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages}) + triedb := trie.NewDatabase(db, &trie.Config{ + Preimages: preimages, + HashDB: hashdb.Defaults, + }) + defer triedb.Close() + genesis := gen.MustCommit(db, triedb) + sdb := state.NewDatabaseWithNodeDB(db, triedb) statedb, _ = state.New(genesis.Root(), sdb, nil) chainConfig = gen.Config } else { - sdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages}) + db := rawdb.NewMemoryDatabase() + triedb := trie.NewDatabase(db, &trie.Config{ + Preimages: preimages, + HashDB: hashdb.Defaults, + }) + defer triedb.Close() + sdb := state.NewDatabaseWithNodeDB(db, triedb) statedb, _ = state.New(types.EmptyRootHash, sdb, nil) genesisConfig = new(core.Genesis) } @@ -217,6 +230,7 @@ func runCmd(ctx *cli.Context) error { Time: genesisConfig.Timestamp, Coinbase: 
genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), + BlobHashes: blobHashes, EVMConfig: vm.Config{ Tracer: tracer, }, @@ -278,8 +292,7 @@ func runCmd(ctx *cli.Context) error { output, leftOverGas, stats, err := timedExec(bench, execFunc) if ctx.Bool(DumpFlag.Name) { - statedb.Commit(true) - statedb.IntermediateRoot(true) + statedb.Commit(genesisConfig.Number, true) fmt.Println(string(statedb.Dump(nil))) } diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index ed102e605..85931f040 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -23,7 +23,9 @@ import ( "os" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" @@ -104,24 +106,23 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - _, s, err := test.Run(st, cfg, false) - // print state root for evmlab tracing - if s != nil { - root := s.IntermediateRoot(false) - result.Root = &root - if jsonOut { - fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root) + test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + if state != nil { + root := state.IntermediateRoot(false) + result.Root = &root + if jsonOut { + fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root) + } } - } - if err != nil { - // Test failed, mark as so and dump any state to aid debugging - result.Pass, result.Error = false, err.Error() - if dump && s != nil { - dump := s.RawDump(nil) - result.State = &dump + if err != nil { + // Test failed, mark as so and dump any state to aid debugging + result.Pass, result.Error = false, err.Error() + if dump { + dump := state.RawDump(nil) + result.State = &dump + } } - } - + }) results = append(results, *result) } } diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 0ef3fac40..ad09a6b4d 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -259,6 +259,22 @@ func TestT8n(t *testing.T) { output: t8nOutput{alloc: true, result: true}, expOut: "exp.json", }, + { // Cancun tests + base: "./testdata/28", + input: t8nInput{ + "alloc.json", "txs.rlp", "env.json", "Cancun", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // More cancun tests + base: "./testdata/29", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Cancun", "", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, } { args := []string{"t8n"} args = append(args, tc.output.get()...) diff --git a/cmd/evm/testdata/13/readme.md b/cmd/evm/testdata/13/readme.md index 64f52fc9a..889975d47 100644 --- a/cmd/evm/testdata/13/readme.md +++ b/cmd/evm/testdata/13/readme.md @@ -1,4 +1,4 @@ ## Input transactions in RLP form -This testdata folder is used to examplify how transaction input can be provided in rlp form. +This testdata folder is used to exemplify how transaction input can be provided in rlp form. Please see the README in `evm` folder for how this is performed. 
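The runner.go and staterunner.go hunks above all route state construction through an explicit `trie.Database` handle that must be closed when the command exits. A condensed sketch of that wiring, using only constructors that appear in the hunks (geth packages as imported by the diff):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
)

func main() {
	// Back everything with an in-memory key-value store.
	db := rawdb.NewMemoryDatabase()

	// The trie database now owns preimage tracking and the node scheme
	// (hash-based here) and must be closed explicitly.
	triedb := trie.NewDatabase(db, &trie.Config{
		Preimages: true,
		HashDB:    hashdb.Defaults,
	})
	defer triedb.Close()

	// Bind the state database to the shared trie database and open an
	// empty state, as runCmd does when no genesis is supplied.
	sdb := state.NewDatabaseWithNodeDB(db, triedb)
	statedb, err := state.New(types.EmptyRootHash, sdb, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("empty state root:", statedb.IntermediateRoot(false))
}
```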
\ No newline at end of file diff --git a/cmd/evm/testdata/2/readme.md b/cmd/evm/testdata/2/readme.md index c116f0e79..4bcf0f0fa 100644 --- a/cmd/evm/testdata/2/readme.md +++ b/cmd/evm/testdata/2/readme.md @@ -1 +1 @@ -These files examplify a selfdestruct to the `0`-address. \ No newline at end of file +These files exemplify a selfdestruct to the `0`-address. \ No newline at end of file diff --git a/cmd/evm/testdata/23/readme.md b/cmd/evm/testdata/23/readme.md index 85fe8db66..f31b64de2 100644 --- a/cmd/evm/testdata/23/readme.md +++ b/cmd/evm/testdata/23/readme.md @@ -1 +1 @@ -These files examplify how to sign a transaction using the pre-EIP155 scheme. +These files exemplify how to sign a transaction using the pre-EIP155 scheme. diff --git a/cmd/evm/testdata/28/alloc.json b/cmd/evm/testdata/28/alloc.json new file mode 100644 index 000000000..680a89f4e --- /dev/null +++ b/cmd/evm/testdata/28/alloc.json @@ -0,0 +1,16 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x016345785d8a0000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x016345785d8a0000", + "code" : "0x60004960015500", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json new file mode 100644 index 000000000..5056fe29a --- /dev/null +++ b/cmd/evm/testdata/28/env.json @@ -0,0 +1,23 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "currentGasLimit" : "0x7fffffffffffffff", + "previousHash" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", + "currentBlobGasUsed" : "0x00", + "parentTimestamp" : "0x03b6", + "parentDifficulty" : "0x00", + "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "withdrawals" : [ + ], + "parentBaseFee" : "0x0a", + "parentGasUsed" : "0x00", + "parentGasLimit" : "0x7fffffffffffffff", + "parentExcessBlobGas" : "0x00", + "parentBlobGasUsed" : "0x00", + "blockHashes" : { + "0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6" + }, + "parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" +} \ No newline at end of file diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json new file mode 100644 index 000000000..a55ce0aec --- /dev/null +++ b/cmd/evm/testdata/28/exp.json @@ -0,0 +1,47 @@ +{ + "alloc": { + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x150ca" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x16345785d80c3a9", + "nonce": "0x1" + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "code": "0x60004960015500", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x01a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + "balance": "0x16345785d8a0000" + } + }, + "result": { + "stateRoot": "0xa40cb3fab01848e922a48bd24191815df9f721ad4b60376edac75161517663e8", + "txRoot": "0x4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce", + "receiptsRoot": "0xbff643da765981266133094092d98c81d2ac8e9a83a7bbda46c3d736f1f874ac", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x3", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xa865", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0xa865", + "effectiveGasPrice": null, + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": null, + "gasUsed": "0xa865", + "currentBaseFee": "0x9", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x0", + "currentBlobGasUsed": "0x20000" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/28/txs.rlp b/cmd/evm/testdata/28/txs.rlp new file mode 100644 index 000000000..8df20e3aa --- /dev/null +++ b/cmd/evm/testdata/28/txs.rlp @@ -0,0 +1 @@ +"0xf88bb88903f8860180026483061a8094b94f5374fce5edbc8e2a8697c15331677e6ebf0b8080c00ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d801a025e16bb498552165016751911c3608d79000ab89dc3100776e729e6ea13091c7a03acacff7fc0cff6eda8a927dec93ca17765e1ee6cbc06c5954ce102e097c01d2" \ No newline at end of file diff --git a/cmd/evm/testdata/29/alloc.json b/cmd/evm/testdata/29/alloc.json new file mode 100644 index 000000000..70d47862a --- /dev/null +++ b/cmd/evm/testdata/29/alloc.json @@ -0,0 +1,16 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x016345785d8a0000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + } + }, + "0xbEac00dDB15f3B6d645C48263dC93862413A222D" : { + "balance" : "0x1", + "code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", + "nonce" : "0x00", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/29/env.json b/cmd/evm/testdata/29/env.json new file mode 100644 index 000000000..e752a909a --- /dev/null +++ b/cmd/evm/testdata/29/env.json @@ -0,0 +1,20 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "currentGasLimit" : "0x7fffffffffffffff", + "previousHash" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", + "currentBlobGasUsed" : "0x00", + "parentTimestamp" : "0x03b6", + "parentDifficulty" : "0x00", + "parentUncleHash" : 
"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "withdrawals" : [ + ], + "parentBaseFee" : "0x0a", + "parentGasUsed" : "0x00", + "parentGasLimit" : "0x7fffffffffffffff", + "parentExcessBlobGas" : "0x00", + "parentBlobGasUsed" : "0x00", + "parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" +} \ No newline at end of file diff --git a/cmd/evm/testdata/29/exp.json b/cmd/evm/testdata/29/exp.json new file mode 100644 index 000000000..16a881777 --- /dev/null +++ b/cmd/evm/testdata/29/exp.json @@ -0,0 +1,45 @@ +{ + "alloc": { + "0xbeac00ddb15f3b6d645c48263dc93862413a222d": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", + "storage": { + "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", + "0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" + }, + "balance": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x16345785d871db8", + "nonce": "0x1" + } + }, + "result": { + "stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574", + "txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab", + "receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x84f70aba406a55628a0620f26d260f90aeb6ccc55fed6ec2ac13dd4f727032ed", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "effectiveGasPrice": null, + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": null, + "gasUsed": "0x5208", + "currentBaseFee": "0x9", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x0", + "currentBlobGasUsed": "0x0" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/29/readme.md 
b/cmd/evm/testdata/29/readme.md new file mode 100644 index 000000000..4383e328e --- /dev/null +++ b/cmd/evm/testdata/29/readme.md @@ -0,0 +1,29 @@ +## EIP 4788 + +This test contains testcases for EIP-4788. The 4788-contract is +located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also +implicitly invokes the system tx, which calls the contract and sets the +storage values +``` +$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7 +INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs" +INFO [08-15|20:07:56.335] Wrote file file=result.json +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x16345785d871db8", + "nonce": "0x1" + }, + "0xbeac00541d49391ed88abf392bfc1f4dea8c4143": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", + "storage": { + "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", + "0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" + }, + "balance": "0x1" + } + } +} + +``` diff --git a/cmd/evm/testdata/29/txs.json b/cmd/evm/testdata/29/txs.json new file mode 100644 index 000000000..d6743cc4d --- /dev/null +++ b/cmd/evm/testdata/29/txs.json @@ -0,0 +1,19 @@ +[ + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x2", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/3/readme.md b/cmd/evm/testdata/3/readme.md index 499f03d7a..246c58ef3 100644 --- a/cmd/evm/testdata/3/readme.md +++ b/cmd/evm/testdata/3/readme.md @@ -1,2 +1,2 @@ -These files examplify a transition where a transaction (excuted on block 5) requests +These files exemplify a transition where a transaction (executed on block 5) requests the blockhash for block `1`. diff --git a/cmd/evm/testdata/4/readme.md b/cmd/evm/testdata/4/readme.md index 08840d37b..eede41a9f 100644 --- a/cmd/evm/testdata/4/readme.md +++ b/cmd/evm/testdata/4/readme.md @@ -1,3 +1,3 @@ -These files examplify a transition where a transaction (excuted on block 5) requests +These files exemplify a transition where a transaction (executed on block 5) requests the blockhash for block `4`, but where the hash for that block is missing. It's expected that executing these should cause `exit` with errorcode `4`. diff --git a/cmd/evm/testdata/5/readme.md b/cmd/evm/testdata/5/readme.md index e2b608fac..1a84afaab 100644 --- a/cmd/evm/testdata/5/readme.md +++ b/cmd/evm/testdata/5/readme.md @@ -1 +1 @@ -These files examplify a transition where there are no transcations, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2). \ No newline at end of file +These files exemplify a transition where there are no transactions, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
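Returning to the EIP-4788 test (testdata/29) above: the two storage keys in its expected output follow directly from the contract bytecode in `alloc.json`, which reduces the timestamp modulo a `0x18000`-slot ring buffer (the `62018000 ... 06` PUSH3/MOD sequence) and offsets the root slot by the buffer length. A small sketch reproducing the keys for this test's `currentTimestamp` of `0x79e`:

```go
package main

import "fmt"

func main() {
	const bufLen = 0x18000 // ring-buffer length used by this draft of the 4788 contract
	ts := uint64(0x079e)   // currentTimestamp from env.json

	tsSlot := ts % bufLen          // timestamp marker slot
	rootSlot := ts%bufLen + bufLen // beacon-root slot

	// Prints 0x79e and 0x1879e, matching the storage keys in exp.json.
	fmt.Printf("timestamp slot: %#x\nroot slot: %#x\n", tsSlot, rootSlot)
}
```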
\ No newline at end of file diff --git a/cmd/evm/testdata/8/readme.md b/cmd/evm/testdata/8/readme.md index c991fb217..4dffdab91 100644 --- a/cmd/evm/testdata/8/readme.md +++ b/cmd/evm/testdata/8/readme.md @@ -7,7 +7,7 @@ This test contains testcases for EIP-2930, which uses transactions with access l The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the following code: `0x5854505854`: `PC ;SLOAD; POP; PC; SLOAD`. -Essentialy, this contract does `SLOAD(0)` and `SLOAD(3)`. +Essentially, this contract does `SLOAD(0)` and `SLOAD(3)`. The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md index b14464465..539478028 100644 --- a/cmd/evm/testdata/9/readme.md +++ b/cmd/evm/testdata/9/readme.md @@ -7,7 +7,7 @@ This test contains testcases for EIP-1559, which uses an new transaction type an The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. -Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. +Essentially, this contract does `SLOAD(0)` and `SLOAD(1)`. The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh index a2ea53418..8cc6aa41d 100644 --- a/cmd/evm/transition-test.sh +++ b/cmd/evm/transition-test.sh @@ -280,7 +280,7 @@ To make `t8n` apply these, the following inputs are required: - For ethash, it is `5000000000000000000` `wei`, - If this is not defined, mining rewards are not applied, - A value of `0` is valid, and causes accounts to be 'touched'. -- For each ommer, the tool needs to be given an `addres\` and a `delta`. This +- For each ommer, the tool needs to be given an `address\` and a `delta`. This is done via the `ommers` field in `env`. Note: the tool does not verify that e.g. the normal uncle rules apply, diff --git a/cmd/faucet/README.md b/cmd/faucet/README.md index 6e9b14ebc..7e857fa0e 100644 --- a/cmd/faucet/README.md +++ b/cmd/faucet/README.md @@ -12,7 +12,6 @@ First things first, the `faucet` needs to connect to an Ethereum network, for wh - `-genesis` is a path to a file containing the network `genesis.json`. 
or using: - `-goerli` with the faucet with Görli network config - - `-rinkeby` with the faucet with Rinkeby network config - `-sepolia` with the faucet with Sepolia network config - `-network` is the devp2p network id used during connection - `-bootnodes` is a list of `enode://` ids to join the network through diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index b66cc4614..e4d6ad697 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -86,7 +86,6 @@ var ( twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API") goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config") - rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config") sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config") ) @@ -140,7 +139,7 @@ func main() { log.Crit("Failed to render the faucet template", "err", err) } // Load and parse the genesis block requested by the user - genesis, err := getGenesis(*genesisFlag, *goerliFlag, *rinkebyFlag, *sepoliaFlag) + genesis, err := getGenesis(*genesisFlag, *goerliFlag, *sepoliaFlag) if err != nil { log.Crit("Failed to parse genesis config", "err", err) } @@ -269,11 +268,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network ui } } // Attach to the client and retrieve and interesting metadatas - api, err := stack.Attach() - if err != nil { - stack.Close() - return nil, err - } + api := stack.Attach() client := ethclient.NewClient(api) return &faucet{ @@ -880,7 +875,7 @@ func authNoAuth(url string) (string, string, common.Address, error) { } // getGenesis returns a genesis based on input args -func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool, sepoliaFlag bool) (*core.Genesis, error) { +func getGenesis(genesisFlag string, goerliFlag bool, sepoliaFlag bool) (*core.Genesis, error) { switch { case genesisFlag != "": var genesis core.Genesis @@ -888,8 +883,6 @@ func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool, sepoliaFl return &genesis, err case goerliFlag: return core.DefaultGoerliGenesisBlock(), nil - case rinkebyFlag: - return core.DefaultRinkebyGenesisBlock(), nil case sepoliaFlag: return core.DefaultSepoliaGenesisBlock(), nil default: diff --git a/cmd/geth/attach_test.go b/cmd/geth/attach_test.go index 7c5f95175..91007ccf6 100644 --- a/cmd/geth/attach_test.go +++ b/cmd/geth/attach_test.go @@ -61,7 +61,7 @@ func TestRemoteDbWithHeaders(t *testing.T) { } func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) { - var ok uint32 + var ok atomic.Uint32 server := &http.Server{ Addr: "localhost:0", Handler: &testHandler{func(w http.ResponseWriter, r *http.Request) { @@ -72,12 +72,12 @@ func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) { if have, want := r.Header.Get("second"), "two"; have != want { t.Fatalf("missing header, have %v want %v", have, want) } - atomic.StoreUint32(&ok, 1) + ok.Store(1) }}} go server.Serve(ln) defer server.Close() runGeth(t, gethArgs...).WaitExit() - if atomic.LoadUint32(&ok) != 1 { + if ok.Load() != 1 { t.Fatal("Test fail, expected invocation to succeed") } } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index b27ce5571..fad2c71e6 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -39,7 +39,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" - 
"github.com/ethereum/go-ethereum/trie" "github.com/urfave/cli/v2" ) @@ -49,7 +48,10 @@ var ( Name: "init", Usage: "Bootstrap and initialize a new genesis block", ArgsUsage: "", - Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag}, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.CachePreimagesFlag, + utils.StateSchemeFlag, + }, utils.DatabasePathFlags), Description: ` The init command initializes a new genesis block and definition for the network. This is a destructive action and changes the network in which you will be @@ -94,6 +96,9 @@ if one is set. Otherwise it prints the genesis from the datadir.`, utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBOrganizationFlag, utils.TxLookupLimitFlag, + utils.TransactionHistoryFlag, + utils.StateSchemeFlag, + utils.StateHistoryFlag, }, utils.DatabasePathFlags), Description: ` The import command imports blocks from an RLP-encoded form. The form can be one file @@ -110,6 +115,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`, Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, + utils.StateSchemeFlag, }, utils.DatabasePathFlags), Description: ` Requires a first argument of the file to write to. @@ -159,6 +165,7 @@ It's deprecated, please use "geth db export" instead. utils.IncludeIncompletesFlag, utils.StartKeyFlag, utils.DumpLimitFlag, + utils.StateSchemeFlag, }, utils.DatabasePathFlags), Description: ` This command dumps out the state for a given block (or latest, if none provided). @@ -195,14 +202,15 @@ func initGenesis(ctx *cli.Context) error { if err != nil { utils.Fatalf("Failed to open database: %v", err) } - triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{ - Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), - }) + defer chaindb.Close() + + triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false) + defer triedb.Close() + _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } - chaindb.Close() log.Info("Successfully wrote genesis state", "database", name, "hash", hash) } return nil @@ -241,7 +249,7 @@ func dumpGenesis(ctx *cli.Context) error { if ctx.IsSet(utils.DataDirFlag.Name) { utils.Fatalf("no existing datadir at %s", stack.Config().DataDir) } - utils.Fatalf("no network preset provided. 
no exisiting genesis in the default datadir") + utils.Fatalf("no network preset provided, no existing genesis in the default datadir") return nil } @@ -261,16 +269,16 @@ func importChain(ctx *cli.Context) error { defer db.Close() // Start periodically gathering memory profiles - var peakMemAlloc, peakMemSys uint64 + var peakMemAlloc, peakMemSys atomic.Uint64 go func() { stats := new(runtime.MemStats) for { runtime.ReadMemStats(stats) - if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc { - atomic.StoreUint64(&peakMemAlloc, stats.Alloc) + if peakMemAlloc.Load() < stats.Alloc { + peakMemAlloc.Store(stats.Alloc) } - if atomic.LoadUint64(&peakMemSys) < stats.Sys { - atomic.StoreUint64(&peakMemSys, stats.Sys) + if peakMemSys.Load() < stats.Sys { + peakMemSys.Store(stats.Sys) } time.Sleep(5 * time.Second) } @@ -303,8 +311,8 @@ func importChain(ctx *cli.Context) error { mem := new(runtime.MemStats) runtime.ReadMemStats(mem) - fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024) - fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024) + fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024) + fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024) fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000) fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) @@ -465,10 +473,10 @@ func dump(ctx *cli.Context) error { if err != nil { return err } - config := &trie.Config{ - Preimages: true, // always enable preimage lookup - } - state, err := state.New(root, state.NewDatabaseWithConfig(db, config), nil) + triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup + defer triedb.Close() + + state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { return err } diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 42ded4932..2761ee745 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -22,16 +22,17 @@ import ( "fmt" "os" "reflect" + "runtime" + "strings" "unicode" - "github.com/urfave/cli/v2" - "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/external" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/ethapi" @@ -42,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/naoina/toml" + "github.com/urfave/cli/v2" ) var ( @@ -170,8 +172,26 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { v := ctx.Uint64(utils.OverrideCancun.Name) cfg.Eth.OverrideCancun = &v } + if ctx.IsSet(utils.OverrideVerkle.Name) { + v := ctx.Uint64(utils.OverrideVerkle.Name) + cfg.Eth.OverrideVerkle = &v + } backend, eth := utils.RegisterEthService(stack, &cfg.Eth) + // Create gauge with geth system and build information + if eth != nil { // The 'eth' backend may be nil in light mode + var protos []string + for _, p := range eth.Protocols() { + protos = append(protos, 
fmt.Sprintf("%v/%d", p.Name, p.Version)) + } + metrics.NewRegisteredGaugeInfo("geth/info", nil).Update(metrics.GaugeInfoValue{ + "arch": runtime.GOARCH, + "os": runtime.GOOS, + "version": cfg.Node.Version, + "eth_protocols": strings.Join(protos, ","), + }) + } + // Configure log filter RPC API. filterSystem := utils.RegisterFilterAPI(stack, backend, &cfg.Eth) @@ -189,6 +209,22 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync { utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name)) } + + // Start the dev mode if requested, or launch the engine API for + // interacting with external consensus client. + if ctx.IsSet(utils.DeveloperFlag.Name) { + simBeacon, err := catalyst.NewSimulatedBeacon(ctx.Uint64(utils.DeveloperPeriodFlag.Name), eth) + if err != nil { + utils.Fatalf("failed to register dev mode catalyst service: %v", err) + } + catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon) + stack.RegisterLifecycle(simBeacon) + } else if cfg.Eth.SyncMode != downloader.LightSync { + err := catalyst.Register(stack, eth) + if err != nil { + utils.Fatalf("failed to register catalyst service: %v", err) + } + } return stack, backend } @@ -272,6 +308,10 @@ func deprecated(field string) bool { return true case "ethconfig.Config.EWASMInterpreter": return true + case "ethconfig.Config.TrieCleanCacheJournal": + return true + case "ethconfig.Config.TrieCleanCacheRejournal": + return true default: return false } diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index aeee6f9c9..526ede961 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -75,10 +75,7 @@ func localConsole(ctx *cli.Context) error { defer stack.Close() // Attach to the newly started node and create the JavaScript console. 
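The `geth/info` gauge registered above is a static-metadata metric rather than a counter. A minimal standalone sketch of the same registration pattern, using a hypothetical metric name `demo/info`:

```go
package main

import (
	"runtime"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // metrics collection is disabled by default
	// Register a gauge holding static string labels on the default registry,
	// as makeFullNode does with arch/os/version/eth_protocols.
	info := metrics.NewRegisteredGaugeInfo("demo/info", nil)
	info.Update(metrics.GaugeInfoValue{
		"arch": runtime.GOARCH,
		"os":   runtime.GOOS,
	})
}
```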
- client, err := stack.Attach() - if err != nil { - return fmt.Errorf("failed to attach to the inproc geth: %v", err) - } + client := stack.Attach() config := console.Config{ DataDir: utils.MakeDataDir(ctx), DocRoot: ctx.String(utils.JSpathFlag.Name), diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index b409b1926..a1868eb8c 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -151,6 +151,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, + utils.StateSchemeFlag, }, utils.NetworkFlags, utils.DatabasePathFlags), Description: "This command looks up the specified database key from the database.", } @@ -482,6 +483,9 @@ func dbDumpTrie(ctx *cli.Context) error { db := utils.MakeChainDatabase(ctx, stack, true) defer db.Close() + triedb := utils.MakeTrieDatabase(ctx, db, false, true) + defer triedb.Close() + var ( state []byte storage []byte @@ -515,12 +519,16 @@ func dbDumpTrie(ctx *cli.Context) error { } } id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage)) - theTrie, err := trie.New(id, trie.NewDatabase(db)) + theTrie, err := trie.New(id, triedb) + if err != nil { + return err + } + trieIt, err := theTrie.NodeIterator(start) if err != nil { return err } var count int64 - it := trie.NewIterator(theTrie.NodeIterator(start)) + it := trie.NewIterator(trieIt) for it.Next() { if max > 0 && count == max { fmt.Printf("Exiting after %d values\n", count) diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index ffe8176b0..2506b42d1 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) { { // Can't start pebble on top of leveldb initArgs: []string{"--db.engine", "leveldb"}, execArgs: []string{"--db.engine", "pebble"}, - execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, + execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, }, { // Can't start leveldb on top of pebble initArgs: []string{"--db.engine", "pebble"}, execArgs: []string{"--db.engine", "leveldb"}, - execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, + execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, }, { // Reject invalid backend choice initArgs: []string{"--db.engine", "mssql"}, diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go index 607b454ea..b36c3265a 100644 --- a/cmd/geth/les_test.go +++ b/cmd/geth/les_test.go @@ -107,10 +107,10 @@ func ipcEndpoint(ipcPath, datadir string) string { // but windows require pipes to sit in "\\.\pipe\". Therefore, to run several // nodes simultaneously, we need to distinguish between them, which we do by // the pipe filename instead of folder. -var nextIPC = uint32(0) +var nextIPC atomic.Uint32 func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { - ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1)) + ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1)) args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...) 
t.Logf("Starting %v with rpc: %v", name, args) @@ -146,13 +146,13 @@ func startLightServer(t *testing.T) *gethrpc { t.Logf("Importing keys to geth") runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit() account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" - server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4") + server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--discv4=false", "--nat=extip:127.0.0.1", "--verbosity=4") return server } func startClient(t *testing.T, name string) *gethrpc { datadir := initGeth(t) - return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4") + return startGethWithIpc(t, name, "--datadir", datadir, "--discv4=false", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4") } func TestPriorityClient(t *testing.T) { diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 8ff471028..ec8d0fa93 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -40,6 +40,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" + "go.uber.org/automaxprocs/maxprocs" "github.com/ethereum/go-ethereum/plugins" "github.com/ethereum/go-ethereum/plugins/wrappers/backendwrapper" @@ -71,6 +72,7 @@ var ( utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.OverrideCancun, + utils.OverrideVerkle, utils.EnablePersonal, utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, @@ -83,21 +85,24 @@ var ( utils.TxPoolAccountQueueFlag, utils.TxPoolGlobalQueueFlag, utils.TxPoolLifetimeFlag, + utils.BlobPoolDataDirFlag, + utils.BlobPoolDataCapFlag, + utils.BlobPoolPriceBumpFlag, utils.SyncModeFlag, utils.SyncTargetFlag, utils.ExitWhenSyncedFlag, utils.GCModeFlag, utils.SnapshotFlag, utils.TxLookupLimitFlag, + utils.TransactionHistoryFlag, + utils.StateSchemeFlag, + utils.StateHistoryFlag, utils.LightServeFlag, utils.LightIngressFlag, utils.LightEgressFlag, utils.LightMaxPeersFlag, utils.LightNoPruneFlag, utils.LightKDFFlag, - utils.UltraLightServersFlag, - utils.UltraLightFractionFlag, - utils.UltraLightOnlyAnnounceFlag, utils.LightNoSyncServeFlag, utils.EthRequiredBlocksFlag, utils.LegacyWhitelistFlag, @@ -127,14 +132,16 @@ var ( utils.MinerNewPayloadTimeout, utils.NATFlag, utils.NoDiscoverFlag, + utils.DiscoveryV4Flag, utils.DiscoveryV5Flag, + utils.LegacyDiscoveryV5Flag, utils.NetrestrictFlag, utils.NodeKeyFileFlag, utils.NodeKeyHexFlag, utils.DNSDiscoveryFlag, utils.DeveloperFlag, - utils.DeveloperPeriodFlag, utils.DeveloperGasLimitFlag, + utils.DeveloperPeriodFlag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, @@ -174,6 +181,8 @@ var ( utils.RPCGlobalEVMTimeoutFlag, utils.RPCGlobalTxFeeCapFlag, utils.AllowUnprotectedTxs, + utils.BatchRequestLimit, + utils.BatchResponseMaxSize, } metricsFlags = []cli.Flag{ @@ -243,6 +252,7 @@ func init() { ) app.Before = func(ctx *cli.Context) error { + maxprocs.Set() // Automatically set GOMAXPROCS to match Linux 
container CPU quota. flags.MigrateGlobalFlags(ctx) return debug.Setup(ctx) } @@ -265,15 +275,15 @@ func main() { func prepare(ctx *cli.Context) { // If we're running a known preset, log it for convenience. switch { - case ctx.IsSet(utils.RinkebyFlag.Name): - log.Info("Starting Geth on Rinkeby testnet...") - case ctx.IsSet(utils.GoerliFlag.Name): log.Info("Starting Geth on Görli testnet...") case ctx.IsSet(utils.SepoliaFlag.Name): log.Info("Starting Geth on Sepolia testnet...") + case ctx.IsSet(utils.HoleskyFlag.Name): + log.Info("Starting Geth on Holesky testnet...") + case ctx.IsSet(utils.DeveloperFlag.Name): log.Info("Starting Geth in ephemeral dev mode...") log.Warn(`You are running Geth in --dev mode. Please note the following: @@ -298,8 +308,8 @@ func prepare(ctx *cli.Context) { // If we're a full node on mainnet without --cache specified, bump default cache allowance if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) { // Make sure we're not on any supported preconfigured testnet either - if !ctx.IsSet(utils.SepoliaFlag.Name) && - !ctx.IsSet(utils.RinkebyFlag.Name) && + if !ctx.IsSet(utils.HoleskyFlag.Name) && + !ctx.IsSet(utils.SepoliaFlag.Name) && !ctx.IsSet(utils.GoerliFlag.Name) && !ctx.IsSet(utils.DeveloperFlag.Name) { // Nope, we're really on mainnet. Bump that cache up! @@ -375,10 +385,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon stack.AccountManager().Subscribe(events) // Create a client to interact with local geth node. - rpcClient, err := stack.Attach() - if err != nil { - utils.Fatalf("Failed to attach to self: %v", err) - } + rpcClient := stack.Attach() ethClient := ethclient.NewClient(rpcClient) go func() { @@ -439,7 +446,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon } // Start auxiliary services if enabled - if ctx.Bool(utils.MiningEnabledFlag.Name) || ctx.Bool(utils.DeveloperFlag.Name) { + if ctx.Bool(utils.MiningEnabledFlag.Name) { // Mining only makes sense if a full Ethereum node is running if ctx.String(utils.SyncModeFlag.Name) == "light" { utils.Fatalf("Light clients do not support mining") @@ -450,7 +457,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon } // Set the gas price to the limits from the CLI and start mining gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) - ethBackend.TxPool().SetGasPrice(gasprice) + ethBackend.TxPool().SetGasTip(gasprice) if err := ethBackend.StartMining(); err != nil { utils.Fatalf("Failed to start mining: %v", err) } diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 0759341fd..5e1c78473 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -50,7 +50,6 @@ var ( ArgsUsage: "", Action: pruneState, Flags: flags.Merge([]cli.Flag{ - utils.CacheTrieJournalFlag, utils.BloomFilterSizeFlag, }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` @@ -62,10 +61,7 @@ two version states are available: genesis and the specific one. The default pruning target is the HEAD-127 state. -WARNING: It's necessary to delete the trie clean cache after the pruning. -If you specify another directory for the trie clean cache via "--cache.trie.journal" -during the use of Geth, please also specify it here for correct deletion. Otherwise -the trie clean cache with default directory will be deleted. +WARNING: offline pruning is only supported in hash mode (--state.scheme=hash).
`, }, { @@ -73,7 +69,9 @@ the trie clean cache with default directory will be deleted. Usage: "Recalculate state hash based on the snapshot for verification", ArgsUsage: "", Action: verifyState, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.StateSchemeFlag, + }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` geth snapshot verify-state will traverse the whole accounts and storages set based on the specified @@ -108,7 +106,9 @@ information about the specified address. Usage: "Traverse the state with given root hash and perform quick verification", ArgsUsage: "", Action: traverseState, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.StateSchemeFlag, + }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` geth snapshot traverse-state will traverse the whole state from the given state root and will abort if any @@ -123,7 +123,9 @@ It's also usable without snapshot enabled. Usage: "Traverse the state with given root hash and perform detailed verification", ArgsUsage: "", Action: traverseRawState, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.StateSchemeFlag, + }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` geth snapshot traverse-rawstate will traverse the whole state from the given root and will abort if any referenced @@ -144,6 +146,7 @@ It's also usable without snapshot enabled. utils.ExcludeStorageFlag, utils.StartKeyFlag, utils.DumpLimitFlag, + utils.StateSchemeFlag, }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` This command is semantically equivalent to 'geth dump', but uses the snapshots @@ -160,15 +163,17 @@ block is used. // Deprecation: this command should be deprecated once the hash-based // scheme is deprecated. 
func pruneState(ctx *cli.Context) error { - stack, config := makeConfigNode(ctx) + stack, _ := makeConfigNode(ctx) defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, false) defer chaindb.Close() + if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme { + log.Crit("Offline pruning is not required for path scheme") + } prunerconfig := pruner.Config{ Datadir: stack.ResolvePath(""), - Cachedir: stack.ResolvePath(config.Eth.TrieCleanCacheJournal), BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name), } pruner, err := pruner.NewPruner(chaindb, prunerconfig) @@ -207,13 +212,16 @@ func verifyState(ctx *cli.Context) error { log.Error("Failed to load head block") return errors.New("no head block") } - snapconfig := snapshot.Config{ + triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true) + defer triedb.Close() + + snapConfig := snapshot.Config{ CacheSize: 256, Recovery: false, NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, chaindb, trie.NewDatabase(chaindb), headBlock.Root()) + snaptree, err := snapshot.New(snapConfig, chaindb, triedb, headBlock.Root()) if err != nil { log.Error("Failed to open snapshot tree", "err", err) return err @@ -255,6 +263,11 @@ func traverseState(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true) + defer chaindb.Close() + + triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true) + defer triedb.Close() + headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") @@ -279,7 +292,6 @@ func traverseState(ctx *cli.Context) error { root = headBlock.Root() log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } - triedb := trie.NewDatabase(chaindb) t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb) if err != nil { log.Error("Failed to open trie", "root", root, "err", err) @@ -292,7 +304,12 @@ func traverseState(ctx *cli.Context) error { lastReport time.Time start = time.Now() ) - accIter := trie.NewIterator(t.NodeIterator(nil)) + acctIt, err := t.NodeIterator(nil) + if err != nil { + log.Error("Failed to open iterator", "root", root, "err", err) + return err + } + accIter := trie.NewIterator(acctIt) for accIter.Next() { accounts += 1 var acc types.StateAccount @@ -307,7 +324,12 @@ func traverseState(ctx *cli.Context) error { log.Error("Failed to open storage trie", "root", acc.Root, "err", err) return err } - storageIter := trie.NewIterator(storageTrie.NodeIterator(nil)) + storageIt, err := storageTrie.NodeIterator(nil) + if err != nil { + log.Error("Failed to open storage iterator", "root", acc.Root, "err", err) + return err + } + storageIter := trie.NewIterator(storageIt) for storageIter.Next() { slots += 1 } @@ -345,6 +367,11 @@ func traverseRawState(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true) + defer chaindb.Close() + + triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true) + defer triedb.Close() + headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") @@ -369,7 +396,6 @@ func traverseRawState(ctx *cli.Context) error { root = headBlock.Root() log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } - triedb := trie.NewDatabase(chaindb) t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb) if err != nil { log.Error("Failed to open trie", "root", root, "err", err) @@ -385,7 +411,16 @@ func traverseRawState(ctx *cli.Context) error { hasher = 
crypto.NewKeccakState() got = make([]byte, 32) ) - accIter := t.NodeIterator(nil) + accIter, err := t.NodeIterator(nil) + if err != nil { + log.Error("Failed to open iterator", "root", root, "err", err) + return err + } + reader, err := triedb.Reader(root) + if err != nil { + log.Error("State is non-existent", "root", root) + return nil + } for accIter.Next(true) { nodes += 1 node := accIter.Hash() @@ -393,7 +428,7 @@ func traverseRawState(ctx *cli.Context) error { // Check the present for non-empty hash node(embedded node doesn't // have their own hash). if node != (common.Hash{}) { - blob := rawdb.ReadLegacyTrieNode(chaindb, node) + blob, _ := reader.Node(common.Hash{}, accIter.Path(), node) if len(blob) == 0 { log.Error("Missing trie node(account)", "hash", node) return errors.New("missing account") @@ -422,7 +457,11 @@ func traverseRawState(ctx *cli.Context) error { log.Error("Failed to open storage trie", "root", acc.Root, "err", err) return errors.New("missing storage trie") } - storageIter := storageTrie.NodeIterator(nil) + storageIter, err := storageTrie.NodeIterator(nil) + if err != nil { + log.Error("Failed to open storage iterator", "root", acc.Root, "err", err) + return err + } for storageIter.Next(true) { nodes += 1 node := storageIter.Hash() @@ -430,7 +469,7 @@ func traverseRawState(ctx *cli.Context) error { // Check the presence for non-empty hash node(embedded node doesn't // have their own hash). if node != (common.Hash{}) { - blob := rawdb.ReadLegacyTrieNode(chaindb, node) + blob, _ := reader.Node(common.BytesToHash(accIter.LeafKey()), storageIter.Path(), node) if len(blob) == 0 { log.Error("Missing trie node(storage)", "hash", node) return errors.New("missing storage") @@ -490,13 +529,16 @@ func dumpState(ctx *cli.Context) error { if err != nil { return err } + triedb := utils.MakeTrieDatabase(ctx, db, false, true) + defer triedb.Close() + snapConfig := snapshot.Config{ CacheSize: 256, Recovery: false, NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapConfig, db, trie.NewDatabase(db), root) + snaptree, err := snapshot.New(snapConfig, db, triedb, root) if err != nil { return err } @@ -517,14 +559,14 @@ func dumpState(ctx *cli.Context) error { Root common.Hash `json:"root"` }{root}) for accIt.Next() { - account, err := snapshot.FullAccount(accIt.Account()) + account, err := types.FullAccount(accIt.Account()) if err != nil { return err } da := &state.DumpAccount{ Balance: account.Balance.String(), Nonce: account.Nonce, - Root: account.Root, + Root: account.Root.Bytes(), CodeHash: account.CodeHash, SecureKey: accIt.Hash().Bytes(), } diff --git a/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig b/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig new file mode 100644 index 000000000..eaea9f905 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs-new/data.json.minisig @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RUQkliYstQBOKLK05Sy5f3bVRMBqJT26ABo6Vbp3BNJAVjejoqYCu4GWE/+7qcDfHBqYIniDCbFIUvYEnOHxV6vZ93wO1xJWDQw= +trusted comment: timestamp:1693986492 file:data.json hashed +6Fdw2H+W1ZXK7QXSF77Z5AWC7+AEFAfDmTSxNGylU5HLT1AuSJQmxslj+VjtUBamYCvOuET7plbXza942AlWDw== diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index e856eaeb9..9ba2b4167 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -74,7 +74,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error switch node := root.(type) { case *verkle.InternalNode: for i, child := range node.Children() { - childC := 
child.ComputeCommitment().Bytes() + childC := child.Commit().Bytes() childS, err := resolver(childC[:]) if bytes.Equal(childC[:], zero[:]) { @@ -86,7 +86,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error // depth is set to 0, the tree isn't rebuilt so it's not a problem childN, err := verkle.ParseNode(childS, 0, childC[:]) if err != nil { - return fmt.Errorf("decode error child %x in db: %w", child.ComputeCommitment().Bytes(), err) + return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err) } if err := checkChildren(childN, resolver); err != nil { return fmt.Errorf("%x%w", i, err) // write the path to the erroring node @@ -100,7 +100,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error return nil } } - return errors.New("Both balance and nonce are 0") + return errors.New("both balance and nonce are 0") case verkle.Empty: // nothing to do default: diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go index bd4d820a7..4458ab5c0 100644 --- a/cmd/geth/version_check_test.go +++ b/cmd/geth/version_check_test.go @@ -30,17 +30,24 @@ import ( ) func TestVerification(t *testing.T) { - // Signatures generated with `minisign` - t.Run("minisig", func(t *testing.T) { - // For this test, the pubkey is in testdata/minisign.pub + // Signatures generated with `minisign`. Legacy format, not pre-hashed file. + t.Run("minisig-legacy", func(t *testing.T) { + // For this test, the pubkey is in testdata/vcheck/minisign.pub // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" testVerification(t, pub, "./testdata/vcheck/minisig-sigs/") }) + t.Run("minisig-new", func(t *testing.T) { + // For this test, the pubkey is in testdata/vcheck/minisign.pub + // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) + // `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig` + pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" + testVerification(t, pub, "./testdata/vcheck/minisig-sigs-new/") + }) // Signatures generated with `signify-openbsd` t.Run("signify-openbsd", func(t *testing.T) { t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2") - // For this test, the pubkey is in testdata/signifykey.pub + // For this test, the pubkey is in testdata/vcheck/signifykey.pub // (the privkey is `signifykey.sec`, if we want to expand this test. 
Password 'test' ) pub := "RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/" testVerification(t, pub, "./testdata/vcheck/signify-sigs/") @@ -58,6 +65,9 @@ func testVerification(t *testing.T, pubkey, sigdir string) { if err != nil { t.Fatal(err) } + if len(files) == 0 { + t.Fatal("Missing tests") + } for _, f := range files { sig, err := os.ReadFile(filepath.Join(sigdir, f.Name())) if err != nil { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 8e1d637e8..b04e468d6 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -26,6 +26,7 @@ import ( "fmt" "math" "math/big" + "net" "net/http" "os" "path/filepath" @@ -41,13 +42,13 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/eth" - ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst" + "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" @@ -60,7 +61,6 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/les" - lescatalyst "github.com/ethereum/go-ethereum/les/catalyst" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" @@ -74,6 +74,9 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" pcsclite "github.com/gballet/go-libpcsclite" gopsutil "github.com/shirou/gopsutil/mem" "github.com/urfave/cli/v2" @@ -141,7 +144,7 @@ var ( } NetworkIdFlag = &cli.Uint64Flag{ Name: "networkid", - Usage: "Explicitly set network id (integer)(For testnets: use --rinkeby, --goerli, --sepolia instead)", + Usage: "Explicitly set network id (integer)(For testnets: use --goerli, --sepolia, --holesky instead)", Value: ethconfig.Defaults.NetworkId, Category: flags.EthCategory, } @@ -150,11 +153,6 @@ var ( Usage: "Ethereum mainnet", Category: flags.EthCategory, } - RinkebyFlag = &cli.BoolFlag{ - Name: "rinkeby", - Usage: "Rinkeby network: pre-configured proof-of-authority test network", - Category: flags.EthCategory, - } GoerliFlag = &cli.BoolFlag{ Name: "goerli", Usage: "Görli network: pre-configured proof-of-authority test network", @@ -165,14 +163,18 @@ var ( Usage: "Sepolia network: pre-configured proof-of-work test network", Category: flags.EthCategory, } - + HoleskyFlag = &cli.BoolFlag{ + Name: "holesky", + Usage: "Holesky network: pre-configured proof-of-stake test network", + Category: flags.EthCategory, + } // Dev mode DeveloperFlag = &cli.BoolFlag{ Name: "dev", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", Category: flags.DevCategory, } - DeveloperPeriodFlag = &cli.IntFlag{ + DeveloperPeriodFlag = &cli.Uint64Flag{ Name: "dev.period", Usage: "Block period to use in developer mode (0 = mine only if transaction pending)", Category: flags.DevCategory, @@ -231,30 
+233,12 @@ var ( } defaultSyncMode = ethconfig.Defaults.SyncMode - SyncModeFlag = &flags.TextMarshalerFlag{ - Name: "syncmode", - Usage: `Blockchain sync mode ("snap", "full" or "light")`, - Value: &defaultSyncMode, - Category: flags.EthCategory, - } - GCModeFlag = &cli.StringFlag{ - Name: "gcmode", - Usage: `Blockchain garbage collection mode ("full", "archive")`, - Value: "full", - Category: flags.EthCategory, - } - SnapshotFlag = &cli.BoolFlag{ + SnapshotFlag = &cli.BoolFlag{ Name: "snapshot", Usage: `Enables snapshot-database mode (default = enable)`, Value: true, Category: flags.EthCategory, } - TxLookupLimitFlag = &cli.Uint64Flag{ - Name: "txlookuplimit", - Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", - Value: ethconfig.Defaults.TxLookupLimit, - Category: flags.EthCategory, - } LightKDFFlag = &cli.BoolFlag{ Name: "lightkdf", Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", @@ -265,11 +249,6 @@ var ( Usage: "Comma separated block number-to-hash mappings to require for peering (=)", Category: flags.EthCategory, } - LegacyWhitelistFlag = &cli.StringFlag{ - Name: "whitelist", - Usage: "Comma separated block number-to-hash mappings to enforce (=) (deprecated in favor of --eth.requiredblocks)", - Category: flags.DeprecatedCategory, - } BloomFilterSizeFlag = &cli.Uint64Flag{ Name: "bloomfilter.size", Usage: "Megabytes of memory allocated to bloom-filter for pruning", @@ -281,6 +260,41 @@ var ( Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + OverrideVerkle = &cli.Uint64Flag{ + Name: "override.verkle", + Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } + SyncModeFlag = &flags.TextMarshalerFlag{ + Name: "syncmode", + Usage: `Blockchain sync mode ("snap", "full" or "light")`, + Value: &defaultSyncMode, + Category: flags.StateCategory, + } + GCModeFlag = &cli.StringFlag{ + Name: "gcmode", + Usage: `Blockchain garbage collection mode, only relevant in state.scheme=hash ("full", "archive")`, + Value: "full", + Category: flags.StateCategory, + } + StateSchemeFlag = &cli.StringFlag{ + Name: "state.scheme", + Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", + Value: rawdb.HashScheme, + Category: flags.StateCategory, + } + StateHistoryFlag = &cli.Uint64Flag{ + Name: "history.state", + Usage: "Number of recent blocks to retain state history for (default = 90,000 blocks, 0 = entire chain)", + Value: ethconfig.Defaults.StateHistory, + Category: flags.StateCategory, + } + TransactionHistoryFlag = &cli.Uint64Flag{ + Name: "history.transactions", + Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", + Value: ethconfig.Defaults.TransactionHistory, + Category: flags.StateCategory, + } // Light server and client settings LightServeFlag = &cli.IntFlag{ Name: "light.serve", @@ -306,23 +320,6 @@ var ( Value: ethconfig.Defaults.LightPeers, Category: flags.LightCategory, } - UltraLightServersFlag = &cli.StringFlag{ - Name: "ulc.servers", - Usage: "List of trusted ultra-light servers", - Value: strings.Join(ethconfig.Defaults.UltraLightServers, ","), - Category: flags.LightCategory, - } - UltraLightFractionFlag = &cli.IntFlag{ - Name: "ulc.fraction", - Usage: "Minimum % of trusted ultra-light servers required to announce a new head", - Value: ethconfig.Defaults.UltraLightFraction, - Category: 
flags.LightCategory, - } - UltraLightOnlyAnnounceFlag = &cli.BoolFlag{ - Name: "ulc.onlyannounce", - Usage: "Ultra light server sends announcements only", - Category: flags.LightCategory, - } LightNoPruneFlag = &cli.BoolFlag{ Name: "light.nopruning", Usage: "Disable ancient light chain data pruning", @@ -347,18 +344,18 @@ var ( TxPoolJournalFlag = &cli.StringFlag{ Name: "txpool.journal", Usage: "Disk journal for local transaction to survive node restarts", - Value: txpool.DefaultConfig.Journal, + Value: ethconfig.Defaults.TxPool.Journal, Category: flags.TxPoolCategory, } TxPoolRejournalFlag = &cli.DurationFlag{ Name: "txpool.rejournal", Usage: "Time interval to regenerate the local transaction journal", - Value: txpool.DefaultConfig.Rejournal, + Value: ethconfig.Defaults.TxPool.Rejournal, Category: flags.TxPoolCategory, } TxPoolPriceLimitFlag = &cli.Uint64Flag{ Name: "txpool.pricelimit", - Usage: "Minimum gas price limit to enforce for acceptance into the pool", + Usage: "Minimum gas price tip to enforce for acceptance into the pool", Value: ethconfig.Defaults.TxPool.PriceLimit, Category: flags.TxPoolCategory, } @@ -398,7 +395,25 @@ var ( Value: ethconfig.Defaults.TxPool.Lifetime, Category: flags.TxPoolCategory, } - + // Blob transaction pool settings + BlobPoolDataDirFlag = &cli.StringFlag{ + Name: "blobpool.datadir", + Usage: "Data directory to store blob transactions in", + Value: ethconfig.Defaults.BlobPool.Datadir, + Category: flags.BlobPoolCategory, + } + BlobPoolDataCapFlag = &cli.Uint64Flag{ + Name: "blobpool.datacap", + Usage: "Disk space to allocate for pending blob transactions (soft limit)", + Value: ethconfig.Defaults.BlobPool.Datacap, + Category: flags.BlobPoolCategory, + } + BlobPoolPriceBumpFlag = &cli.Uint64Flag{ + Name: "blobpool.pricebump", + Usage: "Price bump percentage to replace an already existing blob transaction", + Value: ethconfig.Defaults.BlobPool.PriceBump, + Category: flags.BlobPoolCategory, + } // Performance tuning settings CacheFlag = &cli.IntFlag{ Name: "cache", @@ -418,18 +433,6 @@ var ( Value: 15, Category: flags.PerfCategory, } - CacheTrieJournalFlag = &cli.StringFlag{ - Name: "cache.trie.journal", - Usage: "Disk journal directory for trie cache to survive node restarts", - Value: ethconfig.Defaults.TrieCleanCacheJournal, - Category: flags.PerfCategory, - } - CacheTrieRejournalFlag = &cli.DurationFlag{ - Name: "cache.trie.rejournal", - Usage: "Time interval to regenerate the trie cache journal", - Value: ethconfig.Defaults.TrieCleanCacheRejournal, - Category: flags.PerfCategory, - } CacheGCFlag = &cli.IntFlag{ Name: "cache.gc", Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)", @@ -726,6 +729,18 @@ var ( Usage: "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC", Category: flags.APICategory, } + BatchRequestLimit = &cli.IntFlag{ + Name: "rpc.batch-request-limit", + Usage: "Maximum number of requests in a batch", + Value: node.DefaultConfig.BatchRequestLimit, + Category: flags.APICategory, + } + BatchResponseMaxSize = &cli.IntFlag{ + Name: "rpc.batch-response-max-size", + Usage: "Maximum number of bytes returned from a batched call", + Value: node.DefaultConfig.BatchResponseMaxSize, + Category: flags.APICategory, + } EnablePersonal = &cli.BoolFlag{ Name: "rpc.enabledeprecatedpersonal", Usage: "Enables the (deprecated) personal namespace", @@ -778,8 +793,16 @@ var ( Usage: "Disables the peer discovery mechanism (manual peer addition)", Category: 
flags.NetworkingCategory, } + DiscoveryV4Flag = &cli.BoolFlag{ + Name: "discovery.v4", + Aliases: []string{"discv4"}, + Usage: "Enables the V4 discovery mechanism", + Category: flags.NetworkingCategory, + Value: true, + } DiscoveryV5Flag = &cli.BoolFlag{ - Name: "v5disc", + Name: "discovery.v5", + Aliases: []string{"discv5"}, Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism", Category: flags.NetworkingCategory, } @@ -939,9 +962,9 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. var ( // TestnetFlags is the flag group of all built-in supported testnets. TestnetFlags = []cli.Flag{ - RinkebyFlag, GoerliFlag, SepoliaFlag, + HoleskyFlag, } // NetworkFlags is the flag group of all built-in supported networks. NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...) @@ -966,15 +989,15 @@ func init() { // then a subdirectory of the specified datadir will be used. func MakeDataDir(ctx *cli.Context) string { if path := ctx.String(DataDirFlag.Name); path != "" { - if ctx.Bool(RinkebyFlag.Name) { - return filepath.Join(path, "rinkeby") - } if ctx.Bool(GoerliFlag.Name) { return filepath.Join(path, "goerli") } if ctx.Bool(SepoliaFlag.Name) { return filepath.Join(path, "sepolia") } + if ctx.Bool(HoleskyFlag.Name) { + return filepath.Join(path, "holesky") + } return path } Fatalf("Cannot determine default data directory, please set manually (--datadir)") @@ -1021,10 +1044,10 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { switch { case ctx.IsSet(BootnodesFlag.Name): urls = SplitAndTrim(ctx.String(BootnodesFlag.Name)) + case ctx.Bool(HoleskyFlag.Name): + urls = params.HoleskyBootnodes case ctx.Bool(SepoliaFlag.Name): urls = params.SepoliaBootnodes - case ctx.Bool(RinkebyFlag.Name): - urls = params.RinkebyBootnodes case ctx.Bool(GoerliFlag.Name): urls = params.GoerliBootnodes } @@ -1149,6 +1172,14 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) { if ctx.IsSet(AllowUnprotectedTxs.Name) { cfg.AllowUnprotectedTxs = ctx.Bool(AllowUnprotectedTxs.Name) } + + if ctx.IsSet(BatchRequestLimit.Name) { + cfg.BatchRequestLimit = ctx.Int(BatchRequestLimit.Name) + } + + if ctx.IsSet(BatchResponseMaxSize.Name) { + cfg.BatchResponseMaxSize = ctx.Int(BatchResponseMaxSize.Name) + } } // setGraphQL creates the GraphQL listener interface string from the set @@ -1214,19 +1245,6 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.IsSet(LightMaxPeersFlag.Name) { cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name) } - if ctx.IsSet(UltraLightServersFlag.Name) { - cfg.UltraLightServers = strings.Split(ctx.String(UltraLightServersFlag.Name), ",") - } - if ctx.IsSet(UltraLightFractionFlag.Name) { - cfg.UltraLightFraction = ctx.Int(UltraLightFractionFlag.Name) - } - if cfg.UltraLightFraction <= 0 && cfg.UltraLightFraction > 100 { - log.Error("Ultra light fraction is invalid", "had", cfg.UltraLightFraction, "updated", ethconfig.Defaults.UltraLightFraction) - cfg.UltraLightFraction = ethconfig.Defaults.UltraLightFraction - } - if ctx.IsSet(UltraLightOnlyAnnounceFlag.Name) { - cfg.UltraLightOnlyAnnounce = ctx.Bool(UltraLightOnlyAnnounceFlag.Name) - } if ctx.IsSet(LightNoPruneFlag.Name) { cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name) } @@ -1367,13 +1385,17 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { cfg.NoDiscovery = true } - // if we're running a light client or server, force enable the v5 peer discovery - // unless it is explicitly disabled with --nodiscover note that explicitly specifying - // --v5disc overrides --nodiscover, in 
which case the later only disables v4 discovery - forceV5Discovery := (lightClient || lightServer) && !ctx.Bool(NoDiscoverFlag.Name) - if ctx.IsSet(DiscoveryV5Flag.Name) { - cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name) - } else if forceV5Discovery { + // Disallow --nodiscover when used in conjunction with light mode. + if (lightClient || lightServer) && ctx.Bool(NoDiscoverFlag.Name) { + Fatalf("Cannot use --" + NoDiscoverFlag.Name + " in light client or light server mode") + } + CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag) + CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag) + cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name) + cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name) + + // If we're running a light client or server, force enable the v5 peer discovery. + if lightClient || lightServer { cfg.DiscoveryV5 = true } @@ -1472,12 +1494,12 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) { cfg.DataDir = ctx.String(DataDirFlag.Name) case ctx.Bool(DeveloperFlag.Name): cfg.DataDir = "" // unless explicitly requested, use memory databases - case ctx.Bool(RinkebyFlag.Name) && cfg.DataDir == node.DefaultDataDir(): - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") case ctx.Bool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") case ctx.Bool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia") + case ctx.Bool(HoleskyFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "holesky") } } @@ -1501,7 +1523,7 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) { } } -func setTxPool(ctx *cli.Context, cfg *txpool.Config) { +func setTxPool(ctx *cli.Context, cfg *legacypool.Config) { if ctx.IsSet(TxPoolLocalsFlag.Name) { locals := strings.Split(ctx.String(TxPoolLocalsFlag.Name), ",") for _, account := range locals { @@ -1634,16 +1656,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. 
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags - CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RinkebyFlag, GoerliFlag, SepoliaFlag) + CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag) CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer - if ctx.String(GCModeFlag.Name) == "archive" && ctx.Uint64(TxLookupLimitFlag.Name) != 0 { - ctx.Set(TxLookupLimitFlag.Name, "0") - log.Warn("Disable transaction unindexing for archive node") - } - if ctx.IsSet(LightServeFlag.Name) && ctx.Uint64(TxLookupLimitFlag.Name) != 0 { - log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited") - } + + // Set configurations from CLI flags setEtherbase(ctx, cfg) setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light") setTxPool(ctx, &cfg.TxPool) @@ -1700,18 +1717,40 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.Preimages = true log.Info("Enabling recording of key preimages since archive mode is used") } - if ctx.IsSet(TxLookupLimitFlag.Name) { - cfg.TxLookupLimit = ctx.Uint64(TxLookupLimitFlag.Name) + if ctx.IsSet(StateHistoryFlag.Name) { + cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name) + } + // Parse state scheme, abort the process if it's not compatible. + chaindb := tryMakeReadOnlyDatabase(ctx, stack) + scheme, err := ParseStateScheme(ctx, chaindb) + chaindb.Close() + if err != nil { + Fatalf("%v", err) + } + cfg.StateScheme = scheme + + // Parse transaction history flag, if user is still using legacy config + // file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'. 
+ if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit { + log.Warn("The config option 'TxLookupLimit' is deprecated and will be removed, please use 'TransactionHistory'") + cfg.TransactionHistory = cfg.TxLookupLimit + } + if ctx.IsSet(TransactionHistoryFlag.Name) { + cfg.TransactionHistory = ctx.Uint64(TransactionHistoryFlag.Name) + } else if ctx.IsSet(TxLookupLimitFlag.Name) { + log.Warn("The flag --txlookuplimit is deprecated and will be removed, please use --history.transactions") + cfg.TransactionHistory = ctx.Uint64(TxLookupLimitFlag.Name) + } + if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 { + cfg.TransactionHistory = 0 + log.Warn("Disabled transaction unindexing for archive node") + } + if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 { + log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited") } if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) { cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100 } - if ctx.IsSet(CacheTrieJournalFlag.Name) { - cfg.TrieCleanCacheJournal = ctx.String(CacheTrieJournalFlag.Name) - } - if ctx.IsSet(CacheTrieRejournalFlag.Name) { - cfg.TrieCleanCacheRejournal = ctx.Duration(CacheTrieRejournalFlag.Name) - } if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) { cfg.TrieDirtyCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100 } @@ -1770,28 +1809,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } cfg.Genesis = core.DefaultGenesisBlock() SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash) + case ctx.Bool(HoleskyFlag.Name): + if !ctx.IsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 17000 + } + cfg.Genesis = core.DefaultHoleskyGenesisBlock() + SetDNSDiscoveryDefaults(cfg, params.HoleskyGenesisHash) case ctx.Bool(SepoliaFlag.Name): if !ctx.IsSet(NetworkIdFlag.Name) { cfg.NetworkId = 11155111 } cfg.Genesis = core.DefaultSepoliaGenesisBlock() SetDNSDiscoveryDefaults(cfg, params.SepoliaGenesisHash) - case ctx.Bool(RinkebyFlag.Name): - log.Warn("") - log.Warn("--------------------------------------------------------------------------------") - log.Warn("Please note, Rinkeby has been deprecated. 
It will still work for the time being,") - log.Warn("but there will be no further hard-forks shipped for it.") - log.Warn("The network will be permanently halted in Q2/Q3 of 2023.") - log.Warn("For the most future proof testnet, choose Sepolia as") - log.Warn("your replacement environment (--sepolia instead of --rinkeby).") - log.Warn("--------------------------------------------------------------------------------") - log.Warn("") - - if !ctx.IsSet(NetworkIdFlag.Name) { - cfg.NetworkId = 4 - } - cfg.Genesis = core.DefaultRinkebyGenesisBlock() - SetDNSDiscoveryDefaults(cfg, params.RinkebyGenesisHash) case ctx.Bool(GoerliFlag.Name): if !ctx.IsSet(NetworkIdFlag.Name) { cfg.NetworkId = 5 @@ -1847,17 +1876,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { log.Info("Using developer account", "address", developer.Address) // Create a new developer genesis block or reuse existing one - cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.Int(DeveloperPeriodFlag.Name)), ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address) + cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address) if ctx.IsSet(DataDirFlag.Name) { - // If datadir doesn't exist we need to open db in write-mode - // so leveldb can create files. - readonly := true - if !common.FileExist(stack.ResolvePath("chaindata")) { - readonly = false - } - // Check if we have an already initialized chain and fall back to - // that if so. Otherwise we need to generate a new genesis spec. - chaindb := MakeChainDatabase(ctx, stack, readonly) + chaindb := tryMakeReadOnlyDatabase(ctx, stack) if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) { cfg.Genesis = nil // fallback to db content } @@ -1907,9 +1928,6 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend Fatalf("Failed to register the Ethereum service: %v", err) } stack.RegisterAPIs(tracers.APIs(backend.ApiBackend)) - if err := lescatalyst.Register(stack, backend); err != nil { - Fatalf("Failed to register the Engine API service: %v", err) - } return backend.ApiBackend, nil } backend, err := eth.New(stack, cfg) @@ -1922,9 +1940,6 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend Fatalf("Failed to create the LES server: %v", err) } } - if err := ethcatalyst.Register(stack, backend); err != nil { - Fatalf("Failed to register the Engine API service: %v", err) - } stack.RegisterAPIs(tracers.APIs(backend.APIBackend)) return backend.APIBackend, backend } @@ -1971,7 +1986,7 @@ func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) { if err := rlp.DecodeBytes(rlpBlob, &block); err != nil { Fatalf("Failed to decode block: %v", err) } - ethcatalyst.RegisterFullSyncTester(stack, eth, &block) + catalyst.RegisterFullSyncTester(stack, eth, &block) log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash()) } @@ -2027,7 +2042,7 @@ func SetupMetrics(ctx *cli.Context) { } if ctx.IsSet(MetricsHTTPFlag.Name) { - address := fmt.Sprintf("%s:%d", ctx.String(MetricsHTTPFlag.Name), ctx.Int(MetricsPortFlag.Name)) + address := net.JoinHostPort(ctx.String(MetricsHTTPFlag.Name), fmt.Sprintf("%d", ctx.Int(MetricsPortFlag.Name))) log.Info("Enabling stand-alone metrics HTTP endpoint", "address", address) exp.Setup(address) } else if ctx.IsSet(MetricsPortFlag.Name) { @@ -2081,6 +2096,18 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb. 
return chainDb } +// tryMakeReadOnlyDatabase tries to open the chain database in read-only mode, +// or falls back to write mode if the database is not initialized. +func tryMakeReadOnlyDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { + // If the database doesn't exist we need to open it in write-mode to allow + // the engine to create files. + readonly := true + if rawdb.PreexistingDatabase(stack.ResolvePath("chaindata")) == "" { + readonly = false + } + return MakeChainDatabase(ctx, stack, readonly) +} + func IsNetworkPreset(ctx *cli.Context) bool { for _, flag := range NetworkFlags { bFlag, _ := flag.(*cli.BoolFlag) @@ -2120,10 +2147,10 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { switch { case ctx.Bool(MainnetFlag.Name): genesis = core.DefaultGenesisBlock() + case ctx.Bool(HoleskyFlag.Name): + genesis = core.DefaultHoleskyGenesisBlock() case ctx.Bool(SepoliaFlag.Name): genesis = core.DefaultSepoliaGenesisBlock() - case ctx.Bool(RinkebyFlag.Name): - genesis = core.DefaultRinkebyGenesisBlock() case ctx.Bool(GoerliFlag.Name): genesis = core.DefaultGoerliGenesisBlock() case ctx.Bool(DeveloperFlag.Name): @@ -2149,6 +2176,10 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } + scheme, err := ParseStateScheme(ctx, chainDb) + if err != nil { + Fatalf("%v", err) + } cache := &core.CacheConfig{ TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, TrieCleanNoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name), @@ -2157,6 +2188,8 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh TrieTimeLimit: ethconfig.Defaults.TrieTimeout, SnapshotLimit: ethconfig.Defaults.SnapshotCache, Preimages: ctx.Bool(CachePreimagesFlag.Name), + StateScheme: scheme, + StateHistory: ctx.Uint64(StateHistoryFlag.Name), } if cache.TrieDirtyDisabled && !cache.Preimages { cache.Preimages = true @@ -2201,3 +2234,62 @@ func MakeConsolePreloads(ctx *cli.Context) []string { } return preloads } + +// ParseStateScheme resolves the scheme identifier from the CLI flag. If the +// provided state scheme is not compatible with the persistent scheme already +// in use, an error will be returned. +// +// - none: use the scheme consistent with persistent state, or fall back +// to hash-based scheme if state is empty. +// - hash: use hash-based scheme or error out if not compatible with +// persistent state scheme. +// - path: use path-based scheme or error out if not compatible with +// persistent state scheme. +func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) { + // If the state scheme is not specified, use the scheme consistent + // with persistent state, or fall back to hash mode if the database + // is empty. + stored := rawdb.ReadStateScheme(disk) + if !ctx.IsSet(StateSchemeFlag.Name) { + if stored == "" { + // use default scheme for empty database, flip it when + // path mode is chosen as default + log.Info("State scheme set to default", "scheme", "hash") + return rawdb.HashScheme, nil + } + log.Info("State scheme set to already existing", "scheme", stored) + return stored, nil // reuse the persistent scheme + } + // If a state scheme is specified, ensure it's compatible with the + // persistent state.
+ scheme := ctx.String(StateSchemeFlag.Name) + if stored == "" || scheme == stored { + log.Info("State scheme set by user", "scheme", scheme) + return scheme, nil + } + return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme) +} + +// MakeTrieDatabase constructs a trie database based on the configured scheme. +func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database { + config := &trie.Config{ + Preimages: preimage, + } + scheme, err := ParseStateScheme(ctx, disk) + if err != nil { + Fatalf("%v", err) + } + if scheme == rawdb.HashScheme { + // Read-only mode is not implemented in hash mode, so the + // parameter is ignored silently. TODO(rjl493456442): + // wire it up once read-only mode is implemented. + config.HashDB = hashdb.Defaults + return trie.NewDatabase(disk, config) + } + if readOnly { + config.PathDB = pathdb.ReadOnly + } else { + config.PathDB = pathdb.Defaults + } + return trie.NewDatabase(disk, config) +} diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index 930b68fb9..6669ff176 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -19,6 +19,7 @@ package utils import ( "fmt" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/flags" "github.com/urfave/cli/v2" ) @@ -33,15 +34,49 @@ var ShowDeprecated = &cli.Command{ var DeprecatedFlags = []cli.Flag{ NoUSBFlag, + LegacyWhitelistFlag, + CacheTrieJournalFlag, + CacheTrieRejournalFlag, + LegacyDiscoveryV5Flag, + TxLookupLimitFlag, } var ( - // (Deprecated May 2020, shown in aliased flags section) + // Deprecated May 2020, shown in aliased flags section NoUSBFlag = &cli.BoolFlag{ Name: "nousb", Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)", Category: flags.DeprecatedCategory, } + // Deprecated March 2022 + LegacyWhitelistFlag = &cli.StringFlag{ + Name: "whitelist", + Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --eth.requiredblocks)", + Category: flags.DeprecatedCategory, + } + // Deprecated July 2023 + CacheTrieJournalFlag = &cli.StringFlag{ + Name: "cache.trie.journal", + Usage: "Disk journal directory for trie cache to survive node restarts", + Category: flags.DeprecatedCategory, + } + CacheTrieRejournalFlag = &cli.DurationFlag{ + Name: "cache.trie.rejournal", + Usage: "Time interval to regenerate the trie cache journal", + Category: flags.DeprecatedCategory, + } + LegacyDiscoveryV5Flag = &cli.BoolFlag{ + Name: "v5disc", + Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism (deprecated, use --discv5 instead)", + Category: flags.DeprecatedCategory, + } + // Deprecated August 2023 + TxLookupLimitFlag = &cli.Uint64Flag{ + Name: "txlookuplimit", + Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain) (deprecated, use history.transactions instead)", + Value: ethconfig.Defaults.TransactionHistory, + Category: flags.DeprecatedCategory, + } ) // showDeprecated displays deprecated flags that will soon be removed from the codebase.
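
The resolution rules encoded in ParseStateScheme above are easiest to see in isolation. Below is a minimal, self-contained sketch of the same decision table; resolveScheme and the bare "hash"/"path" strings are illustrative stand-ins for the real rawdb.HashScheme/rawdb.PathScheme constants, not geth API:

package main

import (
	"errors"
	"fmt"
)

// resolveScheme mirrors ParseStateScheme: an unset flag follows whatever is
// already persisted (defaulting to hash for an empty database), while an
// explicit flag must agree with the persisted scheme.
func resolveScheme(flag, stored string) (string, error) {
	if flag == "" {
		if stored == "" {
			return "hash", nil // empty database, use the default scheme
		}
		return stored, nil // reuse the persistent scheme
	}
	if stored == "" || flag == stored {
		return flag, nil
	}
	return "", errors.New("incompatible state scheme")
}

func main() {
	for _, c := range []struct{ flag, stored string }{
		{"", ""}, {"", "path"}, {"path", ""}, {"hash", "path"},
	} {
		scheme, err := resolveScheme(c.flag, c.stored)
		fmt.Printf("flag=%q stored=%q -> scheme=%q err=%v\n", c.flag, c.stored, scheme, err)
	}
}

The last case is the one that aborts startup: a database that already holds path-based state cannot be reopened with a hash scheme requested via the flag, and vice versa.
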
diff --git a/common/lru/basiclru_test.go b/common/lru/basiclru_test.go index e2d3559f5..29812bda1 100644 --- a/common/lru/basiclru_test.go +++ b/common/lru/basiclru_test.go @@ -170,6 +170,20 @@ func TestBasicLRUContains(t *testing.T) { } } +// Test that Peek doesn't update recent-ness +func TestBasicLRUPeek(t *testing.T) { + cache := NewBasicLRU[int, int](2) + cache.Add(1, 1) + cache.Add(2, 2) + if v, ok := cache.Peek(1); !ok || v != 1 { + t.Errorf("1 should be set to 1") + } + cache.Add(3, 3) + if cache.Contains(1) { + t.Errorf("should not have updated recent-ness of 1") + } +} + func BenchmarkLRU(b *testing.B) { var ( capacity = 1000 diff --git a/common/math/big.go b/common/math/big.go index 1c2afa749..013c0ba4b 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -82,7 +82,7 @@ func (i *HexOrDecimal256) MarshalText() ([]byte, error) { // it however accepts either "0x"-prefixed (hex encoded) or non-prefixed (decimal) type Decimal256 big.Int -// NewHexOrDecimal256 creates a new Decimal256 +// NewDecimal256 creates a new Decimal256 func NewDecimal256(x int64) *Decimal256 { b := big.NewInt(x) d := Decimal256(*b) diff --git a/common/types.go b/common/types.go index bdfd96423..bf74e4371 100644 --- a/common/types.go +++ b/common/types.go @@ -65,6 +65,11 @@ func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) } // If b is larger than len(h), b will be cropped from the left. func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } +// Cmp compares two hashes. +func (h Hash) Cmp(other Hash) int { + return bytes.Compare(h[:], other[:]) +} + // Bytes gets the byte representation of the underlying hash. func (h Hash) Bytes() []byte { return h[:] } @@ -226,6 +231,11 @@ func IsHexAddress(s string) bool { return len(s) == 2*AddressLength && isHex(s) } +// Cmp compares two addresses. +func (a Address) Cmp(other Address) int { + return bytes.Compare(a[:], other[:]) +} + // Bytes gets the string representation of the underlying address. func (a Address) Bytes() []byte { return a[:] } diff --git a/common/types_test.go b/common/types_test.go index ad892671b..cec689ea3 100644 --- a/common/types_test.go +++ b/common/types_test.go @@ -25,6 +25,7 @@ import ( "reflect" "strings" "testing" + "time" ) func TestBytesConversion(t *testing.T) { @@ -583,3 +584,14 @@ func TestAddressEIP55(t *testing.T) { t.Fatal("Unexpected address after unmarshal") } } + +func BenchmarkPrettyDuration(b *testing.B) { + var x = PrettyDuration(time.Duration(int64(1203123912312))) + b.Logf("Pre %s", time.Duration(x).String()) + var a string + b.ResetTimer() + for i := 0; i < b.N; i++ { + a = x.String() + } + b.Logf("Post %s", a) +} diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 75f1b65ef..e856f4e6c 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -23,7 +23,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -257,7 +258,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa return consensus.ErrInvalidNumber } // Verify the header's EIP-1559 attributes. 
- if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil { + if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil { return err } // Verify existence / non-existence of withdrawalsHash. @@ -268,13 +269,24 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa if !shanghai && header.WithdrawalsHash != nil { return fmt.Errorf("invalid withdrawalsHash: have %x, expected nil", header.WithdrawalsHash) } - // Verify the existence / non-existence of excessDataGas + // Verify the existence / non-existence of cancun-specific header fields cancun := chain.Config().IsCancun(header.Number, header.Time) - if cancun && header.ExcessDataGas == nil { - return errors.New("missing excessDataGas") - } - if !cancun && header.ExcessDataGas != nil { - return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas) + if !cancun { + switch { + case header.ExcessBlobGas != nil: + return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas) + case header.BlobGasUsed != nil: + return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed) + case header.ParentBeaconRoot != nil: + return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot) + } + } else { + if header.ParentBeaconRoot == nil { + return errors.New("header is missing beaconRoot") + } + if err := eip4844.VerifyEIP4844Header(parent, header); err != nil { + return err + } } return nil } diff --git a/consensus/clique/api.go b/consensus/clique/api.go index cb270d321..374b50692 100644 --- a/consensus/clique/api.go +++ b/consensus/clique/api.go @@ -205,7 +205,7 @@ func (sb *blockNumberOrHashOrRLP) UnmarshalJSON(data []byte) error { } // GetSigner returns the signer for a specific clique block. -// Can be called with either a blocknumber, blockhash or an rlp encoded blob. +// Can be called with a block number, a block hash or an RLP encoded blob. // The RLP encoded blob can either be a block or a header. func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address, error) { if len(rlpOrBlockNr.RLP) == 0 { diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index dd35e9d28..f708050ab 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -33,6 +33,7 @@ import ( lru "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -343,7 +344,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil { return err } - } else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil { + } else if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil { // Verify the header's EIP-1559 attributes. return err } diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go index f2c6d740c..7cd5919c5 100644 --- a/consensus/clique/clique_test.go +++ b/consensus/clique/clique_test.go @@ -30,7 +30,7 @@ import ( ) // This test case is a repro of an annoying bug that took us forever to catch.
-// In Clique PoA networks (Rinkeby, Görli, etc), consecutive blocks might have +// In Clique PoA networks (Görli, etc), consecutive blocks might have // the same state root (no block subsidy, empty block). If a node crashes, the // chain ends up losing the recent state and needs to regenerate it from blocks // already in the database. The bug was that processing the block *prior* to an diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go index e5efa5108..a97115121 100644 --- a/consensus/clique/snapshot.go +++ b/consensus/clique/snapshot.go @@ -19,7 +19,6 @@ package clique import ( "bytes" "encoding/json" - "sort" "time" "github.com/ethereum/go-ethereum/common" @@ -29,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "golang.org/x/exp/slices" ) // Vote represents a single vote that an authorized signer made to modify the @@ -62,13 +62,6 @@ type Snapshot struct { Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating } -// signersAscending implements the sort interface to allow sorting a list of addresses -type signersAscending []common.Address - -func (s signersAscending) Len() int { return len(s) } -func (s signersAscending) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 } -func (s signersAscending) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // newSnapshot creates a new snapshot with the specified startup parameters. This // method does not initialize the set of recent signers, so only ever use it for // the genesis block. @@ -315,7 +308,7 @@ func (s *Snapshot) signers() []common.Address { for sig := range s.Signers { sigs = append(sigs, sig) } - sort.Sort(signersAscending(sigs)) + slices.SortFunc(sigs, common.Address.Cmp) return sigs } diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go index 66e667276..26cebe008 100644 --- a/consensus/clique/snapshot_test.go +++ b/consensus/clique/snapshot_test.go @@ -21,7 +21,6 @@ import ( "crypto/ecdsa" "fmt" "math/big" - "sort" "testing" "github.com/ethereum/go-ethereum/common" @@ -31,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "golang.org/x/exp/slices" ) // testerAccountPool is a pool to maintain currently active tester accounts, @@ -53,7 +53,7 @@ func (ap *testerAccountPool) checkpoint(header *types.Header, signers []string) for i, signer := range signers { auths[i] = ap.address(signer) } - sort.Sort(signersAscending(auths)) + slices.SortFunc(auths, common.Address.Cmp) for i, auth := range auths { copy(header.Extra[extraVanity+i*common.AddressLength:], auth.Bytes()) } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index acbbdacc3..8eb9863da 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -254,7 +255,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil { return err } - } else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil {
+ } else if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil { // Verify the header's EIP-1559 attributes. return err } diff --git a/consensus/misc/eip1559.go b/consensus/misc/eip1559/eip1559.go similarity index 92% rename from consensus/misc/eip1559.go rename to consensus/misc/eip1559/eip1559.go index fbaf9eec7..84b82c4c4 100644 --- a/consensus/misc/eip1559.go +++ b/consensus/misc/eip1559/eip1559.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. -package misc +package eip1559 import ( "errors" @@ -23,20 +23,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" ) -// VerifyEip1559Header verifies some header attributes which were changed in EIP-1559, +// VerifyEIP1559Header verifies some header attributes which were changed in EIP-1559, // - gas limit check // - basefee check -func VerifyEip1559Header(config *params.ChainConfig, parent, header *types.Header) error { +func VerifyEIP1559Header(config *params.ChainConfig, parent, header *types.Header) error { // Verify that the gas limit remains within allowed bounds parentGasLimit := parent.GasLimit if !config.IsLondon(parent.Number) { parentGasLimit = parent.GasLimit * config.ElasticityMultiplier() } - if err := VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil { + if err := misc.VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil { return err } // Verify the header is not malformed diff --git a/consensus/misc/eip1559_test.go b/consensus/misc/eip1559/eip1559_test.go similarity index 98% rename from consensus/misc/eip1559_test.go rename to consensus/misc/eip1559/eip1559_test.go index 1a9f96bc4..b5afdf0fe 100644 --- a/consensus/misc/eip1559_test.go +++ b/consensus/misc/eip1559/eip1559_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. -package misc +package eip1559 import ( "math/big" @@ -95,7 +95,7 @@ func TestBlockGasLimits(t *testing.T) { BaseFee: initial, Number: big.NewInt(tc.pNum + 1), } - err := VerifyEip1559Header(config(), parent, header) + err := VerifyEIP1559Header(config(), parent, header) if tc.ok && err != nil { t.Errorf("test %d: Expected valid header: %s", i, err) } diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go deleted file mode 100644 index 66ca9bd26..000000000 --- a/consensus/misc/eip4844.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- -package misc - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/params" -) - -var ( - minDataGasPrice = big.NewInt(params.BlobTxMinDataGasprice) - dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction) -) - -// CalcBlobFee calculates the blobfee from the header's excess data gas field. -func CalcBlobFee(excessDataGas *big.Int) *big.Int { - // If this block does not yet have EIP-4844 enabled, return the starting fee - if excessDataGas == nil { - return big.NewInt(params.BlobTxMinDataGasprice) - } - return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction) -} - -// fakeExponential approximates factor * e ** (numerator / denominator) using -// Taylor expansion. -func fakeExponential(factor, numerator, denominator *big.Int) *big.Int { - var ( - output = new(big.Int) - accum = new(big.Int).Mul(factor, denominator) - ) - for i := 1; accum.Sign() > 0; i++ { - output.Add(output, accum) - - accum.Mul(accum, numerator) - accum.Div(accum, denominator) - accum.Div(accum, big.NewInt(int64(i))) - } - return output.Div(output, denominator) -} diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go new file mode 100644 index 000000000..2dad9a0cd --- /dev/null +++ b/consensus/misc/eip4844/eip4844.go @@ -0,0 +1,98 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package eip4844 + +import ( + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" ) + +var ( + minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) + blobGaspriceUpdateFraction = big.NewInt(params.BlobTxBlobGaspriceUpdateFraction) +) + +// VerifyEIP4844Header verifies the presence and correctness of the excessBlobGas +// and blobGasUsed header fields: both must be present, blobGasUsed must stay +// within its bounds, and excessBlobGas must follow from the parent header. +func VerifyEIP4844Header(parent, header *types.Header) error { + // Verify the header is not malformed + if header.ExcessBlobGas == nil { + return errors.New("header is missing excessBlobGas") + } + if header.BlobGasUsed == nil { + return errors.New("header is missing blobGasUsed") + } + // Verify that the blob gas used remains within reasonable limits.
+ if *header.BlobGasUsed > params.MaxBlobGasPerBlock { + return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.MaxBlobGasPerBlock) + } + if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 { + return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", *header.BlobGasUsed, params.BlobTxBlobGasPerBlob) + } + // Verify the excessBlobGas is correct based on the parent header + var ( + parentExcessBlobGas uint64 + parentBlobGasUsed uint64 + ) + if parent.ExcessBlobGas != nil { + parentExcessBlobGas = *parent.ExcessBlobGas + parentBlobGasUsed = *parent.BlobGasUsed + } + expectedExcessBlobGas := CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed) + if *header.ExcessBlobGas != expectedExcessBlobGas { + return fmt.Errorf("invalid excessBlobGas: have %d, want %d, parent excessBlobGas %d, parent blobGasUsed %d", + *header.ExcessBlobGas, expectedExcessBlobGas, parentExcessBlobGas, parentBlobGasUsed) + } + return nil +} + +// CalcExcessBlobGas calculates the excess blob gas after applying the set of +// blobs on top of the parent's excess blob gas. +func CalcExcessBlobGas(parentExcessBlobGas uint64, parentBlobGasUsed uint64) uint64 { + excessBlobGas := parentExcessBlobGas + parentBlobGasUsed + if excessBlobGas < params.BlobTxTargetBlobGasPerBlock { + return 0 + } + return excessBlobGas - params.BlobTxTargetBlobGasPerBlock +} + +// CalcBlobFee calculates the blobfee from the header's excess blob gas field. +func CalcBlobFee(excessBlobGas uint64) *big.Int { + return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), blobGaspriceUpdateFraction) +} + +// fakeExponential approximates factor * e ** (numerator / denominator) using +// Taylor expansion. +func fakeExponential(factor, numerator, denominator *big.Int) *big.Int { + var ( + output = new(big.Int) + accum = new(big.Int).Mul(factor, denominator) + ) + for i := 1; accum.Sign() > 0; i++ { + output.Add(output, accum) + + accum.Mul(accum, numerator) + accum.Div(accum, denominator) + accum.Div(accum, big.NewInt(int64(i))) + } + return output.Div(output, denominator) +} diff --git a/consensus/misc/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go similarity index 55% rename from consensus/misc/eip4844_test.go rename to consensus/misc/eip4844/eip4844_test.go index 5838cab8e..ec417380f 100644 --- a/consensus/misc/eip4844_test.go +++ b/consensus/misc/eip4844/eip4844_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. -package misc +package eip4844 import ( "fmt" @@ -24,22 +24,51 @@ import ( "github.com/ethereum/go-ethereum/params" ) +func TestCalcExcessBlobGas(t *testing.T) { + var tests = []struct { + excess uint64 + blobs uint64 + want uint64 + }{ + // The excess blob gas should not increase from zero if the used blob + // slots are below - or equal - to the target.
+ {0, 0, 0}, + {0, 1, 0}, + {0, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, 0}, + + // If the target blob gas is exceeded, the excessBlobGas should increase + // by however much it was overshot + {0, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob}, + {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob + 1}, + {1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 2, 2*params.BlobTxBlobGasPerBlob + 1}, + + // The excess blob gas should decrease by however much the target was + // under-shot, capped at zero. + {params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock}, + {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxTargetBlobGasPerBlock - params.BlobTxBlobGasPerBlob}, + {params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, params.BlobTxTargetBlobGasPerBlock - (2 * params.BlobTxBlobGasPerBlob)}, + {params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0}, + } + for i, tt := range tests { + result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob) + if result != tt.want { + t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want) + } + } +} + func TestCalcBlobFee(t *testing.T) { tests := []struct { - excessDataGas int64 + excessBlobGas uint64 blobfee int64 }{ {0, 1}, - {1542706, 1}, - {1542707, 2}, - {10 * 1024 * 1024, 111}, - } - have := CalcBlobFee(nil) - if have.Int64() != params.BlobTxMinDataGasprice { - t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice) + {2314057, 1}, + {2314058, 2}, + {10 * 1024 * 1024, 23}, } for i, tt := range tests { - have := CalcBlobFee(big.NewInt(tt.excessDataGas)) + have := CalcBlobFee(tt.excessBlobGas) if have.Int64() != tt.blobfee { t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee) } diff --git a/consensus/misc/gaslimit.go b/consensus/misc/gaslimit.go index 25f35300b..dfcabd9a8 100644 --- a/consensus/misc/gaslimit.go +++ b/consensus/misc/gaslimit.go @@ -17,7 +17,6 @@ package misc import ( - "errors" "fmt" "github.com/ethereum/go-ethereum/params" @@ -36,7 +35,7 @@ func VerifyGaslimit(parentGasLimit, headerGasLimit uint64) error { return fmt.Errorf("invalid gas limit: have %d, want %d +-= %d", headerGasLimit, parentGasLimit, limit-1) } if headerGasLimit < params.MinGasLimit { - return errors.New("invalid gas limit below 5000") + return fmt.Errorf("invalid gas limit below %d", params.MinGasLimit) } return nil } diff --git a/console/bridge.go b/console/bridge.go index 21ef0e8e7..c67686d6c 100644 --- a/console/bridge.go +++ b/console/bridge.go @@ -18,6 +18,7 @@ package console import ( "encoding/json" + "errors" "fmt" "io" "reflect" @@ -77,18 +78,18 @@ func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) { return nil, err } if password != confirm { - return nil, fmt.Errorf("passwords don't match!") + return nil, errors.New("passwords don't match!") } // A single string password was specified, use that case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil: password = call.Argument(0).ToString().String() default: - return nil, fmt.Errorf("expected 0 or 1 string argument") + return nil, errors.New("expected 0 or 1 string argument") 
} // Password acquired, execute the call and return newAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("newAccount")) if !callable { - return nil, fmt.Errorf("jeth.newAccount is not callable") + return nil, errors.New("jeth.newAccount is not callable") } ret, err := newAccount(goja.Null(), call.VM.ToValue(password)) if err != nil { @@ -102,7 +103,7 @@ func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) { func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) { // Make sure we have a wallet specified to open if call.Argument(0).ToObject(call.VM).ClassName() != "String" { - return nil, fmt.Errorf("first argument must be the wallet URL to open") + return nil, errors.New("first argument must be the wallet URL to open") } wallet := call.Argument(0) @@ -115,7 +116,7 @@ func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) { // Open the wallet and return if successful in itself openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet")) if !callable { - return nil, fmt.Errorf("jeth.openWallet is not callable") + return nil, errors.New("jeth.openWallet is not callable") } val, err := openWallet(goja.Null(), wallet, passwd) if err == nil { @@ -198,7 +199,7 @@ func (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, erro } openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet")) if !callable { - return nil, fmt.Errorf("jeth.openWallet is not callable") + return nil, errors.New("jeth.openWallet is not callable") } return openWallet(goja.Null(), wallet, call.VM.ToValue(input)) } @@ -219,7 +220,7 @@ func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) { } openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet")) if !callable { - return nil, fmt.Errorf("jeth.openWallet is not callable") + return nil, errors.New("jeth.openWallet is not callable") } return openWallet(goja.Null(), wallet, call.VM.ToValue(input)) } @@ -230,13 +231,13 @@ func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) { // the RPC call. func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { if len(call.Arguments) < 1 { - return nil, fmt.Errorf("usage: unlockAccount(account, [ password, duration ])") + return nil, errors.New("usage: unlockAccount(account, [ password, duration ])") } account := call.Argument(0) // Make sure we have an account specified to unlock. if goja.IsUndefined(account) || goja.IsNull(account) || account.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("first argument must be the account to unlock") + return nil, errors.New("first argument must be the account to unlock") } // If password is not given or is the null value, prompt the user for it. 
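
A side note on the mechanical fmt.Errorf-to-errors.New conversion running through these console/bridge.go hunks: for a constant message with no format verbs, the two produce errors with identical text, so errors.New is the simpler choice and avoids pointless format-string parsing. A standalone sketch of the equivalence (not geth code):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Same message either way; errors.New just skips the formatting machinery.
	a := fmt.Errorf("expected 0 or 1 string argument")
	b := errors.New("expected 0 or 1 string argument")
	fmt.Println(a.Error() == b.Error()) // true
}
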
@@ -250,7 +251,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { passwd = call.VM.ToValue(input) } else { if call.Argument(1).ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("password must be a string") + return nil, errors.New("password must be a string") } passwd = call.Argument(1) } @@ -259,7 +260,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { duration := goja.Null() if !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) { if !isNumber(call.Argument(2)) { - return nil, fmt.Errorf("unlock duration must be a number") + return nil, errors.New("unlock duration must be a number") } duration = call.Argument(2) } @@ -267,7 +268,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { // Send the request to the backend and return. unlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount")) if !callable { - return nil, fmt.Errorf("jeth.unlockAccount is not callable") + return nil, errors.New("jeth.unlockAccount is not callable") } return unlockAccount(goja.Null(), account, passwd, duration) } @@ -277,7 +278,7 @@ func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) { // jeth.sign) with it to actually execute the RPC call. func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { if nArgs := len(call.Arguments); nArgs < 2 { - return nil, fmt.Errorf("usage: sign(message, account, [ password ])") + return nil, errors.New("usage: sign(message, account, [ password ])") } var ( message = call.Argument(0) @@ -286,10 +287,10 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { ) if goja.IsUndefined(message) || message.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("first argument must be the message to sign") + return nil, errors.New("first argument must be the message to sign") } if goja.IsUndefined(account) || account.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("second argument must be the account to sign with") + return nil, errors.New("second argument must be the account to sign with") } // if the password is not given or null ask the user and ensure password is a string @@ -301,13 +302,13 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { } passwd = call.VM.ToValue(input) } else if passwd.ExportType().Kind() != reflect.String { - return nil, fmt.Errorf("third argument must be the password to unlock the account") + return nil, errors.New("third argument must be the password to unlock the account") } // Send the request to the backend and return sign, callable := goja.AssertFunction(getJeth(call.VM).Get("sign")) if !callable { - return nil, fmt.Errorf("jeth.sign is not callable") + return nil, errors.New("jeth.sign is not callable") } return sign(goja.Null(), message, account, passwd) } @@ -315,11 +316,11 @@ func (b *bridge) Sign(call jsre.Call) (goja.Value, error) { // Sleep will block the console for the specified number of seconds. 
func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) { if nArgs := len(call.Arguments); nArgs < 1 { - return nil, fmt.Errorf("usage: sleep(<number of seconds>)") + return nil, errors.New("usage: sleep(<number of seconds>)") } sleepObj := call.Argument(0) if goja.IsUndefined(sleepObj) || goja.IsNull(sleepObj) || !isNumber(sleepObj) { - return nil, fmt.Errorf("usage: sleep(<number of seconds>)") + return nil, errors.New("usage: sleep(<number of seconds>)") } sleep := sleepObj.ToFloat() time.Sleep(time.Duration(sleep * float64(time.Second))) @@ -336,17 +337,17 @@ func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) { ) nArgs := len(call.Arguments) if nArgs == 0 { - return nil, fmt.Errorf("usage: sleepBlocks(<n>[, max sleep in seconds])") + return nil, errors.New("usage: sleepBlocks(<n>[, max sleep in seconds])") } if nArgs >= 1 { if goja.IsNull(call.Argument(0)) || goja.IsUndefined(call.Argument(0)) || !isNumber(call.Argument(0)) { - return nil, fmt.Errorf("expected number as first argument") + return nil, errors.New("expected number as first argument") } blocks = call.Argument(0).ToInteger() } if nArgs >= 2 { if goja.IsNull(call.Argument(1)) || goja.IsUndefined(call.Argument(1)) || !isNumber(call.Argument(1)) { - return nil, fmt.Errorf("expected number as second argument") + return nil, errors.New("expected number as second argument") } sleep = call.Argument(1).ToInteger() } @@ -421,7 +422,7 @@ func (b *bridge) Send(call jsre.Call) (goja.Value, error) { JSON := call.VM.Get("JSON").ToObject(call.VM) parse, callable := goja.AssertFunction(JSON.Get("parse")) if !callable { - return nil, fmt.Errorf("JSON.parse is not a function") + return nil, errors.New("JSON.parse is not a function") } resultVal, err := parse(goja.Null(), call.VM.ToValue(string(result))) if err != nil { diff --git a/console/console_test.go b/console/console_test.go index cc61f021c..ee5c36be4 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -94,7 +94,7 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester { t.Fatalf("failed to create node: %v", err) } ethConf := &ethconfig.Config{ - Genesis: core.DeveloperGenesisBlock(15, 11_500_000, common.Address{}), + Genesis: core.DeveloperGenesisBlock(11_500_000, common.Address{}), Miner: miner.Config{ Etherbase: common.HexToAddress(testAddress), }, @@ -110,10 +110,11 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester { if err = stack.Start(); err != nil { t.Fatalf("failed to start test stack: %v", err) } - client, err := stack.Attach() - if err != nil { - t.Fatalf("failed to attach to node: %v", err) - } + client := stack.Attach() + t.Cleanup(func() { + client.Close() + }) + prompter := &hookedPrompter{scheduler: make(chan string)} printer := new(bytes.Buffer) diff --git a/core/block_validator.go b/core/block_validator.go index bcb228830..f3d65cea2 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -17,6 +17,7 @@ package core import ( + "errors" "fmt" "github.com/ethereum/go-ethereum/consensus" @@ -67,20 +68,48 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash) } + // Withdrawals are present after the Shanghai fork. if header.WithdrawalsHash != nil { // Withdrawals list must be present in body after Shanghai.
if block.Withdrawals() == nil { - return fmt.Errorf("missing withdrawals in block body") + return errors.New("missing withdrawals in block body") } if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash { return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash) } } else if block.Withdrawals() != nil { - // Withdrawals are not allowed prior to shanghai fork - return fmt.Errorf("withdrawals present in block body") + // Withdrawals are not allowed prior to Shanghai fork + return errors.New("withdrawals present in block body") } + // Blob transactions may be present after the Cancun fork. + var blobs int + for i, tx := range block.Transactions() { + // Count the number of blobs to validate against the header's blobGasUsed + blobs += len(tx.BlobHashes()) + + // If the tx is a blob tx, it must NOT have a sidecar attached to be valid in a block. + if tx.BlobTxSidecar() != nil { + return fmt.Errorf("unexpected blob sidecar in transaction at index %d", i) + } + + // The individual checks for blob validity (version-check + not empty) + // happens in StateTransition. + } + + // Check blob gas usage. + if header.BlobGasUsed != nil { + if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated + return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob) + } + } else { + if blobs > 0 { + return errors.New("data blobs present in block body") + } + } + + // Ancestor block must be known. if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { return consensus.ErrUnknownAncestor diff --git a/core/block_validator_test.go b/core/block_validator_test.go index d18954516..48bdceff6 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -18,7 +18,6 @@ package core import ( "math/big" - "runtime" "testing" "time" @@ -36,6 +35,11 @@ import ( // Tests that simple header verification works, for both good and bad blocks. func TestHeaderVerification(t *testing.T) { + testHeaderVerification(t, rawdb.HashScheme) + testHeaderVerification(t, rawdb.PathScheme) +} + +func testHeaderVerification(t *testing.T, scheme string) { // Create a simple chain to verify var ( gspec = &Genesis{Config: params.TestChainConfig} @@ -46,7 +50,7 @@ func TestHeaderVerification(t *testing.T) { headers[i] = block.Header() } // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer chain.Stop() for i := 0; i < len(blocks); i++ { @@ -235,118 +239,6 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) { } } -// Tests that concurrent header verification works, for both good and bad blocks. 
-func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) } -func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) } -func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVerification(t, 32) } - -func testHeaderConcurrentVerification(t *testing.T, threads int) { - // Create a simple chain to verify - var ( - gspec = &Genesis{Config: params.TestChainConfig} - _, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 8, nil) - ) - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - // Set the number of threads to verify on - old := runtime.GOMAXPROCS(threads) - defer runtime.GOMAXPROCS(old) - - // Run the header checker for the entire block chain at once both for a valid and - // also an invalid chain (enough if one arbitrary block is invalid). - for i, valid := range []bool{true, false} { - var results <-chan error - - if valid { - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - _, results = chain.engine.VerifyHeaders(chain, headers) - chain.Stop() - } else { - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil) - _, results = chain.engine.VerifyHeaders(chain, headers) - chain.Stop() - } - // Wait for all the verification results - checks := make(map[int]error) - for j := 0; j < len(blocks); j++ { - select { - case result := <-results: - checks[j] = result - - case <-time.After(time.Second): - t.Fatalf("test %d.%d: verification timeout", i, j) - } - } - // Check nonce check validity - for j := 0; j < len(blocks); j++ { - want := valid || (j < len(blocks)-2) // We chose the last-but-one nonce in the chain to fail - if (checks[j] == nil) != want { - t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, checks[j], want) - } - if !want { - // A few blocks after the first error may pass verification due to concurrent - // workers. We don't care about those in this test, just that the correct block - // errors out. - break - } - } - // Make sure no more data is returned - select { - case result := <-results: - t.Fatalf("test %d: unexpected result returned: %v", i, result) - case <-time.After(25 * time.Millisecond): - } - } -} - -// Tests that aborting a header validation indeed prevents further checks from being -// run, as well as checks that no left-over goroutines are leaked. 
-func TestHeaderConcurrentAbortion2(t *testing.T) { testHeaderConcurrentAbortion(t, 2) } -func TestHeaderConcurrentAbortion8(t *testing.T) { testHeaderConcurrentAbortion(t, 8) } -func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion(t, 32) } - -func testHeaderConcurrentAbortion(t *testing.T, threads int) { - // Create a simple chain to verify - var ( - gspec = &Genesis{Config: params.TestChainConfig} - _, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, nil) - ) - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - // Set the number of threads to verify on - old := runtime.GOMAXPROCS(threads) - defer runtime.GOMAXPROCS(old) - - // Start the verifications and immediately abort - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil) - defer chain.Stop() - - abort, results := chain.engine.VerifyHeaders(chain, headers) - close(abort) - - // Deplete the results channel - verified := 0 - for depleted := false; !depleted; { - select { - case result := <-results: - if result != nil { - t.Errorf("header %d: validation failed: %v", verified, result) - } - verified++ - case <-time.After(50 * time.Millisecond): - depleted = true - } - } - // Check that abortion was honored by not processing too many POWs - if verified > 2*threads { - t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads) - } -} - func TestCalcGasLimit(t *testing.T) { for i, tc := range []struct { pGasLimit uint64 diff --git a/core/blockchain.go b/core/blockchain.go index 6fc36dac5..272121e33 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -23,7 +23,6 @@ import ( "io" "math/big" "runtime" - "sort" "strings" "sync" "sync/atomic" @@ -34,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state/snapshot" @@ -48,6 +48,9 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" + "golang.org/x/exp/slices" ) var ( @@ -57,6 +60,8 @@ var ( headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil) headSafeBlockGauge = metrics.NewRegisteredGauge("chain/head/safe", nil) + chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil) + accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil) @@ -87,6 +92,8 @@ var ( errInsertionInterrupted = errors.New("insertion is interrupted") errChainStopped = errors.New("blockchain is stopped") + errInvalidOldChain = errors.New("invalid old chain") + errInvalidNewChain = errors.New("invalid new chain") ) const ( @@ -125,22 +132,40 @@ ) // CacheConfig contains the configuration values for the trie database -// that's resident in a blockchain. +// and state snapshot that are resident in a blockchain.
type CacheConfig struct { TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory - TrieCleanJournal string // Disk journal for saving clean cache entries. - TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory Preimages bool // Whether to store preimage of trie key to the disk + StateHistory uint64 // Number of blocks from head whose state histories are reserved. + StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top SnapshotNoBuild bool // Whether the background generation is allowed SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it } +// triedbConfig derives the trie database configuration. +func (c *CacheConfig) triedbConfig() *trie.Config { + config := &trie.Config{Preimages: c.Preimages} + if c.StateScheme == rawdb.HashScheme { + config.HashDB = &hashdb.Config{ + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + } + } + if c.StateScheme == rawdb.PathScheme { + config.PathDB = &pathdb.Config{ + StateHistory: c.StateHistory, + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, + } + } + return config +} + // defaultCacheConfig are the default caching values if none are specified by the // user (also used during testing). var defaultCacheConfig = &CacheConfig{ @@ -149,6 +174,15 @@ TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 256, SnapshotWait: true, + StateScheme: rawdb.HashScheme, +} + +// DefaultCacheConfigWithScheme returns a deep copied default cache config with +// a provided trie node scheme. +func DefaultCacheConfigWithScheme(scheme string) *CacheConfig { + config := *defaultCacheConfig + config.StateScheme = scheme + return &config } // BlockChain represents the canonical chain given a database with a genesis @@ -234,11 +268,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis cacheConfig = defaultCacheConfig } // Open trie database with provided config - triedb := trie.NewDatabaseWithConfig(db, &trie.Config{ - Cache: cacheConfig.TrieCleanLimit, - Journal: cacheConfig.TrieCleanJournal, - Preimages: cacheConfig.Preimages, - }) + triedb := trie.NewDatabase(db, cacheConfig.triedbConfig()) + // Setup the genesis block, commit the provided genesis specification // to database if the genesis block is not present yet, or load the // stored one from database. @@ -293,6 +324,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis bc.currentFinalBlock.Store(nil) bc.currentSafeBlock.Store(nil) + // Update chain info data metrics + chainInfoGauge.Update(metrics.GaugeInfoValue{"chain_id": bc.chainConfig.ChainID.String()}) + // If Geth is initialized with an external ancient store, re-initialize the // missing chain indexes and chain flags. This procedure can survive crash // and can be resumed in next restart since chain flags are updated in last step.
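
To make the new triedbConfig translation concrete, here is a toy sketch of the mapping it performs, with stand-in types rather than the real trie.Config/hashdb/pathdb ones (the example numbers are arbitrary): the MB-denominated limits in CacheConfig become byte-denominated backend knobs, and only the path scheme receives the dirty-cache and state-history settings:

package main

import "fmt"

type cacheConfig struct {
	TrieCleanLimit int    // MB for the clean node cache
	TrieDirtyLimit int    // MB for the dirty node cache (path scheme only)
	StateHistory   uint64 // blocks of state history to retain (path scheme only)
	StateScheme    string // "hash" or "path"
}

// backendKnobs mirrors the shape of the translation: one branch per scheme.
func (c cacheConfig) backendKnobs() map[string]uint64 {
	knobs := map[string]uint64{
		"clean-bytes": uint64(c.TrieCleanLimit) * 1024 * 1024,
	}
	if c.StateScheme == "path" {
		knobs["dirty-bytes"] = uint64(c.TrieDirtyLimit) * 1024 * 1024
		knobs["state-history"] = c.StateHistory
	}
	return knobs
}

func main() {
	cfg := cacheConfig{TrieCleanLimit: 154, TrieDirtyLimit: 256, StateHistory: 90000, StateScheme: "path"}
	fmt.Println(cfg.backendKnobs())
}
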
@@ -345,7 +379,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis needRewind = true low = fullBlock.Number.Uint64() } - // In fast sync, it may happen that ancient data has been written to the + // In snap sync, it may happen that ancient data has been written to the // ancient store, but the LastFastBlock has not been updated, truncate the // extra data here. snapBlock := bc.CurrentSnapBlock() @@ -409,18 +443,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis bc.wg.Add(1) go bc.updateFutureBlocks() - // If periodic cache journal is required, spin it up. - if bc.cacheConfig.TrieCleanRejournal > 0 { - if bc.cacheConfig.TrieCleanRejournal < time.Minute { - log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute) - bc.cacheConfig.TrieCleanRejournal = time.Minute - } - bc.wg.Add(1) - go func() { - defer bc.wg.Done() - bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) - }() - } // Rewind the chain in case of an incompatible config upgrade. if compat, ok := genesisErr.(*params.ConfigCompatError); ok { log.Warn("Rewinding chain to upgrade configuration", "err", compat) @@ -485,7 +507,7 @@ func (bc *BlockChain) loadLastState() error { } bc.hc.SetCurrentHeader(headHeader) - // Restore the last known head fast block + // Restore the last known head snap block bc.currentSnapBlock.Store(headBlock.Header()) headFastBlockGauge.Update(int64(headBlock.NumberU64())) @@ -520,21 +542,21 @@ func (bc *BlockChain) loadLastState() error { } log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0))) if headBlock.Hash() != currentSnapBlock.Hash() { - fastTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64()) - log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0))) + snapTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64()) + log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0))) } if currentFinalBlock != nil { finalTd := bc.GetTd(currentFinalBlock.Hash(), currentFinalBlock.Number.Uint64()) log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0))) } if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { - log.Info("Loaded last fast-sync pivot marker", "number", *pivot) + log.Info("Loaded last snap-sync pivot marker", "number", *pivot) } return nil } // SetHead rewinds the local chain to a new head. Depending on whether the node -// was fast synced or full synced and in which state, the method will try to +// was snap synced or full synced and in which state, the method will try to // delete minimal data from disk whilst retaining chain consistency. 
func (bc *BlockChain) SetHead(head uint64) error { if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil { @@ -555,7 +577,7 @@ func (bc *BlockChain) SetHead(head uint64) error { } // SetHeadWithTimestamp rewinds the local chain to a new head that has at max -// the given timestamp. Depending on whether the node was fast synced or full +// the given timestamp. Depending on whether the node was snap synced or full // synced and in which state, the method will try to delete minimal data from // disk whilst retaining chain consistency. func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error { @@ -601,7 +623,7 @@ func (bc *BlockChain) SetSafe(header *types.Header) { // setHeadBeyondRoot rewinds the local chain to a new head with the extra condition // that the rewind must pass the specified state root. This method is meant to be // used when rewinding with snapshots enabled to ensure that we go back further than -// persistent disk layer. Depending on whether the node was fast synced or full, and +// persistent disk layer. Depending on whether the node was snap synced or full, and // in which state, the method will try to delete minimal data from disk whilst // retaining chain consistency. // @@ -624,6 +646,25 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha pivot := rawdb.ReadLastPivotNumber(bc.db) frozen, _ := bc.db.Ancients() + // resetState resets the persistent state to genesis if it's not available. + resetState := func() { + // Short circuit if the genesis state is already present. + if bc.HasState(bc.genesisBlock.Root()) { + return + } + // Reset the state database to empty for committing genesis state. + // Note, it should only happen in path-based scheme and Reset function + // is also only call-able in this mode. + if bc.triedb.Scheme() == rawdb.PathScheme { + if err := bc.triedb.Reset(types.EmptyRootHash); err != nil { + log.Crit("Failed to clean state", "err", err) // Shouldn't happen + } + } + // Write genesis state into database. + if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { + log.Crit("Failed to commit genesis state", "err", err) + } + } updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) { // Rewind the blockchain, ensuring we don't end up with a stateless head // block. 
Note, depth equality is permitted to allow using SetHead as a @@ -633,6 +674,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if newHeadBlock == nil { log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock + resetState() } else { // Block exists, keep rewinding until we find one with state, or until // we exceed the optional threshold @@ -644,7 +686,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root { beyondRoot, rootNumber = true, newHeadBlock.NumberU64() } - if !bc.HasState(newHeadBlock.Root()) { + if !bc.HasState(newHeadBlock.Root()) && !bc.stateRecoverable(newHeadBlock.Root()) { log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) if pivot == nil || newHeadBlock.NumberU64() > *pivot { parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) @@ -661,16 +703,12 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha } if beyondRoot || newHeadBlock.NumberU64() == 0 { if newHeadBlock.NumberU64() == 0 { - // Recommit the genesis state into disk in case the rewinding destination - // is genesis block and the relevant state is gone. In the future this - // rewinding destination can be the earliest block stored in the chain - // if the historical chain pruning is enabled. In that case the logic - // needs to be improved here. - if !bc.HasState(bc.genesisBlock.Root()) { - if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { - log.Crit("Failed to commit genesis state", "err", err) - } - log.Debug("Recommitted genesis state to disk") + resetState() + } else if !bc.HasState(newHeadBlock.Root()) { + // Rewind to a block with recoverable state. If the state is + // missing, run the state recovery here. + if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil { + log.Crit("Failed to rollback state", "err", err) // Shouldn't happen + } } log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) @@ -689,7 +727,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha bc.currentBlock.Store(newHeadBlock.Header()) headBlockGauge.Update(int64(newHeadBlock.NumberU64())) } - // Rewind the fast block in a simpleton way to the target head + // Rewind the snap block in a simple manner to the target head if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() { newHeadSnapBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) // If either block is nil, reset to the genesis state @@ -725,7 +763,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha if num+1 <= frozen { // Truncate all related data (header, total difficulty, body, receipt // and canonical hash) from the ancient store. - if err := bc.db.TruncateHead(num); err != nil { + if _, err := bc.db.TruncateHead(num); err != nil { log.Crit("Failed to truncate ancient data", "number", num, "err", err) } // Remove the hash <-> number mapping from the active store.
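The new rewind condition pairs HasState with stateRecoverable: in the path scheme a state that is gone from disk may still be reachable by rolling back reverse diffs. A self-contained sketch of that decision, assuming the v1.13-era trie database API used elsewhere in this diff (NewDatabase with a PathDB config, Recoverable, Recover):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/trie"
    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

func main() {
    // Path-scheme trie database backed by a throwaway key-value store.
    triedb := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults})
    defer triedb.Close()

    root := common.HexToHash("0x01") // hypothetical target state root
    if ok, _ := triedb.Recoverable(root); ok {
        // State is gone but reverse diffs can regenerate it in place.
        if err := triedb.Recover(root); err != nil {
            fmt.Println("rollback failed:", err)
        }
    } else {
        fmt.Println("state neither present nor recoverable, keep rewinding")
    }
}

The hash scheme never reports a root as recoverable, which is exactly why the stateRecoverable helper added in blockchain_reader.go short-circuits on rawdb.HashScheme.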
@@ -747,7 +785,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha } } else { // Rewind the chain to the requested head and keep going backwards until a - // block with a state is found or fast sync pivot is passed + // block with a state is found or snap sync pivot is passed if time > 0 { log.Warn("Rewinding blockchain to timestamp", "target", time) bc.hc.SetHeadWithTimestamp(time, updateFn, delFn) @@ -784,7 +822,13 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { if block == nil { return fmt.Errorf("non existent block [%x..]", hash[:4]) } + // Reset the trie database with the fresh snap synced state. root := block.Root() + if bc.triedb.Scheme() == rawdb.PathScheme { + if err := bc.triedb.Reset(root); err != nil { + return err + } + } if !bc.HasState(root) { return fmt.Errorf("non existent state [%x..]", root[:4]) } @@ -865,7 +909,7 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { return fmt.Errorf("export failed on #%d: not found", nr) } if nr > first && block.ParentHash() != parentHash { - return fmt.Errorf("export failed: chain reorg during export") + return errors.New("export failed: chain reorg during export") } parentHash = block.Hash() if err := block.EncodeRLP(w); err != nil { @@ -881,7 +925,7 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { // writeHeadBlock injects a new head block into the current block chain. This method // assumes that the block is indeed a true head. It will also reset the head -// header and the head fast sync block to this very same block if they are older +// header and the head snap sync block to this very same block if they are older // or if they are on a different side chain. // // Note, this function assumes that the `mu` mutex is held! @@ -949,46 +993,47 @@ func (bc *BlockChain) Stop() { log.Error("Failed to journal state snapshot", "err", err) } } + if bc.triedb.Scheme() == rawdb.PathScheme { + // Ensure that the in-memory trie nodes are journaled to disk properly. + if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil { + log.Info("Failed to journal in-memory trie nodes", "err", err) + } + } else { + // Ensure the state of a recent block is also stored to disk before exiting. + // We're writing three different states to catch different restart scenarios: + // - HEAD: So we don't need to reprocess any blocks in the general case + // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle + // - HEAD-127: So we have a hard limit on the number of blocks reexecuted + if !bc.cacheConfig.TrieDirtyDisabled { + triedb := bc.triedb - // Ensure the state of a recent block is also stored to disk before exiting. 
- // We're writing three different states to catch different restart scenarios: - // - HEAD: So we don't need to reprocess any blocks in the general case - // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle - // - HEAD-127: So we have a hard limit on the number of blocks reexecuted - if !bc.cacheConfig.TrieDirtyDisabled { - triedb := bc.triedb + for _, offset := range []uint64{0, 1, TriesInMemory - 1} { + if number := bc.CurrentBlock().Number.Uint64(); number > offset { + recent := bc.GetBlockByNumber(number - offset) - for _, offset := range []uint64{0, 1, TriesInMemory - 1} { - if number := bc.CurrentBlock().Number.Uint64(); number > offset { - recent := bc.GetBlockByNumber(number - offset) - - log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) - if err := triedb.Commit(recent.Root(), true); err != nil { + log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) + if err := triedb.Commit(recent.Root(), true); err != nil { + log.Error("Failed to commit recent state trie", "err", err) + } + } + } + if snapBase != (common.Hash{}) { + log.Info("Writing snapshot state to disk", "root", snapBase) + if err := triedb.Commit(snapBase, true); err != nil { log.Error("Failed to commit recent state trie", "err", err) } } - } - if snapBase != (common.Hash{}) { - log.Info("Writing snapshot state to disk", "root", snapBase) - if err := triedb.Commit(snapBase, true); err != nil { - log.Error("Failed to commit recent state trie", "err", err) + for !bc.triegc.Empty() { + triedb.Dereference(bc.triegc.PopItem()) + } + if _, nodes, _ := triedb.Size(); nodes != 0 { // all memory is contained within the nodes return for hashdb + log.Error("Dangling trie nodes after full cleanup") } } - for !bc.triegc.Empty() { - triedb.Dereference(bc.triegc.PopItem()) - } - if size, _ := triedb.Size(); size != 0 { - log.Error("Dangling trie nodes after full cleanup") - } } - // Flush the collected preimages to disk - if err := bc.stateCache.TrieDB().Close(); err != nil { - log.Error("Failed to close trie db", "err", err) - } - // Ensure all live cached entries be saved into disk, so that we can skip - // cache warmup when node restarts. - if bc.cacheConfig.TrieCleanJournal != "" { - bc.triedb.SaveCache(bc.cacheConfig.TrieCleanJournal) + // Close the trie database, release all the held resources as the last step. 
+ if err := bc.triedb.Close(); err != nil { + log.Error("Failed to close trie database", "err", err) } log.Info("Blockchain stopped") } @@ -1013,8 +1058,8 @@ func (bc *BlockChain) procFutureBlocks() { } } if len(blocks) > 0 { - sort.Slice(blocks, func(i, j int) bool { - return blocks[i].NumberU64() < blocks[j].NumberU64() + slices.SortFunc(blocks, func(a, b *types.Block) int { + return a.Number().Cmp(b.Number()) }) // Insert one by one as chain insertion needs contiguous ancestry between blocks for i := range blocks { @@ -1045,19 +1090,30 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ ancientReceipts, liveReceipts []types.Receipts ) // Do a sanity check that the provided chain is actually ordered and linked - for i := 0; i < len(blockChain); i++ { + for i, block := range blockChain { if i != 0 { - if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { - log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), - "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) - return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, blockChain[i-1].NumberU64(), - blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) + prev := blockChain[i-1] + if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() { + log.Error("Non contiguous receipt insert", + "number", block.Number(), "hash", block.Hash(), "parent", block.ParentHash(), + "prevnumber", prev.Number(), "prevhash", prev.Hash()) + return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", + i-1, prev.NumberU64(), prev.Hash().Bytes()[:4], + i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4]) } } - if blockChain[i].NumberU64() <= ancientLimit { - ancientBlocks, ancientReceipts = append(ancientBlocks, blockChain[i]), append(ancientReceipts, receiptChain[i]) + if block.NumberU64() <= ancientLimit { + ancientBlocks, ancientReceipts = append(ancientBlocks, block), append(ancientReceipts, receiptChain[i]) } else { - liveBlocks, liveReceipts = append(liveBlocks, blockChain[i]), append(liveReceipts, receiptChain[i]) + liveBlocks, liveReceipts = append(liveBlocks, block), append(liveReceipts, receiptChain[i]) + } + + // Here we also validate that blob transactions in the block do not contain a sidecar. + // While the sidecar does not affect the block hash / tx hash, sending blobs within a block is not allowed. + for txIndex, tx := range block.Transactions() { + if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil { + return 0, fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), txIndex) + } } } @@ -1067,7 +1123,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ size = int64(0) ) - // updateHead updates the head fast sync block if the inserted blocks are better + // updateHead updates the head snap sync block if the inserted blocks are better // and returns an indicator whether the inserted blocks are canonical. 
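InsertReceiptChain now rejects blob transactions that still carry their sidecar; a mined block references blobs by versioned hash only. The same check extracted as a standalone helper (a sketch, the diff performs this inline), using the types accessors shown above:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"
)

// validateNoSidecars mirrors the inline check: a block received from the
// network must not embed blob data, only the versioned hashes.
func validateNoSidecars(block *types.Block) error {
    for i, tx := range block.Transactions() {
        if tx.Type() == types.BlobTxType && tx.BlobTxSidecar() != nil {
            return fmt.Errorf("block #%d contains unexpected blob sidecar in tx at index %d", block.NumberU64(), i)
        }
    }
    return nil
}

func main() {
    // An empty block trivially passes; prints <nil>.
    fmt.Println(validateNoSidecars(types.NewBlockWithHeader(&types.Header{})))
}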
updateHead := func(head *types.Block) bool { if !bc.chainmu.TryLock() { @@ -1153,7 +1209,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ size += int64(batch.ValueSize()) if err = batch.Write(); err != nil { snapBlock := bc.CurrentSnapBlock().Number.Uint64() - if err := bc.db.TruncateHead(snapBlock + 1); err != nil { + if _, err := bc.db.TruncateHead(snapBlock + 1); err != nil { log.Error("Can't truncate ancient store after failed insert", "err", err) } return 0, err @@ -1166,12 +1222,12 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if err := bc.db.Sync(); err != nil { return 0, err } - // Update the current fast block because all block data is now present in DB. + // Update the current snap block because all block data is now present in DB. previousSnapBlock := bc.CurrentSnapBlock().Number.Uint64() if !updateHead(blockChain[len(blockChain)-1]) { // We end up here if the header chain has reorg'ed, and the blocks/receipts // don't match the canonical chain. - if err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil { + if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil { log.Error("Can't truncate ancient store after failed insert", "err", err) } return 0, errSideChainReceipts @@ -1354,10 +1410,15 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. log.Crit("Failed to write block into disk", "err", err) } // Commit all cached state changes into underlying memory database. - root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number())) + root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) if err != nil { return err } + // If node is running in path mode, skip explicit gc operation + // which is unnecessary in this mode. + if bc.triedb.Scheme() == rawdb.PathScheme { + return nil + } // If we're running an archive node, always flush if bc.cacheConfig.TrieDirtyDisabled { return bc.triedb.Commit(root, false) @@ -1366,15 +1427,15 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive bc.triegc.Push(root, -int64(block.NumberU64())) - current := block.NumberU64() // Flush limits are not considered for the first TriesInMemory blocks. + current := block.NumberU64() if current <= TriesInMemory { return nil } // If we exceeded our memory allowance, flush matured singleton nodes to disk var ( - nodes, imgs = bc.triedb.Size() - limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 + _, nodes, imgs = bc.triedb.Size() // all memory is contained within the nodes return for hashdb + limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 ) if nodes > limit || imgs > 4*1024*1024 { bc.triedb.Cap(limit - ethdb.IdealBatchSize) @@ -1613,11 +1674,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) block, err = it.next() } // The remaining blocks are still known blocks, the only scenario here is: - // During the fast sync, the pivot point is already submitted but rollback + // During the snap sync, the pivot point is already submitted but rollback // happens. Then node resets the head full block to a lower height via `rollback` // and leaves a few known blocks in the database. 
// - // When node runs a fast sync again, it can re-import a batch of known blocks via + // When node runs a snap sync again, it can re-import a batch of known blocks via // `insertChain` while a part of them have higher total difficulty than current // head full block(new pivot point). for block != nil && bc.skipBlock(err, it) { @@ -1827,8 +1888,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) stats.processed++ stats.usedGas += usedGas - dirty, _ := bc.triedb.Size() - stats.report(chain, it.index, dirty, setHead) + var snapDiffItems, snapBufItems common.StorageSize + if bc.snaps != nil { + snapDiffItems, snapBufItems = bc.snaps.Size() + } + trieDiffNodes, trieBufNodes, _ := bc.triedb.Size() + stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead) if !setHead { // After merge we expect few side chains. Simply count @@ -1970,6 +2035,12 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i ) parent := it.previous() for parent != nil && !bc.HasState(parent.Root) { + if bc.stateRecoverable(parent.Root) { + if err := bc.triedb.Recover(parent.Root); err != nil { + return 0, err + } + break + } hashes = append(hashes, parent.Hash()) numbers = append(numbers, parent.Number.Uint64()) @@ -2026,6 +2097,12 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) parent = block ) for parent != nil && !bc.HasState(parent.Root()) { + if bc.stateRecoverable(parent.Root()) { + if err := bc.triedb.Recover(parent.Root()); err != nil { + return common.Hash{}, err + } + break + } hashes = append(hashes, parent.Hash()) numbers = append(numbers, parent.NumberU64()) parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) @@ -2062,17 +2139,22 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) // collectLogs collects the logs that were generated or removed during // the processing of a block. These logs are later announced as deleted or reborn. 
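The collectLogs change just below needs the block's blob gas price before deriving receipt fields, since DeriveFields gained a blobGasPrice parameter. A sketch of that derivation, assuming the eip4844 helper package lives at consensus/misc/eip4844 as referenced in the diff:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
    "github.com/ethereum/go-ethereum/core/types"
)

// blobGasPrice returns nil for pre-Cancun headers, which carry no excess
// blob gas; receipts of such blocks have no blob fields to derive.
func blobGasPrice(header *types.Header) *big.Int {
    if header.ExcessBlobGas == nil {
        return nil
    }
    return eip4844.CalcBlobFee(*header.ExcessBlobGas)
}

func main() {
    zero := uint64(0)
    fmt.Println(blobGasPrice(&types.Header{}))                     // nil: pre-Cancun
    fmt.Println(blobGasPrice(&types.Header{ExcessBlobGas: &zero})) // minimum blob fee
}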
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { + var blobGasPrice *big.Int + excessBlobGas := b.ExcessBlobGas() + if excessBlobGas != nil { + blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas) + } receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64()) - receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), b.Transactions()) - + if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil { + log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err) + } var logs []*types.Log for _, receipt := range receipts { for _, log := range receipt.Logs { - l := *log if removed { - l.Removed = true + log.Removed = true } - logs = append(logs, &l) + logs = append(logs, log) } } return logs @@ -2114,10 +2196,10 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { } } if oldBlock == nil { - return errors.New("invalid old chain") + return errInvalidOldChain } if newBlock == nil { - return errors.New("invalid new chain") + return errInvalidNewChain } // Both sides of the reorg are at the same number, reduce both until the common // ancestor is found @@ -2137,11 +2219,11 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { // Step back with both chains oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) if oldBlock == nil { - return fmt.Errorf("invalid old chain") + return errInvalidOldChain } newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) if newBlock == nil { - return fmt.Errorf("invalid new chain") + return errInvalidNewChain } } @@ -2376,6 +2458,12 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) { defer func() { close(done) }() + // If head is 0, it means the chain is just initialized and no blocks are inserted, + // so there is nothing to index yet. + if head == 0 { + return + } + // The tail flag is non-existent, meaning the node is just initialized // and all blocks (some may be in the ancient store) are not indexed yet. if tail == nil { @@ -2433,6 +2521,15 @@ func (bc *BlockChain) maintainTxIndex() { return } defer sub.Unsubscribe() + log.Info("Initialized transaction indexer", "limit", bc.TxLookupLimit()) + + // Launch the initial processing if the chain is not empty. This step is + // useful in the scenario where the chain has no progress and the indexer + // is never triggered. + if head := rawdb.ReadHeadBlock(bc.db); head != nil { + done = make(chan struct{}) + go bc.indexBlocks(rawdb.ReadTxIndexTail(bc.db), head.NumberU64(), done) + } for { select { @@ -2518,3 +2615,8 @@ func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Pro func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { bc.flushInterval.Store(int64(interval)) } + +// GetTrieFlushInterval gets the in-memory trie flush interval +func (bc *BlockChain) GetTrieFlushInterval() time.Duration { + return time.Duration(bc.flushInterval.Load()) +} diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index 8f496e182..9bf662b6b 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second // report prints statistics if some number of blocks have been processed // or more than a few seconds have passed since the last message.
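maintainTxIndex now schedules one indexing round at startup when a head block already exists, rather than waiting for the first ChainHeadEvent. The same bootstrap as a free function (a sketch; the indexBlocks parameter stands in for the blockchain method of the same name):

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// kickTxIndexer launches the initial indexing pass if the chain is non-empty.
// It returns the done channel the caller should wait on, or nil if there is
// no head block yet.
func kickTxIndexer(db ethdb.Database, indexBlocks func(tail *uint64, head uint64, done chan struct{})) chan struct{} {
    head := rawdb.ReadHeadBlock(db)
    if head == nil {
        return nil
    }
    done := make(chan struct{})
    go indexBlocks(rawdb.ReadTxIndexTail(db), head.NumberU64(), done)
    return done
}

func main() {
    db := rawdb.NewMemoryDatabase() // empty chain: kickTxIndexer returns nil
    if done := kickTxIndexer(db, func(tail *uint64, head uint64, done chan struct{}) { close(done) }); done != nil {
        <-done
    }
}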
-func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) { +func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, snapBufItems, trieDiffNodes, triebufNodes common.StorageSize, setHead bool) { // Fetch the timings for the batch var ( now = mclock.Now() @@ -63,7 +63,16 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) } - context = append(context, []interface{}{"dirty", dirty}...) + if snapDiffItems != 0 || snapBufItems != 0 { // snapshots enabled + context = append(context, []interface{}{"snapdiffs", snapDiffItems}...) + if snapBufItems != 0 { // future snapshot refactor + context = append(context, []interface{}{"snapdirty", snapBufItems}...) + } + } + if trieDiffNodes != 0 { // pathdb + context = append(context, []interface{}{"triediffs", trieDiffNodes}...) + } + context = append(context, []interface{}{"triedirty", triebufNodes}...) if st.queued > 0 { context = append(context, []interface{}{"queued", st.queued}...) diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index fd65cb2db..466a86c14 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -293,10 +293,16 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { return bc.HasState(block.Root()) } -// TrieNode retrieves a blob of data associated with a trie node -// either from ephemeral in-memory cache, or from persistent storage. -func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { - return bc.stateCache.TrieDB().Node(hash) +// stateRecoverable checks if the specified state is recoverable. +// Note, this function assumes the state is not present, because +// state is not treated as recoverable if it's available, thus +// false will be returned in this case. +func (bc *BlockChain) stateRecoverable(root common.Hash) bool { + if bc.triedb.Scheme() == rawdb.HashScheme { + return false + } + result, _ := bc.triedb.Recoverable(root) + return result } // ContractCodeWithPrefix retrieves a blob of data associated with a contract @@ -306,9 +312,11 @@ func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { // new code scheme. func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) { type codeReader interface { - ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) + ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) } - return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash) + // TODO(rjl493456442) The associated account address is also required + // in Verkle scheme. Fix it once snap-sync is supported for Verkle. + return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Address{}, hash) } // State returns a new mutable state based on the current HEAD block. 
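The insert report above no longer prints a single "dirty" figure: snapshot diff layers, the snapshot write buffer, trie diff layers (populated only by the path scheme) and the trie dirty buffer are reported separately. A sketch of the context assembly, with the same zero-value conventions the diff uses:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

// reportContext builds the key/value log context the way the reworked
// report does: zero values mean the corresponding subsystem is disabled.
func reportContext(snapDiffs, snapBuf, trieDiffs, trieBuf common.StorageSize) []interface{} {
    var ctx []interface{}
    if snapDiffs != 0 || snapBuf != 0 { // snapshots enabled
        ctx = append(ctx, "snapdiffs", snapDiffs)
        if snapBuf != 0 {
            ctx = append(ctx, "snapdirty", snapBuf)
        }
    }
    if trieDiffs != 0 { // path scheme only
        ctx = append(ctx, "triediffs", trieDiffs)
    }
    return append(ctx, "triedirty", trieBuf)
}

func main() {
    // Hash scheme with snapshots disabled: only the dirty buffer shows up.
    fmt.Println(reportContext(0, 0, 0, common.StorageSize(4096)))
}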
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 6a4a9c9d2..b2df39d17 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -22,6 +22,7 @@ package core import ( "math/big" + "path" "testing" "time" @@ -1749,16 +1750,24 @@ func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { } func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + testRepairWithScheme(t, tt, snapshots, scheme) + } +} + +func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { // It's hard to follow the test case, visualize the input //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(true)) // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, + Ephemeral: true, }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) @@ -1777,6 +1786,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, // Disable snapshot by default + StateScheme: scheme, } ) defer engine.Close() @@ -1806,7 +1816,9 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { t.Fatalf("Failed to import canonical chain start: %v", err) } if tt.commitBlock > 0 { - chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false) + if err := chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false); err != nil { + t.Fatalf("Failed to flush trie state: %v", err) + } if snapshots { if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) @@ -1828,21 +1840,22 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) } // Pull the plug on the database, simulating a hard crash + chain.triedb.Close() db.Close() chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us db, err = rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, + Ephemeral: true, }) - if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } defer db.Close() - newChain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) + newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -1885,17 +1898,22 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // In this case the snapshot layer of B3 is not created because of existent // state. 
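Every reopened test database in these files now follows the same shape: the freezer moves into its own subdirectory instead of sharing the datadir, and the Ephemeral flag skips fsyncs for throwaway data. Factored into a helper (a sketch, not a function from the diff):

package main

import (
    "os"
    "path"

    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// openTestDB opens a disk database with a dedicated ancient store, matching
// the rawdb.OpenOptions usage in the tests above.
func openTestDB(datadir string) (ethdb.Database, error) {
    return rawdb.Open(rawdb.OpenOptions{
        Directory:         datadir,
        AncientsDirectory: path.Join(datadir, "ancient"),
        Ephemeral:         true, // test data, no need to fsync
    })
}

func main() {
    dir, _ := os.MkdirTemp("", "geth-test")
    defer os.RemoveAll(dir)
    if db, err := openTestDB(dir); err == nil {
        db.Close()
    }
}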
func TestIssue23496(t *testing.T) { + testIssue23496(t, rawdb.HashScheme) + testIssue23496(t, rawdb.PathScheme) +} + +func testIssue23496(t *testing.T, scheme string) { // It's hard to follow the test case, visualize the input //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) - if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -1908,15 +1926,8 @@ func TestIssue23496(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), } engine = ethash.NewFullFaker() - config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - } ) - chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -1929,7 +1940,7 @@ func TestIssue23496(t *testing.T) { if _, err := chain.InsertChain(blocks[:1]); err != nil { t.Fatalf("Failed to import canonical chain start: %v", err) } - chain.stateCache.TrieDB().Commit(blocks[0].Root(), false) + chain.triedb.Commit(blocks[0].Root(), false) // Insert block B2 and commit the snapshot into disk if _, err := chain.InsertChain(blocks[1:2]); err != nil { @@ -1943,7 +1954,7 @@ func TestIssue23496(t *testing.T) { if _, err := chain.InsertChain(blocks[2:3]); err != nil { t.Fatalf("Failed to import canonical chain start: %v", err) } - chain.stateCache.TrieDB().Commit(blocks[2].Root(), false) + chain.triedb.Commit(blocks[2].Root(), false) // Insert the remaining blocks if _, err := chain.InsertChain(blocks[3:]); err != nil { @@ -1951,20 +1962,22 @@ func TestIssue23496(t *testing.T) { } // Pull the plug on the database, simulating a hard crash + chain.triedb.Close() db.Close() chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us db, err = rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, + Ephemeral: true, }) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } defer db.Close() - chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -1976,8 +1989,12 @@ func TestIssue23496(t *testing.T) { if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) { t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4)) } - if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(1) { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(1)) + expHead := uint64(1) + if scheme == rawdb.PathScheme { + expHead = uint64(2) + } + if head := chain.CurrentBlock(); head.Number.Uint64() != expHead { + t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead) } // Reinsert B2-B4 diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 9dc350db8..fa739f924 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -22,6 +22,7 @@ package core import ( 
"fmt" "math/big" + "path" "strings" "testing" "time" @@ -29,9 +30,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // rewindTest is a test case for chain rollback upon user request. @@ -1949,16 +1954,24 @@ func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { } func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + testSetHeadWithScheme(t, tt, snapshots, scheme) + } +} + +func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { // It's hard to follow the test case, visualize the input // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(false)) // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, + Ephemeral: true, }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) @@ -1977,6 +1990,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, // Disable snapshot + StateScheme: scheme, } ) if snapshots { @@ -2007,7 +2021,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { t.Fatalf("Failed to import canonical chain start: %v", err) } if tt.commitBlock > 0 { - chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false) + chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false) if snapshots { if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) @@ -2017,13 +2031,17 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { t.Fatalf("Failed to import canonical chain tail: %v", err) } - // Manually dereference anything not committed to not have to work with 128+ tries - for _, block := range sideblocks { - chain.stateCache.TrieDB().Dereference(block.Root()) - } - for _, block := range canonblocks { - chain.stateCache.TrieDB().Dereference(block.Root()) + // Reopen the trie database without persisting in-memory dirty nodes. + chain.triedb.Close() + dbconfig := &trie.Config{} + if scheme == rawdb.PathScheme { + dbconfig.PathDB = pathdb.Defaults + } else { + dbconfig.HashDB = hashdb.Defaults } + chain.triedb = trie.NewDatabase(chain.db, dbconfig) + chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb) + // Force run a freeze cycle type freezer interface { Freeze(threshold uint64) error diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 3bd876a58..dd012c430 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -24,6 +24,7 @@ import ( "fmt" "math/big" "os" + "path" "strings" "testing" "time" @@ -39,6 +40,7 @@ import ( // snapshotTestBasic wraps the common testing fields in the snapshot tests. 
type snapshotTestBasic struct { + scheme string // Disk scheme used for storing trie nodes chainBlocks int // Number of blocks to generate for the canonical chain snapshotBlock uint64 // Block number of the relevant snapshot disk layer commitBlock uint64 // Block number for which to commit the state to disk @@ -51,6 +53,7 @@ type snapshotTestBasic struct { // share fields, set in runtime datadir string + ancient string db ethdb.Database genDb ethdb.Database engine consensus.Engine @@ -60,10 +63,12 @@ type snapshotTestBasic struct { func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) { // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, + Ephemeral: true, }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) @@ -75,13 +80,8 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo Config: params.AllEthashProtocolChanges, } engine = ethash.NewFullFaker() - - // Snapshot is enabled, the first snapshot is created from the Genesis. - // The snapshot memory allowance is 256MB, it means no snapshot flush - // will happen during the block insertion. - cacheConfig = defaultCacheConfig ) - chain, err := NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -102,7 +102,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo startPoint = point if basic.commitBlock > 0 && basic.commitBlock == point { - chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), false) + chain.TrieDB().Commit(blocks[point-1].Root(), false) } if basic.snapshotBlock > 0 && basic.snapshotBlock == point { // Flushing the entire snap tree into the disk, the @@ -121,6 +121,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Set runtime fields basic.datadir = datadir + basic.ancient = ancient basic.db = db basic.genDb = genDb basic.engine = engine @@ -210,6 +211,7 @@ func (basic *snapshotTestBasic) teardown() { basic.db.Close() basic.genDb.Close() os.RemoveAll(basic.datadir) + os.RemoveAll(basic.ancient) } // snapshotTest is a test case type for normal snapshot recovery. @@ -226,7 +228,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { // Restart the chain normally chain.Stop() - newchain, err := NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -235,7 +237,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { snaptest.verify(t, newchain, blocks) } -// crashSnapshotTest is a test case type for innormal snapshot recovery. +// crashSnapshotTest is a test case type for irregular snapshot recovery. // It can be used for testing that restart Geth after the crash. 
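When the sethead test above wants to drop all in-memory dirty nodes, it rebuilds the trie database from the disk image with the default backend config for whichever scheme the test round exercises. That reopen step as a helper (a sketch using the same imports the test adds):

package main

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/trie"
    "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
    "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

// reopenTrieDB builds a fresh trie database on top of the given disk
// database, discarding anything that was only held in memory before.
func reopenTrieDB(db ethdb.Database, scheme string) *trie.Database {
    config := &trie.Config{}
    if scheme == rawdb.PathScheme {
        config.PathDB = pathdb.Defaults
    } else {
        config.HashDB = hashdb.Defaults
    }
    return trie.NewDatabase(db, config)
}

func main() {
    triedb := reopenTrieDB(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
    defer triedb.Close()
}

In the diff, state.NewDatabaseWithNodeDB then rewires the state cache onto the freshly opened trie database.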
type crashSnapshotTest struct { snapshotTestBasic @@ -251,13 +253,14 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { db := chain.db db.Close() chain.stopWithoutSaving() + chain.triedb.Close() // Start a new blockchain back up and see where the repair leads us newdb, err := rawdb.Open(rawdb.OpenOptions{ Directory: snaptest.datadir, - AncientsDirectory: snaptest.datadir, + AncientsDirectory: snaptest.ancient, + Ephemeral: true, }) - if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } @@ -267,13 +270,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // the crash, we do restart twice here: one after the crash and one // after the normal stop. It's used to ensure the broken snapshot // can be detected all the time. - newchain, err := NewBlockChain(newdb, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } newchain.Stop() - newchain, err = NewBlockChain(newdb, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -300,7 +303,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { // Insert blocks without enabling snapshot if gapping is required. chain.Stop() - gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {}) + gappedBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {}) // Insert a few more blocks without enabling snapshot var cacheConfig = &CacheConfig{ @@ -308,6 +311,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, + StateScheme: snaptest.scheme, } newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { @@ -317,7 +321,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { newchain.Stop() // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -345,7 +349,7 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) { chain.SetHead(snaptest.setHead) chain.Stop() - newchain, err := NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -379,22 +383,24 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, + StateScheme: snaptest.scheme, } newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to 
recreate chain: %v", err) } - newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {}) + newBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {}) newchain.InsertChain(newBlocks) newchain.Stop() - // Restart the chain, the wiper should starts working + // Restart the chain, the wiper should start working config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 256, SnapshotWait: false, // Don't wait rebuild + StateScheme: snaptest.scheme, } tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { @@ -402,14 +408,15 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { } // Simulate the blockchain crash. + tmp.triedb.Close() tmp.stopWithoutSaving() - newchain, err = NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - defer newchain.Stop() snaptest.verify(t, newchain, blocks) + newchain.Stop() } // Tests a Geth restart with valid snapshot. Before the shutdown, all snapshot @@ -433,20 +440,23 @@ func TestRestartWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C8 // Expected snapshot disk : G - test := &snapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 8, - expSnapshotBottom: 0, // Initial disk layer built from genesis - }, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &snapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 8, + expSnapshotBottom: 0, // Initial disk layer built from genesis + }, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -472,20 +482,23 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - test := &crashSnapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &crashSnapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. 
In this case the @@ -511,20 +524,23 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C2 // Expected snapshot disk : C4 - test := &crashSnapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 2, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 2, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &crashSnapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 2, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 2, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case @@ -550,20 +566,27 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - test := &crashSnapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 6, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + expHead := uint64(0) + if scheme == rawdb.PathScheme { + expHead = uint64(4) + } + test := &crashSnapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 6, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: expHead, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } // Tests a Geth was running with snapshot enabled. Then restarts without @@ -587,21 +610,24 @@ func TestGappedNewSnapshot(t *testing.T) { // Expected head fast block: C10 // Expected head block : C10 // Expected snapshot disk : C10 - test := &gappedSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD - }, - gapped: 2, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &gappedSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD + }, + gapped: 2, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } // Tests the Geth was running with snapshot enabled and resetHead is applied. 
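From here on, each snapshot test wraps its body in a loop over both state schemes. An equivalent driver using subtests (a sketch of an alternative, not what the diff does) would label failures by scheme:

package core

import (
    "testing"

    "github.com/ethereum/go-ethereum/core/rawdb"
)

// testBothSchemes runs the given test body once per state scheme, so a
// failure report names the backend that broke.
func testBothSchemes(t *testing.T, run func(t *testing.T, scheme string)) {
    for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
        t.Run(scheme, func(t *testing.T) { run(t, scheme) })
    }
}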
@@ -625,21 +651,24 @@ func TestSetHeadWithNewSnapshot(t *testing.T) { // Expected head fast block: C4 // Expected head block : C4 // Expected snapshot disk : G - test := &setHeadSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 4, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - expSnapshotBottom: 0, // The initial disk layer is built from the genesis - }, - setHead: 4, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &setHeadSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 4, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + expSnapshotBottom: 0, // The initial disk layer is built from the genesis + }, + setHead: 4, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } // Tests that Geth was running with a complete snapshot and then imports a few @@ -663,19 +692,22 @@ func TestRecoverSnapshotFromWipingCrash(t *testing.T) { // Expected head fast block: C10 // Expected head block : C8 // Expected snapshot disk : C10 - test := &wipeCrashSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, - }, - newBlocks: 2, + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &wipeCrashSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, + }, + newBlocks: 2, + } + test.test(t) + test.teardown() } - test.test(t) - test.teardown() } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 5ec685c99..992f8d168 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -52,7 +52,7 @@ var ( // chain. Depending on the full flag, it creates either a full block chain or a // header only chain. The database and genesis specification for block generation // are also returned in case more test blocks are needed later.
-func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *Genesis, *BlockChain, error) { +func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *Genesis, *BlockChain, error) { var ( genesis = &Genesis{ BaseFee: big.NewInt(params.InitialBaseFee), @@ -60,7 +60,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *G } ) // Initialize a fresh chain with only a genesis block - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) // Create and inject the requested chain if n == 0 { @@ -83,9 +83,9 @@ func newGwei(n int64) *big.Int { } // Test fork of length N starting from block i -func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) { +func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) { // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full) + genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) if err != nil { t.Fatal("could not make new canonical in testFork", err) } @@ -176,7 +176,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error { blockchain.chainmu.MustLock() rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))) rawdb.WriteBlock(blockchain.db, block) - statedb.Commit(false) + statedb.Commit(block.NumberU64(), false) blockchain.chainmu.Unlock() } return nil @@ -198,9 +198,13 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error } return nil } - func TestLastBlock(t *testing.T) { - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true) + testLastBlock(t, rawdb.HashScheme) + testLastBlock(t, rawdb.PathScheme) +} + +func testLastBlock(t *testing.T, scheme string) { + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -217,9 +221,9 @@ func TestLastBlock(t *testing.T) { // Test inserts the blocks/headers after the fork choice rule is changed. // The chain is reorged to whatever specified. -func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) { +func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) { // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full) + genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) if err != nil { t.Fatal("could not make new canonical in testFork", err) } @@ -266,14 +270,20 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b // Tests that given a starting canonical chain of a given size, it can be extended // with various length chains. 
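testBlockChainImport above now passes the block number into StateDB.Commit, which the path database needs to key the resulting state diff by height. A minimal sketch of the new signature against an empty in-memory state, assuming the v1.13-era state package API:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/types"
)

func main() {
    db := state.NewDatabase(rawdb.NewMemoryDatabase())
    statedb, _ := state.New(types.EmptyRootHash, db, nil)

    statedb.AddBalance(common.Address{1}, common.Big1)
    root, err := statedb.Commit(1, true) // block number is now required
    fmt.Println(root, err)
}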
-func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) } -func TestExtendCanonicalBlocks(t *testing.T) { testExtendCanonical(t, true) } +func TestExtendCanonicalHeaders(t *testing.T) { + testExtendCanonical(t, false, rawdb.HashScheme) + testExtendCanonical(t, false, rawdb.PathScheme) +} +func TestExtendCanonicalBlocks(t *testing.T) { + testExtendCanonical(t, true, rawdb.HashScheme) + testExtendCanonical(t, true, rawdb.PathScheme) +} -func testExtendCanonical(t *testing.T, full bool) { +func testExtendCanonical(t *testing.T, full bool, scheme string) { length := 5 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -286,41 +296,53 @@ func testExtendCanonical(t *testing.T, full bool) { } } // Start fork from current height - testFork(t, processor, length, 1, full, better) - testFork(t, processor, length, 2, full, better) - testFork(t, processor, length, 5, full, better) - testFork(t, processor, length, 10, full, better) + testFork(t, processor, length, 1, full, better, scheme) + testFork(t, processor, length, 2, full, better, scheme) + testFork(t, processor, length, 5, full, better, scheme) + testFork(t, processor, length, 10, full, better, scheme) } // Tests that given a starting canonical chain of a given size, it can be extended // with various length chains. -func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) } -func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, true) } +func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { + testExtendCanonicalAfterMerge(t, false, rawdb.HashScheme) + testExtendCanonicalAfterMerge(t, false, rawdb.PathScheme) +} +func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { + testExtendCanonicalAfterMerge(t, true, rawdb.HashScheme) + testExtendCanonicalAfterMerge(t, true, rawdb.PathScheme) +} -func testExtendCanonicalAfterMerge(t *testing.T, full bool) { +func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) { length := 5 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, length, 1, full) - testInsertAfterMerge(t, processor, length, 10, full) + testInsertAfterMerge(t, processor, length, 1, full, scheme) + testInsertAfterMerge(t, processor, length, 10, full, scheme) } // Tests that given a starting canonical chain of a given size, creating shorter // forks do not take canonical ownership. 
-func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) } -func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true) } +func TestShorterForkHeaders(t *testing.T) { + testShorterFork(t, false, rawdb.HashScheme) + testShorterFork(t, false, rawdb.PathScheme) +} +func TestShorterForkBlocks(t *testing.T) { + testShorterFork(t, true, rawdb.HashScheme) + testShorterFork(t, true, rawdb.PathScheme) +} -func testShorterFork(t *testing.T, full bool) { +func testShorterFork(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -333,93 +355,117 @@ func testShorterFork(t *testing.T, full bool) { } } // Sum of numbers must be less than `length` for this to be a shorter fork - testFork(t, processor, 0, 3, full, worse) - testFork(t, processor, 0, 7, full, worse) - testFork(t, processor, 1, 1, full, worse) - testFork(t, processor, 1, 7, full, worse) - testFork(t, processor, 5, 3, full, worse) - testFork(t, processor, 5, 4, full, worse) + testFork(t, processor, 0, 3, full, worse, scheme) + testFork(t, processor, 0, 7, full, worse, scheme) + testFork(t, processor, 1, 1, full, worse, scheme) + testFork(t, processor, 1, 7, full, worse, scheme) + testFork(t, processor, 5, 3, full, worse, scheme) + testFork(t, processor, 5, 4, full, worse, scheme) } // Tests that given a starting canonical chain of a given size, creating shorter // forks do not take canonical ownership. -func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) } -func TestShorterForkBlocksAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, true) } +func TestShorterForkHeadersAfterMerge(t *testing.T) { + testShorterForkAfterMerge(t, false, rawdb.HashScheme) + testShorterForkAfterMerge(t, false, rawdb.PathScheme) +} +func TestShorterForkBlocksAfterMerge(t *testing.T) { + testShorterForkAfterMerge(t, true, rawdb.HashScheme) + testShorterForkAfterMerge(t, true, rawdb.PathScheme) +} -func testShorterForkAfterMerge(t *testing.T, full bool) { +func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 3, full) - testInsertAfterMerge(t, processor, 0, 7, full) - testInsertAfterMerge(t, processor, 1, 1, full) - testInsertAfterMerge(t, processor, 1, 7, full) - testInsertAfterMerge(t, processor, 5, 3, full) - testInsertAfterMerge(t, processor, 5, 4, full) + testInsertAfterMerge(t, processor, 0, 3, full, scheme) + testInsertAfterMerge(t, processor, 0, 7, full, scheme) + testInsertAfterMerge(t, processor, 1, 1, full, scheme) + testInsertAfterMerge(t, processor, 1, 7, full, scheme) + testInsertAfterMerge(t, processor, 5, 3, full, scheme) + testInsertAfterMerge(t, processor, 5, 4, full, scheme) } // Tests that given a starting canonical chain of a given size, creating longer // forks do take canonical ownership. 
-func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) } -func TestLongerForkBlocks(t *testing.T) { testLongerFork(t, true) } +func TestLongerForkHeaders(t *testing.T) { + testLongerFork(t, false, rawdb.HashScheme) + testLongerFork(t, false, rawdb.PathScheme) +} +func TestLongerForkBlocks(t *testing.T) { + testLongerFork(t, true, rawdb.HashScheme) + testLongerFork(t, true, rawdb.PathScheme) +} -func testLongerFork(t *testing.T, full bool) { +func testLongerFork(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 11, full) - testInsertAfterMerge(t, processor, 0, 15, full) - testInsertAfterMerge(t, processor, 1, 10, full) - testInsertAfterMerge(t, processor, 1, 12, full) - testInsertAfterMerge(t, processor, 5, 6, full) - testInsertAfterMerge(t, processor, 5, 8, full) + testInsertAfterMerge(t, processor, 0, 11, full, scheme) + testInsertAfterMerge(t, processor, 0, 15, full, scheme) + testInsertAfterMerge(t, processor, 1, 10, full, scheme) + testInsertAfterMerge(t, processor, 1, 12, full, scheme) + testInsertAfterMerge(t, processor, 5, 6, full, scheme) + testInsertAfterMerge(t, processor, 5, 8, full, scheme) } // Tests that given a starting canonical chain of a given size, creating longer // forks do take canonical ownership. -func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) } -func TestLongerForkBlocksAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, true) } +func TestLongerForkHeadersAfterMerge(t *testing.T) { + testLongerForkAfterMerge(t, false, rawdb.HashScheme) + testLongerForkAfterMerge(t, false, rawdb.PathScheme) +} +func TestLongerForkBlocksAfterMerge(t *testing.T) { + testLongerForkAfterMerge(t, true, rawdb.HashScheme) + testLongerForkAfterMerge(t, true, rawdb.PathScheme) +} -func testLongerForkAfterMerge(t *testing.T, full bool) { +func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 11, full) - testInsertAfterMerge(t, processor, 0, 15, full) - testInsertAfterMerge(t, processor, 1, 10, full) - testInsertAfterMerge(t, processor, 1, 12, full) - testInsertAfterMerge(t, processor, 5, 6, full) - testInsertAfterMerge(t, processor, 5, 8, full) + testInsertAfterMerge(t, processor, 0, 11, full, scheme) + testInsertAfterMerge(t, processor, 0, 15, full, scheme) + testInsertAfterMerge(t, processor, 1, 10, full, scheme) + testInsertAfterMerge(t, processor, 1, 12, full, scheme) + testInsertAfterMerge(t, processor, 5, 6, full, scheme) + testInsertAfterMerge(t, processor, 5, 8, full, scheme) } // Tests that given a starting canonical chain of a given size, creating equal // forks do take canonical ownership. 
-func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) } -func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true) } +func TestEqualForkHeaders(t *testing.T) { + testEqualFork(t, false, rawdb.HashScheme) + testEqualFork(t, false, rawdb.PathScheme) +} +func TestEqualForkBlocks(t *testing.T) { + testEqualFork(t, true, rawdb.HashScheme) + testEqualFork(t, true, rawdb.PathScheme) +} -func testEqualFork(t *testing.T, full bool) { +func testEqualFork(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -432,44 +478,56 @@ func testEqualFork(t *testing.T, full bool) { } } // Sum of numbers must be equal to `length` for this to be an equal fork - testFork(t, processor, 0, 10, full, equal) - testFork(t, processor, 1, 9, full, equal) - testFork(t, processor, 2, 8, full, equal) - testFork(t, processor, 5, 5, full, equal) - testFork(t, processor, 6, 4, full, equal) - testFork(t, processor, 9, 1, full, equal) + testFork(t, processor, 0, 10, full, equal, scheme) + testFork(t, processor, 1, 9, full, equal, scheme) + testFork(t, processor, 2, 8, full, equal, scheme) + testFork(t, processor, 5, 5, full, equal, scheme) + testFork(t, processor, 6, 4, full, equal, scheme) + testFork(t, processor, 9, 1, full, equal, scheme) } // Tests that given a starting canonical chain of a given size, creating equal // forks do take canonical ownership. -func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) } -func TestEqualForkBlocksAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, true) } +func TestEqualForkHeadersAfterMerge(t *testing.T) { + testEqualForkAfterMerge(t, false, rawdb.HashScheme) + testEqualForkAfterMerge(t, false, rawdb.PathScheme) +} +func TestEqualForkBlocksAfterMerge(t *testing.T) { + testEqualForkAfterMerge(t, true, rawdb.HashScheme) + testEqualForkAfterMerge(t, true, rawdb.PathScheme) +} -func testEqualForkAfterMerge(t *testing.T, full bool) { +func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) { length := 10 // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full) + _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } defer processor.Stop() - testInsertAfterMerge(t, processor, 0, 10, full) - testInsertAfterMerge(t, processor, 1, 9, full) - testInsertAfterMerge(t, processor, 2, 8, full) - testInsertAfterMerge(t, processor, 5, 5, full) - testInsertAfterMerge(t, processor, 6, 4, full) - testInsertAfterMerge(t, processor, 9, 1, full) + testInsertAfterMerge(t, processor, 0, 10, full, scheme) + testInsertAfterMerge(t, processor, 1, 9, full, scheme) + testInsertAfterMerge(t, processor, 2, 8, full, scheme) + testInsertAfterMerge(t, processor, 5, 5, full, scheme) + testInsertAfterMerge(t, processor, 6, 4, full, scheme) + testInsertAfterMerge(t, processor, 9, 1, full, scheme) } // Tests that chains missing links do not get accepted by the processor. 
-func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) } -func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) } +func TestBrokenHeaderChain(t *testing.T) { + testBrokenChain(t, false, rawdb.HashScheme) + testBrokenChain(t, false, rawdb.PathScheme) +} +func TestBrokenBlockChain(t *testing.T) { + testBrokenChain(t, true, rawdb.HashScheme) + testBrokenChain(t, true, rawdb.PathScheme) +} -func testBrokenChain(t *testing.T, full bool) { +func testBrokenChain(t *testing.T, full bool, scheme string) { // Make chain starting from genesis - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme) if err != nil { t.Fatalf("failed to make new canonical chain: %v", err) } @@ -491,19 +549,31 @@ func testBrokenChain(t *testing.T, full bool) { // Tests that reorganising a long difficult chain after a short easy one // overwrites the canonical numbers and links in the database. -func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) } -func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) } +func TestReorgLongHeaders(t *testing.T) { + testReorgLong(t, false, rawdb.HashScheme) + testReorgLong(t, false, rawdb.PathScheme) +} +func TestReorgLongBlocks(t *testing.T) { + testReorgLong(t, true, rawdb.HashScheme) + testReorgLong(t, true, rawdb.PathScheme) +} -func testReorgLong(t *testing.T, full bool) { - testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full) +func testReorgLong(t *testing.T, full bool, scheme string) { + testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme) } // Tests that reorganising a short difficult chain after a long easy one // overwrites the canonical numbers and links in the database. -func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) } -func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) } +func TestReorgShortHeaders(t *testing.T) { + testReorgShort(t, false, rawdb.HashScheme) + testReorgShort(t, false, rawdb.PathScheme) +} +func TestReorgShortBlocks(t *testing.T) { + testReorgShort(t, true, rawdb.HashScheme) + testReorgShort(t, true, rawdb.PathScheme) +} -func testReorgShort(t *testing.T, full bool) { +func testReorgShort(t *testing.T, full bool, scheme string) { // Create a long easy chain vs. a short heavy one. Due to difficulty adjustment // we need a fairly long chain of blocks with different difficulties for a short // one to become heavier than a long one. The 96 is an empirical value. @@ -515,12 +585,12 @@ func testReorgShort(t *testing.T, full bool) { for i := 0; i < len(diff); i++ { diff[i] = -9 } - testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full) + testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme) } -func testReorg(t *testing.T, first, second []int64, td int64, full bool) { +func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) { // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -588,12 +658,18 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) { } // Tests that the insertion functions detect banned hashes. 
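// The int64 slices passed to testReorg above are per-block timestamp offsets:
// a negative offset makes a block arrive "faster", which raises its difficulty
// under ethash's adjustment rule, so a short chain of -9 blocks can out-weigh
// a longer chain of ordinary ones. A sketch of how such offsets are applied
// during generation; BlockGen.OffsetTime is assumed here to shift the block
// timestamp and recompute difficulty, as it does elsewhere in this package:
func reorgOffsetsSketch(offsets []int64) func(int, *BlockGen) {
	return func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		b.OffsetTime(offsets[i]) // negative: earlier timestamp, hence higher difficulty
	}
}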
-func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) } -func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) } +func TestBadHeaderHashes(t *testing.T) { + testBadHashes(t, false, rawdb.HashScheme) + testBadHashes(t, false, rawdb.PathScheme) +} +func TestBadBlockHashes(t *testing.T) { + testBadHashes(t, true, rawdb.HashScheme) + testBadHashes(t, true, rawdb.PathScheme) +} -func testBadHashes(t *testing.T, full bool) { +func testBadHashes(t *testing.T, full bool, scheme string) { // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -622,12 +698,18 @@ func testBadHashes(t *testing.T, full bool) { // Tests that bad hashes are detected on boot, and the chain rolled back to a // good state prior to the bad hash. -func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) } -func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) } +func TestReorgBadHeaderHashes(t *testing.T) { + testReorgBadHashes(t, false, rawdb.HashScheme) + testReorgBadHashes(t, false, rawdb.PathScheme) +} +func TestReorgBadBlockHashes(t *testing.T) { + testReorgBadHashes(t, true, rawdb.HashScheme) + testReorgBadHashes(t, true, rawdb.PathScheme) +} -func testReorgBadHashes(t *testing.T, full bool) { +func testReorgBadHashes(t *testing.T, full bool, scheme string) { // Create a pristine chain and database - genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full) + genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -657,7 +739,7 @@ func testReorgBadHashes(t *testing.T, full bool) { blockchain.Stop() // Create a new BlockChain and check that it rolled back the state. - ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create new chain manager: %v", err) } @@ -677,13 +759,19 @@ func testReorgBadHashes(t *testing.T, full bool) { } // Tests chain insertions in the face of one entity containing an invalid nonce. -func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) } -func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true) } +func TestHeadersInsertNonceError(t *testing.T) { + testInsertNonceError(t, false, rawdb.HashScheme) + testInsertNonceError(t, false, rawdb.PathScheme) +} +func TestBlocksInsertNonceError(t *testing.T) { + testInsertNonceError(t, true, rawdb.HashScheme) + testInsertNonceError(t, true, rawdb.PathScheme) +} -func testInsertNonceError(t *testing.T, full bool) { +func testInsertNonceError(t *testing.T, full bool, scheme string) { doTest := func(i int) { // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full) + genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -738,6 +826,11 @@ func testInsertNonceError(t *testing.T, full bool) { // Tests that fast importing a block chain produces the same chain data as the // classical full block processing. 
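// DefaultCacheConfigWithScheme, used for nearly every NewBlockChain call from
// here on, is not shown in this diff; conceptually it is the default cache
// configuration with the state scheme overridden. A sketch under that
// assumption, mirroring the archiveCaching pattern used further below:
func defaultCacheConfigWithSchemeSketch(scheme string) *CacheConfig {
	config := *defaultCacheConfig // copy, so the shared default is left untouched
	config.StateScheme = scheme
	return &config
}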
func TestFastVsFullChains(t *testing.T) { + testFastVsFullChains(t, rawdb.HashScheme) + testFastVsFullChains(t, rawdb.PathScheme) +} + +func testFastVsFullChains(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -770,7 +863,7 @@ func TestFastVsFullChains(t *testing.T) { }) // Import the chain as an archive node for the comparison baseline archiveDb := rawdb.NewMemoryDatabase() - archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer archive.Stop() if n, err := archive.InsertChain(blocks); err != nil { @@ -778,7 +871,7 @@ func TestFastVsFullChains(t *testing.T) { } // Fast import the chain as a non-archive node to test fastDb := rawdb.NewMemoryDatabase() - fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer fast.Stop() headers := make([]*types.Header, len(blocks)) @@ -797,7 +890,8 @@ func TestFastVsFullChains(t *testing.T) { t.Fatalf("failed to create temp freezer db: %v", err) } defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + + ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancient.Stop() if n, err := ancient.InsertHeaderChain(headers); err != nil { @@ -865,6 +959,11 @@ func TestFastVsFullChains(t *testing.T) { // Tests that various import methods move the chain head pointers to the correct // positions. 
func TestLightVsFastVsFullChainHeads(t *testing.T) { + testLightVsFastVsFullChainHeads(t, rawdb.HashScheme) + testLightVsFastVsFullChainHeads(t, rawdb.PathScheme) +} + +func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -910,6 +1009,7 @@ archiveCaching := *defaultCacheConfig archiveCaching.TrieDirtyDisabled = true + archiveCaching.StateScheme = scheme archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if n, err := archive.InsertChain(blocks); err != nil { @@ -924,7 +1024,7 @@ // Import the chain as a non-archive node and ensure all pointers are updated fastDb := makeDb() defer fastDb.Close() - fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer fast.Stop() headers := make([]*types.Header, len(blocks)) @@ -944,7 +1044,7 @@ // Import the chain as an ancient-first node and ensure all pointers are updated ancientDb := makeDb() defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancient.Stop() if n, err := ancient.InsertHeaderChain(headers); err != nil { @@ -963,7 +1063,7 @@ // Import the chain as a light node and ensure all pointers are updated lightDb := makeDb() defer lightDb.Close() - light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if n, err := light.InsertHeaderChain(headers); err != nil { t.Fatalf("failed to insert header %d: %v", n, err) } @@ -976,6 +1076,11 @@ // Tests that chain reorganisations handle transaction removals and reinsertions. func TestChainTxReorgs(t *testing.T) { + testChainTxReorgs(t, rawdb.HashScheme) + testChainTxReorgs(t, rawdb.PathScheme) +} + +func testChainTxReorgs(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -1031,7 +1136,7 @@ }) // Import the chain. This runs all block validation rules.
db := rawdb.NewMemoryDatabase() - blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) if i, err := blockchain.InsertChain(chain); err != nil { t.Fatalf("failed to insert original chain[%d]: %v", i, err) } @@ -1090,6 +1195,11 @@ func TestChainTxReorgs(t *testing.T) { } func TestLogReorgs(t *testing.T) { + testLogReorgs(t, rawdb.HashScheme) + testLogReorgs(t, rawdb.PathScheme) +} + +func testLogReorgs(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1100,7 +1210,7 @@ func TestLogReorgs(t *testing.T) { signer = types.LatestSigner(gspec.Config) ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() rmLogsCh := make(chan RemovedLogsEvent) @@ -1145,13 +1255,18 @@ var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd // This test checks that log events and RemovedLogsEvent are sent // when the chain reorganizes. func TestLogRebirth(t *testing.T) { + testLogRebirth(t, rawdb.HashScheme) + testLogRebirth(t, rawdb.PathScheme) +} + +func testLogRebirth(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} signer = types.LatestSigner(gspec.Config) engine = ethash.NewFaker() - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) ) defer blockchain.Stop() @@ -1222,12 +1337,17 @@ func TestLogRebirth(t *testing.T) { // This test is a variation of TestLogRebirth. It verifies that log events are emitted // when a side chain containing log events overtakes the canonical chain. 
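// TestLogRebirth above and its variants drive the chain's event feeds: each
// test subscribes to the new-log and removed-log feeds, triggers reorgs, and
// counts the deliveries on each channel. Minimal subscription sketch (the
// buffer sizes are arbitrary):
logsCh := make(chan []*types.Log, 10)
rmLogsCh := make(chan RemovedLogsEvent, 10)
logsSub := blockchain.SubscribeLogsEvent(logsCh)
rmLogsSub := blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
defer logsSub.Unsubscribe()
defer rmLogsSub.Unsubscribe()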
func TestSideLogRebirth(t *testing.T) { + testSideLogRebirth(t, rawdb.HashScheme) + testSideLogRebirth(t, rawdb.PathScheme) +} + +func testSideLogRebirth(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} signer = types.LatestSigner(gspec.Config) - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) ) defer blockchain.Stop() @@ -1312,6 +1432,11 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re } func TestReorgSideEvent(t *testing.T) { + testReorgSideEvent(t, rawdb.HashScheme) + testReorgSideEvent(t, rawdb.PathScheme) +} + +func testReorgSideEvent(t *testing.T, scheme string) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1321,7 +1446,7 @@ func TestReorgSideEvent(t *testing.T) { } signer = types.LatestSigner(gspec.Config) ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {}) @@ -1392,7 +1517,12 @@ done: // Tests if the canonical block can be fetched from the database during chain insertion. 
func TestCanonicalBlockRetrieval(t *testing.T) { - _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true) + testCanonicalBlockRetrieval(t, rawdb.HashScheme) + testCanonicalBlockRetrieval(t, rawdb.PathScheme) +} + +func testCanonicalBlockRetrieval(t *testing.T, scheme string) { + _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme) if err != nil { t.Fatalf("failed to create pristine chain: %v", err) } @@ -1436,8 +1566,12 @@ func TestCanonicalBlockRetrieval(t *testing.T) { } pend.Wait() } - func TestEIP155Transition(t *testing.T) { + testEIP155Transition(t, rawdb.HashScheme) + testEIP155Transition(t, rawdb.PathScheme) +} + +func testEIP155Transition(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1496,7 +1630,7 @@ func TestEIP155Transition(t *testing.T) { } }) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() if _, err := blockchain.InsertChain(blocks); err != nil { @@ -1546,8 +1680,12 @@ func TestEIP155Transition(t *testing.T) { t.Errorf("have %v, want %v", have, want) } } - func TestEIP161AccountRemoval(t *testing.T) { + testEIP161AccountRemoval(t, rawdb.HashScheme) + testEIP161AccountRemoval(t, rawdb.PathScheme) +} + +func testEIP161AccountRemoval(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1585,7 +1723,7 @@ func TestEIP161AccountRemoval(t *testing.T) { block.AddTx(tx) }) // account must exist pre eip 161 - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil { @@ -1618,6 +1756,11 @@ func TestEIP161AccountRemoval(t *testing.T) { // // https://github.com/ethereum/go-ethereum/pull/15941 func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { + testBlockchainHeaderchainReorgConsistency(t, rawdb.HashScheme) + testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme) +} + +func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -1638,7 +1781,7 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { } // Import the canonical and fork chain side by side, verifying the current block // and current header consistency - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -1698,10 +1841,10 @@ func TestTrieForkGC(t *testing.T) { } // Dereference all the recent tries and ensure no past trie is left in for i := 0; i < TriesInMemory; i++ { - chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) - 
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) + chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) + chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } - if nodes, _ := chain.TrieDB().Size(); nodes > 0 { + if _, nodes, _ := chain.TrieDB().Size(); nodes > 0 { // for hashdb, all in-memory nodes are accounted for in the nodes return value t.Fatalf("stale tries still alive after garbage collection") } } @@ -1709,6 +1852,11 @@ // Tests that doing large reorgs works even if the state associated with the // forking point is not available any more. func TestLargeReorgTrieGC(t *testing.T) { + testLargeReorgTrieGC(t, rawdb.HashScheme) + testLargeReorgTrieGC(t, rawdb.PathScheme) +} + +func testLargeReorgTrieGC(t *testing.T, scheme string) { // Generate the original common chain segment and the two competing forks engine := ethash.NewFaker() genesis := &Genesis{ @@ -1720,7 +1868,10 @@ competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) // Import the shared chain and the original canonical one - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + defer db.Close() + + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -1733,7 +1884,7 @@ t.Fatalf("failed to insert original chain: %v", err) } // Ensure that the state associated with the forking point is pruned away - if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil { + if chain.HasState(shared[len(shared)-1].Root()) { t.Fatalf("common-but-old ancestor still cached") } // Import the competitor chain without exceeding the canonical's TD and ensure @@ -1742,7 +1893,7 @@ t.Fatalf("failed to insert competitor chain: %v", err) } for i, block := range competitor[:len(competitor)-2] { - if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil { + if chain.HasState(block.Root()) { t.Fatalf("competitor %d: low TD chain became processed", i) } } @@ -1751,14 +1902,30 @@ if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil { t.Fatalf("failed to finalize competitor chain: %v", err) } - for i, block := range competitor[:len(competitor)-TriesInMemory] { - if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil { + // The path-based trie database keeps 128 diff layers plus 1 disk layer, + // so 129 recent states are available in total; the hash-based one keeps 128.
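// Worked numbers for the assertions that follow: TriesInMemory is 128, and the
// competitor chain was generated with 2*TriesInMemory+1 = 257 blocks, so the
// pruning boundary checked below sits at len(competitor)-states:
//   hash scheme: states = 128, so competitor[:129] must be pruned and competitor[129:] live
//   path scheme: states = 129, so competitor[:128] must be pruned and competitor[128:] live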
+ states := TriesInMemory + if scheme == rawdb.PathScheme { + states = states + 1 + } + for i, block := range competitor[:len(competitor)-states] { + if chain.HasState(block.Root()) { + t.Fatalf("competitor %d: unexpected competing chain state", i) + } + } + for i, block := range competitor[len(competitor)-states:] { + if !chain.HasState(block.Root()) { t.Fatalf("competitor %d: competing chain state missing", i) } } } func TestBlockchainRecovery(t *testing.T) { + testBlockchainRecovery(t, rawdb.HashScheme) + testBlockchainRecovery(t, rawdb.PathScheme) +} + +func testBlockchainRecovery(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -1775,7 +1942,7 @@ func TestBlockchainRecovery(t *testing.T) { t.Fatalf("failed to create temp freezer db: %v", err) } defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) headers := make([]*types.Header, len(blocks)) for i, block := range blocks { @@ -1795,7 +1962,7 @@ func TestBlockchainRecovery(t *testing.T) { rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash()) // Reopen broken blockchain again - ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancient.Stop() if num := ancient.CurrentBlock().Number.Uint64(); num != 0 { t.Errorf("head block mismatch: have #%v, want #%v", num, 0) @@ -1810,8 +1977,13 @@ func TestBlockchainRecovery(t *testing.T) { // This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain. func TestInsertReceiptChainRollback(t *testing.T) { + testInsertReceiptChainRollback(t, rawdb.HashScheme) + testInsertReceiptChainRollback(t, rawdb.PathScheme) +} + +func testInsertReceiptChainRollback(t *testing.T, scheme string) { // Generate forked chain. The returned BlockChain object is used to process the side chain blocks. - tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains() + tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains(scheme) if err != nil { t.Fatal(err) } @@ -1842,7 +2014,7 @@ func TestInsertReceiptChainRollback(t *testing.T) { } defer ancientDb.Close() - ancientChain, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer ancientChain.Stop() // Import the canonical header chain. 
@@ -1887,6 +2059,11 @@ func TestInsertReceiptChainRollback(t *testing.T) { // - https://github.com/ethereum/go-ethereum/issues/18977 // - https://github.com/ethereum/go-ethereum/pull/18988 func TestLowDiffLongChain(t *testing.T) { + testLowDiffLongChain(t, rawdb.HashScheme) + testLowDiffLongChain(t, rawdb.PathScheme) +} + +func testLowDiffLongChain(t *testing.T, scheme string) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -1901,11 +2078,14 @@ func TestLowDiffLongChain(t *testing.T) { }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + defer diskdb.Close() + + chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } - defer chain.stopWithoutSaving() + defer chain.Stop() if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) @@ -2085,11 +2265,20 @@ func TestPrunedImportSideWithMerging(t *testing.T) { testSideImport(t, 1, -10, 1) } -func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers") } -func TestInsertKnownReceiptChain(t *testing.T) { testInsertKnownChainData(t, "receipts") } -func TestInsertKnownBlocks(t *testing.T) { testInsertKnownChainData(t, "blocks") } +func TestInsertKnownHeaders(t *testing.T) { + testInsertKnownChainData(t, "headers", rawdb.HashScheme) + testInsertKnownChainData(t, "headers", rawdb.PathScheme) +} +func TestInsertKnownReceiptChain(t *testing.T) { + testInsertKnownChainData(t, "receipts", rawdb.HashScheme) + testInsertKnownChainData(t, "receipts", rawdb.PathScheme) +} +func TestInsertKnownBlocks(t *testing.T) { + testInsertKnownChainData(t, "blocks", rawdb.HashScheme) + testInsertKnownChainData(t, "blocks", rawdb.PathScheme) +} -func testInsertKnownChainData(t *testing.T, typ string) { +func testInsertKnownChainData(t *testing.T, typ string, scheme string) { engine := ethash.NewFaker() genesis := &Genesis{ Config: params.TestChainConfig, @@ -2112,7 +2301,7 @@ func testInsertKnownChainData(t *testing.T, typ string) { } defer chaindb.Close() - chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2385,7 +2574,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i } // getLongAndShortChains returns two chains: A is longer, B is heavier. 
-func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) { +func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -2397,7 +2586,7 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Gene genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err) } @@ -2443,7 +2632,12 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Gene // 3. Then there should be no canon mapping for the block at height X // 4. The forked block should still be retrievable by hash func TestReorgToShorterRemovesCanonMapping(t *testing.T) { - chain, canonblocks, sideblocks, _, err := getLongAndShortChains() + testReorgToShorterRemovesCanonMapping(t, rawdb.HashScheme) + testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme) +} + +func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) { + chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme) if err != nil { t.Fatal(err) } @@ -2481,7 +2675,12 @@ func TestReorgToShorterRemovesCanonMapping(t *testing.T) { // as TestReorgToShorterRemovesCanonMapping, but applied on headerchain // imports -- that is, for fast sync func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) { - chain, canonblocks, sideblocks, _, err := getLongAndShortChains() + testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.HashScheme) + testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme) +} + +func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme string) { + chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme) if err != nil { t.Fatal(err) } @@ -2624,6 +2823,11 @@ func TestTransactionIndices(t *testing.T) { } func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { + testSkipStaleTxIndicesInSnapSync(t, rawdb.HashScheme) + testSkipStaleTxIndicesInSnapSync(t, rawdb.PathScheme) +} + +func testSkipStaleTxIndicesInSnapSync(t *testing.T, scheme string) { // Configure and generate a sample block chain var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -2682,7 +2886,7 @@ func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { // Import all blocks into ancient db, only HEAD-32 indices are kept. l := uint64(32) - chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) + chain, err := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2816,6 +3020,11 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) { // 2. Downloader starts to sync again // 3. 
The blocks fetched are all known and canonical blocks func TestSideImportPrunedBlocks(t *testing.T) { + testSideImportPrunedBlocks(t, rawdb.HashScheme) + testSideImportPrunedBlocks(t, rawdb.PathScheme) +} + +func testSideImportPrunedBlocks(t *testing.T, scheme string) { // Generate a canonical chain to act as the main dataset engine := ethash.NewFaker() genesis := &Genesis{ @@ -2825,7 +3034,7 @@ // Generate and import the canonical chain _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2834,15 +3043,20 @@ if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } - - lastPrunedIndex := len(blocks) - TriesInMemory - 1 + // The path-based trie database keeps 128 diff layers plus 1 disk layer, + // so 129 recent states are available in total; the hash-based one keeps 128. + // That puts the pruning boundary at blocks[len(blocks)-states-1]. + states := TriesInMemory + if scheme == rawdb.PathScheme { + states = TriesInMemory + 1 + } + lastPrunedIndex := len(blocks) - states - 1 lastPrunedBlock := blocks[lastPrunedIndex] // Verify pruning of lastPrunedBlock if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64()) } - firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory] + firstNonPrunedBlock := blocks[len(blocks)-states] // Verify firstNonPrunedBlock is not pruned if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) { t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64()) @@ -2864,6 +3078,11 @@ // each transaction, so this works ok. The rework accumulated writes in memory // first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) { + testDeleteCreateRevert(t, rawdb.HashScheme) + testDeleteCreateRevert(t, rawdb.PathScheme) +} + +func testDeleteCreateRevert(t *testing.T, scheme string) { var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") @@ -2915,7 +3134,7 @@ func TestDeleteCreateRevert(t *testing.T) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -2934,6 +3153,11 @@ func TestDeleteCreateRevert(t *testing.T) { // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, // and then the new slots exist func TestDeleteRecreateSlots(t *testing.T) { + testDeleteRecreateSlots(t, rawdb.HashScheme) + testDeleteRecreateSlots(t, rawdb.PathScheme) +} + +func testDeleteRecreateSlots(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3023,7 +3247,7 @@ func TestDeleteRecreateSlots(t *testing.T) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ Tracer: logger.NewJSONLogger(nil, os.Stdout), }, nil, nil) if err != nil { @@ -3057,6 +3281,11 @@ func TestDeleteRecreateSlots(t *testing.T) { // regular value-transfer // Expected outcome is that _all_ slots are cleared from A func TestDeleteRecreateAccount(t *testing.T) { + testDeleteRecreateAccount(t, rawdb.HashScheme) + testDeleteRecreateAccount(t, rawdb.PathScheme) +} + +func testDeleteRecreateAccount(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3100,7 +3329,7 @@ func TestDeleteRecreateAccount(t *testing.T) { b.AddTx(tx) }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ Tracer: logger.NewJSONLogger(nil, os.Stdout), }, nil, nil) if err != nil { @@ -3130,6 +3359,11 @@ func TestDeleteRecreateAccount(t *testing.T) { // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, // and then the new slots exist func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { + testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.HashScheme) + testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.PathScheme) +} + +func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3270,7 +3504,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { current = exp }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ //Debug: true, //Tracer: vm.NewJSONLogger(nil, os.Stdout), }, nil, nil) @@ -3328,7 +3562,14 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { // to the destructset in case something is created "onto" an existing item. 
// We need to either roll back the snapDestructs, or not place it into snapDestructs // in the first place. +// + func TestInitThenFailCreateContract(t *testing.T) { + testInitThenFailCreateContract(t, rawdb.HashScheme) + testInitThenFailCreateContract(t, rawdb.PathScheme) +} + +func testInitThenFailCreateContract(t *testing.T, scheme string) { var ( engine = ethash.NewFaker() @@ -3401,7 +3642,7 @@ func TestInitThenFailCreateContract(t *testing.T) { }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ //Debug: true, //Tracer: vm.NewJSONLogger(nil, os.Stdout), }, nil, nil) @@ -3439,6 +3680,11 @@ func TestInitThenFailCreateContract(t *testing.T) { // checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated // correctly. func TestEIP2718Transition(t *testing.T) { + testEIP2718Transition(t, rawdb.HashScheme) + testEIP2718Transition(t, rawdb.PathScheme) +} + +func testEIP2718Transition(t *testing.T, scheme string) { var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") engine = ethash.NewFaker() @@ -3486,7 +3732,7 @@ func TestEIP2718Transition(t *testing.T) { }) // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3516,6 +3762,11 @@ func TestEIP2718Transition(t *testing.T) { // gasFeeCap - gasTipCap < baseFee. // 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap). func TestEIP1559Transition(t *testing.T) { + testEIP1559Transition(t, rawdb.HashScheme) + testEIP1559Transition(t, rawdb.PathScheme) +} + +func testEIP1559Transition(t *testing.T, scheme string) { var ( aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") engine = ethash.NewFaker() @@ -3526,8 +3777,9 @@ func TestEIP1559Transition(t *testing.T) { addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + config = *params.AllEthashProtocolChanges gspec = &Genesis{ - Config: params.AllEthashProtocolChanges, + Config: &config, Alloc: GenesisAlloc{ addr1: {Balance: funds}, addr2: {Balance: funds}, @@ -3574,7 +3826,7 @@ func TestEIP1559Transition(t *testing.T) { b.AddTx(tx) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3656,6 +3908,11 @@ func TestEIP1559Transition(t *testing.T) { // Tests the scenario the chain is requested to another point with the missing state. // It expects the state is recovered and all relevant chain markers are set correctly. 
func TestSetCanonical(t *testing.T) { + testSetCanonical(t, rawdb.HashScheme) + testSetCanonical(t, rawdb.PathScheme) +} + +func testSetCanonical(t *testing.T, scheme string) { //log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) var ( @@ -3678,7 +3935,10 @@ func TestSetCanonical(t *testing.T) { } gen.AddTx(tx) }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + defer diskdb.Close() + + chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -3735,6 +3995,11 @@ func TestSetCanonical(t *testing.T) { // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted // correctly in case reorg is called. func TestCanonicalHashMarker(t *testing.T) { + testCanonicalHashMarker(t, rawdb.HashScheme) + testCanonicalHashMarker(t, rawdb.PathScheme) +} + +func testCanonicalHashMarker(t *testing.T, scheme string) { var cases = []struct { forkA int forkB int @@ -3782,7 +4047,7 @@ func TestCanonicalHashMarker(t *testing.T) { _, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {}) // Initialize test chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -4129,6 +4394,7 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { if err != nil { t.Fatalf("failed to create tester chain: %v", err) } + defer chain.Stop() // Import the blocks for _, block := range blocks { if _, err := chain.InsertChain([]*types.Block{block}); err != nil { @@ -4137,6 +4403,116 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { } } +func TestDeleteThenCreate(t *testing.T) { + var ( + engine = ethash.NewFaker() + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + factoryAddr = crypto.CreateAddress(address, 0) + funds = big.NewInt(1000000000000000) + ) + /* + contract Factory { + function deploy(bytes memory code) public { + address addr; + assembly { + addr := create2(0, add(code, 0x20), mload(code), 0) + if iszero(extcodesize(addr)) { + revert(0, 0) + } + } + } + } + */ + factoryBIN := 
common.Hex2Bytes("608060405234801561001057600080fd5b50610241806100206000396000f3fe608060405234801561001057600080fd5b506004361061002a5760003560e01c80627743601461002f575b600080fd5b610049600480360381019061004491906100d8565b61004b565b005b6000808251602084016000f59050803b61006457600080fd5b5050565b600061007b61007684610146565b610121565b905082815260208101848484011115610097576100966101eb565b5b6100a2848285610177565b509392505050565b600082601f8301126100bf576100be6101e6565b5b81356100cf848260208601610068565b91505092915050565b6000602082840312156100ee576100ed6101f5565b5b600082013567ffffffffffffffff81111561010c5761010b6101f0565b5b610118848285016100aa565b91505092915050565b600061012b61013c565b90506101378282610186565b919050565b6000604051905090565b600067ffffffffffffffff821115610161576101606101b7565b5b61016a826101fa565b9050602081019050919050565b82818337600083830152505050565b61018f826101fa565b810181811067ffffffffffffffff821117156101ae576101ad6101b7565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f830116905091905056fea2646970667358221220ea8b35ed310d03b6b3deef166941140b4d9e90ea2c92f6b41eb441daf49a59c364736f6c63430008070033") + + /* + contract C { + uint256 value; + constructor() { + value = 100; + } + function destruct() public payable { + selfdestruct(payable(msg.sender)); + } + receive() payable external {} + } + */ + contractABI := common.Hex2Bytes("6080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c63430008070033") + contractAddr := crypto.CreateAddress2(factoryAddr, [32]byte{}, crypto.Keccak256(contractABI)) + + gspec := &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{ + address: {Balance: funds}, + }, + } + nonce := uint64(0) + signer := types.HomesteadSigner{} + _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) { + fee := big.NewInt(1) + if b.header.BaseFee != nil { + fee = b.header.BaseFee + } + b.SetCoinbase(common.Address{1}) + + // Block 1 + if i == 0 { + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 500000, + Data: factoryBIN, + }) + nonce++ + b.AddTx(tx) + + data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") + tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 500000, + To: &factoryAddr, + Data: data, + }) + b.AddTx(tx) + nonce++ + } else { + // Block 2 + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 500000, + To: &contractAddr, + Data: common.Hex2Bytes("2b68b9c6"), // destruct + }) + nonce++ + b.AddTx(tx) + + data := 
common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") + tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 500000, + To: &factoryAddr, // re-creation + Data: data, + }) + b.AddTx(tx) + nonce++ + } + }) + // Import the canonical chain + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + for _, block := range blocks { + if _, err := chain.InsertChain([]*types.Block{block}); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) + } + } +} + // TestTransientStorageReset ensures the transient storage is wiped correctly // between transactions. func TestTransientStorageReset(t *testing.T) { @@ -4215,6 +4591,7 @@ func TestTransientStorageReset(t *testing.T) { if err != nil { t.Fatalf("failed to create tester chain: %v", err) } + defer chain.Stop() // Import the blocks if _, err := chain.InsertChain(blocks); err != nil { t.Fatalf("failed to insert into chain: %v", err) @@ -4309,6 +4686,7 @@ func TestEIP3651(t *testing.T) { if err != nil { t.Fatalf("failed to create tester chain: %v", err) } + defer chain.Stop() if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 23ab23ef0..f5fce7258 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -19,6 +19,7 @@ package core import ( "context" "encoding/binary" + "errors" "fmt" "sync" "sync/atomic" @@ -403,7 +404,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com if header == nil { return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4]) } else if header.ParentHash != lastHead { - return common.Hash{}, fmt.Errorf("chain reorged during section processing") + return common.Hash{}, errors.New("chain reorged during section processing") } if err := c.backend.Process(c.ctx, header); err != nil { return common.Hash{}, err diff --git a/core/chain_makers.go b/core/chain_makers.go index 61d0098af..c9c880dd6 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -23,6 +23,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -86,6 +88,11 @@ func (b *BlockGen) SetPoS() { b.header.Difficulty = new(big.Int) } +// SetBlobGas sets the data gas used by the blob in the generated block. +func (b *BlockGen) SetBlobGas(blobGasUsed uint64) { + b.header.BlobGasUsed = &blobGasUsed +} + // addTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. 
// @@ -202,7 +209,7 @@ func (b *BlockGen) AddUncle(h *types.Header) { // The gas limit and price should be derived from the parent h.GasLimit = parent.GasLimit if b.config.IsLondon(h.Number) { - h.BaseFee = misc.CalcBaseFee(b.config, parent) + h.BaseFee = eip1559.CalcBaseFee(b.config, parent) if !b.config.IsLondon(parent.Number) { parentGasLimit := parent.GasLimit * b.config.ElasticityMultiplier() h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) @@ -282,7 +289,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) chainreader := &fakeChainReader{config: config} - genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { + genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) { b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, parent, statedb, b.engine) @@ -321,23 +328,27 @@ } // Write state changes to db - root, err := statedb.Commit(config.IsEIP158(b.header.Number)) + root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) if err != nil { panic(fmt.Sprintf("state write error: %v", err)) } - if err := statedb.Database().TrieDB().Commit(root, false); err != nil { + if err = triedb.Commit(root, false); err != nil { panic(fmt.Sprintf("trie write error: %v", err)) } return block, b.receipts } return nil, nil } + // Forcibly use the hash-based state scheme so that all nodes are retained on disk. + triedb := trie.NewDatabase(db, trie.HashDefaults) + defer triedb.Close() + for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), state.NewDatabase(db), nil) + statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { panic(err) } - block, receipt := genblock(i, parent, statedb) + block, receipt := genblock(i, parent, triedb, statedb) blocks[i] = block receipts[i] = receipt parent = block @@ -350,7 +361,9 @@ // then generate chain on top.
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) { db := rawdb.NewMemoryDatabase() - _, err := genesis.Commit(db, trie.NewDatabase(db)) + triedb := trie.NewDatabase(db, trie.HashDefaults) + defer triedb.Close() + _, err := genesis.Commit(db, triedb) if err != nil { panic(err) } @@ -380,12 +393,26 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S Time: time, } if chain.Config().IsLondon(header.Number) { - header.BaseFee = misc.CalcBaseFee(chain.Config(), parent.Header()) + header.BaseFee = eip1559.CalcBaseFee(chain.Config(), parent.Header()) if !chain.Config().IsLondon(parent.Number()) { parentGasLimit := parent.GasLimit() * chain.Config().ElasticityMultiplier() header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) } } + if chain.Config().IsCancun(header.Number, header.Time) { + var ( + parentExcessBlobGas uint64 + parentBlobGasUsed uint64 + ) + if parent.ExcessBlobGas() != nil { + parentExcessBlobGas = *parent.ExcessBlobGas() + parentBlobGasUsed = *parent.BlobGasUsed() + } + excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed) + header.ExcessBlobGas = &excessBlobGas + header.BlobGasUsed = new(uint64) + header.ParentBeaconRoot = new(common.Hash) + } return header } diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index 4f7355527..db220cf24 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) func TestGenerateWithdrawalChain(t *testing.T) { @@ -74,8 +75,7 @@ func TestGenerateWithdrawalChain(t *testing.T) { Storage: storage, Code: common.Hex2Bytes("600154600354"), } - - genesis := gspec.MustCommit(gendb) + genesis := gspec.MustCommit(gendb, trie.NewDatabase(gendb, trie.HashDefaults)) chain, _ := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(address), address, big.NewInt(1000), params.TxGas, new(big.Int).Add(gen.BaseFee(), common.Big1), nil), signer, key) @@ -146,6 +146,7 @@ func ExampleGenerateChain() { addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey) db = rawdb.NewMemoryDatabase() + genDb = rawdb.NewMemoryDatabase() ) // Ensure that key1 has some funds in the genesis block. @@ -153,13 +154,13 @@ func ExampleGenerateChain() { Config: &params.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, } - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(genDb, trie.NewDatabase(genDb, trie.HashDefaults)) // This call generates a chain of 5 blocks. The function runs for // each block and adds different features to gen based on the // block index. signer := types.HomesteadSigner{} - chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 5, func(i int, gen *BlockGen) { + chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), genDb, 5, func(i int, gen *BlockGen) { switch i { case 0: // In block 1, addr1 sends addr2 some ether. @@ -188,7 +189,7 @@ func ExampleGenerateChain() { }) // Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { diff --git a/core/dao_test.go b/core/dao_test.go index f2e8dfe8f..b9a899ef2 100644 --- a/core/dao_test.go +++ b/core/dao_test.go @@ -83,7 +83,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import contra-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit contra-fork head for expansion: %v", err) } bc.Stop() @@ -106,7 +106,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import pro-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit pro-fork head for expansion: %v", err) } bc.Stop() @@ -131,7 +131,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import contra-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit contra-fork head for expansion: %v", err) } blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) @@ -149,7 +149,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import pro-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit pro-fork head for expansion: %v", err) } blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) diff --git a/core/error.go b/core/error.go index 872ba8d36..4214ed207 100644 --- a/core/error.go +++ b/core/error.go @@ -100,4 +100,8 @@ var ( // ErrSenderNoEOA is returned if the sender of a transaction is a contract. ErrSenderNoEOA = errors.New("sender not an eoa") + + // ErrBlobFeeCapTooLow is returned if the transaction fee cap is less than the + // blob gas fee of the block. 
+ ErrBlobFeeCapTooLow = errors.New("max fee per blob gas less than block blob gas fee") ) diff --git a/core/evm.go b/core/evm.go index bd4f2b0e5..104f2c09d 100644 --- a/core/evm.go +++ b/core/evm.go @@ -56,24 +56,26 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common random = &header.MixDigest } return vm.BlockContext{ - CanTransfer: CanTransfer, - Transfer: Transfer, - GetHash: GetHashFn(header, chain), - Coinbase: beneficiary, - BlockNumber: new(big.Int).Set(header.Number), - Time: header.Time, - Difficulty: new(big.Int).Set(header.Difficulty), - BaseFee: baseFee, - GasLimit: header.GasLimit, - Random: random, + CanTransfer: CanTransfer, + Transfer: Transfer, + GetHash: GetHashFn(header, chain), + Coinbase: beneficiary, + BlockNumber: new(big.Int).Set(header.Number), + Time: header.Time, + Difficulty: new(big.Int).Set(header.Difficulty), + BaseFee: baseFee, + GasLimit: header.GasLimit, + Random: random, + ExcessBlobGas: header.ExcessBlobGas, } } // NewEVMTxContext creates a new transaction context for a single transaction. func NewEVMTxContext(msg *Message) vm.TxContext { return vm.TxContext{ - Origin: msg.From, - GasPrice: new(big.Int).Set(msg.GasPrice), + Origin: msg.From, + GasPrice: new(big.Int).Set(msg.GasPrice), + BlobHashes: msg.BlobHashes, } } diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index f536019da..76825d3be 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -24,13 +24,12 @@ import ( "math" "math/big" "reflect" - "sort" "strings" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "golang.org/x/exp/slices" ) var ( @@ -73,12 +72,12 @@ type ID struct { type Filter func(id ID) error // NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time. -func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID { +func NewID(config *params.ChainConfig, genesis *types.Block, head, time uint64) ID { // Calculate the starting checksum from the genesis hash - hash := crc32.ChecksumIEEE(genesis[:]) + hash := crc32.ChecksumIEEE(genesis.Hash().Bytes()) // Calculate the current fork checksum and the next fork block - forksByBlock, forksByTime := gatherForks(config) + forksByBlock, forksByTime := gatherForks(config, genesis.Time()) for _, fork := range forksByBlock { if fork <= head { // Fork already passed, checksum the previous hash and the fork number @@ -104,7 +103,7 @@ func NewIDWithChain(chain Blockchain) ID { return NewID( chain.Config(), - chain.Genesis().Hash(), + chain.Genesis(), head.Number.Uint64(), head.Time, ) @@ -115,7 +114,7 @@ func NewIDWithChain(chain Blockchain) ID { func NewFilter(chain Blockchain) Filter { return newFilter( chain.Config(), - chain.Genesis().Hash(), + chain.Genesis(), func() (uint64, uint64) { head := chain.CurrentHeader() return head.Number.Uint64(), head.Time @@ -124,7 +123,7 @@ func NewFilter(chain Blockchain) Filter { } // NewStaticFilter creates a filter at block zero. -func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter { +func NewStaticFilter(config *params.ChainConfig, genesis *types.Block) Filter { head := func() (uint64, uint64) { return 0, 0 } return newFilter(config, genesis, head) } @@ -132,14 +131,14 @@ func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter { // newFilter is the internal version of NewFilter, taking closures as its arguments // instead of a chain. 
The reason is to allow testing it without having to simulate // an entire blockchain. -func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter { +func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() (uint64, uint64)) Filter { // Calculate all the valid fork hash and fork next combos var ( - forksByBlock, forksByTime = gatherForks(config) + forksByBlock, forksByTime = gatherForks(config, genesis.Time()) forks = append(append([]uint64{}, forksByBlock...), forksByTime...) sums = make([][4]byte, len(forks)+1) // 0th is the genesis ) - hash := crc32.ChecksumIEEE(genesis[:]) + hash := crc32.ChecksumIEEE(genesis.Hash().Bytes()) sums[0] = checksumToBytes(hash) for i, fork := range forks { hash = checksumUpdate(hash, fork) @@ -240,7 +239,7 @@ func checksumToBytes(hash uint32) [4]byte { // gatherForks gathers all the known forks and creates two sorted lists out of // them, one for the block number based forks and the second for the timestamps. -func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) { +func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64) { // Gather all the fork block numbers via reflection kind := reflect.TypeOf(params.ChainConfig{}) conf := reflect.ValueOf(config).Elem() @@ -270,8 +269,8 @@ func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) { } } } - sort.Slice(forksByBlock, func(i, j int) bool { return forksByBlock[i] < forksByBlock[j] }) - sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] }) + slices.Sort(forksByBlock) + slices.Sort(forksByTime) // Deduplicate fork identifiers applying multiple forks for i := 1; i < len(forksByBlock); i++ { @@ -290,7 +289,8 @@ func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) { if len(forksByBlock) > 0 && forksByBlock[0] == 0 { forksByBlock = forksByBlock[1:] } - if len(forksByTime) > 0 && forksByTime[0] == 0 { + // Skip any forks before genesis.
+ for len(forksByTime) > 0 && forksByTime[0] <= genesis { forksByTime = forksByTime[1:] } return forksByBlock, forksByTime diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index ab59a454d..3d49b2ece 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -18,10 +18,14 @@ package forkid import ( "bytes" + "hash/crc32" "math" + "math/big" "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -36,13 +40,13 @@ func TestCreation(t *testing.T) { } tests := []struct { config *params.ChainConfig - genesis common.Hash + genesis *types.Block cases []testcase }{ // Mainnet test cases { params.MainnetChainConfig, - params.MainnetGenesisHash, + core.DefaultGenesisBlock().ToBlock(), []testcase{ {0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced {1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block @@ -74,34 +78,10 @@ func TestCreation(t *testing.T) { {30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // Future Shanghai block }, }, - // Rinkeby test cases - { - params.RinkebyChainConfig, - params.RinkebyGenesisHash, - []testcase{ - {0, 0, ID{Hash: checksumToBytes(0x3b8e0691), Next: 1}}, // Unsynced, last Frontier block - {1, 0, ID{Hash: checksumToBytes(0x60949295), Next: 2}}, // First and last Homestead block - {2, 0, ID{Hash: checksumToBytes(0x8bde40dd), Next: 3}}, // First and last Tangerine block - {3, 0, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // First Spurious block - {1035300, 0, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // Last Spurious block - {1035301, 0, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // First Byzantium block - {3660662, 0, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block - {3660663, 0, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block - {4321233, 0, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block - {4321234, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // First Petersburg block - {5435344, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // Last Petersburg block - {5435345, 0, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // First Istanbul block - {8290927, 0, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // Last Istanbul block - {8290928, 0, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // First Berlin block - {8897987, 0, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // Last Berlin block - {8897988, 0, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // First London block - {10000000, 0, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // Future London block - }, - }, // Goerli test cases { params.GoerliChainConfig, - params.GoerliGenesisHash, + core.DefaultGoerliGenesisBlock().ToBlock(), []testcase{ {0, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block {1561650, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block @@ -118,7 +98,7 @@ func TestCreation(t *testing.T) { // Sepolia test cases { params.SepoliaChainConfig, - params.SepoliaGenesisHash, + core.DefaultSepoliaGenesisBlock().ToBlock(), []testcase{ {0, 0, ID{Hash: checksumToBytes(0xfe3366e7), 
Next: 1735371}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin and first London block {1735370, 0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last London block @@ -377,7 +357,7 @@ func TestValidation(t *testing.T) { //{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale}, } for i, tt := range tests { - filter := newFilter(tt.config, params.MainnetGenesisHash, func() (uint64, uint64) { return tt.head, tt.time }) + filter := newFilter(tt.config, core.DefaultGenesisBlock().ToBlock(), func() (uint64, uint64) { return tt.head, tt.time }) if err := filter(tt.id); err != tt.err { t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err) } @@ -406,3 +386,55 @@ func TestEncoding(t *testing.T) { } } } + +// Tests that time-based forks which are active at genesis are not included in +// forkid hash. +func TestTimeBasedForkInGenesis(t *testing.T) { + var ( + time = uint64(1690475657) + genesis = types.NewBlockWithHeader(&types.Header{Time: time}) + forkidHash = checksumToBytes(crc32.ChecksumIEEE(genesis.Hash().Bytes())) + config = func(shanghai, cancun uint64) *params.ChainConfig { + return &params.ChainConfig{ + ChainID: big.NewInt(1337), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, + MergeNetsplitBlock: big.NewInt(0), + ShanghaiTime: &shanghai, + CancunTime: &cancun, + Ethash: new(params.EthashConfig), + } + } + ) + tests := []struct { + config *params.ChainConfig + want ID + }{ + // Shanghai active before genesis, skip + {config(time-1, time+1), ID{Hash: forkidHash, Next: time + 1}}, + + // Shanghai active at genesis, skip + {config(time, time+1), ID{Hash: forkidHash, Next: time + 1}}, + + // Shanghai not active, skip + {config(time+1, time+2), ID{Hash: forkidHash, Next: time + 1}}, + } + for _, tt := range tests { + if have := NewID(tt.config, genesis, 0, time); have != tt.want { + t.Fatalf("incorrect forkid hash: have %x, want %x", have, tt.want) + } + } +} diff --git a/core/gen_genesis.go b/core/gen_genesis.go index 4e0844e88..38614252a 100644 --- a/core/gen_genesis.go +++ b/core/gen_genesis.go @@ -18,19 +18,21 @@ var _ = (*genesisSpecMarshaling)(nil) // MarshalJSON marshals as JSON.
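To make the two new optional fields concrete, a hedged round-trip sketch through the regenerated marshaller below; the standalone package and the literal values are illustrative assumptions, not part of the diff:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Illustrative values only; both EIP-4844 fields are optional pointers
	// and are rendered as hex quantities (math.HexOrDecimal64) in JSON.
	excess, used := uint64(0), uint64(0)
	g := &core.Genesis{
		GasLimit:      30_000_000,          // required field
		Difficulty:    big.NewInt(1),       // required field
		Alloc:         core.GenesisAlloc{}, // required field
		ExcessBlobGas: &excess,
		BlobGasUsed:   &used,
	}
	blob, err := json.Marshal(g) // emits "excessBlobGas":"0x0","blobGasUsed":"0x0"
	if err != nil {
		panic(err)
	}
	var dec core.Genesis
	if err := json.Unmarshal(blob, &dec); err != nil {
		panic(err)
	}
	fmt.Println(*dec.ExcessBlobGas, *dec.BlobGasUsed) // 0 0
}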
func (g Genesis) MarshalJSON() ([]byte, error) { type Genesis struct { - Config *params.ChainConfig `json:"config"` - Nonce math.HexOrDecimal64 `json:"nonce"` - Timestamp math.HexOrDecimal64 `json:"timestamp"` - ExtraData hexutil.Bytes `json:"extraData"` - GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"` - Mixhash common.Hash `json:"mixHash"` - Coinbase common.Address `json:"coinbase"` - Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"` - Number math.HexOrDecimal64 `json:"number"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - ParentHash common.Hash `json:"parentHash"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` + Config *params.ChainConfig `json:"config"` + Nonce math.HexOrDecimal64 `json:"nonce"` + Timestamp math.HexOrDecimal64 `json:"timestamp"` + ExtraData hexutil.Bytes `json:"extraData"` + GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"` + Mixhash common.Hash `json:"mixHash"` + Coinbase common.Address `json:"coinbase"` + Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"` + Number math.HexOrDecimal64 `json:"number"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + ParentHash common.Hash `json:"parentHash"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` } var enc Genesis enc.Config = g.Config @@ -51,25 +53,29 @@ func (g Genesis) MarshalJSON() ([]byte, error) { enc.GasUsed = math.HexOrDecimal64(g.GasUsed) enc.ParentHash = g.ParentHash enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee) + enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas) + enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed) return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
func (g *Genesis) UnmarshalJSON(input []byte) error { type Genesis struct { - Config *params.ChainConfig `json:"config"` - Nonce *math.HexOrDecimal64 `json:"nonce"` - Timestamp *math.HexOrDecimal64 `json:"timestamp"` - ExtraData *hexutil.Bytes `json:"extraData"` - GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"` - Mixhash *common.Hash `json:"mixHash"` - Coinbase *common.Address `json:"coinbase"` - Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"number"` - GasUsed *math.HexOrDecimal64 `json:"gasUsed"` - ParentHash *common.Hash `json:"parentHash"` - BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` + Config *params.ChainConfig `json:"config"` + Nonce *math.HexOrDecimal64 `json:"nonce"` + Timestamp *math.HexOrDecimal64 `json:"timestamp"` + ExtraData *hexutil.Bytes `json:"extraData"` + GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"` + Mixhash *common.Hash `json:"mixHash"` + Coinbase *common.Address `json:"coinbase"` + Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"number"` + GasUsed *math.HexOrDecimal64 `json:"gasUsed"` + ParentHash *common.Hash `json:"parentHash"` + BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"` + ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"` + BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"` } var dec Genesis if err := json.Unmarshal(input, &dec); err != nil { @@ -120,5 +126,11 @@ func (g *Genesis) UnmarshalJSON(input []byte) error { if dec.BaseFee != nil { g.BaseFee = (*big.Int)(dec.BaseFee) } + if dec.ExcessBlobGas != nil { + g.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.BlobGasUsed != nil { + g.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } return nil } diff --git a/core/genesis.go b/core/genesis.go index 68eeec216..86a3e42a6 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -59,10 +59,12 @@ type Genesis struct { // These fields are used for consensus tests. Please don't use them // in actual genesis blocks. 
- Number uint64 `json:"number"` - GasUsed uint64 `json:"gasUsed"` - ParentHash common.Hash `json:"parentHash"` - BaseFee *big.Int `json:"baseFeePerGas"` + Number uint64 `json:"number"` + GasUsed uint64 `json:"gasUsed"` + ParentHash common.Hash `json:"parentHash"` + BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559 + ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844 + BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 } func ReadGenesis(db ethdb.Database) (*Genesis, error) { @@ -73,7 +75,7 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) { } blob := rawdb.ReadGenesisStateSpec(db, stored) if blob == nil { - return nil, fmt.Errorf("genesis state missing from db") + return nil, errors.New("genesis state missing from db") } if len(blob) != 0 { if err := genesis.Alloc.UnmarshalJSON(blob); err != nil { @@ -82,11 +84,11 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) { } genesis.Config = rawdb.ReadChainConfig(db, stored) if genesis.Config == nil { - return nil, fmt.Errorf("genesis config missing from db") + return nil, errors.New("genesis config missing from db") } genesisBlock := rawdb.ReadBlock(db, stored, 0) if genesisBlock == nil { - return nil, fmt.Errorf("genesis block missing from db") + return nil, errors.New("genesis block missing from db") } genesisHeader := genesisBlock.Header() genesis.Nonce = genesisHeader.Nonce.Uint64() @@ -96,6 +98,9 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) { genesis.Difficulty = genesisHeader.Difficulty genesis.Mixhash = genesisHeader.MixDigest genesis.Coinbase = genesisHeader.Coinbase + genesis.BaseFee = genesisHeader.BaseFee + genesis.ExcessBlobGas = genesisHeader.ExcessBlobGas + genesis.BlobGasUsed = genesisHeader.BlobGasUsed return &genesis, nil } @@ -125,14 +130,16 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { return common.Hash{}, err } for addr, account := range *ga { - statedb.AddBalance(addr, account.Balance) + if account.Balance != nil { + statedb.AddBalance(addr, account.Balance) + } statedb.SetCode(addr, account.Code) statedb.SetNonce(addr, account.Nonce) for key, value := range account.Storage { statedb.SetState(addr, key, value) } } - return statedb.Commit(false) + return statedb.Commit(0, false) } // flush is very similar with deriveHash, but the main difference is @@ -144,14 +151,16 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas return err } for addr, account := range *ga { - statedb.AddBalance(addr, account.Balance) + if account.Balance != nil { + statedb.AddBalance(addr, account.Balance) + } statedb.SetCode(addr, account.Code) statedb.SetNonce(addr, account.Nonce) for key, value := range account.Storage { statedb.SetState(addr, key, value) } } - root, err := statedb.Commit(false) + root, err := statedb.Commit(0, false) if err != nil { return err } @@ -189,8 +198,6 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm switch blockhash { case params.MainnetGenesisHash: genesis = DefaultGenesisBlock() - case params.RinkebyGenesisHash: - genesis = DefaultRinkebyGenesisBlock() case params.GoerliGenesisHash: genesis = DefaultGoerliGenesisBlock() case params.SepoliaGenesisHash: @@ -216,15 +223,17 @@ type GenesisAccount struct { // field type overrides for gencodec type genesisSpecMarshaling struct { - Nonce math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - ExtraData hexutil.Bytes - GasLimit math.HexOrDecimal64 - GasUsed math.HexOrDecimal64 - Number math.HexOrDecimal64 - Difficulty *math.HexOrDecimal256 - BaseFee 
*math.HexOrDecimal256 - Alloc map[common.UnprefixedAddress]GenesisAccount + Nonce math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ExtraData hexutil.Bytes + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Number math.HexOrDecimal64 + Difficulty *math.HexOrDecimal256 + Alloc map[common.UnprefixedAddress]GenesisAccount + BaseFee *math.HexOrDecimal256 + ExcessBlobGas *math.HexOrDecimal64 + BlobGasUsed *math.HexOrDecimal64 } type genesisAccountMarshaling struct { @@ -268,6 +277,7 @@ func (e *GenesisMismatchError) Error() string { // ChainOverrides contains the changes to chain config. type ChainOverrides struct { OverrideCancun *uint64 + OverrideVerkle *uint64 } // SetupGenesisBlock writes or updates the genesis block in db. @@ -296,6 +306,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen if overrides != nil && overrides.OverrideCancun != nil { config.CancunTime = overrides.OverrideCancun } + if overrides != nil && overrides.OverrideVerkle != nil { + config.VerkleTime = overrides.OverrideVerkle + } } } // Just commit the new block if there is no stored genesis block. @@ -314,10 +327,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen applyOverrides(genesis.Config) return genesis.Config, block.Hash(), nil } - // We have the genesis block in database(perhaps in ancient database) - // but the corresponding state is missing. + // The genesis block is present (perhaps in an ancient database) while the + // state database is not initialized yet. It can happen that the node + // is initialized with an external ancient store. Commit genesis state + // in this case. header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) { if genesis == nil { genesis = DefaultGenesisBlock() } @@ -366,7 +381,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // are returned to the caller unless we're already at block zero. head := rawdb.ReadHeadHeader(db) if head == nil { - return newcfg, stored, fmt.Errorf("missing head header") + return newcfg, stored, errors.New("missing head header") } compatErr := storedcfg.CheckCompatible(newcfg, head.Number.Uint64(), head.Time) if compatErr != nil && ((head.Number.Uint64() != 0 && compatErr.RewindToBlock != 0) || (head.Time != 0 && compatErr.RewindToTime != 0)) { @@ -420,8 +435,6 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { return params.MainnetChainConfig case ghash == params.SepoliaGenesisHash: return params.SepoliaChainConfig - case ghash == params.RinkebyGenesisHash: - return params.RinkebyChainConfig case ghash == params.GoerliGenesisHash: return params.GoerliChainConfig default: @@ -463,9 +476,27 @@ func (g *Genesis) ToBlock() *types.Block { } } var withdrawals []*types.Withdrawal - if g.Config != nil && g.Config.IsShanghai(big.NewInt(int64(g.Number)), g.Timestamp) { - head.WithdrawalsHash = &types.EmptyWithdrawalsHash - withdrawals = make([]*types.Withdrawal, 0) + if conf := g.Config; conf != nil { + num := big.NewInt(int64(g.Number)) + if conf.IsShanghai(num, g.Timestamp) { + head.WithdrawalsHash = &types.EmptyWithdrawalsHash + withdrawals = make([]*types.Withdrawal, 0) + } + if conf.IsCancun(num, g.Timestamp) { + // EIP-4788: The parentBeaconBlockRoot of the genesis block is always + // the zero hash.
This is because the genesis block does not have a parent + // by definition. + head.ParentBeaconRoot = new(common.Hash) + // EIP-4844 fields + head.ExcessBlobGas = g.ExcessBlobGas + head.BlobGasUsed = g.BlobGasUsed + if head.ExcessBlobGas == nil { + head.ExcessBlobGas = new(uint64) + } + if head.BlobGasUsed == nil { + head.BlobGasUsed = new(uint64) + } + } } return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)).WithWithdrawals(withdrawals) } @@ -506,10 +537,8 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. -// Note the state changes will be committed in hash-based scheme, use Commit -// if path-scheme is preferred. -func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - block, err := g.Commit(db, trie.NewDatabase(db)) +func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block { + block, err := g.Commit(db, triedb) if err != nil { panic(err) } @@ -528,18 +557,6 @@ func DefaultGenesisBlock() *Genesis { } } -// DefaultRinkebyGenesisBlock returns the Rinkeby network genesis block. -func DefaultRinkebyGenesisBlock() *Genesis { - return &Genesis{ - Config: params.RinkebyChainConfig, - Timestamp: 1492009146, - ExtraData: hexutil.MustDecode("0x52657370656374206d7920617574686f7269746168207e452e436172746d616e42eb768f2244c8811c63729a21a3569731535f067ffc57839b00206d1ad20c69a1981b489f772031b279182d99e65703f0076e4812653aab85fca0f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), - GasLimit: 4700000, - Difficulty: big.NewInt(1), - Alloc: decodePrealloc(rinkebyAllocData), - } -} - // DefaultGoerliGenesisBlock returns the Görli network genesis block. func DefaultGoerliGenesisBlock() *Genesis { return &Genesis{ @@ -565,22 +582,30 @@ func DefaultSepoliaGenesisBlock() *Genesis { } } -// DeveloperGenesisBlock returns the 'geth --dev' genesis block. -func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis { - // Override the default period to the user requested one - config := *params.AllCliqueProtocolChanges - config.Clique = ¶ms.CliqueConfig{ - Period: period, - Epoch: config.Clique.Epoch, +// DefaultHoleskyGenesisBlock returns the Holesky network genesis block. +func DefaultHoleskyGenesisBlock() *Genesis { + return &Genesis{ + Config: params.HoleskyChainConfig, + Nonce: 0x1234, + ExtraData: hexutil.MustDecode("0x686f77206d7563682069732074686520666973683f"), + GasLimit: 0x17d7840, + Difficulty: big.NewInt(0x01), + Timestamp: 1694786100, + Alloc: decodePrealloc(holeskyAllocData), } +} + +// DeveloperGenesisBlock returns the 'geth --dev' genesis block. 
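With the clique period gone from dev mode, call sites shrink to a gas limit plus a faucet address; a minimal sketch of the new signature below, under that assumption (the faucet address is made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Hypothetical faucet account to pre-fund in the dev genesis.
	faucet := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
	// The period argument is dropped: dev mode now starts from the merged
	// AllDevChainProtocolChanges config with difficulty 0 instead of clique.
	genesis := core.DeveloperGenesisBlock(30_000_000, faucet)
	fmt.Println("dev genesis difficulty:", genesis.Difficulty) // 0
}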
+func DeveloperGenesisBlock(gasLimit uint64, faucet common.Address) *Genesis { + // Override the default period to the user requested one + config := *params.AllDevChainProtocolChanges // Assemble and return the genesis with the precompiles and faucet pre-funded return &Genesis{ Config: &config, - ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...), GasLimit: gasLimit, BaseFee: big.NewInt(params.InitialBaseFee), - Difficulty: big.NewInt(1), + Difficulty: big.NewInt(0), Alloc: map[common.Address]GenesisAccount{ common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256 @@ -597,13 +622,34 @@ func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address } func decodePrealloc(data string) GenesisAlloc { - var p []struct{ Addr, Balance *big.Int } + var p []struct { + Addr *big.Int + Balance *big.Int + Misc *struct { + Nonce uint64 + Code []byte + Slots []struct { + Key common.Hash + Val common.Hash + } + } `rlp:"optional"` + } if err := rlp.NewStream(strings.NewReader(data), 0).Decode(&p); err != nil { panic(err) } ga := make(GenesisAlloc, len(p)) for _, account := range p { - ga[common.BigToAddress(account.Addr)] = GenesisAccount{Balance: account.Balance} + acc := GenesisAccount{Balance: account.Balance} + if account.Misc != nil { + acc.Nonce = account.Misc.Nonce + acc.Code = account.Misc.Code + + acc.Storage = make(map[common.Hash]common.Hash) + for _, slot := range account.Misc.Slots { + acc.Storage[slot.Key] = slot.Val + } + } + ga[common.BigToAddress(account.Addr)] = acc } return ga } diff --git a/core/genesis_alloc.go b/core/genesis_alloc.go index 542dcc412..94c4ec535 100644 --- a/core/genesis_alloc.go +++ b/core/genesis_alloc.go @@ -22,871 +22,6 @@ package core // nolint: misspell const mainnetAllocData = "\xfa\x04]X\u0793\r\x83b\x011\x8e\u0189\x9agT\x06\x908'\x80t2\x80\x89\n\u05ce\xbcZ\xc6 \x00\x00\u0793\x17bC\x0e\xa9\u00e2nWI\xaf\xdbp\xda_x\u077b\x8c\x89\n\u05ce\xbcZ\xc6 \x00\x00\u0793\x1d\x14\x80K9\x9cn\xf8\x0edWoev`\x80O\xec\v\x89\u3bb5sr@\xa0\x00\x00\u07932@5\x87\x94{\x9f\x15b*h\xd1\x04\xd5M3\xdb\xd1\u0349\x043\x87Oc,\xc6\x00\x00\u0793I~\x92\xcd\xc0\xe0\xb9c\xd7R\xb2)j\u02c7\u0682\x8b$\x89\n\x8fd\x9f\xe7\xc6\x18\x00\x00\u0793K\xfb\xe1Tk\xc6\xc6[\\~\xaaU0K8\xbb\xfe\xc6\u04c9lk\x93[\x8b\xbd@\x00\x00\u0793Z\x9c\x03\xf6\x9d\x17\xd6l\xbb\x8a\xd7!\x00\x8a\x9e\xbb\xb86\xfb\x89lk\x93[\x8b\xbd@\x00\x00\u0793]\x0e\xe8\x15^\xc0\xa6\xffh\bU,\xa5\xf1k\xb5\xbe2:\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u0793v\"\xd8J#K\xb8\xb0x#\x0f\u03c4\xb6z\u9a2c\xae\x89%\xe1\xccQ\x99R\xf8\x00\x00\u0793{\x9f\xc3\x19\x05\xb4\x99K\x04\xc9\xe2\xcf\xdc^'pP?B\x89l]\xb2\xa4\xd8\x15\xdc\x00\x00\u0793\u007fJ#\xca\x00\xcd\x04=%\u0088\x8c\x1a\xa5h\x8f\x81\xa3D\x89)\xf0\xa9[\xfb\xf7)\x00\x00\u0793\x869\u06bb\u3bac\x88{]\xc0\xe4>\x13\xbc\u0487\xd7l\x89\x10\xd0\xe3\xc8}n,\x00\x00\u0793\x89P\x86y\xab\xf8\xc7\x1b\xf6x\x16\x87\x12\x0e>j\x84XM\x89a\x94\x04\x9f0\xf7 
\x00\x00\u0793\x8f\xc7\u02ed\xff\xbd\r\u007f\xe4O\x8d\xfd`\xa7\x9dr\x1a\x1c\x9c\x8965\u026d\xc5\u07a0\x00\x00\u0793\x95`\xa3\xdebxh\xf9\x1f\xa8\xbf\xe1\xc1\xb7\xaf\xaf\b\x18k\x89\x1cg\xf5\xf7\xba\xa0\xb0\x00\x00\u0793\x96\x97G\xf7\xa5\xb3\x06E\xfe\x00\xe4I\x01CZ\xce$\xcc7\x89\\(=A\x03\x94\x10\x00\x00\u0793\x9am}\xb3&g\x9bw\xc9\x03\x91\xa7Gm#\x8f;\xa3>\x89\n\xdaUGK\x814\x00\x00\u0793\x9e\xef\n\b\x86\x05n?i!\x18S\xb9\xb7E\u007f7\x82\u4262\xa8x\x06\x9b(\xe0\x00\x00\u0793\x9f\xdb\xf4N\x1fJcb\xb7i\u00daG_\x95\xa9l+\u01c9\x1e\x93\x12\x83\xcc\xc8P\x00\x00\u07d3\xa5y\u007fR\xc9\u054f\x18\x9f6\xb1\xd4]\x1b\xf6\x04\x1f/k\x8a\x01'\u0473F\x1a\xcd\x1a\x00\x00\u0793\xaaS\x81\xb2\x13\x8e\xbe\xff\xc1\x91\xd5\xd8\u00d1u;p\x98\u04895\xab\xb0\x9f\xfe\u07b6\x80\x00\u0793\xaa\xda%\xea\"\x86p\x9a\xbbB-A\x92?\u04c0\xcd\x04\u01c9#=\xf3)\x9far\x00\x00\u0793\xac\xbf\xb2\xf2ZT\x85\xc79\xefp\xa4N\xee\xeb|e\xa6o\x89\x05k\xc7^-c\x10\x00\x00\u07d3\xac\xc6\xf0\x82\xa4B\x82\x87d\xd1\x1fX\u0589J\xe4\b\xf0s\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u0793\xb2w\xb0\x99\xa8\xe8f\xca\x0e\xc6[\u02c7(O\xd1B\xa5\x82\x89j\xcb=\xf2~\x1f\x88\x00\x00\u0753\xbd\xd4\x01:\xa3\x1c\x04al+\xc9x_'\x88\xf9\x15g\x9b\x88\xb9\xf6]\x00\xf6<\x00\x00\u0793\xc2}c\xfd\xe2K\x92\xee\x8a\x1e~\xd5\xd2m\x8d\xc5\xc8;\x03\x89lk\x93[\x8b\xbd@\x00\x00\u0553\xc4\x0f\xe2\tT#P\x9b\x9f\u0677T21X\xaf#\x10\xf3\x80\u0793\xd7^\xd6\fwO\x8b:ZQs\xfb\x183\xadq\x05\xa2\u0649l\xb7\xe7Hg\xd5\xe6\x00\x00\u07d3\u05cd\x89\xb3_G'\x16\xec\xea\xfe\xbf`\x05'\u04e1\xf9i\x8a\x05\xe0T\x9c\x962\xe1\xd8\x00\x00\u0793\xda\xe2{5\v\xae \xc5e!$\xaf]\x8b\\\xba\x00\x1e\xc1\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u0793\xdc\x01\xcb\xf4Ix\xa4.\x8d\xe8\xe46\xed\xf9B\x05\u03f6\xec\x89O\x0f\xeb\xbc\u068c\xb4\x00\x00\u07d3\u607c-\x10\xdbb\u0785\x84\x83$I\"P4\x8e\x90\xbf\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d3\xf4c\xe17\xdc\xf6%\xfb\U000fc8de\u0298\u04b9h\xcf\u007f\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4\x01\x00\a9K\x8bue\xa1e\x8a\xf8\x8c\xe4cI\x915\u05b7\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x01\r\xf1\xdfK\xed#v\r-\x1c\x03x\x15\x86\xdd\xf7\x91\x8eT\x89\x03@\xaa\xd2\x1b;p\x00\x00\xe0\x94\x01\x0fJ\x98\u07e1\xd9y\x9b\xf5\u01d6\xfbU\x0e\xfb\xe7\xec\xd8w\x8a\x01\xb2\xf2\x92#b\x92\xc7\x00\x00\u07d4\x01\x15PW\x00/k\r\x18\xac\xb98\x8d;\xc8\x12\x9f\x8fz \x89H\xa4\xa9\x0f\xb4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x01k`\xbbmg\x92\x8c)\xfd\x03\x13\xc6f\u068f\x16\x98\xd9\u0149lk\x93[\x8b\xbd@\x00\x00\u07d4\x01l\x85\xe1a;\x90\x0f\xa3W\xb8(;\x12\x0ee\xae\xfc\xdd\b\x89+]\x97\x84\xa9|\xd5\x00\x00\u07d4\x01\x84\x92H\x8b\xa1\xa2\x924\"G\xb3\x18U\xa5Y\x05\xfe\xf2i\x89\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4\x01\x8f \xa2{'\xecD\x1a\xf7#\xfd\x90\x99\xf2\u02f7\x9dbc\x89uy*\x8a\xbd\xef|\x00\x00\u07d4\x01\x91\xebT~{\xf6\x97k\x9b\x1bWuFv\x1d\xe6V\"\xe2\x89lkLM\xa6\u077e\x00\x00\u07d4\x01\x9dp\x95y\xffK\xc0\x9f\xdc\xdd\xe41\xdc\x14G\xd2\xc2`\xbc\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x01\xa2Z_Z\xf0\x16\x9b0\x86L;\xe4\xd7V<\xcdD\xf0\x9e\x89M\x85<\x8f\x89\b\x98\x00\x00\xe0\x94\x01\xa7\xd9\xfa}\x0e\xb1\x18\\g\xe5M\xa8<.u\xdbi\u37ca\x01\x9dJ\xdd\xd0\u063c\x96\x00\x00\u07d4\x01\xa8\x18\x13ZAB\x10\xc3|b\xb6%\xac\xa1\xa5F\x11\xac6\x89\x0e\x189\x8ev\x01\x90\x00\x00\u07d4\x01\xb1\xca\xe9\x1a;\x95Y\xaf\xb3<\xdcmh\x94B\xfd\xbf\xe07\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x01\xb5\xb5\xbcZ\x11\u007f\xa0\x8b4\xed\x1d\xb9D\x06\bYz\xc5H\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x01\xbb\xc1Og\xaf\x069\xaa\xb1D\x1ej\b\xd4\xceqb\t\x0f\x89F\xfc\xf6\x8f\xf8\xbe\x06\x00\x00\xe0\x94\x01\xd08\x15\xc6\x1fAkq\xa2a\n-\xab\xa5\x9f\xf6\xa6\xde[\x8a\x02\x05\xdf\xe5\v\x81\xc8.\x00\x00\u07d4\x01\u0559\xee\r_\x8c8\xab-9.,e\xb7L<\xe3\x18 \x89\x1b\xa5\xab\xf9\xe7y8\x00\x00\u07d4\x01\xe4\x05!\x12%0\u066c\x91\x11<\x06\xa0\x19\vmc\x85\v\x89Hz\x9a0E9D\x00\x00\u07d4\x01\xe6A]X{\x06T\x90\xf1\xed\u007f!\xd6\xe0\xf3\x86\xeegG\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x01\xe8d\xd3Tt\x1bB>oB\x85\x17$F\x8ct\xf5\xaa\x9c\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x01\xed_\xba\x8d.\xabg:\xec\x04-0\xe4\xe8\xa6\x11\xd8\xc5Z\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x01\xfb\x8e\xc1$%\xa0O\x81>F\xc5L\x05t\x8c\xa6\xb2\x9a\xa9\x89\x0e\x15s\x03\x85F|\x00\x00\u07d4\x01\xff\x1e\xb1\u07adP\xa7\xf2\xf9c\x8f\xde\xe6\xec\xcf:{*\u0209 \x86\xac5\x10R`\x00\x00\u07d4\x02\x03b\u00ed\xe8x\u0290\u05b2\u0609\xa4\xccU\x10\xee\xd5\xf3\x898\x88\xe8\xb3\x11\xad\xb3\x80\x00\u07d4\x02\x03\xae\x01\xd4\xc4\x1c\xae\x18e\xe0K\x1f[S\xcd\xfa\xec\xae1\x896\x89\xcd\u03b2\x8c\xd7\x00\x00\u07d4\x02\b\x93a\xa3\xfetQ\xfb\x1f\x87\xf0\x1a-\x86fS\xdc\v\a\x89\x02*\xc7H2\xb5\x04\x00\x00\u07d4\x02\x1fi\x04=\xe8\x8cI\x17\xca\x10\xf1\x84(\x97\xee\xc0X\x9c|\x89kD\u03f8\x14\x87\xf4\x00\x00\u07d4\x02)\x0f\xb5\xf9\xa5\x17\xf8(E\xac\xde\xca\x0f\xc8F\x03\x9b\xe23\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x029\xb4\xf2\x1f\x8e\x05\xcd\x01Q++\xe7\xa0\xe1\x8am\x97F\a\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x02Gr\x12\xff\xddu\xe5\x15VQ\xb7e\x06\xb1dfq\xa1\xeb\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\x02J\t\x8a\xe7\x02\xbe\xf5@l\x9c\"\xb7\x8b\xd4\xeb,\u01e2\x93\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x02K\xdd,{\xfdP\x0e\xe7@O\u007f\xb3\xe9\xfb1\xdd \xfb\u0449\t\xc2\x00vQ\xb2P\x00\x00\u07d4\x02Sg\x96\x03\x04\xbe\xee4Y\x11\x18\xe9\xac-\x13X\xd8\x02\x1a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x02V\x14\x9f[Pc\xbe\xa1N\x15f\x1f\xfbX\xf9\xb4Y\xa9W\x89&)\xf6n\fS\x00\x00\x00\u07d4\x02`=z;\xb2\x97\xc6|\x87~]4\xfb\u0579\x13\xd4\xc6:\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x02a\xad:\x17*\xbf\x13\x15\xf0\xff\xec2p\x98j\x84\t\xcb%\x89\v\b!;\u03cf\xfe\x00\x00\u07d4\x02d2\xaf7\xdcQ\x13\xf1\xf4mH\nM\u0c80R#~\x89\x13I\xb7\x86\xe4\v\xfc\x00\x00\u07d4\x02f\xab\x1ck\x02\x16#\v\x93\x95D=_\xa7^hEh\u018965\u026d\xc5\u07a0\x00\x00\u07d4\x02u\x1d\u018c\xb5\xbdsp'\xab\xf7\u0777s\x90\xcdw\xc1k\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\x02w\x8e9\x0f\xa1u\x10\xa3B\x8a\xf2\x87\fBsT}8l\x8a\x03lw\x80\x18\x8b\xf0\xef\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94\x037|\x0eUkd\x01\x03(\x9aa\x89\u1baecI4g\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x03IcM\u00a9\xe8\f?w!\xee+PF\xae\xaa\xed\xfb\xb5\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x03U\xbc\xac\xbd!D\x1e\x95\xad\xee\xdc0\xc1r\x18\u0224\b\u0389\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x03n\xef\xf5\xba\x90\xa6\x87\x9a\x14\xdf\xf4\xc5\x04;\x18\xca\x04`\u0249\x05k\xc7^-c\x10\x00\x00\xe0\x94\x03qKA\u04a6\xf7Q\x00\x8e\xf8\xddM+)\xae\u02b8\xf3n\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\x03r\xe8RX.\t44J\x0f\xed!x0M\xf2]F(\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x03r\xeeU\b\xbf\x81c\xed(N^\xef\x94\xceMsg\xe5\"\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x03}\xd0V\xe7\xfd\xbdd\x1d\xb5\xb6\xbe\xa2\xa8x\n\x83\xfa\u1009\a\x96\xe3\xea?\x8a\xb0\x00\x00\xe0\x94\x03\x83#\xb1\x84\xcf\xf7\xa8*\xe2\u1f67y?\xe41\x9c\xa0\xbf\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x03\x87y\xca-\xbef>c\xdb?\xe7V\x83\xea\x0e\xc6.#\x83\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\xe0\x94\x03\x8eE\xea\xdd=\x88\xb8\u007f\xe4\u06b0fh\x05\"\xf0\xdf\xc8\xf9\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x03\x92T\x9ar\u007f\x81eT)\u02d2\x8bR\x9f%\xdfM\x13\x85\x89\x01lC\xa0\xee\xa0t\x00\x00\u07d4\x03\x94\xb9\x0f\xad\xb8`O\x86\xf4?\xc1\xe3]1$\xb3*Y\x89\x89)j\xa1@'\x8ep\x00\x00\u0794\x03\x9ezN\xbc(N,\xcdB\xb1\xbd\xd6\v\xd6Q\x1c\x0fw\x06\x88\xf0\x15\xf2W6B\x00\x00\u07d4\x03\x9e\xf1\xceR\xfeyc\xf1f\u0562u\u0131\x06\x9f\xe3\xa82\x89\x15\xaf9\u4ab2t\x00\x00\u07d4\x03\xa2l\xfcL\x181op\u055e\x9e\x1ay\xee>\x8b\x96/L\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x03\xaab(\x81#m\xd0\xf4\x94\f$\xc3$\xff\x8b{~!\x86\x89\xadx\xeb\u016cb\x00\x00\x00\u07d4\x03\xafz\xd9\xd5\"<\xf7\xc8\xc1? \xdfg\xeb\xe5\xff\u017bA\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x03\xb0\xf1|\xd4F\x9d\xdc\u03f7\xdai~\x82\xa9\x1a_\x9ewt\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x03\xb4\x1bQ\xf4\x1d\xf2\r\xd2y\xba\xe1\x8c\x12w_w\xadw\x1c\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x03\xbe[F)\xae\xfb\xbc\xab\x9d\xe2m9Wl\xb7\xf6\x91\xd7d\x89\n\xdf0\xbap\u0217\x00\x00\u07d4\x03\xc6G\xa9\xf9)\xb0x\x1f\xe9\xae\x01\u02a3\xe1\x83\xe8vw~\x89\x18*\xb7\xc2\f\xe5$\x00\x00\u07d4\x03\xc9\x1d\x92\x946\x03\xe7R >\x054\x0eV`\x13\xb9\x00E\x89+|\xc2\xe9\xc3\"\\\x00\x00\u07d4\x03\xcbLOE\x16\xc4\xffy\xa1\xb6$O\xbfW.\x1c\u007f\xeay\x89\x94\x89#z\u06daP\x00\x00\u07d4\x03\u02d8\u05ec\xd8\x17\u079d\x88m\"\xfa\xb3\xf1\xb5}\x92\xa6\b\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4\x03\u031d-!\xf8k\x84\xac\x8c\xea\xf9q\u06e7\x8a\x90\xe6%p\x89WG=\x05\u06ba\xe8\x00\x00\u07d4\x03\xd1rO\xd0\x0eT\xaa\xbc\xd2\xde*\x91\xe8F+\x10I\xdd:\x89\x8f\x1d\\\x1c\xae7@\x00\x00\u07d4\x03\xde\xdf\xcd\v<.\x17\xc7\x05\xda$\x87\x90\uf626\xbdWQ\x89Hz\x9a0E9D\x00\x00\u07d4\x03\u8c04SuW\xe7\t\xea\xe2\xe1\u1966\xbc\xe1\xef\x83\x14\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x03\xeam&\u0400\xe5z\xee9&\xb1\x8e\x8e\xd7:N[(&\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x03\xeb<\xb8`\xf6\x02\x8d\xa5T\xd3D\xa2\xbbZP\n\xe8\xb8o\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x03\xeb\xc6?\xdaf`\xa4e\x04^#_\xben\\\xf1\x95s_\x89\a\xb0l\xe8\u007f\xddh\x00\x00\xe0\x94\x03\xefj\xd2\x0f\xf7\xbdO\x00+\xacX\xd4uD\u03c7\x9a\xe7(\x8a\x01u\xc7X\u0439n\\\x00\x00\u07d4\x03\xf7\xb9 
\b\x81:\xe0\xa6v\xeb!(\x14\xaf\xab5\"\x10i\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x04\x11p\xf5\x81\u0780\xe5\x8b*\x04\\\x8f|\x14\x93\xb0\x01\xb7\u02c90\xc8\xeca2\x89\nZ\xa8P\t\xe3\x9c\x00\x00\u07d4\x04i\xe8\xc4@E\v\x0eQ&&\xfe\x81~gT\xa8\x15(0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x04m'K\x1a\xf6\x15\xfbPZvJ\xd8\u0767p\xb1\xdb/=\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x04}Z&\u05ed\x8f\x8ep`\x0fp\xa3\x98\u076a\x1c-\xb2o\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x04~\x87\xc8\xf7\xd1\xfc\xe3\xb0\x13S\xa8Xb\xa9H\xac\x04\x9f>\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4\x04\u007f\x9b\xf1R\x9d\xaf\x87\xd4\a\x17^o\x17\x1b^Y\xe9\xff>\x89#<\x8f\xe4'\x03\xe8\x00\x00\xe0\x94\x04\x85'2\xb4\xc6R\xf6\xc2\u53b3e\x87\xe6\nb\xda\x14\u06ca\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\x04\x8a\x89p\xeaAE\xc6MU\x17\xb8\xde[F\xd0YZ\xad\x06\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x04\x9c]K\xc6\xf2]NEli{R\xa0x\x11\xcc\u045f\xb1\x89\x10D\x00\xa2G\x0eh\x00\x00\u07d4\x04\xa1\xca\xda\x1c\xc7Q\b/\xf8\u0692\x8e<\xfa\x00\b \xa9\xe9\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x04\xa8\n\xfa\xd5>\xf1\xf8Ae\xcf\xd8R\xb0\xfd\xf1\xb1\xc2K\xa8\x89\x03$\xe9d\xb3\xec\xa8\x00\x00\u07d4\x04\xaa\xfc\x8a\xe5\xceoI\x03\u021d\u007f\xac\x9c\xb1\x95\x12\"Gw\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x04\xbaK\xb8q@\x02,!Jo\xacB\xdbZ\x16\u0755@E\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x04\xba\x8a?\x03\xf0\x8b\x89P\x95\x99M\xdaa\x9e\u06ac\xee>z\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x04\xc2\xc6K\xb5L>\xcc\xd0U\x85\xe1\x0e\xc6\xf9\x9a\f\xdb\x01\xa3\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x04\xceE\xf6\x00\xdb\x18\xa9\u0405\x1b)\xd99>\xbd\xaa\xfe=\u0149\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x04\u05b8\xd4\u0686t\a\xbb\x99wI\u07bb\xcd\xc0\xb3XS\x8a\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x04\xd78\x96\xcfe\x93\xa6\x91\x97*\x13\xa6\xe4\x87\x1f\xf2\xc4+\x13\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x04\xd8*\xf9\xe0\x1a\x93m\x97\xf8\xf8Y@\xb9p\xf9\xd4\u06d96\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x04\xe5\xf5\xbc|\x92?\xd1\xe3\x175\xe7.\xf9h\xfdg\x11\fn\x89WU\x1d\xbc\x8ebL\x00\x00\u07d4\x04\xec\xa5\x01c\n\xbc\xe3R\x18\xb1t\x95k\x89\x1b\xa2^\xfb#\x8966\x9e\xd7t}&\x00\x00\u07d4\x05\x05\xa0\x8e\"\xa1\t\x01Z\"\xf6\x850STf*U1\u0549\x8c\xf2?\x90\x9c\x0f\xa0\x00\x00\u07d4\x05\x14\x95L\xe8\x81\xc807\x03d\x00\x89lO\xd1\xee$nx\x00\x00\u07d4\x05\x1dBBv\xb2\x129fQ\x86\x13=e;\xb8\xb1\x86/\x89\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x05!\xbc:\x9f\x87\x11\xfe\xcb\x10\xf5\a\x97\xd7\x10\x83\xe3A\ub749\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x05#mL\x90\xd0e\xf9\u34c3X\xaa\xff\xd7w\xb8j\xecI\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x05*X\xe05\xf1\xfe\x9c\xdd\x16\x9b\xcf \x97\x03E\xd1+\x9cQ\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4\x05.\xab\x1fa\xb6\xd4U\x17(?A\xd1D\x18$\x87\x87I\u0409\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x053n\x9ar'(\xd9c\xe7\xa1\xcf'Y\xfd\x02tS\x0f\u02891\xa2D?\x88\x8ay\x80\x00\u07d4\x054q\u035aA\x92[9\x04\xa5\xa8\xff\xca6Y\xe04\xbe#\x89\n\xd2\x01\xa6yO\xf8\x00\x00\u07d4\x056\x1d\x8e\xb6\x94\x1dN\x90\xfb~\x14\x18\xa9Z2\xd5%w2\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x05B:T\xc8\xd0\xf9p~pAs\xd9#\xb9F\xed\xc8\xe7\x00\x89\x06\xea\x03\u00bf\x8b\xa5\x80\x00\u07d4\x05D\f[\a;R\x9bH) \x9d\xff\x88\t\x0e\a\xc4\xf6\xf5\x89E\u04977\xe2/ \x00\x00\u07d4\x05Z\xb6X\xc6\xf0\xedO\x87^\xd6t.K\xc7)-\x1a\xbb\xf0\x89\x04\x86\u02d7\x99\x19\x1e\x00\x00\u07d4\x05[\xd0,\xaf\x19\xd6 
+\xbc\u0703m\x18{\xd1\xc0\x1c\xf2a\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x05^\xacO\x1a\xd3\xf5\x8f\v\xd0$\u058e\xa6\r\xbe\x01\u01af\xb3\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x05fQU\xccI\xcb\xf6\xaa\xbd\u056e\x92\xcb\xfa\xad\x82\xb8\xc0\xc1\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x05f\x86\a\x8f\xb6\xbc\xf9\xba\n\x8a\x8d\xc6:\x90o_\xea\xc0\xea\x89\x1b\x18\x1eK\xf24<\x00\x00\u07d4\x05iks\x91k\xd3\x03>\x05R\x1e2\x11\xdf\xec\x02n\x98\xe4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x05k\x15F\x89O\x9a\x85\xe2\x03\xfb3m\xb5i\xb1l%\xe0O\x89\t.\xdb\t\xff\b\u0600\x00\u07d4\x05yI\xe1\xca\x05pF\x9eL\xe3\u0190\xaea:k\x01\xc5Y\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x05}\u049f-\x19\xaa=\xa4#'\xeaP\xbc\xe8o\xf5\xc9\x11\u0649\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x05\u007f\u007f\x81\xcdz@o\xc4Y\x94@\x8bPI\x91,Vdc\x89\\(=A\x03\x94\x10\x00\x00\u07d4\x05\x91]N\"Zf\x81b\xae\xe7\xd6\xc2_\xcf\xc6\xed\x18\xdb\x03\x89\x03\x98\xc3ry%\x9e\x00\x00\u07d4\x05\x96\xa2}\xc3\xee\x11_\xce/\x94\xb4\x81\xbc z\x9e&\x15%\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x05\xa80rC\x02\xbc\x0fn\xbd\xaa\x1e\xbe\xee\xb4nl\xe0\v9\x89\x05V\xf6L\x1f\xe7\xfa\x00\x00\u07d4\x05\xae\u007f\u053b\u0300\xca\x11\xa9\n\x1e\u01e3\x01\xf7\xcc\u0303\u06c91T\xc9r\x9d\x05x\x00\x00\u07d4\x05\xbbd\xa9\x16\xbef\xf4`\xf5\xe3\xb6C2\x11\r \x9e\x19\xae\x89\u3bb5sr@\xa0\x00\x00\xe0\x94\x05\xbfO\xcf\xe7r\xe4[\x82dC\x85.l5\x13P\xcer\xa2\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94\x05\xc6@\x04\xa9\xa8&\xe9N^N\xe2g\xfa*v2\xddNo\x8a\x03m\xc4.\xbf\xf9\v\u007f\x80\x00\xe0\x94\x05\xc76\xd3e\xaa7\xb5\xc0\xbe\x9c\x12\u022d\\\xd9\x03\xc3,\xf9\x8a\x01E^{\x80\n\x86\x88\x00\x00\xe0\x94\x05\xcbl;\x00r\xd3\x11ga\xb52\xb2\x18D;S\xe8\xf6\u014a\x1e\x02\xc3\xd7\xfc\xa9\xb6(\x00\x00\u07d4\x05\xd0\xf4\xd7(\xeb\xe8.\x84\xbfYu\x15\xadA\xb6\v\xf2\x8b9\x89\u3bb5sr@\xa0\x00\x00\u07d4\x05\u058d\xada\u04fb\u07f3\xf7y&\\IGJ\xff?\xcd0\x89\x02\"\xc5]\xc1Q\x9d\x80\x00\u07d4\x05\xe6q\xdeU\xaf\xec\x96K\aM\xe5t\xd5\x15\x8d]!\xb0\xa3\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\x05\xe9{\tI,\u058fc\xb1+\x89.\xd1\xd1\x1d\x15,\x0e\u02897\b\xba\xed=h\x90\x00\x00\u07d4\x05\xf3c\x1fVd\xbd\xad]\x012\xc88\x8d6\xd7\u0612\t\x18\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\x06\t\xd8:l\xe1\xff\u0276\x90\xf3\xe9\xa8\x1e\x98>\x8b\xdcM\x9d\x8a\x0e\u04b5%\x84\x1a\xdf\xc0\x00\x00\u07d4\x06\x1e\xa4\x87|\u0409D\xebd\u0096n\x9d\xb8\xde\xdc\xfe\xc0k\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x06%\xd0`V\x96\x8b\x00\"\x06\xff\x91\x98\x01@$+\xfa\xa4\x99\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x06(\xbf\xbeU5x/\xb5\x88@k\xc9f`\xa4\x9b\x01\x1a\xf5\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d4\x061\u044b\xbb\xbd0\xd9\xe1s+\xf3n\xda\xe2\u0389\x01\xab\x80\x89\xa3\xf9\x88U\xec9\x90\x00\x00\u07d4\x061\xdc@\xd7NP\x95\xe3r\x9e\xdd\xf4\x95D\xec\xd49og\x89\b\xacr0H\x9e\x80\x00\x00\xe0\x94\x067Y\xdd\x1cN6.\xb1\x93\x98\x95\x1f\xf9\xf8\xfa\xd1\xd3\x10h\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x06_\xf5u\xfd\x9c\x16\xd3\xcbo\u058f\xfc\x8fH?\xc3.\xc85\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x06a\x8e\x9dWb\xdfb\x02\x86\x01\xa8\x1dD\x87\u05a0\xec\xb8\x0e\x89Hz\x9a0E9D\x00\x00\xe0\x94\x06fG\xcf\xc8]#\xd3v\x05W= 
\x8c\xa1T\xb2D\xd7l\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x06xeJ\xc6v\x1d\xb9\x04\xa2\xf7\xe8Y^\xc1\xea\xacsC\b\x89/\x98\xb2\x9c(\x18\xf8\x00\x00\u07d4\x06\x86\n\x93RYU\xffbI@\xfa\xdc\xff\xb8\xe1I\xfdY\x9c\x89lh\xcc\u041b\x02,\x00\x00\xe0\x94\x06\x8c\xe8\xbdn\x90*E\u02c3\xb5\x15A\xb4\x0f9\xc4F\x97\x12\x8a\x01\x1c\x0f\x9b\xadJF\xe0\x00\x00\u07d4\x06\x8e)\xb3\xf1\x91\xc8\x12\xa699\x18\xf7\x1a\xb93\xaehG\xf2\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\x06\x8eeWf\xb9D\xfb&6\x19e\x87@\xb8P\xc9J\xfa1\x89\x01\xe8\u007f\x85\x80\x9d\xc0\x00\x00\u0794\x06\x96N-\x17\xe9\x18\x9f\x88\xa8 96\xb4\n\xc9nS<\x06\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\x06\x99L\xd8:\xa2d\n\x97\xb2`\vA3\x9d\x1e\r>\xdel\x89\r\x8drkqw\xa8\x00\x00\u07d4\x06\x9e\u042bz\xa7}\xe5q\xf1a\x06\x05\x1d\x92\xaf\xe1\x95\xf2\u0409\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x06\xac&\xad\x92\u02c5\x9b\u0550]\xdc\xe4&j\xa0\xecP\xa9\u0149*\x03I\x19\u07ff\xbc\x00\x00\u07d4\x06\xb0\xc1\xe3\u007fZ^\u013b\xf5\b@T\x8f\x9d:\xc0(\x88\x97\x89\xd8\u0602\u148e}\x00\x00\u07d4\x06\xb0\xff\x83@s\xcc\xe1\xcb\xc9\xeaU~\xa8{`Yc\u8d09\x10CV\x1a\x88)0\x00\x00\xe0\x94\x06\xb1\x06d\x9a\xa8\xc4!\xdd\xcd\x1b\x8c2\xcd\x04\x18\xcf0\xda\x1f\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\x06\xb5\xed\xe6\xfd\xf1\xd6\xe9\xa3G!7\x9a\xea\xa1|q=\xd8*\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x06\xcb\xfa\b\xcd\xd4\xfb\xa77\xba\xc4\a\xbe\x82$\xf4\xee\xf3X(\x89 +\xe5\xe88.\x8b\x80\x00\u07d4\x06\xd6\xcb0\x84\x81\xc36\xa6\xe1\xa2%\xa9\x12\xf6\xe65Y@\xa1\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\x06\xdc\u007f\x18\xce\xe7\xed\xab[yS7\xb1\xdfj\x9e\x8b\u062eY\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x06\xf6\x8d\xe3\xd79\xdbA\x12\x1e\xac\xf7y\xaa\xda=\xe8v!\a\x89\x01\x84\x93\xfb\xa6N\xf0\x00\x00\u07d4\x06\xf7\u070d\x1b\x94b\xce\xf6\xfe\xb13h\xa7\xe3\x97K\t\u007f\x9f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\a\x01\xf9\xf1G\xecHhV\xf5\xe1\xb7\x1d\xe9\xf1\x17\xe9\x9e!\x05\x89\te\xdaq\u007f\u0578\x00\x00\u07d4\a\r]6L\xb7\xbb\xf8\"\xfc,\xa9\x1a5\xbd\xd4A\xb2\x15\u0549lk\x93[\x8b\xbd@\x00\x00\xe0\x94\a\x1d\xd9\r\x14\xd4\x1fO\xf7\xc4\x13\xc2B8\xd35\x9c\xd6\x1a\a\x8a\a\xb5?y\xe8\x88\xda\xc0\x00\x00\u07d4\a&\xc4.\x00\xf4T\x04\x83n\xb1\xe2\x80\xd0s\xe7\x05\x96\x87\xf5\x89X\x00>?\xb9G\xa3\x80\x00\xe0\x94\a'\xbe\n*\x00! 
[... binary blob elided: many wrapped lines of an escaped Go string constant containing RLP-encoded genesis account allocations (address/balance pairs), apparently the genesis allocation data from core/genesis_alloc.go; the constant is machine-generated and not human-editable ...]
l\xe2.\xa4\x80\xe8Y@\xd3\x13\x14\xe0\xd6ONM:\x04\x8965\u026d\xc5\u07a0\x00\x00\u07d4!2\xc0Qj.\x17\x17J\xc5G\xc4;{\x00 \xd1\xebLY\x895e\x9e\xf9?\x0f\xc4\x00\x00\xe0\x94!@\x8bMz,\x0en\xcaAC\xf2\xca\u037b\u033a\x12\x1b\u060a\x04<3\xc1\x93ud\x80\x00\x00\u07d4!Kt9U\xa5\x12\xden\r\x88j\x8c\xbd\x02\x82\xbe\xe6\u04a2\x89lk\x93[\x8b\xbd@\x00\x00\u07d4!L\x89\u017d\x8e}\"\xbcWK\xb3^H\x95\x02\x11\xc6\xf7v\x89\x01\x06T\xf2X\xfd5\x80\x00\xe0\x94!Ti\x14\xdf\u04ef*\xddA\xb0\xff>\x83\xff\xdat\x14\xe1\xe0\x8a\x01C\x95\xe78ZP.\x00\x00\u07d4!X.\x99\xe5\x02\xcb\xf3\xd3\xc2;\xdf\xfbv\xe9\x01\xacmV\xb2\x89\x05k\xc7^-c\x10\x00\x00\u07d4!Y$\b\x13\xa70\x95\xa7\xeb\xf7\u00f3t>\x80(\xae_\t\x89lk\x93[\x8b\xbd@\x00\x00\u07d4!`\xb4\xc0,\xac\n\x81\u0791\b\xdeCE\x90\xa8\xbf\xe6\x875\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94!nA\x86N\xf9\x8f\x06\r\xa0\x8e\xca\xe1\x9a\xd1\x16j\x17\xd06\x8a\x016\x9f\xb9a(\xacH\x00\x00\u07d4!\x84o/\xdfZA\xed\x8d\xf3n^\xd8TM\xf7Y\x88\xec\xe3\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94!\xa6\xdbe'F{\xc6\xda\xd5K\xc1n\x9f\xe2\x95;g\x94\xed\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4!\xa6\xfe\xb6\xab\x11\xc7f\xfd\xd9w\xf8\xdfA!\x15_G\xa1\xc0\x89\x03\x19\xcf8\xf1\x00X\x00\x00\u07d4!\xb1\x82\xf2\xda+8D\x93\xcf_5\xf8=\x9d\x1e\xe1O*!\x89lk\x93[\x8b\xbd@\x00\x00\u07d4!\xbf\xe1\xb4\\\xac\xdebt\xfd\x86\b\u0661x\xbf>\xebn\u0709l\xee\x06\u077e\x15\xec\x00\x00\u07d4!\xc0s\x80HOl\xbc\x87$\xad2\xbc\x86L;Z\xd5\x00\xb7\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94!\u00e8\xbb\xa2g\xc8\u0322{\x1a\x9a\xfa\xba\xd8o`z\xf7\b\x8a\x01\xe4\xa3lI\u06580\x00\x00\u07d4!\xcem[\x90\x18\xce\xc0J\u0596yD\xbe\xa3\x9e\x800\xb6\xb8\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4!\xd0'\x05\xf3\xf6I\x05\xd8\x0e\xd9\x14y\x13\xea\x8cs\a\u0595\x89I\xed\xb1\xc0\x98\x876\x00\x00\u07d4!\xd1?\f@$\xe9g\xd9G\a\x91\xb5\x0f\"\xde:\xfe\xcf\x1b\x89\xf1Z\xd3^.1\xe5\x00\x00\xe0\x94!\xdb\u06c1z\r\x84\x04\u01bd\xd6\x15\x047N\x9cC\xc9!\x0e\x8a\x02\x1e\x18\xb9\xe9\xabE\xe4\x80\x00\xe0\x94!\xdf\x1e\xc2KNK\xfey\xb0\xc0\x95\u03ba\xe1\x98\xf2\x91\xfb\u044a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94!\xdf-\u036ft\xb2\xbf\x804\x04\xddM\xe6\xa3^\xab\xec\x1b\xbd\x8a\x01w\"J\xa8D\xc7 \x00\x00\u07d4!\xe2\x19\u021c\xa8\xac\x14\xaeL\xbaa0\xee\xb7}\x9em9b\x89*\u035f\xaa\xa08\xee\x00\x00\u07d4!\xe5\u04ba\xe9\x95\xcc\xfd\b\xa5\xc1k\xb5$\xe1\xf60D\x8f\x82\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4!\xe5\xd7s 0L \x1c\x1eS\xb2a\xa1#\u0421\x06>\x81\x89\x04\xb6\xfa\x9d3\xddF\x00\x00\xe0\x94!\xea\xe6\xfe\xff\xa9\xfb\xf4\u0347OG9\xac\xe50\u033eY7\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4!\xec\xb2\u07e6Wy\xc7Y-\x04\x1c\xd2\x10Z\x81\xf4\xfdNF\x8965\u026d\xc5\u07a0\x00\x00\u07d4!\uff20\x9b5\x80\xb9\x8es\xf5\xb2\xf7\xf4\xdc\v\xf0,R\x9c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4!\xfd\v\xad\xe5\xf4\xeftt\xd0X\xb7\xf3\xd8T\xcb\x13\x00RN\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94!\xfdG\xc5%`\x12\x19\x8f\xa5\xab\xf11\xc0mj\xa1\x96_u\x8a\x01\xab,\xf7\xc9\xf8~ 
\x00\x00\u07d4!\xfdl]\x97\xf9\xc6\x00\xb7h!\xdd\xd4\xe7v5\x0f\xce+\xe0\x89lj\u04c2\xd4\xfba\x00\x00\u07d4\"\r\u018d\xf0\x19\xb6\xb0\u033f\xfbxKZZ\xb4\xb1]@`\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\"\x0e+\x92\xc0\xf6\xc9\x02\xb5\x13\xd9\xf1\xe6\xfa\xb6\xa8\xb0\xde\xf3\u05c9+^:\xf1k\x18\x80\x00\x00\u07d4\"V\x1cY1\x14560\x9c\x17\xe82X{b\\9\v\x9a\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\"W\xfc\xa1jn\\*d|<)\xf3l\xe2)\xab\x93\xb1~\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\"]5\xfa\xed\xb3\x91\u01fc-\xb7\xfa\x90q\x16\x04\x05\x99m\x00\x89\t\x18T\xfc\x18bc\x00\x00\u07d4\"_\x9e\xb3\xfbo\xf3\xe9\xe3\xc8D~\x14\xa6n\x8dO7y\xf6\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\"r\x18n\xf2}\xcb\xe2\xf5\xfc70P\xfd\xae\u007f*\xce#\x16\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d4\"s\xba\u05fcNHv\"\xd1u\xefzf\x98\x8bj\x93\xc4\xee\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\"v&K\xec\x85&\xc0\xc0\xf2pgz\xba\xf4\xf0\xe4A\xe1g\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\"\x82B\xf83n\xec\xd8$.\x1f\x00\x0fA\x93~q\xdf\xfb\xbf\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\"\x84*\xb80\xdaP\x99\x13\xf8\x1d\xd1\xf0O\x10\xaf\x9e\xdd\x1cU\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\"\x94O\xbc\xa9\xb5yc\bN\xb8M\xf7\xc8_\xb9\xbc\u07f8V\x89\xfc\x11\x8f\uf43a8\x80\x00\u07d4\"\x9c\xc4q\x1bbu^\xa2\x96DZ\u00f7\u007f\xc63\x82\x1c\xf2\x89\x02#\xe8\xb0R\x192\x80\x00\u0794\"\x9eC\r\xe2\xb7OD&Q\xdd\u0377\x01v\xbc\x05L\xadT\x88\xbb\xf9\x81\xbcJ\xaa\x80\x00\u07d4\"\x9fO\x1a*OT\atP[G\a\xa8\x1d\xe4D\x10%[\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\"\x9f\xf8\v\xf5p\x80\t\xa9\xf79\xe0\xf8\xb5`\x91@\x16\u0566\x89\x12\x11\xec\xb5m\x13H\x80\x00\u07d4\"\xa2X\x12\xabV\xdc\xc4#\x17^\xd1\u062d\xac\xce3\xcd\x18\x10\x89dI\xe8NG\xa8\xa8\x00\x00\xe0\x94\"\xb9j\xb2\xca\xd5]\xb1\x00\xb50\x01\xf9\xe4\xdb7\x81\x04\xc8\a\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\"\xbd\xff\xc2@\xa8\x8f\xf7C\x1a\xf3\xbf\xf5\x0e\x14\xda7\xd5\x18>\x8965\u026d\xc5\u07a0\x00\x00\u07d4\"\xce4\x91Y\xee\xb1D\xef\x06\xff&6X\x8a\xefy\xf6(2\x89\n1\x06+\xee\xedp\x00\x00\u07d4\"\xdbU\x9f,<\x14u\xa2\xe6\xff\xe8:YyY\x91\x96\xa7\xfa\x8965\u026d\xc5\u07a0\x00\x00\u07d4\"\xe1QX\xb5\xee>\x86\xeb\x032\xe3\u6a6cl\u0675^\u0349\b\xacr0H\x9e\x80\x00\x00\u07d4\"\xe2H\x8e-\xa2jI\xae\x84\xc0\x1b\xd5K!\xf2\x94x\x91\u0189]\u0212\xaa\x111\xc8\x00\x00\u07d4\"\xe5\x12\x14\x9a\x18\xd3i\xb7\x86\xc9\xed\xab\xaf\x1d\x89N\xe0.g\x14a\\\x00\x00\u07d4\"\xeb}\xb0\xbaV\xb0\xf8\xb8\x16\u0332\x06\xe6\x15\xd9)\x18[\r\x89\x04])s~\"\xf2\x00\x00\u07d4\"\xee\xd3'\xf8\xeb\x1d\x138\xa3\xcb{\x0f\x8aK\xaaY\a\u0355\x89\x01E]_Hw\b\x80\x00\xe0\x94\"\xf0\x04\u07cd\xe9\xe6\xeb\xf5#\u032c\xe4W\xac\xcb&\xf9r\x81\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u0794\"\xf2\xdc\xffZ\u05cc>\xb6\x85\v\\\xb9Q\x12{e\x95\"\u623e 
-j\x0e\xda\x00\x00\u07d4\"\xf3\xc7y\xddy\x02>\xa9*x\xb6\\\x1a\x17\x80\xf6-\\J\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\"\xfe\x88M\x907)\x1bMR\xe6(Z\xe6\x8d\xea\v\xe9\xff\xb5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#\x06\u07d3\x1a\x94\rX\xc0\x16e\xfaM\b\x00\x80,\x02\xed\xfe\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94#\t\xd3@\x91D[22Y\v\xd7\x0fO\x10\x02[,\x95\t\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4#\x12\x00F\xf6\x83!\x02\xa7R\xa7fVi\x1c\x86>\x17\u5709\x11\xe0\xe4\xf8\xa5\v\xd4\x00\x00\u07d4#\x1a\x15\xac\xc1\x99\u021f\xa9\xcb\"D\x1c\xc7\x030\xbd\xcc\xe6\x17\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4#\x1d\x94\x15]\xbc\xfe*\x93\xa3\x19\xb6\x17\x1fc\xb2\v\u04b6\xfa\x89\xcf\x14{\xb9\x06\xe2\xf8\x00\x00\u07d4#(2\xcdYw\xe0\nL0\xd0\x16?.$\xf0\x88\xa6\xcb\t\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4#,m\x03\xb5\xb6\xe6q\x1e\xff\xf1\x90\xe4\x9c(\xee\xf3l\x82\xb0\x89Hz\x9a0E9D\x00\x00\xe0\x94#,\xb1\xcdI\x99<\x14J?\x88\xb3a\x1e#5i\xa8k\u058a\x03L`lB\u042c`\x00\x00\u07d4#,\xe7\x82Pb%\xfd\x98`\xa2\xed\xc1Jz0Gsm\xa2\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4#/R]U\x85\x9b}N`\x8d H\u007f\xaa\xdb\x00)15\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94#4\u0150\u01e4\x87i\x100E\u0176SL\x8a4i\xf4J\x8a\x03\xb1\x99\a=\xf7-\xc0\x00\x00\u07d4#7n\u02bftl\xe53!\xcfB\xc8fI\xb9+g\xb2\xff\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#7\x8fB\x92m\x01\x84\xb7\x93\xb0\xc8'\xa6\xdd>=3O\u0349\x03\t'\xf7L\x9d\xe0\x00\x00\u07d4#8B\xb1\xd0i/\xd1\x11@\xcfZ\u0364\xbf\x960\xba\xe5\xf8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#9\xe9I(p\xaf\xea%7\xf3\x89\xac/\x83\x83\x02\xa3<\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#;\xdd\xdd]\xa9HR\xf4\xad\xe8\xd2\x12\x88V\x82\xd9\ak\u0189\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4#OF\xba\xb7?\xe4]1\xbf\x87\xf0\xa1\xe0Fa\x99\xf2\ubb09\x1aJ\xba\"\\ t\x00\x00\u07d4#U\x1fV\x97_\xe9+1\xfaF\x9cI\xeaf\xeefb\xf4\x1e\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4#V\x95B\xc9}V`\x18\xc9\a\xac\xfc\xf3\x91\xd1@g\xe8~\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94#_\xa6l\x02^\xf5T\x00p\xeb\xcf\r7-\x81w\xc4g\xab\x8a\a\x12\x9e\x1c\xdf7>\xe0\x00\x00\xe0\x94#r\xc4\xc1\u0253\x9fz\xafl\xfa\xc0@\x90\xf0\x04t\x84\n\t\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4#s\f5z\x91\x02nD\xb1\xd0\xe2\xfc*Q\xd0q\xd8\xd7{\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4#v\xad\xa9\x033\xb1\u0441\bL\x97\xe6E\xe8\x10\xaa[v\xf1\x89(\xa8WBTf\xf8\x00\x00\u07d4#x\xfdC\x82Q\x1e\x96\x8e\u0452\x10g7\xd3$\xf4T\xb55\x8965\u026d\xc5\u07a0\x00\x00\u07d4#\x82\xa9\u050e\xc8>\xa3e(\x90\xfd\x0e\u7710{[-\xc1\x89\a?u\u0460\x85\xba\x00\x00\u07d4#\x83\xc2\"\xe6~\x96\x91\x90\xd3!\x9e\xf1M\xa3xP\xe2lU\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#\x8akv5%/RDHl\n\xf0\xa7: s\x85\xe09\x89JD\x91\xbdm\xcd(\x00\x00\u07d4#\x9as>k\x85Z\u0152\xd6c\x15a\x86\xa8\xa1t\xd2D\x9e\x89X\xbe7X\xb2A\xf6\x00\x00\xe0\x94#\xab\t\xe7?\x87\xaa\x0f;\xe0\x13\x9d\xf0\xc8\xebk\xe5cO\x95\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94#\xab\xd9\xe9>yW\xe5\xb66\xbeey\x05\x1c\x15\xe5\xce\v\x0e\x8a\x03\xa3\xc8\xf7\xcb\xf4,8\x00\x00\u07d4#\xb1\u0111\u007f\xbd\x93\xee=H8\x93\x06\x95s\x84\xa5Il\xbf\x89\xd8\xd8X?\xa2\xd5/\x00\x00\xe0\x94#\xba8d\xdaX=\xabV\xf4 \x87<7g\x96\x90\xe0/\x00\x8a\x02\x13BR\r_\xec 
\x00\x00\u07d4#\xc5Z\xebW9\x87o\n\xc8\xd7\xeb\xea\x13\xber\x96\x85\xf0\x00\x89Hz\x9a0E9D\x00\x00\u07d4#\u025b\xa0\x87D\x8e\x19\xc9p\x1d\xf6n\f\xabR6\x831\xfa\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#\xcc\xc3\u01ac\xd8\\.F\fO\xfd\xd8+\xc7]\xc8I\xea\x14\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4#\xcd%\x98\xa2\x0e\x14\x9e\xad*\u0593yWn\xce\xdb`\u3389lk\x93[\x8b\xbd@\x00\x00\u07d4#\u07cfH\xee\x00\x92V\xeay~\x1f\xa3i\xbe\xeb\xcfk\xc6c\x89|\xd3\xfa\xc2m\x19\x81\x80\x00\u07d4#\xe2\u01a8\xbe\x8e\n\u03e5\xc4\xdf^6\x05\x8b\xb7\u02ecZ\x81\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#\xeaf\x9e5d\x81\x9a\x83\xb0\xc2l\x00\xa1m\x9e\x82olF\x89M\x8dl\xa9h\xca\x13\x00\x00\u07d4#\xebo\xd8Vq\xa9\x06:\xb7g\x8e\xbe&Z \xf6\x1a\x02\xb3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4#\xf9\xec\xf3\xe5\xdd\u0723\x88\x15\xd3\xe5\x9e\xd3K[\x90\xb4\xa3S\x89\v\x17\x81\xa3\xf0\xbb \x00\x00\u07d4#\xfa~\xb5\x1aH\"\x95\x98\xf9~v+\xe0\x86\x96R\xdf\xfcf\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94$\x03\x05rs\x13\xd0\x1esT,w_\xf5\x9d\x11\xcd5\xf8\x19\x8a\x01A\x88Vf\x80\u007f\\\x80\x00\u07d4$\x04k\x91\u069ba\xb6)\u02cb\x8e\xc0\xc3Q\xa0~\a\x03\xe4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4$\x0eU\x9e'J\xae\xf0\xc2X\x99\x8c\x97\x9fg\x1d\x11s\xb8\x8b\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94$\x13aU\x9f\xee\xf8\x0e\xf170!S\xbd\x9e\xd2\xf2]\xb3\xef\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94$;;\xcaj)\x93Y\xe8\x86\xce3\xa3\x03A\xfa\xfeMW=\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4$<\x84\xd1$ W\f\xc4\xef;\xab\xa1\xc9Y\u0083$\x95 \x89\u007f\x1fi\x93\xa8S\x04\x00\x00\xe0\x94$CJ>2\xe5N\xcf'/\xe3G\v_oQ/gU \x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4$HYo\x91\xc0\x9b\xaa0\xbc\x96\x10j-7\xb5p^](\x89lk\x93[\x8b\xbd@\x00\x00\u0794$Xn\xc5E\x175\xee\xaa\xebG\r\xc8sj\xaeu/\x82\xe5\x88\xf4?\xc2\xc0N\xe0\x00\x00\u07d4$X\xd6U_\xf9\x8a\x12\x9c\xce@7\x95=\x00 n\xffB\x87\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4$b\x91\x16[Y3-\xf5\xf1\x8c\xe5\u0248V\xfa\xe9X\x97\u0589\\(=A\x03\x94\x10\x00\x00\u07d4$g\u01a5\u0196\xed\xe9\xa1\xe5B\xbf\x1a\xd0k\xccK\x06\xac\xa0\x89\x01\x00\xbd3\xfb\x98\xba\x00\x00\u07d4$v\xb2\xbbu\x1c\xe7H\xe1\xa4\xc4\xff{#\v\xe0\xc1]\"E\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4$z\n\x11\xc5\u007f\x03\x83\xb9I\xdeT\vf\xde\xe6\x86\x04\xb0\xa1\x899\xfb\xae\x8d\x04-\xd0\x00\x00\u07d4$\x87\xc3\u013e\x86\xa2r=\x91|\x06\xb4XU\x01p\xc3\xed\xba\x8965\u026d\xc5\u07a0\x00\x00\u07d4$\x89\xac\x12i4\xd4\u05a9M\xf0\x87C\xda{v\x91\xe9y\x8e\x8965\u026d\xc5\u07a0\x00\x00\u07d4$\x9d\xb2\x9d\xbc\x19\xd1#]\xa7)\x8a\x04\b\x1c1WB\u9b09a\xac\xff\x81\xa7\x8a\xd4\x00\x00\u07d4$\xa4\xeb6\xa7\xe4\x98\xc3o\x99\x97\\\x1a\x8dr\x9f\u05b3\x05\u05c9\r\xfcx!\x0e\xb2\xc8\x00\x00\u07d4$\xa7P\xea\xe5\x87G\x11\x11m\xd7\xd4{q\x86\u0399\r1\x03\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4$\xaa\x11Q\xbbv_\xa3\xa8\x9c\xa5\x0e\xb6\xe1\xb1\xc7\x06A\u007f\u0509\xa8\r$g~\xfe\xf0\x00\x00\u0794$\xac\xa0\x8d[\xe8^\xbb\x9f12\xdf\xc1\xb6 
\x82N\xdf\xed\xf9\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4$\xb2\xbe\x11\x8b\x16\u0632\x17Gi\xd1{L\xf8O\a\u0294m\x89lk\x93[\x8b\xbd@\x00\x00\u07d4$\xb8\xb4F\u07bd\x19G\x95]\u0404\xf2\xc5D\x933F\u04ed\x89\xeaim\x90@9\xbd\x80\x00\u07d4$\xb9^\xbe\xf7\x95\x00\xba\xa0\xed\xa7.w\xf8wA]\xf7\\3\x891T\xc9r\x9d\x05x\x00\x00\u07d4$\xb9\xe6dOk\xa4\xcd\xe1&'\r\x81\xf6\xab`\xf2\x86\xdf\xf4\x89\a?u\u0460\x85\xba\x00\x00\u07d4$\xbdY\x04\x05\x90\x91\xd2\xf9\xe1-j&\xa0\x10\xca\"\xab\x14\xe8\x89e\xea=\xb7UF`\x00\x00\u07d4$\xc0\u020bT\xa3TG\t\x82\x8a\xb4\xab\x06\x84\x05Y\xf6\xc5\u2250\xf54`\x8ar\x88\x00\x00\u07d4$\xc1\x17\xd1\u04b3\xa9z\xb1\x1aFy\u025awJ\x9e\xad\xe8\u044965\u026d\xc5\u07a0\x00\x00\u07d4$\xcf\xf0\xe93j\x9f\x80\xf9\xb1\u02d6\x8c\xafk\x1d\x1cI2\xa4\x89\n\xdaUGK\x814\x00\x00\u07d4$\u06aa\xdd\xf7\xb0k\xbc\ua6c0Y\x00\x85\xa8\x85gh+N\x89\x11K \x15\u04bb\xd0\x00\x00\u07d4$\xdc\xc2K\xd9\xc7!\f\xea\u03f3\r\xa9\x8a\xe0JM{\x8a\xb9\x8965\u026d\xc5\u07a0\x00\x00\u07d4$\xf7E\r\xdb\xf1\x8b\x02\x0f\xeb\x1a 2\xd9\xd5Kc>\xdf7\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4$\xfcs\xd2\a\x93\t\x8e\t\u076bW\x98Pb$\xfa\x1e\x18P\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4$\xfd\x9al\x87L/\xab?\xf3n\x9a\xfb\xf8\xce\r2\xc7\u0792\x89Hz\x9a0E9D\x00\x00\u07d4%\n@\xce\xf3 #\x97\xf2@F\x95H\xbe\xb5bj\xf4\xf2<\x89\x05\x03\xb2\x03\xe9\xfb\xa2\x00\x00\u07d4%\niC\av\xf64w\x03\xf9R\x97\x83\x95Za\x97\xb6\x82\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4%\x0e\xb7\xc6o\x86\x9d\xdfI\u0685\xf39>\x98\f\x02\x9a\xa44\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4%\x10j\xb6u]\xf8mkc\xa1\x87p;\f\xfe\xa0\u5520\x89\x01|@Z\xd4\x1d\xb4\x00\x00\xe0\x94%\x18_2Z\xcf-dP\x06\x98\xf6\\v\x9d\xdfh0\x16\x02\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4%\x1c\x12r,hy\"y\x92\xa3\x04\xeb5v\xcd\x18CN\xa5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4%\x1eh8\xf7\xce\u0173\x83\xc1\xd9\x01F4\x12t\xda\xf8\xe5\x02\x89\a\xff\x1c\xcbua\xdf\x00\x00\u07d4%%\x9d\x97Z!\xd8:\xe3\x0e3\xf8\x00\xf5?7\u07e0\x198\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4%({\x81_\\\x828\ns\xb0\xb1?\xba\xf9\x82\xbe$\xc4\u04c9\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94%+eU\xaf\u0700\xf2\xd9m\x97-\x17\u06c4\xeaZ\xd5!\xac\x8a\x01\xab,\xf7\xc9\xf8~ \x00\x00\u07d4%8S)6\x81<\x91\xe6S(O\x01|\x80\u00f8\xf8\xa3o\x89l\x87T\xc8\xf3\f\b\x00\x00\xe0\x94%>2\xb7N\xa4I\n\xb9&\x06\xfd\xa0\xaa%{\xf2=\u02cb\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94%?\x1et*,\uc1b0\u05f3\x06\xe5\xea\xcbl\xcb/\x85T\x8a\x04>^\xde\x1f\x87\x8c \x00\x00\u07d4%A1J\v@\x8e\x95\xa6\x94DIwq*Pq5\x91\xab\x89X\x9e\x1a]\xf4\u05f5\x00\x00\u07d4%L\x1e\xccc\f(w\u0780\x95\xf0\xa8\u06e1\xe8\xbf\x1fU\f\x89\\(=A\x03\x94\x10\x00\x00\u07d4%Z\xbc\x8d\b\xa0\x96\xa8\x8f=j\xb5_\xbcsR\xbd\u0739\u0389\x04t6\x821>\u0780\x00\u07d4%[\xdddt\u0302b\xf2j\"\u00cfE\x94\x0e\x1c\ue99b\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4%`\xb0\x9b\x89\xa4\xaehI\xedZ<\x99XBf1qDf\x89\\(=A\x03\x94\x10\x00\x00\u07d4%a\xa18\xdc\xf8;\xd8\x13\xe0\xe7\xf1\bd+\xe3\xde=o\x05\x8964\xf4\x84\x17@\x1a\x00\x00\u0794%a\xec\x0f7\x92\x18\xfe^\xd4\xe0(\xa3\xf7D\xaaAuLr\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u0794%b\x92\xa1\x91\xbd\xda4\xc4\xdakk\u0591G\xbfu\u2a6b\x88\xc2\xff.\r\xfb\x03\x80\x00\u07d4%i~\xf2\f\u032ap\xd3-7o\x82r\xd9\xc1\a\f=x\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4%o\xa1P\u0307\xb5\x05j\a\xd0\x04\xef\xc8E$s\x9eb\xb5\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4%r\x1c\x87\xb0\xdc!7|r\x00\xe5$\xb1J\"\xf0\xafi\xfb\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4%\x899\xbb\xf0\f\x9d\xe9\xafS8\xf5\xd7\x14\xab\xf6\xd0\xc1\xc6q\x89T\x06\x923\xbf\u007fx\x00\x00\xe0\x94%\x90\x12hp\xe0\xbd\xe8\xa6c\xab\x04\nr\xa5W=\x8dA\u008a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4%\x9e\xc4\xd2e\xf3\xabSk|p\xfa\x97\xac\xa1Bi,\x13\xfc\x89\x01\x1b\x1b[\xea\x89\xf8\x00\x00\xe0\x94%\xa5\x00\xee\xeczf*\x84\x15R\xb5\x16\x8bp{\r\xe2\x1e\x9e\x8a\x02\x1f/o\x0f\xc3\xc6\x10\x00\x00\xe0\x94%\xa5\xa4M8\xa2\xf4Lj\x9d\xb9\u037ck\x1e.\x97\xab\xb5\t\x8a\x03\x99\x92d\x8a#\u0220\x00\x00\u07d4%\xa7L*\xc7]\u023a\xa8\xb3\x1a\x9c|\xb4\xb7\x82\x9b$V\u0689lk\x93[\x8b\xbd@\x00\x00\xe0\x94%\xad\xb8\xf9o9I,\x9b\xb4|^\u0708bNF\aV\x97\x8a\x05\xa9\x94\v\xc5hyP\x00\x00\u07d4%\xae\xe6\x8d\t\xaf\xb7\x1d\x88\x17\xf3\xf1\x84\xecV/x\x97\xb74\x89lk\x93[\x8b\xbd@\x00\x00\u07d4%\xb0S;\x81\xd0*a{\x92)\xc7\xec]o/g.[Z\x8965\u026d\xc5\u07a0\x00\x00\u07d4%\xb7\x8c\x9f\xad\x85\xb43C\xf0\xbf\xcd\x0f\xac\x11\u0254\x9c\xa5\xeb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4%\xbcI\xef(\x8c\xd1e\xe5%\xc6a\xa8\x12\u03c4\xfb\xec\x8f3\x89\x12Y!\xae\xbd\xa9\xd0\x00\x00\u07d4%\xbd\xfa>\xe2o8Ia{#\x00bX\x8a\x97\xe3\xca\xe7\x01\x8965\xe6\x19\xbb\x04\xd4\x00\x00\u07d4%\xc1\xa3~\xe5\xf0\x82e\xa1\xe1\r=\x90\xd5G)U\xf9x\x06\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4%\xc6\xe7O\xf1\xd9(\u07d8\x13z\xf4\u07c40\xdf$\xf0|\u05c9\x15$VU\xb1\x02X\x00\x00\xe0\x94%\xcf\xc4\xe2\\5\xc1;i\xf7\xe7}\xbf\xb0\x8b\xafXuk\x8d\x8a\bxg\x83&\xea\xc9\x00\x00\x00\xe0\x94%\xda\u0515\xa1\x1a\x86\xb9\xee\xec\xe1\xee\xec\x80^W\xf1W\xfa\xff\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\xe0\x94%\xe07\xf0\n\x18'\v\xa5\xec4 \"\x9d\xdb\n,\u33e2\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4%\xe6a\xc99\x86:\xcc\x04No\x17\xb5i\x8c\xce7\x9e\xc3\u0309JD\x91\xbdm\xcd(\x00\x00\u07d4&\x04\x8f\xe8M\x9b\x01\nb\xe71b~I\xbc.\xb7?@\x8f\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4&\x06\u00f3\xb4\xca\x1b\t\x14\x98`,\xb1\x97\x8b\xf3\xb9R!\xc0\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4&\n#\x0eDe\a~\v\x14\xeeDB\xa4\x82\u0570\xc9\x14\xbf\x89Z\xf6\x06\xa0k[\x11\x80\x00\u07d4&\r\xf8\x94:\x8c\x9a]\xbayE2\u007f\xd7\xe0\x83|\x11\xad\a\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4&\x14\xf4-]\xa8D7ux\xe6\xb4H\xdc$0[\xef+\x03\x89lk\x93[\x8b\xbd@\x00\x00\u07d4&\x15\x10\x0e\xa7\xe2[\xba\x9b\xcat`X\xaf\xbb\xb4\xff\xbeBD\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4&\x15u\xe9\xcfY\xc8\"o\xa7\xaa\xf9\x1d\xe8o\xb7\x0fZ\u00ee\x89\x10C\xa4CjR?\x00\x00\xe0\x94&\x1e\x0f\xa6LQ\x13te\xee\xcf[\x90\xf1\x97\xf7\x93\u007f\xdb\x05\x8a\x03\xcf\xc8.7\xe9\xa7@\x00\x00\u07d4&*\x8b\xfd}\x9d\xc5\xdd:\u05c1a\xb6\xbbV\b$76U\x89?j\x83\x84\a+v\x00\x00\xe0\x94&*\xedK\xc0\xf4\xa4\xb2\xc6\xfb5y>\x83ZI\x18\x9c\xdf\xec\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94&-\xc16L\xcfm\xf8\\C&\x8e\xe1\x82UM\xaei.)\x8a\x01\v 
/\xect\xce\xd8\x00\x00\u07d4&8\x140\x9d\xe4\xe65\xcfX^\r6Tw\xfc@\xe6l\xf7\x89\a\xea(2uw\b\x00\x00\u07d4&9\xee\xe9\x87<\xee\xc2o\u0314T\xb5H\xb9\xe7\xc5J\xa6\\\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94&>W\xda\xcb\xe0\x14\x9f\x82\xfee\xa2fH\x98\x86o\xf5\xb4c\x8a\b\v\xfb\xef\xcb_\v\xc0\x00\x00\u07d4>\x19\xc0m_\x14z\xa5\x97$\x8e\xb4l\xf7\xbe\xfad\xa5\x89X\xe7\x92n\xe8X\xa0\x00\x00\u07d4&L\xc8\bj\x87\x10\xf9\x1b!r\t\x05\x91,\u05d6J\xe8h\x89\x01s\x17\x90SM\xf2\x00\x00\xe0\x94&S\x83\u058bR\xd04\x16\x1b\xfa\xb0\x1a\xe1\xb0G\x94/\xbc2\x8a\x04rq\xde\xe2\rt\\\x00\x00\u07d4&Y\xfa\xcb\x1e\x83CeS\xb5\xb4)\x89\xad\xb8\a_\x99S\xed\x89\x01\x97evw\x1a^\x00\x00\xe0\x94&o-\xa7\xf0\b^\xf3\xf3\xfa\t\xba\xee#+\x93\xc7D\xdb.\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4&qH\xfdr\xc5Ob\nY/\xb9'\x991\x9c\xc4S+\\\x89\x169\u46fa\x16(\x00\x00\xe0\x94&xJ\u0791\u0228:\x8e9e\x8c\x8d\x82wA<\u0319T\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4&z~n\x82\xe1\xb9\x1dQ\xde\u0776D\xf0\xe9m\xbb\x1f\u007f~\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4&\x80q=@\x80\x8e*P\xed\x011P\xa2\xa6\x94\xb9j\u007f\x1d\x89a\t=|,m8\x00\x00\u07d4&\x97\xb39\x81;\f-\x96K$q\xeb\x1c`oN\u02d6\x16\x89>\x8e\xf7\x95\u0610\xc8\x00\x00\u07d4&\xa6\x8e\xab\x90Z\x8b=\xce\x00\xe3\x170\x82%\u06b1\xb9\xf6\xb8\x89kV\x05\x15\x82\xa9p\x00\x00\u07d4&\xb1\x1d\x06e\x88\xcet\xa5r\xa8Zc(s\x92\x12\xaa\x8b@\x89lk\x93[\x8b\xbd@\x00\x00\u07d4&\xba\xbfB\xb2g\xfd\xcf8a\xfd\xd4#j^GHH\xb3X\x8965\u026d\xc5\u07a0\x00\x00\u07d4&\xc0\x05Kp\r:|-\xcb\xe2uh\x9dOL\xad\x16\xa35\x89lk\x93[\x8b\xbd@\x00\x00\u07d4&\xc2\xff\xc3\x0e\xfd\xc5'>v\x18:\x16\xc2i\x8dnS\x12\x86\x89*\x11)\u0413g \x00\x00\u07d4&\u025f\x88I\u0240+\x83\xc8a!\u007f\xd0z\x9e\x84\u0377\x9d\x89\x10CV\x1a\x88)0\x00\x00\u07d4&\xcf\xff\xd0R\x15+\xb3\xf9W\xb4x\xd5\xf9\x8b#:|+\x92\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4&\u0521h\x91\xf5)\"x\x92\x17\xfc\u0606\xf7\xfc\xe2\x96\xd4\x00\x89lk\x93[\x8b\xbd@\x00\x00\u07d4&\xd4\xec\x17\xd5\u03b2\u0214\xbd\u015d\nji]\xad+C\u0309\x9f\x1fxv\x1d4\x1a\x00\x00\u07d4&\xe8\x01\xb6,\x82q\x91\xddh\xd3\x1a\x01\x19\x90\x94\u007f\xd0\xeb\xe0\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4&\xe9\xe2\xadr\x97\x02bd\x17\xef%\xde\r\xc8\x00\xf7\xa7y\xb3\x8965\u026d\xc5\u07a0\x00\x00\u07d4&\xf9\xf7\xce\xfd~9K\x9d9$A+\xf2\u0083\x1f\xaf\x1f\x85\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94&\xfe\x17L\xbfRfP\xe0\xcd\x00\x9b\xd6\x12e\x02\u038ehM\x8a\x02w\x01s8\xa3\n\xe0\x00\x00\xe0\x94&\xff\nQ\xe7\xce\u0384\x00'ix\xdb\xd6#n\xf1b\xc0\xe6\x8a\x15.\x18V'T\nP\x00\x00\u07d4'\x10\x1a\x0fV\u04da\x88\u0168O\x9b2L\xdd\xe3>\\\xb6\x8c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4'\x14L\xa9\xa7w\x1a\x83j\xd5\x0f\x80?d\xd8i\xb2\xae+ \x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4'\x14i\x13V:\xa7E\xe2X\x840\xd94\x8e\x86\xea|5\x10\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4'\x1d=H\x1c\xb8\x8evq\xad!iI\xb66^\x060=\xe0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4' \xf9\xcaBn\xf2\xf2\xcb\xd2\xfe\xcd9\x92\fO\x1a\x89\xe1m\x89lk\x93[\x8b\xbd@\x00\x00\u07d4'*\x13\x1aZejz:\xca5\u023d \"\"\xa7Y\"X\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4'D\xffgFA!\xe3Z\xfc)\"\x17qd\xfa/\xcb\x02g\x89\x05k\xc7^-c\x10\x00\x00\u07d4'J=w\x1a=p\x97\x96\xfb\xc4\xd5\xf4\x8f\xce/\xe3\x8cy\u0589\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4'Mi\x17\x0f\xe7\x14\x14\x01\x88+\x88j\xc4a\x8cj\xe4\x0e\u06c93\xc5I\x901r\f\x00\x00\u07d4'R\x1d\xeb;n\xf1An\xa4\u01c1\xa2\xe5\u05f3n\xe8\x1ca\x89lk\x93[\x8b\xbd@\x00\x00\u07d4'Xu\xffO\xbb\f\xf3\xa40!1'H\u007fv\b\xd0L\xba\x89\x1b\x1c\x01\x0evmX\x00\x00\u07d4'j\x00n0(\xec\xd4L\xdbb\xba\nw\u0394\xeb\xd9\xf1\x0f\x89a\x94\x04\x9f0\xf7 
\x00\x00\u07d4'k\x05!\xb0\xe6\x8b'}\xf0\xbb2\xf3\xfdH2cP\xbf\xb2\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4'o\xd7\xd2O\x8f\x88?Zz()[\xf1qQ\u01e8K\x03\x89lk\x93[\x8b\xbd@\x00\x00\u07d4'p\xf1N\xfb\x16]\u07bay\xc1\v\xb0\xaf1\xc3\x1eY3L\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4'vw\xab\xa1\xe5,;S\xbf\xa2\a\x1dN\x85\x9a\n\xf7\xe8\xe1\x8965\u026d\xc5\u07a0\x00\x00\u07d4'\x82Ff\xd2x\xd7\x04#\xf0=\xfe\x1d\u01e3\xf0/C\u2d4966\xc2^f\xec\xe7\x00\x00\u07d4'\x83\f_`#\xaf\xaa\xf7\x97Egl J\x0f\xac\u0360\xba\x89\r\x02\xabHl\xed\xc0\x00\x00\xe0\x94'\x84\x90?\x1d|\x1b\\\xd9\x01\xf8\x87]\x14\xa7\x9b<\xbe*V\x8a\x04\xbd\xa7\xe9\xd7J\xd5P\x00\x00\u07d4'\x8c\v\xdec\x0e\u00d3\xb1\xe7&\u007f\xc9\xd7\xd9p\x19\xe4\x14[\x89lk\x93[\x8b\xbd@\x00\x00\u07d4'\x98q\x10\"\x1a\x88\b&\xad\xb2\xe7\xab^\xcax\xc6\xe3\x1a\xec\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94'\xac\a;\xe7\x9c\xe6W\xa9:\xa6\x93\xeeC\xbf\x0f\xa4\x1f\xef\x04\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4'\xb1iN\xaf\xa1e\xeb\xd7\xcc{\u025et\x81J\x95\x14\x19\u0709+^:\xf1k\x18\x80\x00\x00\u07d4'\xb6(\x16\xe1\xe3\xb8\u045by\xd1Q=]\xfa\x85[\f:*\x89\x05j\xf5\xc1\xfdiP\x80\x00\u07d4'\xbf\x94<\x163\xfe2\xf8\xbc\xcc\xdbc\x02\xb4\a\xa5rND\x892\xf8Lm\xf4\b\xc0\x80\x00\u07d4'\xbf\x9fD\xba}\x05\xc35@\u00e5;\xb0,\xbb\xff\xe7\xc3\u0189lk\x93[\x8b\xbd@\x00\x00\u07d4'\xc2\xd7\xcaPM\xaa=\x90f\xdc\t\x13}\xc4/:\xaa\xb4R\x89 \x86\xac5\x10R`\x00\x00\u07d4'\xd1X\xac=>\x11\t\xabnW\x0e\x90\xe8]8\x92\xcdv\x80\x89\x05k\xc7^-c\x10\x00\x00\u07d4'\xe69\x89\xca\x1e\x90;\xc6 \xcf\x1b\x9c?g\xb9\xe2\xaee\x81\x89Hz\x9a0E9D\x00\x00\xe0\x94'\xf0<\xf1\xab\xc5\xe1\xb5\x1d\xbcDK(\x9eT,\x9d\u07f0\xe6\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4'\xfc\x85\xa4\x9c\xff\x90\xdb\xcf\xda\u071d\xdd@\u05b9\xa2!\nl\x89\x05k\xc7^-c\x10\x00\x00\u07d4(\x05A^\x1d\u007f\xde\xc6\xde\u07f8\x9eR\x1d\x10Y-t<\x10\x89\x05k\xc7^-c\x10\x00\x00\u07d4(\a>\xfc\x17\xd0\\\xab1\x95\xc2\xdb3+a\x98Gw\xa6\x12\x8965\u026d\xc5\u07a0\x00\x00\u07d4(\x12P\xa2\x91!'\nN\xe5\u05cd$\xfe\xaf\xe8,p\xba:\x8965\u026d\xc5\u07a0\x00\x00\u07d4(\x13\xd2c\xfc_\xf2G\x9e\x97\x05\x95\u05b6\xb5`\xf8\xd6\xd6\u0449lk\x93[\x8b\xbd@\x00\x00\u07d4(.\x80\xa5T\x87ZVy\x9f\xa0\xa9\u007fU\x10\u7557LN\x8965\u026d\xc5\u07a0\x00\x00\u07d4(3\x96\xce<\xac9\x8b\xcb\xe7\"\u007f2>x\xff\x96\u0407g\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4(4\x9f~\xf9t\xeaU\xfe6\xa1X;4\xce\xc3\xc4Pe\xf0\x89\f\xb63\u051eeY\x00\x00\u07d4(6\x120F\xb2\x84\xe5\xef\x10+\xfd\"\xb1v^P\x81\x16\xad\x89\x16S\xfb\xb5\xc4'\xe4\x00\x00\u07d4(<#\x14(<\x92\u0530d\xf0\xae\xf9\xbbRF\xa7\x00\u007f9\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4(>\x11 
7I\xb1\xfaO2\xfe\xbbq\xe4\x9d\x13Y\x198*\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94(>bR\xb4\xef\xcfFT9\x1a\xcbu\xf9\x03\u015bx\xc5\xfb\x8a\x02\x8a\x85t%Fo\x80\x00\x00\xe0\x94(Q\x0en\xff\x1f\xc8)\xb6WoC(\xbc98\xecze\x80\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4(X\xac\xac\xaf!\xea\x81\u02b7Y\x8f\xdb\xd8kE.\x9e\x8e\x15\x89$\x1a\x9bOaz(\x00\x00\u07d4(Z\xe5\x1b\x95\x00\u014dT\x13e\xd9ui\xf1K\xb2\xa3p\x9b\x89lk\x93[\x8b\xbd@\x00\x00\u07d4(f\xb8\x1d\xec\xb0.\xe7\n\xe2P\xce\xe5\xcd\xc7{Y\u05f6y\x89lk\x93[\x8b\xbd@\x00\x00\u07d4(i\x06\xb6\xbdIr\xe3\xc7\x16U\xe0K\xaf6&\f|\xb1S\x89\x12nr\xa6\x9aP\xd0\x00\x00\u07d4(k\x18ma\xea\x1f\u05cd\x990\xfe\x12\xb0e7\xb0\\=Q\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94(t\xf3\xe2\x98]_{@f'\xe1{\xaaw+\x01\xab\u031e\x8a\x01F\x05\x04\x10v_8\x00\x00\xe0\x94(|\xf9\u0410.\xf8\x19\xa7\xa5\xf1ID[\xf1w^\xe8\xc4|\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4(\x81\x8e\x18\xb6\x10\x00\x13!\xb3\x1d\xf6\xfe}(\x15\u036d\xc9\xf5\x8965\u026d\xc5\u07a0\x00\x00\u07d4(\x86\x83$3~\x11\xba\x10l\xb4\x81\u0696/:\x84S\x80\x8d\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94(\x90K\xb7\xc40)C\xb7\t\xb1Myp\xe4+\x83$\u184a\x02\x1f\x97\x84j\a-~\x00\x00\u07d4(\x95\xe8\t\x99\xd4\x06\xadY.+&'7\xd3_}\xb4\xb6\x99\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4(\x96r\x80!N!\x8a\x12\f]\xda7\x04\x1b\x11\x1e\xa3mt\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4(\xa3\xda\t\xa8\x19H\x19\xae\x19\x9f.m\x9d\x13\x04\x81~(\xa5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4(\xab\x16_\xfbi\xed\xa0\xc5I\xae8\xe9\x82o_\u007f\x92\xf8S\x89FM\xf6\xd7\xc8DY\x00\x00\u07d4(\xb7u\x85\xcb=U\xa1\x99\xab)\x1d:\x18\u018f\u8684\x8a\x89j@v\xcfy\x95\xa0\x00\x00\xe0\x94(\xd4\xeb\xf4\x1e=\x95\xf9\xbb\x9a\x89u#\\\x1d\x009>\x80\x00\u07d4)\nV\xd4\x1fn\x9e\xfb\xdc\xea\x03B\u0dd2\x9a\x8c\xdf\xcb\x05\x89\x12\xa5\xf5\x81h\xee`\x00\x00\u07d4)\x15bK\xcbg\x917\xb8\xda\xe9\xabW\xd1\x1bI\x05\xea\xeeK\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4)\x1e\xfe\x00\x81\xdc\xe8\xc1G\x99\xf7\xb2\xa46\x19\xc0\u00f3\xfc\x1f\x89A\rXj \xa4\xc0\x00\x00\u07d4)\x1f\x92\x9c\xa5\x9bT\xf8D>=Mu\xd9]\xee$<\xefx\x89\x1b\x1a\b\x927\a=\x00\x00\xe0\x94))\x8c\xcb\xdf\xf6\x89\xf8\u007f\xe4\x1a\xa6\xe9\x8f\u07f5=\xea\xf3z\x8a\x041\\2\xd7\x1a\x9e`\x00\x00\u07d4)/\"\x8b\n\x94t\x8c\x8e\xeca-$o\x98\x93c\xe0\x8f\b\x89\n\ad\a\xd3\xf7D\x00\x00\u07d4)3\x84\xc4+o\x8f)\x05\xceR\xb7 \\\"t7la+\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4)4\xc0\xdf{\xbc\x17+l\x18k\vrTz\u038b\xf7TT\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4)<#\x06\xdf6\x04\xaeO\xda\r z\xbasog\xde\a\x92\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94)I\xfd\x1d\xef\\v\xa2\x86\xb3\x87$$\x80\x9a\a\xdb9f\xf3\x8a\x01\x1b\xd9\x06\u06a0\xc9C\x80\x00\u07d4)OIK?.\x14\xa3\xf8\xab\x00\x00\x00\u07d4)U\xc3W\xfd\x8fu\xd5\x15\x9a=\xfai\u0178z5\x9d\ua309lk\x93[\x8b\xbd@\x00\x00\u07d4)a\xfb9\x1ca\x95|\xb5\xc9\xe4\a\u0762\x938\u04f9,\x80\x8964\xfb\x9f\x14\x89\xa7\x00\x00\u07d4)h\x1d\x99\x12\xdd\xd0~\xaa\xbb\x88\xd0]\x90\xf7f\xe8bA}\x8965\u026d\xc5\u07a0\x00\x00\u07d4)kq\xc0\x01X\x19\xc2B\xa7\x86\x1eo\xf7\xed\xed\x8a_q\xe3\x89lh\xcc\u041b\x02,\x00\x00\u07d4)mf\xb5!W\x1aNA\x03\xa7\xf5b\xc5\x11\xe6\xaas-\x81\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4)o\x00\xde\x1d\u00fb\x01\xd4z\x8c\xcd\x1e]\x1d\u0661\xebw\x91\x8965\u026d\xc5\u07a0\x00\x00\u07d4)s\x85\xe8\x864FV\x85\xc21\xa3\x14\xa0\xd5\xdc\xd1F\xaf\x01\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4)v=\xd6\u069a|\x16\x11s\x88\x83!\ub9b6<\x8f\xb8E\x89\x11\xc7\xea\x16.x \x00\x00\u07d4)yt\x11t\xa8\xc1\xea\v\u007f\x9e\xdfe\x81w\x85\x94\x17\xf5\x12\x89\x19\x01\x96l\x84\x96\x83\x80\x00\u07d4)z\x88\x92\x1b_\xca\x10\u5edd\xed`\x02T7\xae\"\x16\x94\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94)}]\xbe\"//\xb5%1\xac\xbd\v\x01=\xc4F\xacsh\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4)\x82N\x94\xccCH\xbc\x962y\xdc\xdfG9\x17\x152L\u04c9i*\xe8\x89p\x81\xd0\x00\x00\u07d4)\x82\xd7j\x15\xf8G\xddA\xf1\x92*\xf3h\xfeg\x8d\x0eh\x1e\x89\x05k\xc7^-c\x10\x00\x00\u07d4)\x88\x87\xba\xb5|[\xa4\xf0aR)\xd7R_\xa1\x13\xb7\ua249\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94)\x8e\xc7kD\r\x88\a\xb3\xf7\x8b_\x90\x97\x9b\xeeB\xedC\u06ca\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4)\x93h`\x90B\xa8X\xd1\xec\xdf\x1f\xc0\xad\xa5\xea\xce\xca)\u03c9lk\x93[\x8b\xbd@\x00\x00\u07d4)\x9e\v\xcaU\xe0i\u0785\x04\xe8\x9a\xcan\xca!\u04ca\x9a]\x89\x03\x027\x9b\xf2\xca.\x00\x00\u07d4)\xac+E\x84T\xa3l~\x96\xc7:\x86g\"*\x12$,q\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94)\xad\u03c3\xb6\xb2\n\u01a44\xab\xb1\x99<\xbd\x05\xc6\x0e\xa2\xe4\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94)\xae\xf4\x8d\xe8\xc9\xfb\xadK\x9eL\xa9pyzU3\xebr-\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4)\xb3\xf5a\xeezn%\x94\x1e\x98\xa52[x\xad\u01d7\x85\xf3\x89\x05k\xc7^-c\x10\x00\x00\u07d4)\xbd\xc4\xf2\x8d\xe0\x18\x0fC<&\x94\xebt\xf5PL\xe9C7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4)\u0300M\x92+\xe9\x1fY\t\xf3H\xb0\xaa\xa5\xd2\x1b`x0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94)\xda>5\xb2;\xb1\xf7/\x8e\"X\xcf\u007fU3Y\xd2K\xac\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94)\xe6y\x90\xe1\xb6\xd5.\x10U\xff\xe0I\xc51\x95\xa8\x15B\u03ca\x04<3\xc1\x93ud\x80\x00\x00\u07d4)\uab82v\x17b\xf4\xd2\xdbS\xa9\u018b\x0fk\vmNf\x89lk\x93[\x8b\xbd@\x00\x00\u07d4)\xeb~\xef\xda\xe9\xfe\xb4I\xc6?\xf5\xf2y\xd6u\x10\xeb\x14\"\x89\x01\r:\xa56\xe2\x94\x00\x00\u07d4)\xf0\xed\xc6\x038\xe7\x11 \x85\xa1\xd1\x14\u068cB\u038fU\u0589\xa0Z\u007f\x0f\xd8%x\x00\x00\u07d4)\xf8\xfb\xa4\xc3\ar\xb0W\xed\xbb\xe6*\xe7B\f9\x05r\xe1\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94)\xf9(l\x0es\x8d\x17!\xa6\x91\u01b9Z\xb3\u0667\x97\xed\xe8\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4*\b^%\xb6Hb\xf5\xe6\x8dv\x8e+\x0fz\x85)\x85\x8e\xee\x89k\x88:\xcdWf\xcd\x00\x00\u07d4**\xb6\xb7Lz\xf1\xd9Gk\xb5\xbc\xb4RG\x97\xbe\xdc5R\x8965\u026d\xc5\u07a0\x00\x00\u07d4*9\x19\nO\u0783\u07f3\xdd\xcbL_\xbb\x83\xaclIu\\\x8965\u026d\xc5\u07a0\x00\x00\u07d4*@\r\xff\x85\x94\xder(\xb4\xfd\x15\xc3#\"\xb7[\xb8}\xa8\x89\x051\xa1\u007f`z-\x00\x00\xe0\x94*D\xa7!\x8f\xe4Me\xa1\xb4\xb7\xa7\u0671\xc2\xc5,\x8c>4\x8a\r-\x06\xc3\x05\xa1\xebW\x80\x00\u07d4*F\xd3Swqv\xff\x8e\x83\xff\xa8\x00\x1fOp\xf9s:\xa5\x89\x05\xbf\v\xa6cOh\x00\x00\u07d4*Y_\x16\xee\xe4\xcb\f\x17\u0662\xd99\xb3\xc1\x0flgrC\x89;\xa1\x91\v\xf3A\xb0\x00\x00\u07d4*Y\xe4~\xa5\xd8\xf0\xe7\xc0(\xa3\xe8\xe0\x93\xa4\x9c\x1bP\xb9\xa3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4*[\xa9\xe3L\u054d\xa5L\x9a'\x12f:;\xe2t\xc8\xe4{\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94*^:@\xd2\xcd\x03%vm\xe7:=g\x18\x96\xb3b\xc7;\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94*cY\x0e\xfe\x99\x86\xc3\xfe\xe0\x9b\n\n3\x8b\x15\xbe\xd9\x1f!\x8a\x01^\x1cN\x05\xee&\xd0\x00\x00\u07d4*gf\n\x13h\xef\xcdbn\xf3k+\x1b`\x19\x80\x94\x1c\x05\x89\a?u\u0460\x85\xba\x00\x00\u07d4*t+\x89\x10\x94\x1e\t2\x83\n\x1d\x96\x92\xcf\u0484\x94\xcf@\x89\x1b\x1a\xb3\x19\xf5\xecu\x00\x00\u07d4*tl\xd4@'\xaf>\xbd7\xc3x\xc8^\xf7\xf7T\xab_(\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4*\x81\xd2|\xb6\xd4w\x0f\xf4\xf3\u0123\xba\x18\xe5\xe5\u007f\aQ|\x89lk\x93[\x8b\xbd@\x00\x00\u07d4*\x91\xa9\xfe\xd4\x1b}\x0e\\\xd2\xd81X\xd3\xe8\xa4\x1a\x9a-q\x89i*\xe8\x89p\x81\xd0\x00\x00\xe0\x94*\x9cW\xfe{k\x13\x8a\x92\rgo{\x1a%\x10\x80\xff\xb9\x8a4\xf0\x86\xf3\xb3;h@\x00\x00\u07d4+p\x1d\x16\xc0\xd3\xcc\x1eL\xd8TE\xe6\xad\x02\ue92c\x01-\x89 
\x86\xac5\x10R`\x00\x00\xe0\x94+q|\xd42\xa3#\xa4e\x909\x84\x8d;\x87\xde&\xfc\x95F\x8ai\xe1\r\xe7fv\u0400\x00\x00\u07d4+t\xc3s\xd0K\xfb\x0f\xd6\n\x18\xa0\x1a\x88\xfb\xe8Gp\u5309\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4+w\xa4\u060c\rV\xa3\xdb\xe3\xba\xe0J\x05\xf4\xfc\u0477W\xe1\x89\x10CV\x1a\x88)0\x00\x00\xe0\x94+\x84\x88\xbd-<\x19z=&\x15\x18\x15\xb5\xa7\x98\xd2qh\u070a\x01j\x1f\x9f_\xd7\xd9`\x00\x00\u07d4+\x8a\r\xee\\\xb0\xe1\xe9~\x15\xcf\xcan\x19\xad!\xf9\x95\ufb49\x1bUC\x8d\x9a$\x9b\x00\x00\xe0\x94+\x8f\xe4\x16n#\xd1\x19c\xc0\x93+\x8a\u078e\x01E\xea\ap\x8a\t(\x96R\x9b\xad\u0708\x00\x00\xe0\x94+\x99\xb4.OBa\x9e\xe3k\xaa~J\xf2\xd6^\xac\xfc\xba5\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4+\xab\x0f\xbe(\u0544 \xb5 6w\n\x12\xf9\x95*\xeai\x11\x89\xcf\x15&@\xc5\xc80\x00\x00\u07d4+\xad\xe9\x1d\x15E\x17b\x0f\u05349\xac\x97\x15zA\x02\xa9\xf7\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4+\xaf\x8dn\"\x11t\x12H \xeeI+\x94Y\xecO\xad\xaf\xbb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4+\xaf\xbf\x9e\x9e\xd2\xc2\x19\xf7\xf2y\x13t\xe7\xd0\\\xb0gw\xe7\x89\v\xed\x1d\x02c\xd9\xf0\x00\x00\xe0\x94+\xb3f\xb9\xed\xcb\r\xa6\x80\xf0\xe1\v;n(t\x81\x90\xd6\u00ca\x01:b\u05f5v@d\x00\x00\xe0\x94+\xb6\xf5x\xad\xfb\u7ca1\x16\xb3UO\xac\xf9\x96\x98\x13\xc3\x19\x8a\x01\x91'\xa19\x1e\xa2\xa0\x00\x00\u07d4+\xbeb\xea\xc8\f\xa7\xf4\xd6\xfd\xee~}\x8e(\xb6:\xcfw\x0e\x89\x81\xe3-\xf9r\xab\xf0\x00\x00\u07d4+\xbeg*\x18WP\x8fc\x0f*^\xdbV=\x9e\x9d\xe9(\x15\x89lk\x93[\x8b\xbd@\x00\x00\u07d4+\xc4)\xd6\x18\xa6jL\xf8-\xbb-\x82N\x93V\xef\xfa\x12j\x89lj\xccg\u05f1\xd4\x00\x00\u07d4+\xd2R\xe0\xd72\xff\x1d|x\xf0\xa0.l\xb2T#\xcf\x1b\x1a\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4+\xdd\x03\xbe\xbb\xee';l\xa1\x05\x9b4\x99\x9a[\xbda\xbby\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4,\x04\x11\\>R\x96\x1b\r\xc0\xb0\xbf1\xfb\xa4ToYf\xfd\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94,\x06\u0752+aQJ\xaf\xed\xd8D\x88\xc0\u008em\xcf\x0e\x99\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94,\f\xc3\xf9QH,\u0222\x92X\x15hN\xb9\xf9N\x06\x02\x00\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4,\x0e\xe14\u0633aE\xb4{\xee\u7bcd'8\xdb\xda\b\xe8\x89\n\xe5os\x0em\x84\x00\x00\u07d4,\x0f[\x9d\xf46%y\x8e~\x03\xc1\xa5\xfdjm\t\x1a\xf8+\x89\x01\xb0\xfc\xaa\xb2\x000\x00\x00\u07d4,\x12\x8c\x95\xd9W!Q\x01\xf0C\u074f\u0142EmA\x01m\x89-C\xf3\xeb\xfa\xfb,\x00\x00\u07d4,\x18\x00\xf3_\xa0->\xb6\xff[%(_^J\xdd\x13\xb3\x8d\x891\"\u04ed\xaf\xde\x10\x00\x00\u07d4,\x1c\x19\x11N=m\xe2xQHK\x8d'\x15\xe5\x0f\x8a\x10e\x89\x05k\xc7^-c\x10\x00\x00\u07d4,\x1c\xc6\xe1\x8c\x15$\x88\xba\x11\xc2\xcc\x1b\xce\xfa-\xf3\x06\xab\u0449Z\x87\xe7\xd7\xf5\xf6X\x00\x00\xe0\x94,\x1d\xf8\xa7oH\xf6\xb5K\u03dc\xafV\xf0\xee\x1c\xf5z\xb3=\x8a\x02$\u007fu\x00\x89\xdaX\x00\x00\u07d4,!G\x94z\xe3?\xb0\x98\xb4\x89\xa5\xc1k\xff\xf9\xab\xcdN*\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4,#OP\\\xa8\xdc\xc7}\x9b~\x01\xd2W\xc3\x18\xcc\x199m\x89\x05k\xc7^-c\x10\x00\x00\u07d4,$(\xe4\xa6it\xed\xc8\"\xd5\xdb\xfb$\x1b'(\aQX\x89lk\x93[\x8b\xbd@\x00\x00\u07d4,-\x15\xff9V\x1c\x1br\xed\xa1\xcc\x02\u007f\xfe\xf27C\xa1D\x89\u0500\xed\x9e\xf3+@\x00\x00\u07d4,-\xb2\x8c3\t7^\xea1\x82\x1b\x84\xd4\b\x93\x0e\xfa\x1a\u01c9lk\x93[\x8b\xbd@\x00\x00\u07d4,Z-\n\xbd\xa0;\xbe!W\x81\xb4\xff)l\x8ca\xbd\xba\xf6\x89\x01\xa8\xe5oH\xc0\"\x80\x00\u07d4,[}{\x19Z7\x1b\xf9\xab\u0774/\xe0O/\x1d\x9a\x99\x10\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4,]\xf8ffj\x19K&\u03bb@~J\x1f\xd7> 
\x8d^\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94,`?\xf0\xfe\x93alCW>\xf2y\xbf\xea@\x88\x8dj\xe7\x8a\x01\x00\xf4\xb6\xd6gW\x90\x00\x00\xe0\x94,hF\xa1\xaa\x99\x9a\"F\xa2\x87\x05`\x00\xbaM\u02e8\xe6=\x8a\x02\x1f/o\x0f\xc3\xc6\x10\x00\x00\u0794,j\xfc\xd4\x03|\x1e\xd1O\xa7O\xf6u\x8e\tE\xa1\x85\xa8\xe8\x88\xf4?\xc2\xc0N\xe0\x00\x00\u07d4,ki\x9d\x9e\xad4\x9f\x06\u007fEq\x1a\aJd\x1d\xb6\xa8\x97\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4,o\\\x12L\u01c9\xf8\xbb9\x8e?\x88\x97Q\xbcK`-\x9e\x89\x01Y\xf2\v\xed\x00\xf0\x00\x00\u07d4,\x83\xae\xb0/\xcf\x06}e\xa4p\x82\xfd\x97x3\xab\x1c\uc449\b'8#%\x8a\xc0\x00\x00\xe0\x94,\x89\xf5\xfd\xca=\x15T\t\xb68\xb9\x8at.U\xebFR\xb7\x8a\x14\u06f2\x19\\\xa2(\x90\x00\x00\u07d4,\x96HI\xb1\xf6\x9c\xc7\u03a4D%8\xed\x87\xfd\xf1l\xfc\x8f\x89lk\x93[\x8b\xbd@\x00\x00\u0794,\x9f\xa7,\x95\xf3}\b\xe9\xa3`\t\u7930\u007f)\xba\xd4\x1a\x88\xdfn\xb0\xb2\xd3\xca\x00\x00\u07d4,\xafk\xf4\xec}Z\x19\xc5\xe0\x89z^\xeb\x01\x1d\xce\xceB\x10\x89\a\x93H5\xa01\x16\x00\x00\u07d4,\xb4\xc3\xc1k\xb1\xc5^|kz\x19\xb1'\xa1\xac\x93\x90\xcc\t\x89\xb8'\x94\xa9$O\f\x80\x00\xe0\x94,\xb5IZPS6\xc2FT\x10\xd1\xca\xe0\x95\xb8\xe1\xba\\\u074a\x04<3\xc1\x93ud\x80\x00\x00\u07d4,\xb6\x15\a:@\xdc\u06d9\xfa\xa8HW.\x98{;\x05n\xfb\x89+X\xad\u06c9\xa2X\x00\x00\u07d4,\xbam]\r\xc2\x04\xea\x8a%\xad\xa2\xe2oVu\xbd_/\u0709H#\xef}\u06da\xf3\x80\x00\u07d4,\xbb\fs\u07d1\xb9\x17@\xb6i;wJ}\x05\x17~\x8eX\x89dI\xe8NG\xa8\xa8\x00\x00\u07d4,\xcbfIM\n\xf6\x89\xab\xf9H=6]x$D\xe7\u07ad\x8965\u026d\xc5\u07a0\x00\x00\u07d4,\xcc\x1f\x1c\xb5\xf4\xa8\x00.\x18k \x88]\x9d\xbc\x03\f\b\x94\x89lk\x93[\x8b\xbd@\x00\x00\u07d4,\u03c0\xe2\x18\x98\x12^\xb4\xe8\a\u0342\xe0\x9b\x9d(Y/n\x89lk\x93[\x8b\xbd@\x00\x00\u07d4,\u0456\x94\u0452j\x0f\xa9\x18\x9e\u07ba\xfcg\x1c\xf1\xb2\u02a5\x8965\u026d\xc5\u07a0\x00\x00\u07d4,\u04d34\xac~\xacyrW\xab\xe3sa\x95\xf5\xb4\xb5\xce\x0f\x89\x05kGx^7&\x00\x00\u07d4,\u05de\xb5 '\xb1,\x18\x82\x8e>\xaa\xb2\x96\x9b\xfc\u0487\xe9\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4,\xd8xfV\x8d\xd8\x1a\xd4}\x9d:\u0404nZePss\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4,\xdb9De\x06\x16\xe4|\xb1\x82\xe0`2/\xa1Hyx\u0389b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4,\xe1\x1a\x92\xfa\xd0$\xff+>\x87\xe3\xb5B\xe6\xc6\r\xcb\u0656\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4-\x03&\xb2?\x04\t\xc0\xc0\xe9#hc\xa13\aZ\x94\xba\x18\x89\vg\x9b\xe7[\xe6\xae\x00\x00\u07d4-\r\xecQ\xa6\xe8s0\xa6\xa8\xfa*\x0fe\u060dJ\xbc\xdfs\x89\n\ad\a\xd3\xf7D\x00\x00\u07d4-#vkok\x05s}\xad\x80\xa4\x19\xc4\x0e\xdaMw\x10>\x89\xcf\x15&@\xc5\xc80\x00\x00\u07d4-+\x03#Y\xb3c\x96O\xc1\x1aQ\x82c\xbf\xd0T1\xe8g\x89\b\x1c\x1d\xf7b\x9ep\x00\x00\u07d4-4\x80\xbf\be\aJr\xc7u\x9e\xe5\x13{Mp\xc5\x1c\xe9\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4-5\xa9\xdfbu\u007f\u007f\xfa\xd1\x04\x9a\xfb\x06\xcaJ\xfcFLQ\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4-@U\x8b\x06\xf9\n9#\x14U\x92\x12;gt\xe4n1\xf4\x8965\u026d\xc5\u07a0\x00\x00\u07d4-Bi\x12\xd0Y\xfa\xd9t\v.9\n.\xea\xc0To\xf0\x1b\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4-S-\xf4\xc69\x11\xd1\u0391\xf6\xd1\xfc\xbf\xf7\x96\x0fx\xa8\x85\x89Z\x85\x96\x8aXx\u0680\x00\u07d4-S\x91\xe98\xb3HX\u03d6[\x84\x051\xd5\xef\xdaA\v\t\x89K\xe4\xe7&{j\xe0\x00\x00\xe0\x94-[B\xfcY\xeb\xda\r\xfdf\xae\x91K\u008c\x1b\nn\xf8:\x8a+\u0235\x9f\xdc\xd86c\x80\x00\u07d4-]s5\xac\xb06+G\u07e3\xa8\xa4\xd3\xf5\x94\x95D\u04c0\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94-a\xbf\xc5hs\x92<+\x00\t]\xc3\xea\xa0\xf5\x90\u062e\x0f\x8a\x04ef\xdf\xf8\xceU`\x00\x00\u07d4-e\x11\xfdz8\x00\xb2hT\xc7\xec9\xc0\u0735\xf4\xc4\xe8\xe8\x89\x15\xad\u077a/\x9ew\x00\x00\u07d4-}\\@\u076f\xc4P\xb0Jt\xa4\u06bc+\xb5\xd6e\x00.\x89lk\x93[\x8b\xbd@\x00\x00\u07d4-\x89\xa8\x00jO\x13z \xdc+\xecF\xfe.\xb3\x12\xea\x96T\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4-\x8cR2\x9f8\u04a2\xfa\x9c\xba\xf5\u0143\xda\xf1I\v\xb1\x1c\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4-\x8e\x06\x18\x92\xa5\xdc\xce!\x96j\xe1\xbb\a\x88\xfd>\x8b\xa0Y\x89\r\x8e\\\xe6\x17\xf2\xd5\x00\x00\u07d4-\x8e[\xb8\xd3R\x16\x95\xc7~|\x83N\x02\x91\xbf\xac\xeet\b\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4-\x90\xb4\x15\xa3\x8e.\x19\xcd\xd0/\U000ed069z\xf7\xcb\xf6r\x89\x05\xf3\xc7\xf6A1\xe4\x00\x00\u07d4-\x9b\xado\x1e\xe0*p\xf1\xf1=\xef\\\u0332z\x9a'@1\x89a\t=|,m8\x00\x00\u07d4-\x9c_\xec\u04b4O\xbbj\x1e\xc72\xea\x05\x9fO\x1f\x9d+\\\x896\xca2f\x1d\x1a\xa7\x00\x00\xe0\x94-\xa6\x17iP\t\xccW\xd2j\u0510\xb3*]\xfb\xeb\x93N^\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4-\xa7k|9\xb4 \u323a,\x10 \xb0\x85k\x02pd\x8a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4-\u01ddn\u007fU\xbc\xe2\xe2\xd0\xc0*\xd0|\uca3bR\x93T\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\xe0\x94-\xca\x0eD\x9a\xb6F\xdb\xdf\u04d3\xa9fb\x96\v\u02b5\xae\x1e\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4-\xd3%\xfd\xff\xb9{\x19\x99R\x84\xaf\xa5\xab\xdbWJ\x1d\xf1j\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4-\xd5x\xf7@}\xfb\xd5H\xd0^\x95\xcc\u00dcHT)bj\x89\u3bb5sr@\xa0\x00\x00\u07d4-\xd8\xee\xef\x87\x19J\xbc,\xe7X]\xa1\xe3[|\xeax\f\xb7\x8965\xc6 G9\u0640\x00\u07d4-\xdf@\x90Wi\xbc\xc4&\xcb,)8\xff\xe0w\xe1\u8758\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4-\xe0\x96D\x00\u0082\xbd\u05ca\x91\x9ck\xf7|k_yay\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94-\xe3\x1a\xfd\x18\x9a\x13\xa7o\xf6\xfes\xea\xd9\xf7K\xb5\u0126)\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4-\xec\x982\x9d\x1f\x96\u00e5\x9c\xaay\x81uTR\xd4\xdaI\u0549\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94-\ue422\x8f\x19-gj\x87s#+V\xf1\x8f#\x9e/\xad\x8a\x03\xef\xa7\xe7G\xb6\u046d\x00\x00\xe0\x94.\b\x80\xa3E\x96#\a \xf0Z\xc8\xf0e\xaf\x86\x81\u0736\u008a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4.\fW\xb4qP\xf9Z\xa6\xa7\xe1j\xb9\xb1\xcb\xf5C(\x97\x9a\x89\x05k\xc7^-c\x10\x00\x00\u07d4.\x10\x91\v\xa6\xe0\xbc\x17\xe0UUf\x14\u02c7\t\x0fM~[\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94.$\xb5\x97\x87;\xb1A\xbd\xb27\xea\x8aZ\xb7Gy\x9a\xf0-\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4.(\x10\xde\xe4J\xe4\xdf\xf3\xd8cB\xab\x12fW\xd6S\xc36\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4.,\xbdz\xd8%G\xb4\xf5\xff\x8b:\xb5o\x94*dE\xa3\xb0\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4.-~\xa6k\x9fG\xd8\xccR\xc0\x1cR\xb6\u147c}G\x86\x89\xd8\xd4`,&\xbfl\x00\x00\u07d4.C\x93H\u07caBw\xb2*v\x84W\xd1\x15\x8e\x97\xc4\t\x04\x89*\x1e\x9f\xf2o\xbfA\x00\x00\xe0\x94.F\xfc\xeej;\xb1E\xb5\x94\xa2C\xa3\x91?\xce]\xado\xba\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u0794.G\xf2\x87\xf4\x98#7\x13\x85\r1&\x82<\xc6}\xce\xe2U\x88\u029d\x9e\xa5X\xb4\x00\x00\u07d4.N\u1b99j\xa0\xa1\xd9$(\xd0fR\xa6\xbe\xa6\xd2\xd1]\x89lk\x93[\x8b\xbd@\x00\x00\u07d4.R\x91+\xc1\x0e\xa3\x9dT\xe2\x93\xf7\xae\u05b9\x9a\x0fLs\xbe\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4.a\x9fW\xab\xc1\u91ea\x93j\xe3\xa2&Ib\xe7\xeb-\x9a\x89(\xfb\x9b\x8a\x8aSP\x00\x00\u07d4.d\xa8\xd7\x11\x11\xa2/L]\xe1\xe09\xb36\xf6\x8d9\x8a|\x89lk\x93[\x8b\xbd@\x00\x00\u07d4.i3T=O,\xc0\vSP\xbd\x80h\xba\x92C\u05be\xb0\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94.~\x05\xe2\x9e\u0767\xe4\xae%\xc5\x175C\xef\xd7\x1fm=\x80\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4.\u007fFU 
\xec5\xcc#\u058eue\x1b\xb6h\x95D\xa1\x96\x898\xec[r\x1a\x1a&\x80\x00\u07d4.\x8e\xb3\nqn_\xe1\\t#>\x03\x9b\xfb\x11\x06\xe8\x1d\x12\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94.\x98$\xb5\xc12\x11\x1b\xca$\xdd\xfb\xa7\xe5u\xa5\xcdr\x96\xc1\x8a\x03\xa4\x84Qnm\u007f\xfe\x00\x00\u07d4.\xa5\xfe\xe6?3z7nK\x91\x8e\xa8!H\xf9MH\xa6&\x89e\x0f\x8e\r\u0493\xc5\x00\x00\u07d4.\xafN*F\xb7\x89\xcc\u0088\xc8\xd1\xd9)N?\xb0\x858\x96\x89lk\x93[\x8b\xbd@\x00\x00\u07d4.\xaf\xf9\xf8\xf8\x110d\u04d5z\xc6\xd6\xe1\x1e\xeeB\xc8\x19]\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4.\xba\fn\xe5\xa1\x14\\\x1cW9\x84\x96:`]\x88\nz \x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4.\xc9X\"\xeb\x88{\xc1\x13\xb4q*M\xfd\u007f\x13\xb0\x97\xb5\xe7\x8965\u026d\xc5\u07a0\x00\x00\u07d4.\xcaj<]\x9fD\x9d\tV\xbdC\xfa{M{\xe8CYX\x89lk\xdaip\x9c\xc2\x00\x00\xe0\x94.\xca\xc5\x04\xb23\x86n\xb5\xa4\xa9\x9e{\u0490\x13Y\xe4;=\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4.\xeb\xf5\x942\xb5(\x92\xf98\v\xd1@\xaa\x99\xdc\xf8\xad\f\x0f\x89\b=lz\xabc`\x00\x00\u07d4.\xee\xd5\x04q\xa1\xa2\xbfS\xee0\xb1#.n\x9d\x80\xef\x86m\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4.\xefk\x14\x17\u05f1\x0e\xcf\xc1\x9b\x12:\x8a\x89\xe7>RlX\x89 \x86\xac5\x10R`\x00\x00\u07d4.\xf8i\xf05\vW\xd54x\xd7\x01\xe3\xfe\xe5)\xbc\x91\x1cu\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4.\xf9\xe4eqj\xca\u03f8\xc8%/\xa8\xe7\xbcyi\xeb\xf6\u4255\x9e\xb1\xc0\xe4\xae \x00\x00\xe0\x94.\xfcLd}\xacj\xca\xc3Uw\xad\"\x17X\xfe\xf6ao\xaa\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4/\x13eu&\xb1w\xca\xd5G\u00d0\x8c\x84\x0e\xffd{E\u0649?v\x84\x9c\xf1\xee,\x80\x00\u07d4/\x18}ZpMZ3\x8c[(v\xa0\x90\xdc\xe9d(N)\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94/%#\u0303O\x00\x86\x05$\x02bb\x96gQ\x86\xa8\u508a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4/(*\xbb\xb6\u0523\xc3\xcd;\\\xa8\x12\xf7d>\x800_\x06\x89dI\xe8NG\xa8\xa8\x00\x00\u07d4/+\xba\x1b\x17\x96\x82\x1avo\xced\xb8O(\xech\xf1Z\xea\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4/1]\x90\x16\xe8\xee_Sf\x81 /\x90\x84\xb02TMM\x898<\xd1+\x9e\x86<\x00\x00\u07d4/M\xa7SC\x0f\xc0\x9es\xac\xbc\xcd\xcd\xe9\xdad\u007f+]7\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4/P\x80\xb8?~-\xc0\xa1\xdd\x11\xb0\x92\xad\x04+\xffx\x8fL\x89\xb4\xf8\xfby#\x1d+\x80\x00\u07d4/a\uf941\x9dp_+\x1eN\xe7T\xae\xb8\xa8\x19Pju\x89O%\x91\xf8\x96\xa6P\x00\x00\xe0\x94/f\xbf\xbf\"b\xef\u030d+\xd0DO\u0170ib\x98\xff\x1e\x8a\x02\x1a\xd95\xf7\x9fv\xd0\x00\x00\u07d4/m\xce\x130\u015e\xf9!`!TW-MK\xac\xbd\x04\x8a\x8965\u026d\xc5\u07a0\x00\x00\u07d4/}2\x90\x85\x1b\xe5\u01b4\xb4?}Et2\x9fa\xa7\x92\u00c9\x05k\xc7^-c\x10\x00\x00\u07d4/\x858\x17\xaf\u04f8\xf3\xb8n\x9f`\xeew\xb5\xd9ws\xc0\xe3\x89N\xae\xeaD\xe3h\xb9\x00\x00\u07d4/\xa4\x91\xfbY \xa6WN\xbd(\x9f9\xc1\xb2C\r-\x9aj\x89lk\x93[\x8b\xbd@\x00\x00\u07d4/\xb5f\xc9K\xbb\xa4\xe3\xcbg\xcd\xda}_\xadq1S\x91\x02\x89lk\x93[\x8b\xbd@\x00\x00\u07d4/\xbbPJ]\xc5'\xd3\xe3\xeb\x00\x85\xe2\xfc<}\xd58\xcbz\x89C\u00b1\x8a\xec<\n\x80\x00\u07d4/\xbc\x85y\x8aX5\x98\xb5\"\x16mn\x9d\xda\x12\x1db}\xbc\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4/\xbc\xef3\x84\xd4 \xe4\xbfa\xa0f\x99\x90\xbcpT\U00065bc9lk\x93[\x8b\xbd@\x00\x00\xe0\x94/\xc8.\xf0v\x93#A&Oaz\f\x80\xddW\x1ej\xe99\x8a\x01\x84$\xf5\xf0\xb1\xb4\xe0\x00\x00\u07d4/\u075by\u07cd\xf50\xadc\xc2\x0eb\xafC\x1a\xe9\x92\x16\xb8\x89\x01#n\xfc\xbc\xbb4\x00\x00\u07d4/\xe0\x02?W\"e\x0f:\x8a\xc0\x10\t\x12^t\xe3\xf8.\x9b\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4/\xe0\xccBKS\xa3\x1f\t\x16\xbe\b\xec\x81\xc5\v\xf8\xea\xb0\xc1\x89 
\x86\xac5\x10R`\x00\x00\u07d4/\xe1:\x8d\a\x85\u0787X\xa5\xe4\x18v\xc3n\x91l\xf7Pt\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4/\xea\x1b/\x83O\x02\xfcT3?\x8a\x80\x9f\x048\xe5\x87\n\xa9\x89\x01\x18T\xd0\xf9\xce\xe4\x00\x00\u07d4/\xee6\xa4\x9e\xe5\x0e\xcfqo\x10G\x91VFw\x9f\x8b\xa0?\x899B\"\xc4\u0686\xd7\x00\x00\u07d4/\xef\x81G\x8aK.\x80\x98\xdb_\xf3\x87\xba!S\xf4\xe2+y\x896'\xe8\xf7\x127<\x00\x00\u07d4/\xf1`\xc4Or\xa2\x99\xb5\xec-q\xe2\x8c\xe5Dm/\u02ef\x89\x13\x84\x00\xec\xa3d\xa0\x00\x00\u07d4/\xf1\xcaU\xfd\x9c\xec\x1b\x1f\xe9\U00029af7LQ<\x1e*\xaa\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94/\xf5\u02b1,\r\x95\u007f\xd33\xf3\x82\xee\xb7Q\a\xa6L\xb8\xe8\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94/\xf80\xcfU\xfb\x00\u0560\xe05\x14\xfe\xcdD1K\xd6\xd9\xf1\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4/\xfe\x93\xec\x1aV6\xe9\xee4\xafp\xdf\xf5&\x82\xe6\xffpy\x89lk\x93[\x8b\xbd@\x00\x00\u07d40\x03y\x88p&q\xac\xbe\x89,\x03\xfeW\x88\xaa\x98\xaf(z\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d40$\x8dX\xe4\x14\xb2\x0f\xed:lH+Y\xd9\xd8\xf5\xa4\xb7\xe2\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4019\xbcYd\x03\xd5\u04d3\x1fwLf\u013aFtT\u06c9\\%\xe1J\xea(?\x00\x00\u079408\x00\x87xie\x14\x9e\x81B;\x15\xe3\x13\xba2\xc5\u01c3\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d40:0\xacB\x86\xae\x17\xcfH=\xad{\x87\fk\xd6M{J\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d40?\xba\xeb\xbeF\xb3[n[t\x94j_\x99\xbc\x15\x85\xca\xe7\x89/\x9a\xc0i_[\xba\x00\x00\u07d40ADZ3\xba\x15\x87A\x16\r\x9c4N\xb8\x8e\\0o\x94\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d40H\x01d\xbc\xd8It\xeb\xc0\xd9\f\x9b\x9a\xfa\xb6&\xcd\x1cs\x89+^:\xf1k\x18\x80\x00\x00\u07d40N\u019atTW!\xd71j\xefM\u03f4\x1a\u015e\xe2\xf0\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x940Q\x182\x91\x8d\x804\xa7\xbe\xe7.\xf2\xbf\xeeD\x0e\u02fc\xf6\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d40Q?\u029f6\xfdx\x8c\xfe\xa7\xa3@\xe8m\xf9\x82\x94\xa2D\x89\x18;_\x03\xb1G\x9c\x00\x00\u07d40U\xef\xd2`)\xe0\xd1\x1b\x93\r\xf4\xf5;\x16,\x8c?\xd2\u0389\x1b\x1a\b\x927\a=\x00\x00\u07d40]&\xc1\v\xdc\x10?k\x9c!'.\xb7\xcb-\x91\b\xc4~\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d40_x\xd6\x18\xb9\x90\xb4)[\xac\x8a-\xfa&(\x84\xf8\x04\xea\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x940d\x89\x9a\x96\x1a>\x1d\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d40\x98\xb6]\xb9>\xca\xca\xf75\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d40\uc4d2$J!\b\u0247\xbc\\\xdd\xe0\ud7c3z\x81{\x89T\x99%\xf6\xc9\xc5%\x00\x00\xe0\x940\xed\x11\xb7{\xc1~^f\x94\u023c[nG\x98\xf6\x8d\x9c\xa7\x8a\x1eo\xb3B\x1f\xe0)\x9e\x00\x00\u07d40\xf7\xd0%\xd1o{\xee\x10U\x80Ho\x9fV\x1c{\xae?\xef\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x940\xfb\xe5\x88_\x9f\xcc\xe9\xea^\u06c2\xedJ\x11\x96\xdd%\x9a\xed\x8a\x01\x19\xe4\u007f!8\x1f@\x00\x00\u07d41\x04}p?c\xb94$\xfb\xbdn/\x1f\x9et\xde\x13\xe7\t\x89\x9a\x81f\xf7\u6ca7\x80\x00\u07d411?\xfdc[\xf2\xf32HA\xa8\x8c\a\xed\x14aD\xce\xeb\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d41Y\xe9\fH\xa9\x15\x90J\xdf\u24b2/\xa5\xfd^ryk\x896\xaf\xe9\x8f&\x06\x10\x00\x00\u07d41]\xb7C\x9f\xa1\u0574#\xaf\xa7\xddq\x98\xc1\xcft\xc9\x18\xbc\x89 \x86\xac5\x10R`\x00\x00\u07d41^\xf2\xdab\x0f\xd30\xd1.\xe5]\xe5\xf3)\xa6\x96\xe0\xa9h\x89\b!\xab\rD\x14\x98\x00\x00\u07d41n\x92\xa9\x1b\xbd\xa6\x8b\x9e/\x98\xb3\xc0H\x93N<\xc0\xb4\x16\x89lk\x93[\x8b\xbd@\x00\x00\u07d41n\xb4\xe4}\xf7\x1bB\xe1mo\xe4h%\xb72{\xaf1$\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d41q\x87~\x9d\x82\f\xc6\x18\xfc\t\x19\xb2\x9e\xfd3?\xdaI4\x8965\u026d\xc5\u07a0\x00\x00\u07d41|\xf4\xa2<\xb1\x91\xcd\xc5c\x12\u009d\x15\xe2\x10\xb3\xb9\xb7\x84\x89\a\xcef\xc5\x0e(@\x00\x00\u07d41\x8b.\xa5\xf0\xaa\xa8y\xc4\xd5\xe5H\xac\x9d\x92\xa0\xc6t\x87\xb7\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d41\x8cv\xec\xfd\x8a\xf6\x8dpUSR\xe1\xf6\x01\xe3Y\x88\x04-\x89\x1b1\x19.h\xc7\xf0\x00\x00\u07d41\x8f\x1f\x8b\xd2 \xb0U\x8b\x95\xfb3\x10\x0f\xfd\xbbd\r|\xa6\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d41\xaa;\x1e\xbe\x8cM\xbc\xb6\xa7\b\xb1\xd7H1\xe6\x0eIv`\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d41\xab\b\x89f\xec\xc7\"\x92X\xf6\t\x8f\xceh\xcf9\xb3\x84\x85\x8965\u026d\xc5\u07a0\x00\x00\xe0\x941\xadM\x99F\xef\t\xd8\xe9\x88\xd9F\xb1\"\u007f\x91A\x90\x176\x8a\x04\xd8S\xc8\xf8\x90\x89\x80\x00\x00\xe0\x941\xb4;\x01]\x00\x81d~h\x00\x00\u07d424\x86\xcad\xb3uGO\xb2\xb7Y\xa9\xe7\xa15\x85\x9b\xd9\xf6\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d427I\xa3\xb9q\x95\x9eF\u0234\x82-\xca\xfa\xf7\xaa\xf9\xbdn\x89\x01\x16q\xa5\xb2Ep\x00\x00\u07d42:\xadA\xdfKo\xc8\xfe\u038c\x93\x95\x8a\xa9\x01\xfah\bC\x894\x95tD\xb8@\xe8\x00\x00\xe0\x942;<\xfe>\xe6+\xbd\xe2\xa2a\xe5<\xb3\xec\xc0X\x10\xf2\u018a\x02\ub3b1\xa1r\u0738\x00\x00\u07d42?\xca^\xd7\u007fi\x9f\x9d\x990\xf5\xce\xef\xf8\xe5oY\xf0<\x89Hz\x9a0E9D\x00\x00\u07d42H\\\x81\x87(\xc1\x97\xfe\xa4\x87\xfb\xb6\xe8)\x15\x9e\xba\x83p\x899!\xb4\x13\xbcN\xc0\x80\x00\xe0\x942P\xe3\xe8X\xc2j\xde\u032d\xf3jVc\xc2*\xa8LAp\x8a\x01\x0f\f\xf0d\xddY \x00\x00\xe0\x942Y\xbd/\xdd\xfb\xbco\xba\u04f6\xe8t\xf0\xbb\xc0,\xda\x18\xb5\x8a\x02\x84`VI[\r\x18\x80\x00\u07d42uIo\xd4\u07491\xfdi\xfb\n\v\x04\xc4\xd1\xff\x87\x9e\xf5\x89\x18-~L\xfd\xa08\x00\x00\u07d42{\xb4\x9euOo\xb4\xf73\xc6\xe0o9\x89\xb4\xf6]K\xee\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d42\x82y\x1do\xd7\x13\xf1\xe9OK\xfdV^\xaax\xb3\xa0Y\x9d\x89Hz\x9a0E9D\x00\x00\u07d42\x83\xeb\u007f\x917\xdd9\xbe\xd5_\xfek\x8d\xc8E\xf3\xe1\xa0y\x89\x03\x97\n\xe9!Ux\x00\x00\u07d42\x86\t\x97\xd70\xb2\xd8;s$\x1a%\xd3f}Q\xc9\b\xef\x89\x1b\x1a\b\x927\a=\x00\x00\xe0\x942\x86\u047cez1,\x88G\xd9<\xb3\xcbyP\xf2\xb0\xc6\xe3\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x942\xa2\r\x02\x8e,b\x18\xb9\xd9[D\\w\x15$cj\"\xef\x8a\x02\x02\xfe\xfb\xf2\xd7\xc2\xf0\x00\x00\u07d42\xa7\x06\x91%\\\x9f\xc9y\x1aOu\u0238\x1f8\x8e\n%\x03\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d42\xb7\xfe\xeb\xc5\u015b\xf6^\x86\x1cL\v\xe4*v\x11\xa5T\x1a\x89w\u9aa8R\\\x10\x00\x00\xe0\x942\xba\x9a}\x04#\xe0:R_\xe2\xeb\xebf\x1d \x85w\x8b\u060a\x04<3\xc1\x93ud\x80\x00\x00\u07d42\xbb.\x96\x93\xe4\xe0\x854M/\r\xbdF\xa2\x83\u3807\xfd\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x942\xc2\xfd\u2daa\xbb\x80\u5ba2\xb9I\xa2\x17\xf3\xcb\t\"\x83\x8a\x010a`\xaf\xdf 7\x80\x00\u07d42\xd9P\xd5\xe9>\xa1\u0574\x8d\xb4qO\x86{\x03 
\xb3\x1c\x0f\x897\b\xba\xed=h\x90\x00\x00\u07d42\u06f6qlT\xe81e\x82\x9aJ\xbb6uxI\xb6\xe4}\x8965\u026d\xc5\u07a0\x00\x00\u07d42\xebd\xbe\x1b]\xed\xe4\b\u01bd\xef\xben@\\\x16\xb7\xed\x02\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d42\xef\\\xdcg\x1d\xf5V*\x90\x1a\xee]\xb7\x16\xb9\xbev\xdc\xf6\x89lk\x93[\x8b\xbd@\x00\x00\u07d42\xf2\x9e\x87'\xa7LkC\x01\xe3\xff\xff\x06\x87\xc1\xb8p\xda\xe9\x8965\u026d\xc5\u07a0\x00\x00\u07d42\xfa\x0e\x86\xcd\b}\u058di1\x90\xf3-\x931\t\t\xedS\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d42\xfb\xee\xd6\xf6&\xfc\xdf\xd5\x1a\xca\xfbs\v\x9e\xef\xf6\x12\xf5d\x89lk\x93[\x8b\xbd@\x00\x00\u07943\x00\xfb\x14\x9a\xde\xd6[\u02e6\xc0N\x9c\u05b7\xa0;\x89;\xb1\x88\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x943\x01\xd9\xca/;\xfe\x02by\xcdh\x19\xf7\x9a)=\x98\x15n\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\xe0\x943\b\xb04f\xc2z\x17\xdf\xe1\xaa\xfc\xeb\x81\xe1m)4Vo\x8a\x03\x99\x92d\x8a#\u0220\x00\x00\u07943\x1a\x1c&\xcci\x94\xcd\xd3\xc1K\xec\xe2v\xff\xffK\x9d\xf7|\x88\xfaz\xed\xdfO\x06\x80\x00\xe0\x943&\xb8\x8d\xe8\x06\x18DT\xc4\v'\xf3\t\xd9\xddm\u03f9x\x8a\x03\xca\\f\u067cD0\x00\x00\xe0\x943)\xeb;\xafCE\xd6\x00\xce\xd4\x0en\x99ueo\x117B\x8a\x01\x0f\b\xed\xa8\xe5U\t\x80\x00\u07d432\r\xd9\x0f+\xaa\x11\r\xd34\x87*\x99\x8f\x14\x84&E<\x8965f3\xeb\xd8\xea\x00\x00\u07d436\xc3\xefn\x8bP\xee\x90\xe07\xb1d\xb7\xa8\xea_\xaa\xc6]\x89\x0e\u0223\xa7\x1c\"T\x00\x00\xe0\x9438\fo\xffZ\xcd&Q0\x96)\u06daq\xbf? \u017a\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d43:\xd1Yd\x01\xe0Z\xea-6\xcaG1\x8e\xf4\xcd,\xb3\u07c9\x9d\xc0\\\xce(\u00b8\x00\x00\u07d43C@\xeeK\x9c\u0701\xf8P\xa7Q\x16\xd5\x0e\u9d98%\xbf\x89lk\x93[\x8b\xbd@\x00\x00\u07d43H\x1e\x85n\xbe\u050e\xa7\b\xa2t&\xef(\xe8g\xf5|\u0449\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x943V[\xa9\xda,\x03\xe7x\xce\x12)O\b\x1d\xfe\x81\x06M$\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07943X\x1c\xee#0\x88\xc0\x86\r\x94N\f\xf1\u03ab\xb8&\x1c.\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d43XX\xf7I\xf1i\u02bc\xfeR\xb7\x96\xe3\xc1\x1e\xc4~\xa3\u0089\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x943^\"\x02[zw\u00e0t\u01cb\x8e=\xfe\a\x13A\x94n\x8a\x02'\xcas\n\xb3\xf6\xac\x00\x00\u07d43b\x9b\xd5/\x0e\x10{\xc0q\x17ld\xdf\x10\x8fdw}I\x89\x01\xcf\xddth!n\x80\x00\u07d43{;\u07c6\xd7\x13\xdb\xd0{]\xbf\xcc\x02+z{\x19F\xae\x89\xd7\xc1\x98q\x0ef\xb0\x00\x00\u07d43|\xfe\x11W\xa5\u0191 
\x10\xddV\x153y\x17i\u00b6\xa6\x8965\u026d\xc5\u07a0\x00\x00\u07d43\xb36\xf5\xba^\xdb{\x1c\xcc~\xb1\xa0\u0644\xc1#\x1d\x0e\u0709lk\x93[\x8b\xbd@\x00\x00\u07d43\xc4\a\x13;\x84\xb3\xcaL=\xed\x1fFX\x90\f8\x10\x16$\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\xe0\x943\xd1r\xab\a\\Q\xdb\x1c\xd4\n\x8c\xa8\xdb\xff\r\x93\xb8C\xbb\x8a\x016x\x05\x10\xd1-\xe3\x80\x00\u07d43\xe9\xb7\x18#\x95.\x1ff\x95\x8c'\x8f\u008b\x11\x96\xa6\u0164\x89\x05k\xc7^-c\x10\x00\x00\u07d43\xeakxU\xe0[\a\xab\x80\u06b1\xe1M\xe9\xb6I\xe9\x9bl\x89\x1c\xd6\xfb\xadW\xdb\xd0\x00\x00\u07d43\xf1R#1\rD\u078bf6h_:L=\x9cVU\xa5\x89\r\x94b\xc6\xcbKZ\x00\x00\u07d43\xf4\xa6G\x1e\xb1\xbc\xa6\xa9\xf8[;Hr\xe1\aU\xc8+\xe1\x89lk\x93[\x8b\xbd@\x00\x00\u07d43\xfbWzM!O\xe0\x10\xd3,\xca|>\xed\xa6?\x87\xce\xef\x8965\u026d\xc5\u07a0\x00\x00\u07d43\xfdq\x8f\v\x91\xb5\xce\u020a]\xc1^\xec\xf0\xec\xef\xa4\xef=\x89\x17r$\xaa\x84Lr\x00\x00\u07d44\x14\x80\u030c\xb4v\xf8\xd0\x1f\xf3\b\x12\xe7\xc7\x0e\x05\xaf\xaf]\x89lk\x93[\x8b\xbd@\x00\x00\u07d44'-^ut1]\xca\u9afd1{\xac\x90(\x9dGe\x89b\xa9\x92\xe5:\n\xf0\x00\x00\xe0\x9440\xa1c\x81\xf8i\xf6\xeaT#\x91XU\xe8\x00\x885%\xa9\x8a\x03\xca\\f\u067cD0\x00\x00\u07d441\x86%\x81\x8e\xc1?\x11\x83Z\xe9sS\xce7}oY\n\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d449<]\x91\xb9\xdeYr\x03\xe7[\xacC\t\xb5\xfa=(\u00c9\n\x84Jt$\xd9\xc8\x00\x00\u07d449\x99\x8b$|\xb4\xbf\x8b\xc8\nm+5'\xf1\xdf\xe9\xa6\u0489\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d44C}\x14ed\v\x13l\xb5\x84\x1c?\x93O\x9b\xa0\xb7\t}\x89\t`\xdbwh\x1e\x94\x00\x00\u07d44J\x8d\xb0\x86\xfa\xedN\xfc7\x13\x1b:\"\xb0x-\xadp\x95\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x944fM\"\x0f\xa7\xf3yX\x02J32\u0584\xbc\xc6\xd4\u023d\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d44f\xf6~9cl\x01\xf4;:!\xa0\xe8R\x93%\xc0\x86$\x89-\xb1\x16vP\xac\xd8\x00\x00\u07d44\x856\x1e\xe6\xbf\x06\xefe\b\xcc\xd2=\x94d\x1f\x81M>/\x89lk\x93[\x8b\xbd@\x00\x00\u07d44\x85\xf6!%d3\xb9\x8aB\x00\xda\xd8W\xef\xe5Y7\uc609lk\x93[\x8b\xbd@\x00\x00\u07d44\x95\x8aF\xd3\x0e0\xb2s\xec\xc6\xe5\xd3X\xa2\x12\xe50~\x8c\x89lk\x93[\x8b\xbd@\x00\x00\u07d44\x97\xddf\xfd\x11\x80q\xa7\x8c,\xb3n@\xb6e\x1c\xc8%\x98\x89\x05\xf1\x01kPv\xd0\x00\x00\xe0\x944\x9a\x81k\x17\xab='\xbb\xc0\xae\x00Q\xf6\xa0p\xbe\x1f\xf2\x9d\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d44\x9d,\x91\x8f\u041e(\a1\x8ef\xceC)\t\x17k\xd5\v\x89<\xb7\x1fQ\xfcU\x80\x00\x00\u07d44\xa0C\x1f\xff^\xad\x92\u007f\xb6`\f\x1e\xa8\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d44\xff&\xeb`\xa8\u0469ZH\x9f\xae\x13n\xe9\x1dNX\bL\x89 
\x86\xac5\x10R`\x00\x00\u07d44\xffX)R\xff$E\x8f{\x13\xd5\x1f\vO\x98p\"\xc1\xfe\x89\x98\x06\xde=\xa6\xe9x\x00\x00\u07d45\x10k\xa9N\x85c\u0533\xcb<\\i,\x10\xe6\x04\xb7\xce\u0609lk\x93[\x8b\xbd@\x00\x00\xe0\x945\x14_b\x03\x97\u019c\xb8\xe0\tb\x96\x1f\x0fH\x86d9\x89\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d45\x14t0\xc3\x10e\x00\u77e2\xf5\x02F.\x94p<#\xb1\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x945\x17\x87\x845\x05\xf8\xe4\xef\xf4ef\xcc\u695fM\x1c_\xe7\x8a\x01\xf5q\x89\x87fKH\x00\x00\xe0\x945\x1f\x16\xe5\xe0sZ\xf5gQ\xb0\xe2%\xb2B\x11q9@\x90\x8a\x02\xd4\xca\x05\xe2\xb4<\xa8\x00\x00\xe0\x945$\xa0\x00#N\xba\xaf\a\x89\xa14\xa2\xa4\x178<\xe5(*\x8a\x011yU\x94}\x8e,\x00\x00\u07d45&\xee\xce\x1ak\xdc>\xe7\xb4\x00\xfe\x93[HF?1\xbe\u05c9\x04w\x87\x9bm\x140\x00\x00\u07d45*x_J\x92\x162PL\xe5\xd0\x15\xf8\xd7FO\xa3\xdb\x14\xe7r\x92\x13\u03aa7\x8c\t^\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d45\xaf\x04\n\f\xc23zv\xaf(\x81T\xc7V\x1e\x1a#3I\x8965\u026d\xc5\u07a0\x00\x00\u07d45\xb0>\xa4$W6\xf5{\x85\xd2\xebyb\x8f\x03m\xdc\xd7\x05\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d45\xbd$he\xfa\xb4\x90\xac\bz\xc1\xf1\xd4\xf2\xc1\r\f\xda\x03\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x945\xbff\x88R/5Fz\u007fu0#\x14\xc0+\xa1v\x80\x0e\x8a\x03\xafA\x82\x02\xd9T\xe0\x00\x00\u07d45\u022d\xc1\x11%C+;w\xac\xd6F%\xfeX\xeb\xee\x9df\x89lk\x93[\x8b\xbd@\x00\x00\u07d45\u0497\x0fI\xdc\xc8\x1e\xa9\xeep~\x9c\x8a\n\xb2\xa8\xbbtc\x89N\x10\x03\xb2\x8d\x92\x80\x00\x00\u07d45\xe0\x96\x12\r\xea\xa5\xc1\xec\xb1d^,\u02cbN\xdb\xd9)\x9a\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d45\xea!c\xa3\x8c\u07da\x12?\x82\xa5\xec\x00%\x8d\xae\v\xc7g\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d45\xf1\xda\x12{\x837o\x1b\x88\xc8*3Y\xf6z^g\xddP\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d45\xf2\x94\x9c\xf7\x8b\xc2\x19\xbbO\x01\x90|\xf3\xb4\xb3\u04c6T\x82\x89\x0f\xb5\xc8l\x92\xe44\x00\x00\u07d45\xf5\x86\x01I\xe4\xbb\xc0K\x8a\u0172r\xbeU\xad\x1a\xcaX\xe0\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d46\x02E\x8d\xa8omj\x9d\x9e\xb0=\xaf\x97\xfeV\x19\xd4B\xfa\x89lk\x93[\x8b\xbd@\x00\x00\u07d46\x057-\x93\xa9\x01\t\x88\x01\x8f\x9f1]\x03.\u0448\x0f\xa1\x89\x1b\x1b\xcfQ\x89j}\x00\x00\u07d46\x16\xd4H\x98_]2\xae\xfa\x8b\x93\xa9\x93\xe0\x94\xbd\x85I\x86\x89\v\"\u007fc\xbe\x81<\x00\x00\u07d46\x16\xfbF\xc8\x15x\xc9\xc8\xebM;\xf8\x80E\x1a\x887\x9d}\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d46\x1cu\x93\x16\x96\xbc=B}\x93\xe7lw\xfd\x13\xb2A\xf6\xf4\x89\x1d\xc5\xd8\xfc&m\xd6\x00\x00\u07d46\x1d\x9e\xd8\v[\xd2|\xf9\xf1\"o&u2X\xee_\x9b?\x89\xbfi\x14\xba}r\xc2\x00\x00\u07d46\x1f;\xa9\xed\x95kw\x0f%}6r\xfe\x1f\xf9\xf7\xb0$\f\x89 \x86\xac5\x10R`\x00\x00\u07d46\"|\u07e0\xfd;\x9d~jtF\x85\xf5\xbe\x9a\xa3f\xa7\xf0\x89\n\xc2s\x0e\xe9\xc6\xc1\x80\x00\u07d46/\xbc\xb1\x06b7\n\x06\x8f\xc2e&\x02\xa2Wy7\xcc\xe6\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d460\xc5\xe5e\u03aa\x8a\x0f\x0f\xfe2\x87^\xae*l\xe6<\x19\x89\t7r+7t\xd0\x00\x00\u07d463\x9f\x84\xa5\u00b4L\xe5=\xfd\xb6\xd4\xf9}\xf7\x82\x12\xa7\u07c9\x11o\x18\xb8\x17\x15\xa0\x00\x00\u07d464:\xec\xa0{n\u054a\x0eb\xfaN\xcbI\x8a\x12O\xc9q\x89\x10CV\x1a\x88)0\x00\x00\u07d46au@4\x81\xe0\xab\x15\xbbQF\x15\u02f9\x89\xeb\u018f\x82\x89lk\x93[\x8b\xbd@\x00\x00\u07d46ro;\x88Z$\xf9)\x96\u0681b^\u022d\x16\xd8\xcb\xe6\x89S\xafu\u0441HW\x80\x00\xe0\x946s\x95C\x99\xf6\u07feg\x18\x18%\x9b\xb2x\xe2\xe9.\xe3\x15\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d46u\x8e\x04\x9c\u064b\u03a1\"w\xa6v\xf9)sb\x89\x00#\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d46\u007fY\u0302yS)8NA\xe1(1\x15\xe7\x91\xf2j\x01\x89lk\x93[\x8b\xbd@\x00\x00\u07d46\x81\x0f\xf9\xd2\x13\xa2q\xed\xa2\xb8\xaay\x8b\xe6T\xfaK\xbe\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d46\x8cT\x14\xb5k\x84U\x17\x1f\xbf\ab \xc1\u02e4\xb5\xca1\x89\x1e>\xf9\x11\xe8=r\x00\x00\xe0\x946\x90$k\xa3\xc8\x06y\xe2.\xacD\x12\xa1\xae\xfc\xe6\xd7\u0342\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d46\x92\x8bU\xbc\x86\x15\t\xd5\x1c\x8c\xf1\xd5F\xbf\xecn>\x90\xaf\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d46\x98\"\xf5W\x8b@\xdd\x1fDqpk\"\u0357\x13R\xdak\x89\x12\xc1\xb6\xee\xd0=(\x00\x00\u07d46\x9e\xf7a\x19_:7>$\xec\xe6\xcd\"R\x0f\xe0\xb9\xe8n\x89\x1c\xff\xaf\xc9M\xb2\b\x80\x00\u07d46\xa0\x8f\xd6\xfd\x1a\xc1|\xe1^\xd5~\xef\xb1*+\u2048\xbf\x89Hz\x9a0E9D\x00\x00\u07d46\xa0\xe6\x1e\x1b\xe4\u007f\xa8~0\xd3(\x88\xee\x030\x90\x1c\xa9\x91\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x946\xb2\xc8^:\xee\xeb\xb7\rc\u0124s\f\xe2\xe8\xe8\x8a6$\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x946\xbfC\xff5\u07d0\x90\x88$3l\x9b1\xce3\x06~/P\x8aIr\x15\x10\xc1\xc1\xe9H\x00\x00\u07d46\xbf\xe1\xfa;{p\xc1r\xeb\x04/h\x19\xa8\x97%\x95A>\x8965\u026d\xc5\u07a0\x00\x00\xe0\x946\xc5\x10\xbf\x8dnV\x9b\xf2\xf3}G&]\xbc\xb5\x02\xff+\u038a\x06ZM\xa2]0\x16\xc0\x00\x00\xe0\x946\xd8]\xc3h1V\xe6;\xf8\x80\xa9\xfa\xb7x\x8c\xf8\x14:'\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d46\u07cf\x88<\x12s\xec\x8a\x17\x1fz3\xcf\xd6I\xb1\xfe`u\x89\fRHJ\xc4\x16\x89\x00\x00\xe0\x946\xe1Va\f\xd8\xffd\xe7\x80\u061d\x00T8\\\xa7gU\xaa\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d46\xfe\xc6,,B^!\x9b\x18D\x8a\xd7W\x00\x9d\x8cT\x02o\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d47\x00\xe3\x02t$\xd99\xdb\xde]B\xfbx\xf6\xc4\xdb\xec\x1a\x8f\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d47\x02\xe7\x04\xcc!at9\xadN\xa2zW\x14\xf2\xfd\xa1\xe92\x8965\u026d\xc5\u07a0\x00\x00\u07d47\x035\fMo\xe374,\xdd\xc6[\xf1\xe28k\xf3\xf9\xb2\x89m\x81!\xa1\x94\xd1\x10\x00\x00\xe0\x947\b\xe5\x9d\xe6\xb4\x05P\x88x)\x02\xe0W\x9cr\x01\xa8\xbfP\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d47\x126~^U\xa9mZ\x19\x16\x8fn\xb2\xbc~\x99q\xf8i\x8965\u026d\xc5\u07a0\x00\x00\u07d47\x19Zc]\xccb\xf5jq\x80I\xd4~\x8f\x9f\x96\x83(\x91\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d47'4\x1f&\xc1 \x01\xe3x@^\xe3\x8b-\x84d\xecq@\x89lk\x93[\x8b\xbd@\x00\x00\u07d47.E:kb\x9f'g\x8c\u022e\xb5\xe5|\xe8^\xc0\xae\xf9\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d474\xcb\x18t\x91\xed\xe7\x13\xae[;-\x12(J\xf4k\x81\x01\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d477!n\xe9\x1f\x17w2\xfbX\xfa@\x97&r\a\xe2\xcfU\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d47M;\xbb\x057Q\xf9\xf6\x8d\xdb\a\xa0\x89lk\x93[\x8b\xbd@\x00\x00\u07d48r\xf4\x8d\xc5\xe3\xf8\x17\xbck*\xd2\xd00\xfc^\x04q\x19=\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d48~\xea\xfdk@\t\u07af\x8b\u0578Zr\x98:\x8d\xcc4\x87\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d48\x81\xde\xfa\xe1\xc0{<\xe0Lx\xab\xe2k\f\u070ds\xf0\x10\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d48\x83\xbe\xcc\b\xb9\xbeh\xad;\b6\xaa\u00f6 
\xdc\x00\x17\xef\x89lk\x93[\x8b\xbd@\x00\x00\u07d48\x85\xfe\xe6q\a\xdc:\xa9\x8a\x1d\x99:t\xdf\\\xd7T\xb9\x8dR\x9a\x89a\t=|,m8\x00\x00\u07d48\xe4m\xe4E<8\xe9A\xe7\x93\x0fC0O\x94\xbb{+\xe8\x89l\xb7\xe7Hg\xd5\xe6\x00\x00\u07d48\xe7\u06e8\xfdO\x1f\x85\r\xbc&I\xd8\xe8O\tR\xe3\xeb<\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d48\xe8\xa3\x1a\xf2\xd2e\xe3\x1a\x9f\xff-\x8fF(m\x12E\xa4g\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d48\xeao[Z{\x88AuQ\xb4\x12=\xc1'\xdf\xe94-\xa6\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d48\xee\xc6\xe2\x17\xf4\xd4\x1a\xa9 \xe4$\xb9RQ\x97\x04\x1c\xd4\u0189\xf0\r%\xeb\x92.g\x00\x00\xe0\x948\xf3\x87\xe1\xa4\xedJs\x10n\xf2\xb4b\xe4t\xe2\xe3\x14:\u040a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d49\x11a\xb0\xe4<0 f\u898d,\xe7\xe1\x99\xec\xdb\x1dW\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x949\x15\uad6b.Yw\xd0u\xde\xc4}\x96\xb6\x8bK\\\xf5\x15\x8a\r\a\x01\x81\x85\x12\x0f@\x00\x00\u07d49\x1aw@\\\t\xa7+^\x846#z\xaa\xf9]h\xda\x17\t\x89\x02\xa9&J\xf3\u0479\x00\x00\u07d49\x1f \x17m\x126\rrMQG\n\x90p6uYJM\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d49$3\xd2\u0383\xd3\xfbJv\x02\u0323\xfa\xcaN\xc1@\xa4\xb0\x89\x02\xc3\xc4e\xcaX\xec\x00\x00\xe0\x949?x;\\\u06c6\"\x1b\xf0)O\xb7\x14\x95\x9c{E\x89\x9c\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d49?\xf4%^\\e\x8f.\u007f\x10\xec\xbd)%rg\x1b\xc2\u0489lk\x93[\x8b\xbd@\x00\x00\u07d49A2`\x0fAU\xe0\u007fME\xbc>\xb8\xd9\xfbr\xdc\u05c4\x89\x9fn\x92\xed\xea\a\xd4\x00\x00\u07d49Q\xe4\x8e<\x86\x9ekr\xa1C\xb6\xa4Ph\u0379\xd4f\u0409\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x949T\xbd\xfe\v\xf5\x87\u0195\xa3\x05\xd9$L=[\xdd\xda\u027b\x8a\x04\x10'\x83'\xf9\x85`\x80\x00\u07d49]m%U \xa8\xdb)\xab\xc4}\x83\xa5\u06ca\x1a}\xf0\x87\x89\x05k\xc7^-c\x10\x00\x00\u07d49ck%\x81\x1b\x17j\xbf\xcf\xee\xcad\xbc\x87E/\x1f\xdf\xf4\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d49i\xb4\xf7\x1b\xb8u\x1e\xdeC\xc0\x166:zaOv\x11\x8e\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x949x/\xfe\x06\xacx\x82*<:\x8a\xfe0^P\xa5a\x88\u038a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d49zn\xf8v:\x18\xf0\x0f\xac!~\x05\\\r0\x94\x10\x10\x11\x89lk\x93[\x8b\xbd@\x00\x00\u07d49|\u06cc\x80\xc6yP\xb1\x8deB)a\x0e\x93\xbf\xa6\xee\x1a\x89?\x95\xc8\xe0\x82\x15!\x00\x00\u07d49\x82O\x8b\xce\xd1v\xfd>\xa2.\u01a4\x93\xd0\xcc\xc3?\xc1G\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d49\x93l'\x19E\v\x94 \xcc%\"\u03d1\xdb\x01\xf2'\xc1\xc1\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d49\x95\xe0\x96\xb0\x8aZrh\x00\xfc\xd1}\x9cd\xc6N\b\x8d+\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d49\x9a\xa6\xf5\xd0x\xcb\tp\x88+\u0259 \x06\xf8\xfb\xdf4q\x8965\u026d\xc5\u07a0\x00\x00\u07d49\xaa\x05\xe5m}28T!\u03d36\xe9\r=\x15\xa9\xf8Y\x89\x01h\u048e?\x00(\x00\x00\u07d49\xaa\xf0\x85M\xb6\xeb9\xbc{.C\x84jv\x17\x1c\x04E\u0789dI\xe8NG\xa8\xa8\x00\x00\u07d49\xb1\xc4q\xae\x94\xe1!dE.\x81\x1f\xbb\xe2\xb3\xcdru\xac\x89lk\x93[\x8b\xbd@\x00\x00\u07d49\xb2\x992t\x90\xd7/\x9a\x9e\xdf\xf1\x1b\x83\xaf\xd0\xe9\xd3\xc4P\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d49\xba\u018d\x94xY\xf5\x9e\x92&\b\x9c\x96\xd6.\x9f\xbe<\u0789\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x949\xbf\xd9xh\x9b\xec\x04\x8f\xc7v\xaa\x15$\u007f^\x1d|9\xa2\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d49\xc7s6|\x88%\xd3YlhoB\xbf\r\x141\x9e?\x84\x89\a?u\u0460\x85\xba\x00\x00\u07d49\u05291@,\fy\xc4W\x18o$\u07c7)\u03d5p1\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d49\xd6\xca\xca\"\xbc\xcdjr\xf8~\xe7\u05b5\x9e\v\xde!\xd7\x19\x89l\x87T\xc8\xf3\f\b\x00\x00\u07d49\xe0\xdbM`V\x8c\x80\v\x8cU\x00\x02l%\x94\xf5v\x89`\x8965\u026d\xc5\u07a0\x00\x00\xe0\x949\xeeO\xe0\x0f\xbc\xeddph\xd4\xf5|\x01\xcb\"\xa8\v\xcc\u044a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d49\xf1\x983\x1eK!\xc1\xb7`\xa3\x15_J\xb2\xfe\x00\xa7F\x19\x89lk\x93[\x8b\xbd@\x00\x00\u07d49\xf4Fc\xd9%a\t\x1b\x82\xa7\r\xcfY=u@\x05\x97:\x89\n\u05cb.\xdc!Y\x80\x00\u07d4:\x03U\x94\xc7GGmB\xd1\xee\x96l6\"L\xdd\"I\x93\x89\x13J\xf7Ei\xf9\xc5\x00\x00\u07d4:\x04W(G\xd3\x1e\x81\xf7v\\\xa5\xbf\xc9\xd5W\x15\x9f6\x83\x89\a6-\r\xab\xea\xfd\x80\x00\xe0\x94:\x06\xe3\xbb\x1e\xdc\xfd\fD\xc3\aM\xe0\xbb`k\x04\x98\x94\xa2\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u0794:\x10\x88\x8b~\x14\x9c\xae',\x010,2}\n\xf0\x1a\v$\x88\xeb\xec!\xee\x1d\xa4\x00\x00\u07d4:1\b\xc1\u6023;3l!\x13\x134@\x9d\x97\xe5\xad\xec\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4:6\x8e\xfeJ\u05c6\xe2c\x95\xec\x9f\u01adi\x8c\xae)\xfe\x01\x89\"E\x89\x96u\xf9\xf4\x00\x00\u07d4:=\xd1\x04\xcd~\xb0O!\x93/\xd43\xeaz\xff\u04d3i\xf5\x89\x13aO#\xe2B&\x00\x00\u07d4:B\x97\xda\xc4.\x1eO\xb8\xcb1\xec\xddC\xaew<\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94:\x99`&m\xf6I cS\x8a\x99\xf4\x87\xc9P\xa3\xa5\uc78a\x05\x15\n\xe8J\x8c\xdf\x00\x00\x00\u07d4:\x9b\x11\x10)\xce\x1f \xc9\x10\x9czt\xee\xee\xf3OO.\xb2\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4:\x9eTA\xd4K$;\xe5[u\x02z\x1c\ub7ac\xf5\r\xf2\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94:\xa0z4\xa1\xaf\u0216}=\x13\x83\xb9kb\u03d6\xd5\xfa\x90\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94:\xa4,!\xb9\xb3\x1c>'\xcc\xd1~\t\x9a\xf6y\xcd\xf5i\a\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4:\xa9H\xea\x029wU\xef\xfb/\x9d\xc99-\xf1\x05\x8f~3\x89.\x14\x1e\xa0\x81\xca\b\x00\x00\u07d4:\xad\xf9\x8ba\xe5\u0216\xe7\xd1\x00\xa39\x1d2P\"]a\u07c9\f\xafg\x007\x01h\x00\x00\u07d4:\xaeHr\xfd\x90\x93\xcb\xca\xd1@o\x1e\x80x\xba\xb5\x03Y\xe2\x89\x02\"\xc8\xeb?\xf6d\x00\x00\u07d4:\xbb\x8a\xdf\xc6\x04\xf4\x8dY\x84\x81\x1d\u007f\x1dR\xfe\xf6u\x82p\x89\xf2\x97\x19\xb6o\x11\f\x00\x00\u07d4:\xc2\xf0\xff\x16\x12\xe4\xa1\xc3F\xd53\x82\xab\xf6\u0622[\xaaS\x89lk\x93[\x8b\xbd@\x00\x00\u07d4:\xc9\xdczCj\xe9\x8f\xd0\x1cz\x96!\xaa\x8e\x9d\v\x8bS\x1d\x89a\t=|,m8\x00\x00\xe0\x94:\xd0aI\xb2\x1cU\xff\x86|\xc3\xfb\x97@\u04bc\xc7\x10\x121\x8a)\xb7d2\xb9DQ \x00\x00\u07d4:\xd7\x02C\u060b\xf0@\x0fW\xc8\xc1\xfdW\x81\x18H\xaf\x16*\x89.\x9e\xe5\u00c6S\xf0\x00\x00\u07d4:\xd9\x15\xd5P\xb7#AV \xf5\xa9\xb5\xb8\x8a\x85\xf3\x82\xf05\x8965\u026d\xc5\u07a0\x00\x00\u07d4:\xe1`\xe3\xcd`\xae1\xb9\xd6t-h\xe1Nv\xbd\x96\xc5\x17\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\u07d4:\xe6+\xd2q\xa7`c\u007f\xady\xc3\x1c\x94\xffb\xb4\xcd\x12\xf7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4:\xeaN\x82\xd2@\x02H\xf9\x98q\xa4\x1c\xa2W\x06\r:\"\x1b\x8965\u026d\xc5\u07a0\x00\x00\u07d4:\xf6[>(\x89ZJ\x00\x11S9\x1d\x1ei\xc3\x1f\xb9\xdb9\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4;\a\xdbZ5\u007fZ\xf2HL\xbc\x9dw\xd7;\x1f\xd0Q\x9f\u01c9\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4;\n\u032fK`|\xfea\xd1s4\xc2\x14\xb7\\\xde\xfd\xbd\x89\x89lk\x93[\x8b\xbd@\x00\x00\u07d4;\x13c\x1a\x1b\x89\xcbVeH\x89\x9a\x1d`\x91\\\xdc\xc4 
[\x89lk\x93[\x8b\xbd@\x00\x00\u07d4;\x15\x90\x99\aR\a\u0180vc\xb1\xf0\xf7\xed\xa5J\xc8\xcc\xe3\x89j\xc4\xe6[i\xf9-\x80\x00\u07d4;\x197\xd5\u74f8\x9bc\xfb\x8e\xb5\xf1\xb1\xc9\xcak\xa0\xfa\x8e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4;\"\xda*\x02q\xc8\xef\xe1\x02S'scji\xb1\xc1~\t\x89\x1b6\xa6DJ>\x18\x00\x00\u07d4;\"\u07a3\xc2_\x1bY\u01fd'\xbb\x91\u04e3\xea\xec\xef9\x84\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94;#g\xf8IK_\xe1\x8dh<\x05]\x89\x99\x9c\x9f=\x1b4\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4;,E\x99\x0e!GDQ\xcfOY\xf0\x19U\xb31\xc7\xd7\u0249lk\x93[\x8b\xbd@\x00\x00\xe0\x94;A\x00\xe3\ns\xb0\xc74\xb1\x8f\xfa\x84&\u045b\x191/\x1a\x8a\v\xb5\u046ap\n\xfd\x90\x00\x00\u07d4;B\xa6m\x97\x9fX(4tz\x8b`B\x8e\x9bN\xec\xcd#\x89!\xa1\u01d0\xfa\xdcX\x00\x00\u07d4;Gh\xfdq\xe2\xdb,\xbe\u007f\xa0PH<'\xb4\xeb\x93\x1d\xf3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4;Vj\x8a\xfa\u0456\x82\xdc,\xe8g\x9a<\xe4D\xa5\xb0\xfdO\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94;\\%\x1d\u007f\u05c9;\xa2\t\xfeT\x1c\xec\xd0\xce%:\x99\r\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4;^\x8b\x17w\xca\x18A\x896\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94;\x93\xb1a6\xf1\x1e\xaf\x10\x99l\x95\x99\r;'9\xcc\xea_\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4;\xabK\x01\xa7\xc8K\xa1?\uea70\xbb\x19\x1bw\xa3\xaa\u0723\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4;\xb55\x98\xcc \xe2\x05]\xc5S\xb0I@J\u0277\xdd\x1e\x83\x89!W\x1d\xf7|\x00\xbe\x00\x00\u07d4;\xbc\x13\xd0J\xcc\xc0pz\xeb\u072e\xf0\x87\u0438~\v^\u327e\xd1\xd0&=\x9f\x00\x00\x00\u07d4;\xc6\xe3\xeezV\u038f\x14\xa3u2Y\x0fcqk\x99f\xe8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4;\xc8]ls[\x9c\xdaK\xba_H\xb2K\x13\xe7\x0600{\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4;\xd6$\xb5H\xcbe\x976\x90~\u062a<\fp^$\xb5u\x89lk\x93[\x8b\xbd@\x00\x00\u07d4;\u0660m\x1b\xd3lN\xdd'\xfc\r\x1f[\b\x8d\xda\xe3\xc7*\x89\x1b\x1azB\v\xa0\r\x00\x00\u0794;\u077c\x814\xf7}UY\u007f\xc9|&\xd2f\x98\t\x06\x04\ub23e -j\x0e\xda\x00\x00\xe0\x94;\xf8n\u0623\x15>\xc93xj\x02\xac\t\x03\x01\x85^Wk\x8a_J\x8c\x83u\xd1U@\x00\x00\u07d4;\xfb\u04c4|\x17\xa6\x1c\xf3\xf1{R\xf8\ub879`\xb3\U000df262\xa1]\tQ\x9b\xe0\x00\x00\u07d4<\x03\xbb\xc0#\xe1\xe9?\xa3\xa3\xa6\xe4(\xcf\f\xd8\xf9^\x1e\u0189Rf<\u02b1\xe1\xc0\x00\x00\u07d4<\f=\ufb1c\xeaz\xcc1\x9a\x96\xc3\v\x8e\x1f\xed\xabEt\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4<\x15\xb3Q\x1d\xf6\xf04.sH\u0309\xaf9\xa1h\xb7s\x0f\x8965\u026d\xc5\u07a0\x00\x00\u07d4<\x1f\x91\xf3\x01\xf4\xb5e\xbc\xa2GQ\xaa\x1fv\x13\"p\x9d\u0749a\t=|,m8\x00\x00\xe0\x94<(l\xfb0\x14n_\u05d0\xc2\xc8T\x15RW\x8d\xe34\u060a\x02)\x1b\x11\xaa0n\x8c\x00\x00\u07d4<2.a\x1f\u06c2\rG\xc6\xf8\xfcd\xb6\xfa\xd7L\xa9_^\x89\r%\x8e\xce\x1b\x13\x15\x00\x00\u07d4\xa5\xe5\xbfb\xbb\u0309\x05V\xf6L\x1f\xe7\xfa\x00\x00\u07d4<\x86\x9c\tie#\xce\xd8$\xa0pAF\x05\xbbv#\x1f\xf2\x8965\u026d\xc5\u07a0\x00\x00\u07d4<\x92V\x19\u02731DF?\x057\u06165\x87\x06\xc5 \xb0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4<\x98YK\xf6\x8bW5\x1e\x88\x14\xae\x9em\xfd-%J\xa0o\x89\x10CV\x1a\x88)0\x00\x00\u07d4<\xad\xeb=>\xed?b1\x1dRU>p\xdfJ\xfc\xe5o#\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94<\xae\xdbS\x19\xfe\x80eC\xc5nP!\xd3r\xf7\x1b\xe9\x06.\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4<\xaf\xaf^bPV\x15\x06\x8a\xf8\xeb\"\xa1:\u0629\xe5Pp\x89lf\x06E\xaaG\x18\x00\x00\u07d4<\xb1y\xcbH\x01\xa9\x9b\x95\u00f0\xc3$\xa2\xbd\xc1\x01\xa6S`\x89\x01h\u048e?\x00(\x00\x00\u07d4<\xb5a\u0386BK5\x98\x91\xe3d\xec\x92_\xfe\xff'}\xf7\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4<\xcbq\xaah\x80\xcb\v\x84\x01-\x90\xe6\a@\xec\x06\xac\u05cf\x89lk\x93[\x8b\xbd@\x00\x00\u07d4<\xce\xf8\x86yW9G\xe9I\x97y\x8a\x1e2~\b`:e\x89+\xc9\x16\u059f;\x02\x00\x00\xe0\x94<\xd1\xd9s\x1b\xd5H\xc1\xddo\u03a6\x1b\xebu\xd9\x17T\xf7\u04ca\x01\x16\x1d\x01\xb2\x15\xca\xe4\x80\x00\u07d4<\u04e6\xe95y\xc5mIAq\xfcS>z\x90\xe6\xf5\x94d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4<\u05b7Y<\xbe\xe7x0\xa8\xb1\x9d\b\x01\x95\x8f\xcdK\xc5z\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4<\xd7\xf7\xc7\xc257\x80\xcd\xe0\x81\xee\xecE\x82+%\xf2\x86\f\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4<\xe1\u0717\xfc\u05f7\xc4\u04e1\x8aI\xd6\xf2\xa5\xc1\xb1\xa9\x06\u05c9\n\u05ce\xbcZ\xc6 \x00\x00\u07d4<\xea0*G*\x94\x03y\xdd9\x8a$\xea\xfd\xba\u07c8\xady\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94<\xec\xa9k\xb1\xcd\xc2\x14\x02\x9c\xbc^\x18\x1d9\x8a\xb9M=A\x8a\x10\xf0\xcf\x06M\u0552\x00\x00\x00\u07d4<\xf4\x84RO\xbd\xfa\xda\xe2m\xc1\x85\xe3++c\x0f\xd2\xe7&\x89\x18TR\xcb*\x91\xc3\x00\x00\u07d4<\xf9\xa1\xd4e\xe7\x8bp9\xe3iDx\xe2b{6\xfc\xd1A\x89J`S*\xd5\x1b\xf0\x00\x00\u07d4<\xfb\xf0fVYpc\x9e\x13\r\xf2\xa7\xd1k\x0e\x14\xd6\t\x1c\x89\\(=A\x03\x94\x10\x00\x00\xe0\x94=\th\x8d\x93\xad\a\xf3\xab\xe6\x8cr'#\xcdh\t\x90C^\x8a\x06ZL\xe9\x9fv\x9en\x00\x00\u07d4=1X{_\u0546\x98Ex\x87%\xa6c)\nI\xd3g\x8c\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4=?\xadI\xc9\xe5\xd2u\x9c\x8e\x8eZzM`\xa0\xdd\x13V\x92\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4=WO\xcf\x00\xfa\xe1\u064c\u023f\x9d\u07e1\xb3\x95;\x97A\xbc\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4=Z\x8b+\x80\xbe\x8b5\xd8\xec\xf7\x89\xb5\xedz\au\xc5\al\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4=f\xcdK\xd6M\\\x8c\x1b^\xea(\x1e\x10m\x1cZ\xad#s\x89i\xc4\xf3\xa8\xa1\x10\xa6\x00\x00\u0794=j\xe0S\xfc\xbc1\x8do\xd0\xfb\xc3S\xb8\xbfT.h\r'\x88\xc6s\xce<@\x16\x00\x00\u07d4=o\xf8,\x93w\x05\x9f\xb3\r\x92\x15r?`\xc7u\u0211\xfe\x89\r\x8e\\\xe6\x17\xf2\xd5\x00\x00\u07d4=y\xa8S\xd7\x1b\xe0b\x1bD\xe2\x97Yel\xa0u\xfd\xf4\t\x89lk\x93[\x8b\xbd@\x00\x00\u07d4=~\xa5\xbf\x03R\x81\x00\xed\x8a\xf8\xae\xd2e>\x92\x1bng%\x8965\u026d\xc5\u07a0\x00\x00\u07d4=\x81?\xf2\xb6\xedW\xb97\u06bf+8\x1d\x14\x8aA\x1f\xa0\x85\x89\x05k\xc7^-c\x10\x00\x00\u07d4=\x88\x143\xf0J}\r'\xf8ID\xe0\x8aQ-\xa3UR\x87\x89A\rXj \xa4\xc0\x00\x00\u07d4=\x89\xe5\x05\xcbF\xe2\x11\xa5?2\xf1g\xa8w\xbe\xc8\u007fK\n\x89\x01[5W\xf1\x93\u007f\x80\x00\xe0\x94=\x8d\a#r\x1es\xa6\xc0\xd8`\xaa\x05W\xab\xd1L\x1e\xe3b\x8a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4=\x8f9\x88\x1b\x9e\xdf\xe9\x12'\xc3?\xa4\xcd\xd9\x1eg\x85D\xb0\x89\x04\xab\a\xbaC\xad\xa9\x80\x00\u07d4=\x9dk\xe5\u007f\xf8>\x06Y\x85fO\x12VD\x83\xf2\xe6\x00\xb2\x89n\xac\xe4?#\xbd\x80\x00\x00\u07d4=\xa3\x9c\xe3\xefJz9f\xb3.\xe7\xeaN\xbc#5\xa8\xf1\x1f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4=\xaa\x01\u03b7\x0e\xaf\x95\x91\xfaR\x1b\xa4\xa2~\xa9\xfb\x8e\xdeJ\x89Zc\xd2\u027cvT\x00\x00\u07d4=\xb5\xfejh\xbd6\x12\xac\x15\xa9\x9aa\xe5U\x92\x8e\xec\xea\xf3\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4=\xb9\xed\u007f\x02L~&7/\xea\xcf+\x05\b\x03D^8\x10\x89E\xb1H\xb4\x99j0\x00\x00\u07d4=\xbf\r\xbf\xd7x\x90\x80\x053\xf0\x9d\xea\x83\x01\xb9\xf0%\u04a6\x8965\u026d\xc5\u07a0\x00\x00\u07d4=\xce\U0005c18b\x15\xd3N\xdaBn\xc7\xe0K\x18\xb6\x01p\x02\x89lh\xcc\u041b\x02,\x00\x00\xe0\x94=\xd1.Uj`76\xfe\xbaJo\xa8\xbdJ\xc4]f*\x04\x8a#u{\x91\x83\xe0x(\x00\x00\u07d4=\u078b\x15\xb3\u033a\xa5x\x01\x12\xc3\xd6t\xf3\x13\xbb\xa6\x80&\x89`\x1dQZ>O\x94\x00\x00\xe0\x94=\xde\xdb\xe4\x89#\xfb\xf9\xe56\xbf\x9f\xfb\aG\xc9\xcd\u04de\xef\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d4=\xea\xe43'\x91?b\x80\x8f\xaa\x1bbv\xa2\xbdch\xea\u0649lk\x93[\x8b\xbd@\x00\x00\u07d4=\xf7b\x04\x9e\u068a\u0192}\x90Lz\xf4/\x94\xe5Q\x96\x01\x89lk\x93[\x8b\xbd@\x00\x00\u07d4>\x04\r@\u02c0\xba\x01%\xf3\xb1_\xde\xfc\xc8?0\x05\xda\x1b\x898E$\xccp\xb7x\x00\x00\u07d4>\v\x8e\xd8n\xd6i\xe1'#\xafur\xfb\xac\xfe\x82\x9b\x1e\x16\x89QM\xe7\xf9\xb8\x12\xdc\x00\x00\xe0\x94>\f\xbejm\xcba\xf1\x10\xc4[\xa2\xaa6\x1d\u007f\xca\xd3\xdas\x8a\x01\xb2\u07dd!\x9fW\x98\x00\x00\u07d4>\x19KN\xce\xf8\xbbq\x1e\xa2\xff$\xfe\xc4\xe8{\xd02\xf7\u0449\x8b\x9d\xc1\xbc\x1a\x03j\x80\x00\xe0\x94>\x1b\"0\xaf\xbb\xd3\x10\xb4\x92jLwmZ\u705cf\x1d\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4>\x1cS0\x0eL\x16\x89\x12\x16<~\x99\xb9]\xa2h\xad(\n\x896b2\\\u044f\xe0\x00\x00\u07d4>\x1c\x96 c\xe0\xd5)YA\xf2\x10\u0723\xabS\x1e\xec\x88\t\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4>,\xa0\xd24\xba\xf6\a\xadFj\x1b\x85\xf4\xa6H\x8e\xf0\n\xe7\x89\x04\xda!\xa3H=V\x80\x00\u07d4>/&#^\x13zs$\xe4\xdc\x15K]\xf5\xafF\xea\x1aI\x89\x017\xaa\xd8\x03-\xb9\x00\x00\xe0\x94>1a\xf1\xea/\xbf\x12ny\xda\x18\x01\u0695\x12\xb3y\x88\u024a\nm\xd9\f\xaeQ\x14H\x00\x00\xe0\x94>6\xc1rS\xc1\x1c\xf3\x89t\xed\r\xb1\xb7Y\x16\r\xa67\x83\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4><\u04fe\xc0e\x91\xd64o%Kb\x1e\xb4\x1c\x89\x00\x8d1\x895\u07fe\u069f74\x00\x00\u07d4>E\xbdU\u06d0`\xec\xed\x92;\xb9\xcbs<\xb3W?\xb51\x89X\xe7\x92n\xe8X\xa0\x00\x00\u07d4>M\x13\xc5Z\x84\xe4n\xd7\xe9\u02d0\xfd5^\x8a\u0651\u33c965\u026d\xc5\u07a0\x00\x00\u07d4>N\x92e\"<\x9782L\xf2\v\xd0`\x06\xd0\a>\u06cc\x89\a?u\u0460\x85\xba\x00\x00\xe0\x94>O\xbdf\x10\x15\xf6F\x1e\xd6s\\\xef\xef\x01\xf3\x14E\xde:\x8a\x03n4)\x98\xb8\xb0 
\x00\x00\xe0\x94>S\xff!\a\xa8\u07be3(I:\x92\xa5\x86\xa7\xe1\xf4\x97X\x8a\x04\xe6\x9c*q\xa4\x05\xab\x00\x00\u07d4>Z9\xfd\xdap\xdf\x11&\xab\r\u011asx1\x1aSz\x1f\x89\x82\x1a\xb0\xd4AI\x80\x00\x00\xe0\x94>Z\xbd\t\xceZ\xf7\xba\x84\x87\xc3Y\xe0\xf2\xa9:\x98k\v\x18\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4>\\\xb8\x92\x8cAx%\xc0:;\xfc\xc5!\x83\xe5\xc9\x1eB\u05c9\xe71\xd9\xc5,\x96/\x00\x00\u07d4>^\x93\xfbL\x9c\x9d\x12F\xf8\xf2G5\x8e\"\xc3\xc5\xd1{j\x89\b!\xab\rD\x14\x98\x00\x00\u07d4>a\x83P\xfa\x01ez\xb0\xef>\xba\xc8\xe3p\x12\xf8\xfc+o\x89\x98\x06\xde=\xa6\xe9x\x00\x00\u07d4>c\xce;$\xca(e\xb4\u0166\x87\xb7\xae\xa3Y~\xf6\xe5H\x89lk\x93[\x8b\xbd@\x00\x00\u07d4>f\xb8GiVj\xb6yE\xd5\xfa\x8175V\xbc\u00e1\xfa\x89\b=lz\xabc`\x00\x00\xe0\x94>v\xa6-\xb1\x87\xaat\xf68\x17S;0l\xea\xd0\xe8\u03be\x8a\x06\x9bZ\xfa\xc7P\xbb\x80\x00\x00\u07d4>z\x96k]\xc3W\xff\xb0~\x9f\xe0g\xc4W\x91\xfd\x8e0I\x89\x034-`\xdf\xf1\x96\x00\x00\xe0\x94>\x81w!u#~\xb4\xcb\xe0\xfe-\xca\xfd\xad\xff\xebj\x19\x99\x8a\x01\xdd\f\x88_\x9a\r\x80\x00\x00\u07d4>\x83I\xb6\u007fWED\x9fe\x93g\u066dG\x12\xdb[\x89Z\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4>\x83TO\x00\x82U%r\u01c2\xbe\xe5\xd2\x18\xf1\xef\x06J\x9d\x89\x05l\xd5_\xc6M\xfe\x00\x00\u07d4>\x84\xb3\\[\"ePpa\xd3\vo\x12\xda\x03?\xe6\xf8\xb9\x89a\t=|,m8\x00\x00\u07d4>\x86A\xd4\x87E\xba2/_\xd6\xcbP\x12N\xc4f\x88\u01e6\x9a\u007f\xae\x8a\x01\n\xfc\x1a\xde;N\xd4\x00\x00\u07d4>\x91N0\x18\xac\x00D\x93A\u011d\xa7\x1d\x04\xdf\xee\xedb!\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4>\x94\x10\u04f9\xa8~\xd5\xe4Q\xa6\xb9\x1b\xb8\x92?\xe9\x0f\xb2\xb5\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4>\x94\xdfS\x13\xfaR\x05p\xef#+\xc31\x1d_b/\xf1\x83\x89lk\x93[\x8b\xbd@\x00\x00\u0794>\x9b4\xa5\u007f3u\xaeY\xc0\xa7^\x19\u0136A\"\x8d\x97\x00\x88\xf8i\x93)g~\x00\x00\u07d4>\xad\xa8\xc9/V\x06~\x1b\xb7<\xe3x\xdaV\xdc,\xdf\xd3e\x89w\xcd\xe9:\xeb\rH\x00\x00\xe0\x94>\xaf\by\xb5\xb6\xdb\x15\x9bX\x9f\x84W\x8bjt\xf6\xc1\x03W\x8a\x01\x898\xb6q\xfae\xa2\x80\x00\u07d4>\xaf1k\x87a]\x88\xf7\xad\xc7|X\xe7\x12\xedMw\x96k\x89\x05m\xbcL\xee$d\x80\x00\u07d4>\xb8\xb3;!\xd2<\u0686\xd8(\x88\x84\xabG\x0e\x16F\x91\xb5\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4>\xb9\xef\x06\xd0\xc2Y\x04\x03\x19\x94~\x8czh\x12\xaa\x02S\u0609\t\r\x97/22<\x00\x00\u07d4>\u030e\x16h\xdd\xe9\x95\xdcW\x0f\xe4\x14\xf4B\x11\xc54\xa6\x15\x89lk\x93[\x8b\xbd@\x00\x00\u07d4>\u03752\xe3\x97W\x96b\xb2\xa4aA\u73c25\x93j_\x89\x03\x9f\xba\xe8\xd0B\xdd\x00\x00\u07d4>\xeeo\x1e\x966\vv\x89\xb3\x06\x9a\xda\xf9\xaf\x8e\xb6\f\u404965\u026d\xc5\u07a0\x00\x00\xe0\x94?\b\u066d\x89O\x81>\x8e!H\xc1`\xd2K5:\x8et\xb0\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4?\f\x83\xaa\xc5qybsN\\\xea\xea\xec\u04db(\xad\x06\xbe\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94?\x10\x80\x02\x82\u0477\xdd\u01cf\xa9-\x820\aN\x1b\xf6\xae\xae\x8a\x01\n\xfc\x1a\xde;N\xd4\x00\x00\u07d4?\x123qO M\xe9\xdeN\xe9m\a;6\x8d\x81\x97\x98\x9f\x89\x02\x17\xc4\x10t\xe6\xbb\x00\x00\u07d4?\x17:\xa6\xed\xf4i\u0445\xe5\x9b\xd2j\xe4#k\x92\xb4\xd8\xe1\x89\x11X\xe4`\x91=\x00\x00\x00\u07d4?\x1b\xc4 \xc5<\x00,\x9e\x90\x03|D\xfej\x8e\xf4\xdd\xc9b\x89\t`\xdbwh\x1e\x94\x00\x00\u07d4?#a\b\xee\xc7\"\x89\xba\u00e6\\\u0483\xf9^\x04\x1d\x14L\x8964\xbf9\xab\x98x\x80\x00\u07d4?-\xa0\x93\xbb\x16\xeb\x06O\x8b\xfa\x9e0\xb9)\xd1_\x8e\x1cL\x89lk\x93[\x8b\xbd@\x00\x00\u07d4?-\xd5]\xb7\xea\xb0\xeb\xeee\xb3>\xd8 
,\x1e\x99.\x95\x8b\x89,s\xc97t,P\x00\x00\u07d4?/8\x14\x91y|\xc5\xc0\u0502\x96\xc1O\xd0\xcd\x00\xcd\xfa-\x89+\x95\xbd\xcc9\xb6\x10\x00\x00\u07d4?0\u04fc\x9f`\"2\xbcrB\x88\xcaF\xcd\v\a\x88\xf7\x15\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4?<\x8ea\xe5`L\xef\x06\x05\xd46\xdd\"\xac\u0346\"\x17\xfc\x89Hz\x9a0E9D\x00\x00\u07d4??F\xb7\\\xab\xe3{\xfa\u0307`(\x1fCA\xca\u007fF=\x89 \xacD\x825\xfa\xe8\x80\x00\u07d4?G)c\x19x\x83\xbb\xdaZ\x9b}\xfc\xb2-\xb1\x14@\xad1\x89\x1a\x19d<\xb1\xef\xf0\x80\x00\u07d4?L\xd19\x9f\x8a4\xed\u06da\x17\xa4q\xfc\x92+Xp\xaa\xfc\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4?U\x1b\xa9<\xd5F\x93\xc1\x83\xfb\x9a\xd6\re\xe1`\x96s\u0249lk\x93[\x8b\xbd@\x00\x00\xe0\x94?bzv\x9ej\x95\x0e\xb8p\x17\xa7\u035c\xa2\bq\x13h1\x8a\x02\ub3b1\xa1r\u0738\x00\x00\u07d4?m\xd3e\x0e\xe4(\u0737u\x95S\xb0\x17\xa9j\x94(j\u0249Hz\x9a0E9D\x00\x00\u07d4?tr7\x80o\xed?\x82\x8ahR\xeb\bg\xf7\x90'\xaf\x89\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4?u\xaea\xcc\x1d\x80Be;[\xae\xc4D>\x05\x1c^z\xbd\x89\x05-T(\x04\xf1\xce\x00\x00\u07d4?\xb7\u0457\xb3\xbaO\xe0E\xef\xc2=P\xa1E\x85\xf5X\u0672\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94?\xbc\x1eE\x18\xd74\x00\xc6\xd0F5\x949\xfbh\xea\x1aI\xf4\x8a\x03y\v\xb8U\x13v@\x00\x00\u07d4?\xbe\xd6\xe7\xe0\u029c\x84\xfb\xe9\xeb\u03ddN\xf9\xbbIB\x81e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4?\u043bGy\x8c\xf4L\u07feM3=\xe67\xdfJ\x00\xe4\\\x89\x05lUy\xf7\"\x14\x00\x00\xe0\x94?\xe4\x0f\xbd\x91\x9a\xad(\x18\xdf\x01\xeeM\xf4lF\x84*\xc59\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4?\xe8\x01\xe6\x135\xc5\x14\r\xc7\xed\xa2\xefR\x04F\nP\x120\x89lk\x93[\x8b\xbd@\x00\x00\u07d4?\xf86\xb6\xf5{\x90\x1bD\f0\xe4\xdb\xd0e\xcf7\xd3\u050c\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4?\xfc\xb8p\xd4\x02=%]Qg\u0625\a\xce\xfc6kh\xba\x89#4^\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4@s\xfaI\xb8q\x17\u02d0\x8c\xf1\xabQ-\xa7T\xa92\xd4w\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4@\x8ai\xa4\a\x15\xe1\xb3\x13\xe15N`\b\x00\xa1\xe6\xdc\x02\xa5\x89\x01\u7e11\u0312T\x00\x00\u07d4@\x9b\xd7P\x85\x82\x1c\x1d\xe7\f\xdc;\x11\xff\xc3\xd9#\xc7@\x10\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4@\x9dZ\x96.\xde\uefa1x\x01\x8c\x0f8\xb9\u0372\x13\xf2\x89\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4@\xa31\x19[\x97s%\u00aa(\xfa/B\xcb%\xec<%<\x89lk\x93[\x8b\xbd@\x00\x00\u07d4@\xa7\xf7(g\xa7\u0706w\v\x16+uW\xa44\xedP\xcc\xe9\x8965\u026d\xc5\u07a0\x00\x00\u07d4@\xab\n>\x83\xd0\u022c\x93f\x91\x05 
\xea\xb1w+\xac;\x1a\x894\xf1\f-\xc0^|\x00\x00\u07d4@\xabf\xfe!>\xa5l:\xfb\x12\xc7[\xe3?\x8e2\xfd\b]\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94@\xadt\xbc\v\xce*E\xe5/6\xc3\u07bb\x1b:\xda\x1bv\x19\x8a\x01p\x16-\xe1\t\xc6X\x00\x00\u07d4@\u03c9\x05\x91\xea\u484f\x81*)T\xcb)_c3'\xe6\x89\x02\x9b\xf76\xfcY\x1a\x00\x00\u07d4@\u03d0\xef[v\x8c]\xa5\x85\x00,\xcb\xe6avP\xd8\xe87\x8963\x03\"\xd5#\x8c\x00\x00\xe0\x94@\xd4]\x9dv%\xd1QV\xc92\xb7q\xca{\x05'\x13\tX\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4@\xdb\x1b\xa5\x85\xce4S\x1e\xde\xc5IHI9\x13\x81\xe6\xcc\u04c9a\t=|,m8\x00\x00\xe0\x94@\xdfI^\xcf?\x8bL\xef*l\x18\x99W$\x8f\u813c+\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4@\xe0\xdb\xf3\xef\uf404\xea\x1c\xd7\xe5\x03\xf4\v;J\x84C\xf6\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4@\xe2D\n\xe1B\u02006j\x12\xc6\xd4\x10/K\x844\xb6*\x8965\u026d\xc5\u07a0\x00\x00\u07d4@\xe3\u0083\xf7\xe2M\xe0A\f\x12\x1b\xee`\xa5`\u007f>)\xa6\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94@\xeaPD\xb2\x04\xb20v\xb1\xa5\x80;\xf1\xd3\f\x0f\x88\x87\x1a\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\xe0\x94@\xed\xdbD\x8di\x0e\xd7.\x05\xc2%\xd3O\xc85\x0f\xa1\xe4\u014a\x01{x\x83\xc0i\x16`\x00\x00\xe0\x94@\xf4\xf4\xc0ls,\xd3[\x11\x9b\x89;\x12~}\x9d\aq\xe4\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4A\x01\x0f\u023a\xf8C}\x17\xa0Ci\x80\x9a\x16\x8a\x17\xcaV\xfb\x89\x05k\xc7^-c\x10\x00\x00\u07d4A\x03)\x96q\xd4gc\x97\x8f\xa4\xaa\x19\xee4\xb1\xfc\x95'\x84\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4A\x03<\x1bm\x05\xe1\u0289\xb0\x94\x8f\xc6DS\xfb\xe8z\xb2^\x89Hz\x9a0E9D\x00\x00\u07d4A\t\x8a\x81E#\x17\xc1\x9e>\xef\v\xd1#\xbb\xe1x\xe9\xe9\u0289\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4A\x16\x10\xb1x\xd5a}\xfa\xb94\u0493\xf5\x12\xa9>\\\x10\xe1\x89\t79SM(h\x00\x00\u07d4A\x1c\x83\x1c\xc6\xf4O\x19e\xecWW\xabN[<\xa4\xcf\xfd\x1f\x89\x17\n\x0fP@\xe5\x04\x00\x00\xe0\x94A*h\xf6\xc6EU\x9c\xc9w\xfcId\x04z \x1d\x1b\xb0\xe2\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4A?K\x02f\x9c\xcf\xf6\x80k\xc8&\xfc\xb7\xde\xca;\x0e\xa9\xbc\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4AE\x99\t.\x87\x9a\xe2Sr\xa8MsZ\xf5\xc4\xe5\x10\xcdm\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4AHV\x12\xd04F\xecL\x05\xe5$NV?\x1c\xba\xe0\xf1\x97\x894\x95tD\xb8@\xe8\x00\x00\u07d4A]\tj\xb0b\x93\x18?<\x03=%\xf6\xcfqx\xac;\u01c9\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4Af\xfc\b\u0285\xf7f\xfd\xe81F\x0e\x9d\xc9<\x0e!\xaal\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94Ag\x84\xaf`\x960\xb0p\u051a\x8b\xcd\x12#\\d(\xa4\b\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4Ag\xcdH\xe73A\x8e\x8f\x99\xff\xd14\x12\x1cJJ\xb2x\u0109\xc5S%\xcat\x15\xe0\x00\x00\u07d4Al\x86\xb7 \x83\xd1\xf8\x90}\x84\xef\xd2\xd2\u05c3\xdf\xfa>\xfb\x89lj\xccg\u05f1\xd4\x00\x00\u07d4AsA\x9d\\\x9fc)U\x1d\xc4\xd3\xd0\u03ac\x1bp\x1b\x86\x9e\x89\x04\xc5>\xcd\xc1\x8a`\x00\x00\u07d4At\xfa\x1b\xc1*;q\x83\u02eb\xb7z\vYU{\xa5\xf1\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4Axj\x10\xd4G\xf4\x84\xd32D\u0337\xfa\u034bB{[\x8c\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94Az<\u0454\x96S\nmB\x04\u00f5\xa1|\xe0\xf2\a\xb1\xa5\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4A~N&\x88\xb1\xfdf\xd8!R\x9eF\xedOB\xf8\xb3\xdb=\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94A\x9aq\xa3l\x11\xd1\x05\xe0\xf2\xae\xf5\xa3\xe5\x98\a\x8e\x85\xc8\v\x8a\x01\x0f\f\xf0d\xddY 
\x00\x00\xe0\x94A\x9b\xdes\x16\xcc\x1e\u0495\u0205\xac\xe3B\u01db\xf7\xee3\xea\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4A\xa2\xf2\xe6\xec\xb8c\x94\xec\x0e3\x8c\x0f\xc9~\x9cU\x83\xde\u0489l\xee\x06\u077e\x15\xec\x00\x00\u07d4A\xa8\u0083\x00\x81\xb1\x02\xdfn\x011e|\a\xabc[T\u0389lj\xccg\u05f1\xd4\x00\x00\u07d4A\xa8\xe26\xa3\x0emc\xc1\xffdM\x13*\xa2\\\x89S~\x01\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4A\xa9\xa4\x04\xfc\x9f[\xfe\xe4\x8e\xc2e\xb1%#3\x8e)\xa8\xbf\x89\x15\b\x94\xe8I\xb3\x90\x00\x00\u07d4A\xad6\x9fu\x8f\xef8\xa1\x9a\xa3\x14\x93y\x83,\x81\x8e\xf2\xa0\x8966\x9e\xd7t}&\x00\x00\u07d4A\xb2\xd3O\xde\v\x10)&+Ar\xc8\x1c\x15\x90@[\x03\xae\x8965\u026d\xc5\u07a0\x00\x00\u07d4A\xb2\xdb\u05dd\u069b\x86Ojp0'T\x19\u00dd>\xfd;\x89\xadx\xeb\u016cb\x00\x00\x00\u07d4A\xc3\xc26u4\xd1;\xa2\xb3?\x18\\\xdb\xe6\xacC\xc2\xfa1\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4A\u02d8\x96D_p\xa1\n\x14!R\x96\xda\xf6\x14\xe3,\xf4\u0549g\x8a\x93 b\xe4\x18\x00\x00\u07d4A\xcey\x95\t5\xcf\xf5[\xf7\x8eL\xce\xc2\xfec\x17\x85\u06d5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4A\u04f71\xa3&\xe7hX\xba\xa5\xf4\xbd\x89\xb5{6\x93#C\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\xe0\x94A\xe4\xa2\x02u\xe3\x9b\xdc\xef\xebe\\\x03\"tKvQ@\u008a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4A\xed-\x8ep\x81H,\x91\x9f\xc2=\x8f\x00\x91\xb3\xc8,F\x85\x89F:\x1ev[\u05ca\x00\x00\xe0\x94A\xf2~tK\u049d\xe2\xb0Y\x8f\x02\xa0\xbb\x9f\x98\xe6\x81\ua90a\x01\xa4\xab\xa2%\xc2\a@\x00\x00\u07d4A\xf4\x89\xa1\xect{\u009c>_\x9d\x8d\xb9xw\xd4\u0474\xe9\x89\a?u\u0460\x85\xba\x00\x00\u07d4B\x0f\xb8n}+Q@\x1f\xc5\xe8\xc7 \x15\xde\xcbN\xf8\xfc.\x8965\u026d\xc5\u07a0\x00\x00\u07d4B\x16\x84\xba\xa9\xc0\xb4\xb5\xf5S8\xe6\xf6\xe7\xc8\xe1F\xd4\x1c\xb7\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4B9\x96Y\xac\xa6\xa5\xa8c\xea\"E\xc93\xfe\x9a5\xb7\x88\x0e\x89n\xce2\xc2l\x82p\x00\x00\xe0\x94B;\xcaG\xab\xc0\fpW\xe3\xad4\xfc\xa6>7_\xbd\x8bJ\x8a\x03\xcf\xc8.7\xe9\xa7@\x00\x00\u07d4B<1\a\xf4\xba\xceANI\x9cd9\nQ\xf7F\x15\xca^\x89lk\x93[\x8b\xbd@\x00\x00\u07d4B<\xc4YL\xf4\xab\xb66\x8d\xe5\x9f\u04b1#\a4a!C\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94BD\xf13\x11X\xb9\xce&\xbb\xe0\xb9#k\x92\x03\xca5\x144\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u0794BQw\xebt\xad\n\x9d\x9aWR\"\x81G\xeemcV\xa6\u6239\x8b\xc8)\xa6\xf9\x00\x00\u07d4BW%\xc0\xf0\x8f\b\x11\xf5\xf0\x06\xee\xc9\x1c\\\\\x12k\x12\xae\x89\b!\xab\rD\x14\x98\x00\x00\xe0\x94BX\xfdf/\xc4\xce2\x95\xf0\xd4\xed\x8f{\xb1D\x96\x00\xa0\xa9\x8a\x01lE.\xd6\b\x8a\xd8\x00\x00\xe0\x94B\\\x18\x16\x86\x8fww\xcc+\xa6\xc6\u048c\x9e\x1eylR\xb3\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4B\\3\x8a\x13%\xe3\xa1W\x8e\xfa)\x9eW\u0646\xebGO\x81\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94BbY\xb0\xa7Vp\x1a\x8bf5(R!V\xc0(\x8f\x0f$\x8a\x02\x18\xae\x19k\x8dO0\x00\x00\u07d4Bm\x15\xf4\a\xa0\x115\xb1:kr\xf8\xf2R\v51\xe3\x02\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4Box\xf7\r\xb2Y\xac\x854\x14[)4\xf4\xef\x10\x98\xb5\u0609\x13\x84\x00\xec\xa3d\xa0\x00\x00\u07d4Bs-\x8e\xf4\x9f\xfd\xa0K\x19x\x0f\xd3\xc1\x84i\xfb7A\x06\x89\x17\v\x00\xe5\u4a7e\x00\x00\u07d4Bt\x17\xbd\x16\xb1\xb3\xd2-\xbb\x90-\x8f\x96W\x01o$\xa6\x1c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Btj\xee\xa1O'\xbe\xff\f\r\xa6BS\xf1\xe7\x97\x18\x90\xa0\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4B{F*\xb8NP\x91\xf4\x8aF\xeb\f\u0712\xdd\xcb&\xe0x\x89lk\x93[\x8b\xbd@\x00\x00\u07d4B~GQ\u00fa\xbex\xcf\xf8\x83\b\x86\xfe\xbc\x10\xf9\x90\x8dt\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94B~\xc6h\xac\x94\x04\xe8\x95\u0306\x15\x11\xd1b\nI\x12\xbe\x98\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4B\x80\xa5\x8f\x8b\xb1\v\x94@\u0794\xf4+OY! 
\x82\x01\x91\x89lk\x93[\x8b\xbd@\x00\x00\u07d4B\x8a\x1e\xe0\xed3\x1dyR\u033e\x1cyt\xb2\x85+\u0453\x8a\x89w\xb7JN\x8d\xe5e\x00\x00\u0794B\x9c\x06\xb4\x87\xe8Tj\xbd\xfc\x95\x8a%\xa3\xf0\xfb\xa5?o\x00\x88\xbbdJ\xf5B\x19\x80\x00\xe0\x94B\xa9\x8b\xf1`'\xceX\x9cN\xd2\xc9X1\xe2rB\x05\x06N\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4B\xc6\xed\xc5\x15\xd3UW\x80\x8d\x13\xcdD\xdc\xc4@\v%\x04\xe4\x89\n\xba\x14\u015b\xa72\x00\x00\u07d4B\xce\xcf\u0492\x10y\xc2\xd7\xdf?\b\xb0z\xa3\xbe\xee^!\x9a\x8965\u026d\xc5\u07a0\x00\x00\u07d4B\u04669\x9b0\x16\xa8Y\u007f\x8bd\t'\xb8\xaf\xbc\xe4\xb2\x15\x89\xa1\x8b\xce\xc3H\x88\x10\x00\x00\u07d4B\xd3I@\xed\xd2\xe7\x00]F\xe2\x18\x8eL\xfe\u0383\x11\xd7M\x89\b\x90\xb0\xc2\xe1O\xb8\x00\x00\u07d4B\u04e5\xa9\x01\xf2\xf6\xbd\x93V\xf1\x12\xa7\x01\x80\xe5\xa1U\v`\x892$\xf4'#\xd4T\x00\x00\u07d4B\u05b2c\xd9\xe9\xf4\x11lA\x14$\xfc\x99Ux;\xa1\xc5\x1b\x81\x0f\xc4g\u057aM\xeaB\xf7\xa9\x88^i\x8a\bxg\x83&\xea\xc9\x00\x00\x00\xe0\x94C>\xb9J3\x90\x86\xed\x12\u067d\xe9\xcd\x1dE\x86\x03\xc9}\u058a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4CI\"Zb\xf7\n\xeaH\n\x02\x99\x15\xa0\x1eSy\xe6O\xa5\x89\x8c\xd6~#4\xc0\xd8\x00\x00\u07d4CT\"\x1eb\xdc\t\xe6@d6\x16:\x18^\xf0m\x11J\x81\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94CTC\xb8\x1d\xfd\xb9\xbd\x8cg\x87\xbc%\x18\xe2\xd4~W\xc1_\x8a\x01C\x8d\x93\x97\x88\x1e\xf2\x00\x00\u07d4Ca\u0504o\xaf\xb3w\xb6\xc0\xeeI\xa5\x96\xa7\x8d\xdf5\x16\xa3\x89\xc2\x12z\xf8X\xdap\x00\x00\xe0\x94Cd0\x9a\x9f\xa0p\x95`\x0fy\xed\xc6Q \xcd\xcd#\xdcd\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4Cg\xaeK\f\xe9d\xf4\xa5J\xfdK\\6\x84\x96\xdb\x16\x9e\x9a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Ct\x89(\xe8\xc3\xecD6\xa1\u0412\xfb\xe4:\xc7I\xbe\x12Q\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4Cv{\xf7\xfd*\xf9[r\xe91-\xa9D<\xb1h\x8eCC\x89\x10CV\x1a\x88)0\x00\x00\xe0\x94Cy\x838\x8a\xb5\x9aO\xfc!_\x8e\x82iF\x10)\xc3\xf1\xc1\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4C\x89\x8cI\xa3MP\x9b\xfe\xd4\xf7`A\xee\x91\xca\xf3\xaaj\xa5\x89\x10CV\x1a\x88)0\x00\x00\u07d4C\x8c/T\xff\x8eb\x9b\xab6\xb1D+v\v\x12\xa8\x8f\x02\xae\x89lk\x93[\x8b\xbd@\x00\x00\u07d4C\x98b\x8e\xa6c-9>\x92\x9c\xbd\x92\x84d\xc5h\xaaJ\f\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4C\x9d//Q\x10\xa4\u054b\x17W\x93P\x15@\x87@\xfe\xc7\xf8\x89\u03e5\xc5\x15\x0fL\x88\x80\x00\u07d4C\x9d\xee?vy\xff\x100s?\x93@\xc0\x96hkI9\v\x89lk\x93[\x8b\xbd@\x00\x00\u07d4C\xb0y\xba\xf0ry\x99\xe6k\xf7C\u057c\xbfwl;\t\"\x89lk\x93[\x8b\xbd@\x00\x00\u07d4C\xbc-M\xdc\xd6X;\xe2\u01fc\tK(\xfbr\xe6+\xa8;\x89lk\x93[\x8b\xbd@\x00\x00\u07d4C\xc7\xeb\u0173\xe7\xaf\x16\xf4}\xc5az\xb1\x0e\x0f9\xb4\xaf\xbb\x89g\x8a\x93 
b\xe4\x18\x00\x00\u07d4C\u02d6R\x81\x8coMg\x96\xb0\xe8\x94\t0ly\xdbcI\x89lk\x93[\x8b\xbd@\x00\x00\u07d4C\xcc\b\xd0s*\xa5\x8a\xde\xf7a\x9b\xedFU\x8a\xd7wAs\x89\xf0\xe7\u0730\x12*\x8f\x00\x00\xe0\x94C\u0567\x1c\xe8\xb8\xf8\xae\x02\xb2\xea\xf8\xea\xf2\xca(@\xb9?\xb6\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u0794C\xdb\u007f\xf9Z\bm(\ubff8/\xb8\xfb_#\n^\xbc\u0348\xdfn\xb0\xb2\xd3\xca\x00\x00\u07d4C\xe7\xec\x84cX\xd7\xd0\xf97\xad\x1c5\v\xa0i\u05ffr\xbf\x89\x06p\xaeb\x92\x14h\x00\x00\u07d4C\xf1o\x1eu\xc3\xc0j\x94x\xe8\u0157\xa4\n<\xb0\xbf\x04\u0309\x9d\xf7\u07e8\xf7`H\x00\x00\u07d4C\xf4p\xede\x9e)\x91\xc3u\x95~]\xde\u017d\x1d8\"1\x89\x05k\xc7^-c\x10\x00\x00\u07d4C\xf7\xe8n8\x1e\xc5\x1e\u0110m\x14v\u02e9z=\xb5\x84\xe4\x8965\u026d\xc5\u07a0\x00\x00\u07d4C\xff8t>\xd0\xcdC0\x8c\x06e\t\u030e~r\xc8b\xaa\x89i*\xe8\x89p\x81\xd0\x00\x00\xe0\x94C\xff\x88S\xe9\x8e\xd8@k\x95\x00\n\u0684\x83b\u05a09*\x8a\x04\xae\v\x1cM.\x84\xd0\x00\x00\u07d4D\t\x88f\xa6\x9bh\xc0\xb6\xbc\x16\x82)\xb9`5\x87\x05\x89g\x89\n1\x06+\xee\xedp\x00\x00\u07d4D\x19\xaca\x8d]\xea|\xdc`w o\xb0}\xbd\xd7\x1c\x17\x02\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4D\x1aR\x00\x16a\xfa\xc7\x18\xb2\u05f3Q\xb7\xc6\xfbR\x1az\xfd\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94D\x1a\u0282c\x13$\xac\xbf\xa2F\x8b\xda2[\xbdxG{\xbf\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4D\x1f7\xe8\xa0)\xfd\x02H/(\x9cI\xb5\xd0m\x00\xe4\b\xa4\x89\x12\x11\xec\xb5m\x13H\x80\x00\u07d4D \xaa5F[\xe6\x17\xad$\x98\xf3p\xde\n<\xc4\xd20\xaf\x89lk\x93[\x8b\xbd@\x00\x00\u07d4D#/\xf6m\xda\xd1\xfd\x84\x12f8\x006\xaf\xd7\xcf}\u007fB\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4D%\rGn\x06$\x84\xe9\b\n9g\xbf:Js*\xd7?\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4D)\xa2\x9f\xee\x19\x84Pg,\f\x1d\a1b%\v\xecdt\x896*\xaf\x82\x02\xf2P\x00\x00\u07d4D5RS\xb2wH\xe3\xf3O\xe9\xca\xe1\xfbq\x8c\x8f$\x95)\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4D8\xe8\x80\xcb'f\xb0\xc1\u03ae\xc9\xd2A\x8f\u03b9R\xa0D\x89\a?\xa0s\x90?\b\x00\x00\u07d4DL\xafy\xb7\x138\ue6a7\xc73\xb0*\u02a7\xdc\x02YH\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4D\\\xb8\xde^=\xf5 \xb4\x99\xef\u0240\xf5+\xff@\xf5\\v\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94Dj\x809\xce\u03dd\xceHy\xcb\xca\xf3I;\xf5E\xa8\x86\x10\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4Dt)\x9d\x0e\xe0\x90\u0710x\x9a\x14\x86H\x9c=\rd^m\x8965\u026d\xc5\u07a0\x00\x00\u07d4D\x8b\xf4\x10\xad\x9b\xbc/\xec\xc4P\x8d\x87\xa7\xfc.K\x85a\xad\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4D\x90\x1e\r\x0e\b\xac=^\x95\xb8\xec\x9d^\x0f\xf5\xf1.\x03\x93\x89\x16\xa1\xf9\xf5\xfd}\x96\x00\x00\xe0\x94D\x93\x12<\x02\x1e\xce;3\xb1\xa4R\xc9&\x8d\xe1@\a\xf9\u04ca\x01je\x02\xf1Z\x1eT\x00\x00\xe0\x94D\x9a\xc4\xfb\xe3\x83\xe3g8\x85^6JW\xf4q\xb2\xbf\xa11\x8a)\xb7d2\xb9DQ \x00\x00\u07d4D\xa0\x1f\xb0J\xc0\xdb,\xce]\xbe(\x1e\x1cF\xe2\x8b9\xd8x\x89lj\xccg\u05f1\xd4\x00\x00\u07d4D\xa6=\x18BE\x87\xb9\xb3\a\xbf\xc3\xc3d\xae\x10\xcd\x04\xc7\x13\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94D\xa8\x98\x9e20\x81!\xf7$f\x97\x8d\xb3\x95\xd1\xf7l:K\x8a\x01\x88P)\x9fB\xb0j\x00\x00\u07d4D\xc1\x11\v\x18\x87\x0e\xc8\x11x\xd9=!X8\xc5Q\u050ed\x89\n\xd6\xf9\x85\x93\xbd\x8f\x00\x00\u07d4D\xc1Ge\x12|\xde\x11\xfa\xb4l],\xf4\u0532\x89\x00#\xfd\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94D\xc5N\xaa\x8a\xc9@\xf9\xe8\x0f\x1et\xe8/\xc1O\x16v\x85j\x8a\x01\xab,\xf7\xc9\xf8~ \x00\x00\u07d4D\xcdwSZ\x89?\xa7\xc4\xd5\xeb:$\x0ey\u0419\xa7--\x89,s\xc97t,P\x00\x00\u07d4D\u07faP\xb8)\xbe\xcc_O\x14\u0470J\xab3 
\xa2\x95\xe5\x8965\u026d\xc5\u07a0\x00\x00\u07d4D\xe2\xfd\xc6y\xe6\xbe\xe0\x1e\x93\xefJ:\xb1\xbc\xce\x01*\xbc|\x89\x16=\x19I\x00\xc5E\x80\x00\xe0\x94D\xf6/*\xaa\xbc)\xad:k\x04\xe1\xffo\x9c\xe4R\xd1\xc1@\x8a\x03\x99\x92d\x8a#\u0220\x00\x00\u07d4D\xff\xf3{\xe0\x1a8\x88\u04f8\xb8\u1200\xa7\xdd\xef\xee\xea\u04c9\x0e\f[\xfc}\xae\x9a\x80\x00\u07d4E\x06\xfe\x19\xfaK\x00k\xaa9\x84R\x9d\x85\x16\xdb++P\xab\x89lk\x93[\x8b\xbd@\x00\x00\u07d4E\x1b6\x99G[\xed]y\x05\xf8\x90Z\xa3Eo\x1e\u05c8\xfc\x89\x8a\xc7#\x04\x89\xe8\x00\x00\x00\u0794E\x1bpp%\x9b\u06e2q\x00\xe3n#B\x8aS\xdf\xe3\x04\u9239\x8b\xc8)\xa6\xf9\x00\x00\u07d4E'+\x8fb\xe9\xf9\xfa\x8c\xe0D \u1ba3\xeb\xa9hn\xac\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94E+d\u06ce\xf7\xd6\u07c7\u01c8c\x9c\"\x90\xbe\x84\x82\xd5u\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4E>5\x9a3\x97\x94LZ'Z\xb1\xa2\xf7\n^Z?i\x89\x89\r\x02\xabHl\xed\xc0\x00\x00\u07d4EI\xb1Yy%_~e\xe9\x9b\rV\x04\u06d8\xdf\xca\u023f\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4EKa\xb3D\xc0\xef\x96Qy#\x81U\xf2w\u00c2\x9d\v8\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94EO\x01A\xd7!\xd3<\xbd\xc4\x10\x18\xbd\x01\x11\x9a\xa4xH\x18\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4ES3\x90\xe3@\xfe\r\xe3\xb3\xcf_\xb9\xfc\x8e\xa5R\xe2\x9eb\x89O%\x91\xf8\x96\xa6P\x00\x00\u07d4ES\x96\xa4\xbb\u067a\u8bdf\xb7\xc4\xd6MG\x1d\xb9\xc2E\x05\x89\b\xbaR\xe6\xfcE\xe4\x00\x00\u07d4E[\x92\x96\x92\x1at\xd1\xfcAa\u007fC\xb80>o>\xd7l\x89\u3bb5sr@\xa0\x00\x00\u07d4E\\\xb8\xee9\xff\xbcu#1\xe5\xae\xfcX\x8e\xf0\xeeY4T\x8965F:x\r\xef\x80\x00\u07d4Ej\u0b24\x8e\xbc\xfa\xe1f\x06\x02PR_c\x96^v\x0f\x89\x10CV\x1a\x88)0\x00\x00\u07d4Eo\x8dtf\x82\xb2$g\x93I\x06M\x1b6\x8c|\x05\xb1v\x89\u0213\u041c\x8fQP\x00\x00\u07d4Ep)\xc4i\xc4T\x8d\x16\x8c\xec>e\x87.D(\xd4+g\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Eq\xdeg+\x99\x04\xba\xd8t6\x92\xc2\x1cO\xdc\xeaL.\x01\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4Ex\x1b\xbew\x14\xa1\xc8\xf7;\x1cty!\xdfO\x84'\x8bp\x89lk\x93[\x8b\xbd@\x00\x00\u07d4E{\xce\xf3}\xd3\xd6\v-\xd0\x19\xe3\xfea\xd4k?\x1erR\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94E\x8e<\u025e\x94xD\xa1\x8ejB\x91\x8f\xef~\u007f_^\xb3\x8a\a\xb5?y\xe8\x88\xda\xc0\x00\x00\u07d4E\x93\x93\xd6:\x06>\xf3r\x1e\x16\xbd\x9f\xdeE\ue77dw\xfb\x89j\xba\u05a3\xc1S\x05\x00\x00\u07d4E\xa5p\xdc\xc2\t\f\x86\xa6\xb3\xea)\xa6\bc\xdd\xe4\x1f\x13\xb5\x89\f\x9a\x95\xee)\x86R\x00\x00\u07d4E\xa8 \xa0g/\x17\xdct\xa0\x81\x12\xbcd?\xd1\x16w6\u00c9\n\xd6\xc4;(\x15\xed\x80\x00\u07d4E\xb4q\x05\xfeB\xc4q-\xcen*!\xc0[\xff\xd5\xeaG\xa9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4E\xbb\x82\x96R\u063f\xb5\x8b\x85'\xf0\xec\xb6!\u009e!.\u00c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94E\xc0\u045f\v\x8e\x05O\x9e\x8986\xd5\xec\xaey\x01\xaf(\x12\x8a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4E\xc4\xec\xb4\xee\x89\x1e\xa9\x84\xa7\xc5\xce\xfd\x8d\xfb\x001\v(P\x89kV\x05\x15\x82\xa9p\x00\x00\u07d4E\u028d\x95f\b\xf9\xe0\n/\x99t\x02\x86@\x88\x84ef\x8f\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94E\u0298b\x00;N@\xa3\x17\x1f\xb5\xca\xfa\x90(\xca\xc8\xde\x19\x8a\x02\ub3b1\xa1r\u0738\x00\x00\u07d4E\xd1\xc9\xee\xdf|\xabA\xa7y\x05{y9_T(\xd8\x05(\x89lk\x93[\x8b\xbd@\x00\x00\u07d4E\u0535M7\xa8\xcfY\x98!#_\x06/\xa9\xd1p\xed\u8909\x11\x90g;_\u0690\x00\x00\xe0\x94E\xdb\x03\xbc\xcf\u05a5\xf4\xd0&k\x82\xa2*6\x87\x92\xc7}\x83\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4E\xe3\xa9>r\x14J\u0686\f\xbcV\xff\x85\x14Z\xda8\xc6\u0689WG=\x05\u06ba\xe8\x00\x00\u07d4E\u6378\u06fa\xba_\xc2\xcb3|b\xbc\xd0\xd6\x1b\x05\x91\x89\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94E\u6379L}\n\xb7\xacA\x85zq\xd6qG\x87\x0fNq\x8aT\xb4\v\x1f\x85+\xda\x00\x00\x00\u07d4E\xf4\xfc`\xf0\x8e\xac\xa1\x05\x98\xf03c)\x80\x1e<\x92\xcbF\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4F\rSU\xb2\xce\xebnb\x10}\x81\xe5\x12p\xb2k\xf4V \x89l\xb7\xe7Hg\xd5\xe6\x00\x00\xe0\x94F\"O2\xf4\xec\xe5\u0206p\x90\xd4@\x9dU\xe5\v\x18C-\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4F'\xc6\x06\x84&q\xab\u0782\x95\xee]\xd9L\u007fT\x954\xf4\x89\x0f\x89_\xbd\x872\xf4\x00\x00\u07d4F+g\x8bQ\xb5\x84\xf3\xedz\xda\a\v\\\u065c\v\xf7\xb8\u007f\x89\x05k\xc7^-c\x10\x00\x00\u07d4FM\x9c\x89\xcc\xe4\x84\xdf\x00\x02w\x19\x8e\xd8\a_\xa65r\u0449\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4FPNj!Z\xc8;\xcc\xf9V\xbe\xfc\x82\xabZg\x93q\u0209\x1c!(\x05\u00b4\xa5\x00\x00\xe0\x94FQ\xdcB\x0e\b\xc3);'\xd2Ix\x90\xebP\":\xe2\xf4\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4FS\x1e\x8b\x1b\xde\t\u007f\u07c4\x9dm\x11\x98\x85`\x8a\x00\x8d\xf7\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4Fb\x92\xf0\xe8\rC\xa7\x87t'u\x90\xa9\xebE\x96\x12\x14\xf4\x894\x95tD\xb8@\xe8\x00\x00\xe0\x94Fb\xa1v^\xe9!\x84-\u0708\x89\x8d\x1d\xc8bu\x97\xbd~\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4Fe\xe4s\x96\xc7\u06d7\xeb*\x03\xd9\bc\xd5\u053a1\x9a\x94\x89 \x86\xac5\x10R`\x00\x00\u07d4Fo\xdak\x9bX\xc5S'P0j\x10\xa2\xa8\xc7h\x10;\a\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4Fq$\xae\u007fE/&\xb3\xd5t\xf6\b\x88\x94\xfa]\x1c\xfb;\x89\x92^\x06\xee\xc9r\xb0\x00\x00\u0794Fr*6\xa0\x1e\x84\x1d\x03\xf7\x80\x93^\x91}\x85\u0566z\xbd\x88\xce\xc7o\x0eqR\x00\x00\u07d4Fw\x9aVV\xff\x00\xd7>\xac:\xd0\u00cbl\x850\x94\xfb@\x89\f\x82S\xc9lj\xf0\x00\x00\u07d4Fw\xb0N\x03C\xa3!1\xfdj\xbb9\xb1\xb6\x15k\xba=[\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4F}Y\x88$\x9ahaG\x16e\x98@\xed\n\xe6\xf6\xf4W\xbc\x89\x15\x01\xa4\x8c\xef\xdf\xde\x00\x00\u07d4F~\x0e\xd5O;v\xae\x066\x17n\aB\b\x15\xa0!sn\x89lk\x93[\x8b\xbd@\x00\x00\u07d4F~\xa1\x04E\x82~\xf1\xe5\x02\xda\xf7k\x92\x8a \x9e\r@2\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94F\u007f\xbfAD\x16\x00u\u007f\xe1X0\xc8\xcd_O\xfb\xbb\xd5`\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94F\x93Xp\x932\xc8+\x88~ \xbc\xdd\xd0\"\x0f\x8e\u06e7\u040a\x03\xa9\u057a\xa4\xab\xf1\xd0\x00\x00\u07d4F\x97\xba\xaf\x9c\xcb`?\xd3\x040h\x9dCTE\xe9\u024b\xf5\x89\n\xd2\x01\xa6yO\xf8\x00\x00\u07d4F\xa3\v\x8a\x80\x891!tE\xc3\xf5\xa9>\x88,\x03E\xb4&\x89\r\x8d\xb5\xeb\u05f2c\x80\x00\u07d4F\xa40\xa2\u0528\x94\xa0\u062a?\xea\xc6\x156\x14\x15\xc3\xf8\x1f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4F\xaaP\x18pg~\u007f\nPHv\xb4\xe8\x80\x1a\n\xd0\x1cF\x89+^:\xf1k\x18\x80\x00\x00\u07d4F\xbf\u0172\a\xeb 
\x13\xe2\xe6\x0fw_\xec\xd7\x18\x10\u0159\f\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4F\xc1\xaa\"D\xb9\u0229W\u028f\xacC\x1b\x05\x95\xa3\xb8h$\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4F\xd8\x061(B\x03\xf6(\x8e\xcdNWX\xbb\x9dA\xd0]\xbe\x89lk\x93[\x8b\xbd@\x00\x00\u07d4G\n\xc5\xd1\xf3\xef\xe2\x8f8\x02\xaf\x92[W\x1ec\x86\x8b9}\x89lk\x93[\x8b\xbd@\x00\x00\u07d4G\x10\x10\xdaI/@\x18\x83;\b\x8d\x98r\x90\x1e\x06\x12\x91t\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4G\x12T\x02e\xcb\xee\u00c4p\"\u015f\x1b1\x8dC@\n\x9e\x89\xbd\xbcA\xe04\x8b0\x00\x00\xe0\x94G\x14\u03e4\xf4k\u05bdps}u\x87\x81\x97\xe0\x8f\x88\xe61\x8a\x02\u007f>\u07f3Nn@\x00\x00\u07d4G H\xcc`\x9a\xeb$!e\uaa87\x05\x85\f\xf3\x12]\xe0\x8965\u026d\xc5\u07a0\x00\x00\u07d4G!\x92)\xe8\xcdVe\x9ae\u00a9C\xe2\u075a\x8fK\xfd\x89\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d4G7\xd0B\xdcj\xe7>\xc7:\xe2Qz\u03a2\xfd\xd9d\x87\u014965\u026d\xc5\u07a0\x00\x00\u07d4GAX\xa1\xa9\xdci<\x13?e\xe4{\\:\xe2\xf7s\xa8o\x89\n\xdaUGK\x814\x00\x00\u07d4GE\xab\x18\x1a6\xaa\x8c\xbf\"\x89\xd0\xc4Qe\xbc~\xbe#\x81\x89\x02\"\xc8\xeb?\xf6d\x00\x00\u07d4GPf\xf9\xad&eQ\x96\xd5SS'\xbb\xeb\x9by)\xcb\x04\x89\xa4\xccy\x95c\u00c0\x00\x00\xe0\x94GR!\x8eT\xdeB?\x86\xc0P\x193\x91z\xea\b\xc8\xfe\u054a\x04<3\xc1\x93ud\x80\x00\x00\u07d4GZa\x93W-JNY\u05fe\t\u02d6\r\u074cS\x0e/\x89$,\xf7\x8c\xdf\a\xff\x80\x00\u07d4Gd\x8b\xed\x01\xf3\xcd2I\bNc]\x14\u06a9\xe7\xec<\x8a\x89\n\x84Jt$\xd9\xc8\x00\x00\u07d4Gh\x84\x10\xff%\xd6T\xd7.\xb2\xbc\x06\xe4\xad$\xf83\xb0\x94\x89\b\xb2\x8da\xf3\u04ec\x00\x00\u07d4GkU\x99\b\x9a?\xb6\xf2\x9clr\xe4\x9b.G@\ua00d\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4Gs\x0f_\x8e\xbf\x89\xacr\xef\x80\xe4l\x12\x19P8\xec\xdcI\x89\xabM\xcf9\x9a:`\x00\x00\xe0\x94G{$\xee\u80deO\u045d\x12P\xbd\vfEyJa\u028a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4G\x81\xa1\nM\xf5\uef02\xf4\xcf\xe1\a\xba\x1d\x8av@\xbdf\x89a\t=|,m8\x00\x00\u07d4G\x88Z\xba\xbe\xdfM\x92\x8e\x1c\x88\x83\xa6a\x9cl(\x11\x84\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94G\xe2]\xf8\x82%8\xa8Yk(\xc67\x89kM\x14<5\x1d\x8a\x11\v\xe9\xeb$\xb8\x81P\x00\x00\u07d4G\xf4ik\xd4b\xb2\r\xa0\x9f\xb8>\xd2\x03\x98\x18\xd7v%\xb3\x89\b\x13\xcaV\x90m4\x00\x00\u07d4G\xfe\xf5\x85\x84FRH\xa0\x81\r`F>\xe9>Zn\xe8\u04c9\x0fX\xcd>\x12i\x16\x00\x00\u07d4G\xffo\xebC! 
`\xbb\x15\x03\u05e3\x97\xfc\b\xf4\xe7\x03R\x89lk\x93[\x8b\xbd@\x00\x00\u07d4G\xff\xf4,g\x85Q\xd1A\xebu\xa6\xee9\x81\x17\xdf>J\x8d\x89\x05k\xea\xe5\x1f\xd2\xd1\x00\x00\u07d4H\x01\x0e\xf3\xb8\xe9^?0\x8f0\xa8\xcb\u007fN\xb4\xbf`\xd9e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4H\n\xf5 v\x00\x9c\xa77\x81\xb7\x0eC\xb9Y\x16\xa6\"\x03\xab\x892\x19r\xf4\b=\x87\x80\x00\u07d4H\x0f1\xb9\x891\x1eA$\u01a7F_ZD\tM6\xf9\u04097\x90\xbb\x85Q7d\x00\x00\xe0\x94H\x11\x15)j\xb7\xdbRI/\xf7\xb6G\xd63)\xfb\\\xbck\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d4H\x1e:\x91\xbf\xdc/\x1c\x84(\xa0\x11\x9d\x03\xa4\x16\x01A~\x1c\x8965\u026d\xc5\u07a0\x00\x00\u07d4H(\xe4\xcb\xe3N\x15\x10\xaf\xb7,+\ueb0aE\x13\xea\xeb\u0649\u0556{\xe4\xfc?\x10\x00\x00\xe0\x94H)\x82\xac\x1f\x1cm\x17!\xfe\xec\u0679\xc9l\xd9I\x80PU\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4H0,1\x1e\xf8\xe5\xdcfAX\xddX<\x81\x19Mn\rX\x89\xb6gl\xe0\xbc\xcb\\\x00\x00\u07d4H;\xa9\x904\xe9\x00\xe3\xae\xdfaI\x9d;+\xce9\xbe\xb7\xaa\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4HT\x8bK\xa6+\xcb/\r4\xa8\x8d\u019ah\x0eS\x9c\xf0F\x89\x05l\xf1\u02fbt2\x00\x00\u07d4Hc\x84\x979&Zc\xb0\xa2\xbf#jY\x13\xe6\xf9Y\xce\x15\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d4He\x9d\x8f\x8c\x9a/\xd4Oh\u06a5]#\xa6\b\xfb\xe5\x00\u0709lk\x93[\x8b\xbd@\x00\x00\xe0\x94Hf\x9e\xb5\xa8\x01\u0637_\xb6\xaaX\xc3E\x1bpX\xc2C\xbf\x8a\x06\x8dB\xc18\u06b9\xf0\x00\x00\u07d4Hjl\x85\x83\xa8D\x84\xe3\xdfC\xa1#\x83\u007f\x8c~#\x17\u0409\x11\x87\xc5q\xab\x80E\x00\x00\u07d4Hz\xdf}p\xa6t\x0f\x8dQ\xcb\xddh\xbb?\x91\u0125\xceh\x89\x03\x9f\xba\xe8\xd0B\xdd\x00\x00\u07d4H~\x10\x85\x02\xb0\xb1\x89\uf70cm\xa4\xd0\xdbba\xee\xc6\xc0\x89g\x8a\x93 b\xe4\x18\x00\x00\xe0\x94H\x88\xfb%\xcdP\u06f9\xe0H\xf4\x1c\xa4}x\xb7\x8a'\xc7\u064a\x03\xa9\u057a\xa4\xab\xf1\xd0\x00\x00\u0794H\x934\u00b6\x95\xc8\xee\a\x94\xbd\x86B\x17\xfb\x9f\xd8\xf8\xb15\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4H\xa3\r\xe1\xc9\x19\xd3\xfd1\x80\xe9}_+*\x9d\xbd\x96M-\x89\x02b\x9ff\xe0\xc50\x00\x00\u07d4H\xbf\x14\u05f1\xfc\x84\xeb\xf3\xc9k\xe1/{\xce\x01\xaai\xb0>\x89\x06\x81U\xa46v\xe0\x00\x00\u07d4H\xc2\ue465\aV\xd8\u039a\xbe\xebu\x89\xd2,o\xee]\xfb\x89\xae\x8ez\v\xb5u\xd0\x00\x00\u07d4H\xc5\u0197\v\x91a\xbb\x1c{z\xdf\xed\x9c\xde\u078a\x1b\xa8d\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94H\xd2CKz}\xbb\xff\b\";c\x87\xb0]\xa2\xe5\t1&\x8a\x03\xcf\xc8.7\xe9\xa7@\x00\x00\u07d4H\xd4\xf2F\x8f\x96?\u05da\x00a\x98\xbbg\x89]-Z\xa4\u04c9K\xe4\xe7&{j\xe0\x00\x00\u07d4H\xe0\xcb\xd6\u007f\x18\xac\xdbzb\x91\xe1%M\xb3.\trs\u007f\x89\x05k\xe0<\xa3\xe4}\x80\x00\u07d4H\xf6\n5HO\xe7y+\u030a{c\x93\xd0\u0761\xf6\xb7\x17\x89\xc3(\t>a\xee@\x00\x00\u07d4H\xf8\x83\xe5g\xb46\xa2{\xb5\xa3\x12M\xbc\x84\xde\xc7u\xa8\x00\x89)\xd7n\x86\x9d\u0340\x00\x00\xe0\x94I\x01E\xaf\xa8\xb5E\"\xbb!\xf3R\xf0m\xa5\xa7\x88\xfa\x8f\x1d\x8a\x01\xf4lb\x90\x1a\x03\xfb\x00\x00\u07d4I\t\xb3\x19\x98\xea\xd4\x14\xb8\xfb\x0e\x84k\xd5\xcb\xde995\xbe\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4I\x12\xd9\x02\x93\x16v\xff9\xfc4\xfe<<\xc8\xfb!\x82\xfaz\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4I\x13o\xe6\xe2\x8btS\xfc\xb1kk\xbb\u9aac\xba\x837\xfd\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94I\x15a\u06cbo\xaf\xb9\x00~b\xd0P\u0082\xe9,Kk\u020a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4I\x18]\xd7\xc262\xf4lu\x94s\ubb96`\b\xcd5\x98\x89\r\xc5_\xdb\x17d{\x00\x00\u07d4I,\xb5\xf8a\xb1\x87\xf9\xdf!\xcdD\x85\xbe\xd9\vP\xff\xe2-\x89\x1b\x19\xe5\vD\x97|\x00\x00\u07d4I-\xe4j\xaf\x8f\x1dp\x8dY\u05da\xf1\xd0:\xd2\xcb`\x90/\x89lk\x93[\x8b\xbd@\x00\x00\u07d4I.p\xf0M\x18@\x8c\xb4\x1e%`70Pk5\xa2\x87k\x89\x02\"\xc8\xeb?\xf6d\x00\x00\u07d4I:g\xfe#\xde\xccc\xb1\r\xdau\xf3(v\x95\xa8\x1b\u056b\x89/\xb4t\t\x8fg\xc0\
x00\x00\u07d4I=H\xbd\xa0\x15\xa9\xbf\xcf\x16\x03\x93n\xabh\x02L\xe5Q\xe0\x89\x018\xa3\x88\xa4<\x00\x00\x00\xe0\x94IBV\xe9\x9b\x0f\x9c\xd6\xe5\xeb\xca8\x99\x862R\x90\x01e\u020a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4IM\xecM^\xe8\x8a'q\xa8\x15\xf1\xeerd\x94/\xb5\x8b(\x89lk\x93[\x8b\xbd@\x00\x00\u07d4I[d\x1b\x1c\u07a3b\u00f4\u02fd\x0f\\\xc5\v\x1e\x17k\x9c\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94Ih\xa2\xce\xdbEuU\xa19)Z\xea(wnT\x00<\x87\x8a\x02#\x1a\xef\u0266b\x8f\x00\x00\u07d4Im6U4S\n_\xc1W|\nRA\u02c8\xc4\xdapr\x89a\t=|,m8\x00\x00\xe0\x94In1\x95\x92\xb3A\xea\xcc\xd7x\u0767\xc8\x19mT\xca\xc7u\x8a\x01\xf5q\x89\x87fKH\x00\x00\u07d4IoXC\xf6\xd2L\u064d%^L#\xd1\xe1\xf0#\"uE\x89_\x17\x9f\u0526\xee\t\x80\x00\xe0\x94Ip\u04ec\xf7+[\x1f2\xa7\x00<\xf1\x02\xc6N\xe0TyA\x8a\x1d\xa5jK\b5\xbf\x80\x00\x00\u07d4Iw\xa7\x93\x9d\t9h\x94U\xce&9\xd0\xeeZL\xd9\x10\xed\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4Iy\x19N\xc9\xe9}\xb9\xbe\xe84;|w\xd9\xd7\xf3\xf1\u071f\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4Iy4c\xe1h\x10\x83\u05ab\xd6\xe7%\u057b\xa7E\xdc\xcd\xe8\x89\x1d\x98\xe9LNG\x1f\x00\x00\u07d4I\x81\xc5\xfff\xccN\x96\x80%\x1f\xc4\xcd/\xf9\a\xcb2xe\x89(\xa8WBTf\xf8\x00\x00\u07d4I\x89\u007f\xe92\xbb\xb3\x15L\x95\u04fc\xe6\xd9;ms)\x04\u0749\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4I\x89\xe1\xab^|\xd0\aF\xb3\x93\x8e\xf0\xf0\xd0d\xa2\x02[\xa5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4I\x8a\xbd\xeb\x14\xc2k{r4\xd7\x0f\u03ae\xf3a\xa7m\xffr\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4I\xa6E\xe0f}\xfd{2\xd0u\xcc$g\u074ch\t\a\u0109\a\x06\x01\x95\x8f\u02dc\x00\x00\xe0\x94I\xb7N\x16\x92e\xf0\x1a\x89\xecL\x90r\u0164\xcdr\xe4\xe85\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d4I\xbd\xbc{\xa5\xab\xeb\xb68\x9e\x91\xa3(R \xd3E\x1b\xd2S\x8965\u026d\xc5\u07a0\x00\x00\u07d4I\xc9A\xe0\xe5\x01\x87&\xb7)\x0f\xc4s\xb4q\xd4\x1d\xae\x80\u0449\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94I\xc9w\x1f\xca\x19\u0579\xd2E\u0211\xf8\x15\x8f\xe4\x9fG\xa0b\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4I\xcf\x1eT\xbe61\x06\xb9 r\x9d-\v\xa4o\bg\x98\x9a\x89\x0e\x87?D\x13<\xb0\x00\x00\u07d4I\xd2\u008e\xe9\xbcT^\xaa\xf7\xfd\x14\xc2|@s\xb4\xbb_\x1a\x89O\xe9\xb8\x06\xb4\r\xaf\x00\x00\u07d4I\xdd\xee\x90.\x1d\f\x99\u0471\x1a\xf3\u030a\x96\xf7\x8eM\xcf\x1a\x89\n\u03a5\xe4\xc1\x8cS\x00\x00\u07d4I\xf0(9[Z\x86\xc9\xe0\u007fwxc\x0eL.=7:w\x89\x06\xa7JP8\u06d1\x80\x00\xe0\x94J\x19 5\xe2a\x9b$\xb0p\x9dVY\x0e\x91\x83\xcc\xf2\xc1\u064a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4J@S\xb3\x1d\x0e\xe5\u06ef\xb1\xd0k\u05ec\u007f\xf3\",G\u0589K\xe4\xe7&{j\xe0\x00\x00\u07d4JC\x01p\x15-\xe5\x17&3\u0742b\xd1\a\xa0\xaf\xd9j\x0f\x89\xabM\xcf9\x9a:`\x00\x00\u07d4JG\xfc>\x17\u007fVz\x1e8\x93\xe0\x00\xe3k\xba#R\n\xb8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4JR\xba\xd2\x03W\"\x8f\xaa\x1e\x99k\xedy\f\x93gK\xa7\u0409Hz\x9a0E9D\x00\x00\u07d4JS\xdc\xdbV\xceL\xdc\xe9\xf8.\xc0\xeb\x13\xd6sR\xe7\u020b\x89\u3bb5sr@\xa0\x00\x00\u07d4J_\xae;\x03r\xc20\xc1%\xd6\xd4p\x14\x037\xab\x91VV\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4Jq\x90a\xf5(T\x95\xb3{\x9d~\xf8\xa5\x1b\a\xd6\u6b2c\x89\n\xd4\xc81j\v\f\x00\x00\u07d4Js8\x92\x98\x03\x1b\x88\x16\u0329FB\x1c\x19\x9e\x18\xb3C\u0589\"8h\xb8y\x14o\x00\x00\u07d4Js]\"G\x927m3\x13g\xc0\x93\xd3\x1c\x87\x944\x15\x82\x89f\xff\xcb\xfd^Z0\x00\x00\u07d4Jt\x94\xcc\xe4HU\u0300X(B\xbe\x95\x8a\r\x1c\x00r\ue242\x1a\xb0\xd4AI\x80\x00\x00\u07d4Ju\xc3\xd4\xfao\u033d]\u0567\x03\xc1Sy\xa1\xe7\x83\u9dc9b\xa9\x92\xe5:\n\xf0\x00\x00\xe0\x94J\x81\xab\xe4\x98L|k\xefc\u0598 \xe5WC\xc6\x1f 
\x1c\x8a\x03d\x01\x00N\x9a\xa3G\x00\x00\u07d4J\x82iO\xa2\x9d\x9e!2\x02\xa1\xa2\t(]\xf6\xe7E\xc2\t\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4J\x83\\%\x82LG\xec\xbf\u01d49\xbf?\\4\x81\xaau\u0349K\xe4\xe7&{j\xe0\x00\x00\u07d4J\x91\x802C\x91Y\xbb1[g%\xb6\x83\r\xc86\x97s\x9f\x89\x12\xa3.\xf6x3L\x00\x00\u07d4J\x97\xe8\xfc\xf4c^\xa7\xfc^\x96\xeeQu.\u00c8qk`\x89\x1d\x99E\xab+\x03H\x00\x00\u07d4J\x9a&\xfd\n\x8b\xa1\x0f\x97}\xa4\xf7|1\x90\x8d\xabJ\x80\x16\x89a\t=|,m8\x00\x00\u07d4J\xa1H\xc2\xc34\x01\xe6j+Xnew\u0132\x92\xd3\xf2@\x89\v\xb8`\xb2\x85\xf7t\x00\x00\u07d4J\xa6\x93\xb1\"\xf3\x14H*G\xb1\x1c\xc7|h\xa4\x97\x87ab\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4J\xb2\xd3O\x04\x83O\xbftyd\x9c\xab\x92=,G%\xc5S\x89\xbe\xd1\xd0&=\x9f\x00\x00\x00\u07d4J\xc0vs\xe4/d\xc1\xa2^\xc2\xfa-\x86\xe5\xaa+4\xe09\x89lk\x93[\x8b\xbd@\x00\x00\u07d4J\u016c\xad\x00\v\x88w!L\xb1\xae\x00\xea\u0263}Y\xa0\xfd\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4J\u0250ZL\xb6\xab\x1c\xfdbTn\xe5\x91s\x00\xb8|O\u07897\b\xba\xed=h\x90\x00\x00\u07d4J\u03e9\xd9N\xdaf%\xc9\u07e5\xf9\xf4\xf5\xd1\a\xc4\x03\x1f\u07c9\x02\"\xc8\xeb?\xf6d\x00\x00\u07d4J\xd0G\xfa\xe6~\xf1b\xfeh\xfe\xdb\xc2};e\xca\xf1\f6\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4J\xd9]\x18\x8dddp\x9a\xdd%U\xfbM\x97\xfe\x1e\xbf1\x1f\x89\x12\xc1\xb6\xee\xd0=(\x00\x00\u07d4J\xdb\xf4\xaa\xe0\xe3\xefD\xf7\xddM\x89\x85\u03ef\tn\u010e\x98\x89\b!\xab\rD\x14\x98\x00\x00\u07d4J\xe2\xa0M9\t\xefENTL\xcf\xd6\x14\xbf\xef\xa7\x10\x89\xae\x89\x18\x01\x15\x9d\xf1\xee\xf8\x00\x00\xe0\x94J\xe90\x82\xe4Q\x87\xc2a`\xe6g\x92\xf5\u007f\xad5Q\xc7:\x8a\x04\x96\x15 \xda\xff\x82(\x00\x00\u07d4J\xf0\xdb\a{\xb9\xba^D>!\xe1H\xe5\x9f7\x91\x05\u0152\x89 \x86\xac5\x10R`\x00\x00\u07d4K\x06\x19\xd9\u062a1:\x951\xac}\xbe\x04\xca\rjZ\u0476\x89lk\x93[\x8b\xbd@\x00\x00\u07d4K\v\u062c\xfc\xbcS\xa6\x01\v@\xd4\u040d\xdd-\x9dib-\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4K\x19\xeb\f5K\xc199`\xeb\x06\x06;\x83\x92o\rg\xb2\x89\x01\x92t\xb2Y\xf6T\x00\x00\u07d4K)C|\x97\xb4\xa8D\xbeq\u0323\xb6H\xd4\xca\x0f\u075b\xa4\x89\b$q\x984\u03ec\x00\x00\u07d4K1\xbfA\xab\xc7\\\x9a\xe2\u034f\u007f5\x16;n+tPT\x89\x14\xb5P\xa0\x13\xc78\x00\x00\u07d4K:|\u00e7\u05f0\x0e\xd5(\"!\xa6\x02Y\xf2[\xf6S\x8a\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94K:\xab3^\xbb\xfa\xa8p\xccM`^}.t\xc6h6\x9f\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4K\xcd\xc1\x8a`\x00\x00\u07d4K`\xa3\xe2S\xbf8\xc8\xd5f \x10\xbb\x93\xa4s\xc9e\xc3\xe5\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4Kt\xf5\xe5\x8e.\xdfv\xda\xf7\x01Q\x96J\v\x8f\x1d\xe0f<\x89\x11\x90\xaeID\xba\x12\x00\x00\u07d4Kv!f\xdd\x11\x18\xe8Ci\xf8\x04\xc7_\x9c\xd6W\xbfs\f\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4Ky.)h>\xb5\x86\u353b3Rl`\x01\xb3\x97\x99\x9e\x89 \x86\xac5\x10R`\x00\x00\u07d4K\x90N\x93K\xd0\u030b p_\x87\x9e\x90[\x93\xea\f\xcc0\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94K\x92\x06\xbakT\x9a\x1a\u007f\x96\x9e\x1d]\xba\x86u9\xd1\xfag\x8a\x01\xab,\xf7\xc9\xf8~ \x00\x00\u07d4K\x98N\xf2lWn\x81Z.\xae\xd2\xf5\x17\u007f\a\u06f1\xc4v\x89T\x91YV\xc4\t`\x00\x00\u07d4K\x9e\x06\x8f\xc4h\tv\xe6\x15\x04\x91)\x85\xfd\\\xe9K\xab\r\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4K\xa0\xd9\xe8\x96\x01w+IhG\xa2\xbbC@\x18g\x87\xd2e\x8965\u026d\xc5\u07a0\x00\x00\u07d4K\xa5:\xb5I\xe2\x01m\xfa\"<\x9e\u0563\x8f\xad\x91(\x8d\a\x89K\xe4\xe7&{j\xe0\x00\x00\xe0\x94K\xa8\xe0\x11\u007f\xc0\xb6\xa3\xe5k$\xa3\xa5\x8f\xe6\xce\xf4B\xff\x98\x8a\x011\xbe\xb9%\xff\xd3 
\x00\x00\u07d4K\xac\x84j\xf4\x16\x9f\x1d\x95C\x1b4\x1d\x88\x00\xb2!\x80\xaf\x1a\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4K\xb6\xd8k\x83\x14\xc2-\x8d7\xeaQm\x00\x19\xf1V\xaa\xe1-\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94K\xb9e\\\xfb*6\xea|cz{\x85\x9bJ1T\xe2n\xbe\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\xe0\x94K\xbc\xbf8\xb3\xc9\x01c\xa8K\x1c\u04a9;X\xb2\xa34\x8d\x87\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94K\xd6\xdd\f\xff#@\x0e\x170\xba{\x89E\x04W}\x14\xe7J\x8a+\xa0\xcc\xdd\xd0\xdfs\xb0\x00\x00\u07d4K\xe8b\x8a\x81T\x87N\x04\x8d\x80\xc1B\x18\x10\"\xb1\x80\xbc\xc1\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4K\xe9\rA!)\u0564\xd0BCa\xd6d\x9dNG\xa6#\x16\x897\b\xba\xed=h\x90\x00\x00\xe0\x94K\xea(\x8e\xeaB\u0115^\xb9\xfa\xad*\x9f\xafG\x83\xcb\u076c\x8a\x06\x18\xbe\x16c\u012fI\x00\x00\u07d4K\xf4G\x97\x99\xef\x82\xee\xa2\tC7OV\xa1\xbfT\x00\x1e^\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4K\xf8\xbf\x1d5\xa211Wd\xfc\x80\x01\x80\x9a\x94\x92\x94\xfcI\x89\x03\x9f\xba\xe8\xd0B\xdd\x00\x00\u07d4K\xf8\xe2oL'\x90\xdae3\xa2\xac\x9a\xba\xc3\u019a\x19\x943\x89\n\u05ce\xbcZ\xc6 \x00\x00\u0794L\n\xcaP\x8b<\xaf^\xe0(\xbcp}\xd1\xe8\x00\xb88\xf4S\x88\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x94L\v\x15\x15\xdf\xce\u05e1>\x13\xee\x12\xc0\xf5#\xaePO\x03+\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4L\x13\x98\f2\xdc\xf3\x92\vx\xa4\xa7\x903\x12\x90|\x1b\x12?\x89\x03A\x00\x15\xfa\xae\f\x00\x00\u07d4L\x15y\xaf3\x12\xe4\xf8\x8a\xe9\x95\xcc9W\xd2R\xce\v\xf0\xc8}[O\"4g.p\x89\x87\x86x2n\xac\x90\x00\x00\u07d4LB1y\x82i\x1d\x10\x89\x05k\xc7^-c\x10\x00\x00\u07d4LZ\xfe@\xf1\x8f\xfcH\u04e1\xae\xc4\x1f\u009d\xe1y\xf4\u0497\x89lk\x93[\x8b\xbd@\x00\x00\u07d4L[=\xc0\xe2\xb96\x0f\x91(\x9b\x1f\xe1<\xe1,\x0f\xbd\xa3\xe1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Lfk\x86\xf1\xc5\ue324\x12\x85\xf5\xbd\xe4\xf7\x90R\b\x14\x06\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4Lik\xe9\x9f:i\x04@\xc3CjY\xa7\xd7\xe97\u05ba\r\x89\xbb\x91%T\"c\x90\x00\x00\u07d4Lj$\x8f\xc9}p]\xefI\\\xa2\aY\x16\x9e\xf0\xd3dq\x89)3\x1eeX\xf0\xe0\x00\x00\u07d4Lj\x9d\xc2\u02b1\n\xbb.|\x13p\x06\xf0\x8f\ucd77y\xe1\x89\x1b\r\x04 /G\xec\x00\x00\u07d4Lk\x93\xa3\xbe\xc1cIT\f\xbf\xca\xe9l\x96!\xd6dP\x10\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Lu\x98\x13\xad\x13\x86\xbe\xd2\u007f\xfa\xe9\xe4\x81^60\u0323\x12\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94Lv\f\xd9\xe1\x95\xeeO-k\xce%\x00\xff\x96\xda|C\ue44a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4Lv{e\xfd\x91\x16\x1fO\xbd\xccji\xe2\xf6\xadq\x1b\xb9\x18\x89'\b\x01\xd9F\xc9@\x00\x00\u07d4L~.+w\xad\f\xd6\xf4J\xcb(a\xf0\xfb\x8b(u\x0e\xf9\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4L\x85\xed6/$\xf6\xb9\xf0L\xdf\xcc\xd0\"\xaeSQG\u02f9\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4L\x93[\xb2Pw\x8b0\x9b==\x89\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4L\xee\x90\x1bJ\u0231V\xc5\xe2\xf8\xa6\xf1\xbe\xf5r\xa7\xdc\xeb~\x8965\u026d\xc5\u07a0\x00\x00\u07d4L\xef\xbe#\x98\xe4}R\u73743L\x8bivu\U00053b89\xd9o\u0390\u03eb\xcc\x00\x00\u07d4L\xf5S{\x85\x84/\x89\xcf\xee5\x9e\xaeP\x0f\xc4I\xd2\x11\x8f\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94M\bG\x1dh\x00z\xff*\xe2y\xbc^?\xe4\x15o\xbb\xe3\u078a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4M \x01\x10\x12@\b\xd5ov\x98\x12VB\f\x94jo\xf4\\\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4M$\xb7\xacG\xd2\xf2}\xe9\tt\xba=\xe5\xea\xd2\x03TK\u0349\x05k\xc7^-c\x10\x00\x00\u0794M)\xfcR:,\x16)S!!\u0699\x98\u9d6b\x9d\x1bE\x88\xdbD\xe0I\xbb,\x00\x00\u07d4M8\xd9\x0f\x83\xf4Q\\\x03\xccx2j\x15M5\x8b\u0602\xb7\x89\n\ad\a\xd3\xf7D\x00\x00\u07d4ML\xf5\x80t)a^0\xcd\xfa\xce\x1eZ\xaeM\xad0U\xe6\x89 
\x86\xac5\x10R`\x00\x00\u07d4MW\xe7\x16\x87l\f\x95\xef^\xae\xbd5\xc8\xf4\x1b\x06\x9bk\xfe\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94Mg\U000ab159\xfe\xf5\xfcA9\x99\xaa\x01\xfd\u007f\xcep\xb4=\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4Mn\x8f\xe1\t\xcc\xd2\x15\x8eM\xb1\x14\x13/\xe7_\xec\u023e[\x89\x01[5W\xf1\x93\u007f\x80\x00\xe0\x94Mq\xa6\xeb=\u007f2~\x184'\x8e(\v\x03\x9e\xdd\xd3\x1c/\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4M|\xfa\xa8L\xb31\x06\x80\n\x8c\x80/\xb8\xaaF8\x96\u0159\x89a\t=|,m8\x00\x00\u07d4M\x80\x10\x93\xc1\x9c\xa9\xb8\xf3B\xe3<\xc9\xc7{\xbdL\x83\x12\u03c9\x12\xb3\xe7\xfb\x95\u0364\x80\x00\u07d4M\x82\x88\x94u/o%\x17]\xaf!w\tD\x87\x95Ko\x9f\x89O!+\xc2\u011c\x83\x80\x00\xe0\x94M\x82\xd7p\f\x12;\xb9\x19A\x9b\xba\xf0Fy\x9ck\x0e,f\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4M\x83m\x9d;\x0e,\xbdM\xe0PYo\xaaI\f\xff\xb6\r]\x89\x10CV\x1a\x88)0\x00\x00\u07d4M\x86\x97\xaf\x0f\xbf,\xa3n\x87h\xf4\xaf\"\x135phZ`\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4M\x92y\x96 )\xa8\xbdEc\x977\xe9\x8bQ\x1e\xff\aL!\x89Hz\x9a0E9D\x00\x00\u07d4M\x93io\xa2HY\xf5\u0493\x9a\xeb\xfaT\xb4\xb5\x1a\xe1\xdc\u0309\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\u07d4M\x9cw\xd0u\f^o\xbc$\u007f/\u05d2thl\xb3S\u0589\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4M\xa5\xed\u0188\xb0\xcbb\xe1@=\x17\x00\xd9\u0739\x9f\xfe?\u04c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94M\xa8\x03\ai\x84K\xc3A\x86\xb8\\\xd4\xc74\x88I\xffI\xe9\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4M\xb1\xc4:\x0f\x83M}\x04x\xb8\x96\ag\xec\x1a\xc4L\x9a\xeb\x89/Q\x810V'7\x00\x00\u07d4M\xb2\x12\x84\xbc\xd4\xf7\x87\xa7Ue\x00\xd6\xd7\xd8\xf3f#\xcf5\x89i(7Ow\xa3c\x00\x00\u07d4M\xc3\xda\x13\xb2\xb4\xaf\xd4O]\r1\x89\xf4D\xd4\xdd\xf9\x1b\x1b\x89lk\x93[\x8b\xbd@\x00\x00\u07d4M\u013f^u\x89\xc4{(7\x8du\x03\u03d6H\x80a\u06fd\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4M\xc9\u057bK\x19\xce\u0354\xf1\x9e\xc2] \x0e\xa7/%\xd7\xed\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94M\xcd\x11\x81X\x18\xae)\xb8]\x016sI\xa8\xa7\xfb\x12\xd0k\x8a\x01\xacB\x86\x10\x01\x91\xf0\x00\x00\u07d4M\xcfb\xa3\xde?\x06\x1d\xb9\x14\x98\xfda\x06\x0f\x1fc\x98\xffs\x89lj\xccg\u05f1\xd4\x00\x00\u07d4M\xd11\xc7J\x06\x8a7\xc9\n\xde\xd4\xf3\t\xc2@\x9fdx\u04c9\x15\xaf9\u4ab2t\x00\x00\xe0\x94M\u0767Xk\"7\xb0S\xa7\xf3(\x9c\xf4`\xdcW\xd3z\t\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4M\xe3\xfe4\xa6\xfb\xf64\xc0Q\x99\u007fG\xcc\u007fHy\x1fX$\x89l]\xb2\xa4\xd8\x15\xdc\x00\x00\u07d4M\xf1@\xbaye\x85\xddT\x891[\xcaK\xbah\n\u06f8\x18\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4N\x02\ay\xb5\xdd\xd3\xdf\"\x8a\x00\xcbH\xc2\xfc\x97\x9d\xa6\xae8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4N\v\xd3$s\xc4\xc5\x1b\xf2VT\xde\xf6\x9fy|k)\xa22\x89V\xc9]\xe8\xe8\xca\x1d\x00\x00\u07d4N\"%\xa1\xbbY\xbc\x88\xa21ft\xd33\xb9\xb0\xaf\xcafU\x89\bg\x0e\x9e\xc6Y\x8c\x00\x00\u07d4N#\x10\x19\x1e\xad\x8d;\xc6H\x98s\xa5\xf0\xc2\xeck\x87\u1f8965\u026d\xc5\u07a0\x00\x00\u07d4N#-S\xb3\u6f8f\x89Sa\xd3\x1c4\xd4v+\x12\xc8.\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4N+\xfaJFo\x82g\x1b\x80\x0e\xeeBj\xd0\f\a\x1b\xa1p\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4N>\xda\u0506M\xabd\xca\xe4\xc5Azvw@S\xdcd2\x89 \b\xfbG\x8c\xbf\xa9\x80\x00\u07d4NC\x18\xf5\xe1>\x82JT\xed\xfe0\xa7\xedO&\xcd=\xa5\x04\x89lk\x93[\x8b\xbd@\x00\x00\u07d4N[w\xf9\x06aY\xe6\x15\x93?-\xdatw\xfaNG\xd6H\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94Nf\x00\x80b\x89EJ\u03630\xa2\xa3U`\x10\u07ec\xad\xe6\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4Ns\xcf#y\xf1$\x86\x0fs\xd6\xd9\x1b\xf5\x9a\xcc\\\xfc\x84[\x89\x02,\xa3X|\xf4\xeb\x00\x00\xe0\x94Nz\xa6~\x12\x18>\xf9\xd7F\x8e\xa2\x8a\xd29\xc2\xee\xf7\x1bv\x8a\x01\n\xfc\x1a\xde;N\xd4\x00\x00\xe0\x94N{TGM\x01\xfe\xfd8\x8d\xfc\xd5;\x9ff&$A\x8a\x05\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94N\x89.\x80\x81\xbf6\xe4\x88\xfd\xdb;&0\xf3\xf1\xe8\xda0\u048a\x02\x8a\xba0u$Q\xfc\x00\x00\xe0\x94N\x8amcH\x9c\xcc\x10\xa5\u007f\x88_\x96\xeb\x04\xec\xbbT`$\x8a\x03\xea\xe3\x13\x0e\u0316\x90\x00\x00\u07d4N\x8eG\xae;\x1e\xf5\f\x9dT\xa3\x8e\x14 \x8c\x1a\xbd6\x03\u0089y(\xdb\x12vf\f\x00\x00\u0794N\x90\u03312X\xac\xaa\x9fO\xeb\xc0\xa3B\x92\xf9Y\x91\xe20\x88\xdbD\xe0I\xbb,\x00\x00\u07d4N\xa5n\x11\x12d\x1c\x03\x8d\x05e\xa9\u0096\xc4c\xaf\xef\xc1~\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\xe0\x94N\xa7\x0f\x041?\xaee\xc3\xff\"J\x05\\=-\xab(\xdd\u07ca\x04<0\xfb\b\x84\xa9l\x00\x00\u07d4N\xb1EKW8\x05\u022c\xa3~\xde\xc7\x14\x9aA\xf6\x12\x02\xf4\x89\x10CV\x1a\x88)0\x00\x00\u07d4N\xb8{\xa8x\x8e\xba\r\xf8~[\x9b\xd5\n\x8eE6\x80\x91\xc1\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4N\xbcV)\xf9\xa6\xa6k,\xf36:\u0109\\\x03H\u8fc7\x8967\tlK\xcci\x00\x00\u07d4N\xc7h)^\xea\xba\xfcB\x95\x84\x15\xe2+\xe2\x16\xcd\xe7v\x18\x89\x03;\x1d\xbc9\xc5H\x00\x00\u07d4N\xcc\x19\x94\x8d\xd9\u0347\xb4\xc7 \x1a\xb4\x8eu\x8f(\xe7\xccv\x89\x1b\x1d\xaba\u04ead\x00\x00\u07d4N\xd1M\x81\xb6\v#\xfb%\x05M\x89%\u07e5s\u072eah\x89\x12nr\xa6\x9aP\xd0\x00\x00\xe0\x94N\xe1<\rA \vF\u045d\xee\\K\xce\xc7\x1d\x82\xbb\x8e8\x8a\x01\xab\xee\x13\u033e\ufbc0\x00\u07d4N\xea\xd4\n\xad\x8cs\xef\b\xfc\x84\xbc\n\x92\xc9\t/j6\xbf\x89\x01s\x17\x90SM\xf2\x00\x00\u07d4N\xeb\xe8\f\xb6\xf3\xaeY\x04\xf6\xf4\xb2\x8d\x90\u007f\x90q\x89\xfc\xab\x89lj\xccg\u05f1\xd4\x00\x00\u07d4N\xeb\xf1 ]\f\xc2\f\xeel\u007f\x8f\xf3\x11_V\u050f\xba&\x89\x01\r:\xa56\xe2\x94\x00\x00\u07d4N\xf1\xc2\x14c:\xd9\xc0p;N#t\xa2\xe3>>B\x92\x91\x89Hz\x9a0E9D\x00\x00\u07d4N\xfc\xd9\u01df\xb43L\xa6${\n3\xbd\x9c\xc32\b\xe2r\x89Hz\x9a0E9D\x00\x00\xe0\x94O\x06$k\x8dK\u0496a\xf4>\x93v\"\x01\u0486\x93Z\xb1\x8a\x01\x059O\xfcF6\x11\x00\x00\u07d4O\x15+/\xb8e\x9dCwn\xbb\x1e\x81g:\xa8Ai\xbe\x96\x89lk\x93[\x8b\xbd@\x00\x00\u07d4O\x17\u007f\x9dV\x95=\xedq\xa5a\x1f93\"\xc3\x02y\x89\\\x89\rU\uf422\xda\x18\x00\x00\u07d4O\x1a-\xa5JLm\xa1\x9d\x14$\x12\xe5n\x81WA\xdb#%\x89\x05k\xc7^-c\x10\x00\x00\u07d4O#\xb6\xb8\x17\xff\xa5\xc6d\xac\xda\u05db\xb7\xb7&\xd3\n\xf0\xf9\x89_h\xe8\x13\x1e\u03c0\x00\x00\xe0\x94O&i\f\x99+z1*\xb1.\x13\x85\xd9J\xcdX(\x8e{\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4O+G\xe2wZ\x1f\xa7\x17\x8d\xad\x92\x98Z[\xbeI;\xa6\u0589\n\u05ce\xbcZ\xc6 \x00\x00\u07d4O:HT\x91\x11E\xea\x01\xc6D\x04K\xdb.Z\x96\n\x98/\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4O?,g0i\xac\x97\xc2\x026\a\x15)\x81\xf5\xcd`c\xa0\x89 
\x86\xac5\x10R`\x00\x00\xe0\x94OJ\x9b\xe1\f\xd5\xd3\xfb]\xe4\x8c\x17\xbe)o\x89V\x90d[\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4OR\xadap\xd2[*.\x85\x0e\xad\xbbRA?\xf20>\u007f\x89\xa4\xccy\x95c\u00c0\x00\x00\u07d4OX\x01\xb1\xeb0\xb7\x12\u0620WZ\x9aq\xff\x96]O4\xeb\x89\x10CV\x1a\x88)0\x00\x00\u07d4O]\xf5\xb9CW\u0794\x86\x04\xc5\x1bx\x93\xcd\xdf`v\xba\xad\x89\xcb\xd4{n\xaa\x8c\xc0\x00\x00\u07d4Od\xa8^\x8e\x9a@I\x8c\fu\xfc\xeb\x037\xfbI\b>^\x8965\u026d\xc5\u07a0\x00\x00\u07d4Og9m%S\xf9\x98x_pN\a\xa69\x19}\u0454\x8d\x89\x10DrR\x1b\xa78\x00\x00\u07d4OmG7\u05e9@8$\x87&H\x86i|\xf7c\u007f\x80\x15\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4Os0\toy\xed&N\xe0\x12\u007f]0\xd2\xf7?!\xcb\u007f\x04\x89\x04\x82\xfe&\f\xbc\xa9\x00\x00\u07d4O\xeeP\xc5\xf9\x88 k\t\xa5sF\x9f\xb1\u0434.\xbbm\u0389l\xee\x06\u077e\x15\xec\x00\x00\u07d4O\xf6v\xe2\u007fh\x1a\x98-\x8f\xd9\xd2\x0ed\x8b=\xce\x05\xe9E\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4O\xf6\u007f\xb8\u007fn\xfb\xa9'\x990\u03fd\x1bz4L\u057a\x8bN\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94PFf\u03891\x17^\x11\xa5\xed\x11\xc1\u072a\x06\xe5\u007fNf\x8a\x02\u007f>\u07f3Nn@\x00\x00\u0794PXM\x92\x06\xa4l\xe1\\0\x11\x17\xee(\xf1\\0\xe6\x0eu\x88\xb9\xf6]\x00\xf6<\x00\x00\xe0\x94PZ3\xa1\x864\xddH\x00i)\x13N\x00\x00\u07d4P\u0286\xb5\xeb\x1d\x01\x87M\xf8\xe5\xf3IE\u051cl\x1a\xb8H\x8965\u026d\xc5\u07a0\x00\x00\u07d4P\u0357\xe97\x8b\\\xf1\x8f\x179c#l\x99Q\xeft8\xa5\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4P\u073c'\xbc\xad\x98@\x93\xa2\x12\xa9\xb4\x17\x8e\xab\xe9\x01ua\x89\a\xe3by\v\\\xa4\x00\x00\u07d4P\xe10#\xbd\x9c\xa9j\xd4\xc5?\xdf\xd4\x10\xcbk\x1fB\v\u07c9\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94P\xe1\xc8\xec\x98A[\xefD&\x18p\x87\x99C{\x86\xe6\xc2\x05\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4P\xf8\xfaK\xb9\xe2g|\x99\nN\xe8\xcep\xdd\x15#%\x1eO\x89\x01i=#\x16Ok\x00\x00\u07d4P\xfb6\xc2q\a\xee,\xa9\xa3#n'F\u0321\x9a\xcekI\x89lk\x93[\x8b\xbd@\x00\x00\u07d4P\xfe\xf2\x96\x95U\x88\u02aet\xc6.\xc3*#\xa4T\xe0\x9a\xb8\x89A\x1d\xff\xab\xc5\a8\x00\x00\u07d4Q\x02\xa4\xa4 w\xe1\x1cX\xdfGs\u3b14F#\xa6m\x9f\x89lp\x15\xfdR\xed@\x80\x00\u07d4Q\x03\x93w\xee\xd0\xc5s\xf9\x86\xc5\xe8\xa9_\xb9\x9aY\xe93\x0f\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4Q\x03\xbc\t\x93>\x99!\xfdS\xdcSo\x11\xf0]\rG\x10}\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94Q\x04\xec\xc0\xe30\xdd\x1f\x81\xb5\x8a\xc9\u06f1\xa9\xfb\xf8\x8a<\x85\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4Q\r\x81Y\u0314Wh\xc7E\a\x90\xba\a>\xc0\xd9\xf8\x9e0\x89\x8a\xc7#\x04\x89\xe8\x00\x00\x00\u07d4Q\x0e\xdaV\x01I\x9a\r^\x1a\x00k\xff\xfd\x836r\xf2\xe2g\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Q\x12dF\xab=\x802U~\x8e\xbaeY}u\xfa\u0701\\\x89\x11t\xa5\xcd\xf8\x8b\xc8\x00\x00\xe0\x94Q\x18U}`\r\x05\xc2\xfc\xbf8\x06\xff\xbd\x93\xd0 
%\xd70\x8a\x02g\u04ebd#\xf5\x80\x00\x00\u07d4Q\x1e\x0e\xfb\x04\xacN?\xf2\xe6U\x0eI\x82\x95\xbf\xcdV\xff\u0549$=M\x18\"\x9c\xa2\x00\x00\u07d4Q!\x16\x81{\xa9\xaa\xf8C\xd1P|e\xa5\xead\n{\x9e\xec\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4Q&F\ri,q\u026fo\x05WM\x93\x99\x83h\xa27\x99\x89\x02\u0465\x1c~\x00P\x00\x00\u07d4Q'\u007f\xe7\xc8\x1e\xeb\xd2R\xa0=\xf6\x9ak\x9f2n'\"\a\x89\x03@.y\u02b4L\x80\x00\u07d4Q)oPD'\r\x17pvF\x12\x9c\x86\xaa\xd1d^\xad\xc1\x89H|r\xb3\x10\xd4d\x80\x00\xe0\x94Q+\x91\xbb\xfa\xa9\xe5\x81\xefh?\xc9\r\x9d\xb2*\x8fI\xf4\x8b\x8aA\xa5\"8m\x9b\x95\xc0\x00\x00\u07d4Q5\xfb\x87W`\f\xf4tTbR\xf7M\xc0tm\x06&,\x89lk\x93[\x8b\xbd@\x00\x00\u07d4QF2\xef\xbdd,\x04\xdel\xa3B1]@\u0750\xa2\u06e6\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4QKu\x12\u026e^\xa6<\xbf\x11q[c\xf2\x1e\x18\u0496\xc1\x89lj\xccg\u05f1\xd4\x00\x00\u07d4QS\xa0\xc3\u0211(\x81\xbf\x1c5\x01\xbfd\xb4VI\xe4\x82\"\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94QVQ\xd6\xdbO\xaf\x9e\xcd\x10:\x92\x1b\xbb\xbej\xe9p\xfd\u050a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94Q_0\xbc\x90\xcd\xf4W~\xe4}e\u05c5\xfb\xe2\xe87\u01bc\x8a\x02'\x1b^\x01\x8b\xa0X\x00\x00\u07d4Q`\xeda.\x1bH\xe7??\xc1[\xc42\x1b\x8f#\xb8\xa2K\x89\x1e\x82kB(e\xd8\x00\x00\u07d4Qa\xfdI\xe8G\xf6tU\xf1\u023bz\xbb6\xe9\x85&\r\x03\x89A\rXj \xa4\xc0\x00\x00\u07d4QiT\x02_\xca&\b\xf4}\xa8\x1c!^\xed\xfd\x84J\t\xff\x89\x14\xb5P\xa0\x13\xc78\x00\x00\u07d4Qi\xc6\n\xeeL\xee\u0444\x9a\xb3mfL\xff\x97\x06\x1e\x8e\xa8\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4Q|uC\r\xe4\x01\xc3A\x03&\x86\x11'\x90\xf4mM6\x9e\x89\x15\b\x94\xe8I\xb3\x90\x00\x00\u07d4Q|\xd7`\x8e]\r\x83\xa2kq\u007f6\x03\xda\xc2'}\u00e4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Q\x86]\xb1H\x88\x19Q\xf5\x12Qq\x0e\x82\xb9\xbe\r~\xad\xb2\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Q\x89\x1b,\xcd\xd2\xf5\xa4K*\x8b\u011a]\x9b\xcadw%\x1c\x89\x10\xce\x1d=\x8c\xb3\x18\x00\x00\u07d4Q\x8c\xef'\xb1\x05\x82\xb6\xd1OiH=\u06a0\xdd<\x87\xbb\\\x89 \x86\xac5\x10R`\x00\x00\u07d4Q\xa6\xd6'\xf6j\x89#\u060d`\x94\xc4qS\x80\xd3\x05|\xb6\x89>s\xd2z5\x94\x1e\x00\x00\u07d4Q\xa8\xc2\x166\x02\xa3.\xe2L\xf4\xaa\x97\xfd\x9e\xa4\x14QiA\x89\x03h\xf7\xe6\xb8g,\x00\x00\u07d4Q\xb4u\x8e\x9e\x14P\xe7\xafBh\xc3\u01f1\xe7\xbdo\\uP\x8965\u026d\xc5\u07a0\x00\x00\u07d4Q\u028b\xd4\xdcdO\xacG\xafgUc\u0540J\r\xa2\x1e\xeb\x89*\xb7\xb2`\xff?\xd0\x00\x00\u07d4Q\xd2K\xc3so\x88\xddc\xb7\" &\x88f0\xb6\ub1cd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Q\u05cb\x17\x8dp~9n\x87\x10\x96\\OA\xb1\xa1\xd9\x17\x9d\x89\x05\xfe\xe2\"\x04\x1e4\x00\x00\u07d4Q\xe3/\x14\xf4\xca^(|\xda\xc0W\xa7y^\xa9\xe0C\x99S\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4Q\xe4?\xe0\xd2\\x(`\xaf\x81\xea\x89\xddy<\x13\xf0\u02f1\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4Q\xe7\xb5\\/\x98 \xee\xd78\x846\x1bPf\xa5\x9boE\u0189lk\x93[\x8b\xbd@\x00\x00\xe0\x94Q\xea\x1c\t4\xe3\xd0@\"\ud715\xa0\x87\xa1P\xefp^\x81\x8a\x01Tp\x81\xe7\"M \x00\x00\u07d4Q\xee\f\xca;\xcb\x10\xcd>\x987\"\xce\xd8I=\x92l\bf\x8965f3\xeb\xd8\xea\x00\x00\xe0\x94Q\xf4f:\xb4O\xf7\x93E\xf4'\xa0\xf6\xf8\xa6\u0225?\xf24\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4Q\xf5^\xf4~dV\xa4\x18\xab2\xb9\"\x1e\xd2}\xbaf\b\xee\x89\u3bb5sr@\xa0\x00\x00\xe0\x94Q\xf9\xc42\xa4\xe5\x9a\xc8b\x82\u05ad\xabL.\xb8\x91\x91`\xeb\x8ap;[\x89\u00e6\xe7@\x00\x00\u07d4R\x0ff\xa0\xe2e\u007f\xf0\xacA\x95\xf2\xf0d\xcf/\xa4\xb2BP\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4R\x10#T\xa6\xac\xa9]\x8a.\x86\xd5\u07bd\xa6\xdei4`v\x89lk\x93[\x8b\xbd@\x00\x00\u07d4R\x13\xf4Y\xe0x\xad:\xb9Z\t #\x9f\xcf\x163\xdc\x04\u0289\x8c\xf2\x18|*\xfb\x18\x80\x00\u07d4R\x15\x18;\x8f\x80\xa9\xbc\x03\xd2l\xe9\x12\a\x83*\r9\xe6 
\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94R!Cx\xb5@\x04\x05j|\xc0\x8c\x89\x13'y\x8a\u01b2H\x8a\x037\xfe_\xea\xf2\u0440\x00\x00\xe0\x94R##\xaa\xd7\x1d\xbc\x96\xd8Z\xf9\x0f\bK\x99\xc3\xf0\x9d\ucdca\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4R>\x14\r\xc8\x11\xb1\x86\xde\xe5\xd6\u020b\xf6\x8e\x90\xb8\xe0\x96\xfd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4R?mdi\x0f\xda\u0354(SY\x1b\xb0\xff \xd3em\x95\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4RO\xb2\x10R,^#\xbbg\u07ff\x8c&\xaaam\xa4\x99U\x8965b\xa6m4#\x80\x00\u07d4RU\xdci\x15ZE\xb9p\xc6\x04\xd3\x00G\xe2\xf50i\x0e\u007f\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4R`\xdcQ\xee\a\xbd\u06ab\xab\xb9\xeetK9<\u007fG\x93\xa6\x89\x01\xd8f_\xa5\xfaL\x00\x00\u07d4Rg\xf4\xd4\x12\x92\xf3p\x86<\x90\u05d3)i\x03\x846%\u01c9K\xe4\xe7&{j\xe0\x00\x00\u07d4Rk\xb53\xb7n \xc8\xee\x1e\xbf\x12?\x1e\x9f\xf4\x14\x8e@\xbe\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4Rl\xb0\x9c\u3b63g.\xec\x1d\xebF [\xe8\x9aKV>\x89\x85\xcaa[\xf9\xc0\x10\x00\x00\u07d4Rs\x8c\x90\xd8`\xe0L\xb1/I\x8d\x96\xfd\xb5\xbf6\xfc4\x0e\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\u07d4Rz\x8c\xa1&\x863\xa6\xc99\xc5\xde\x1b\x92\x9a\ue4ae\xac\x8d\x890\xca\x02O\x98{\x90\x00\x00\u07d4R\x81\x01\xceF\xb7 \xa2!M\u036ef\x18\xa51w\xff\xa3w\x89\x1b\x96\x12\xb9\xdc\x01\xae\x00\x00\xe0\x94R\x81s4s\xe0\r\x87\xf1\x1e\x99U\u5275\x9fJ\u008ez\x8a\x8b\xd6/\xf4\xee\xc5Y \x00\x00\u07d4R\x98\xab\x18*\x195\x9f\xfc\xec\xaf\xd7\u0475\xfa!-\xed\xe6\u0749\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4R\x9a\xa0\x02\u0196*:\x85E\x02\u007f\u0630_\"\xb5\xbf\x95d\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4R\x9e\x82O\xa0rX+@2h:\xc7\xee\xcc\x1c\x04\xb4\xca\xc1\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94R\xa5\xe4\xdeC\x93\xee\xcc\xf0X\x1a\xc1\x1bR\u0183\xc7n\xa1]\x8a\x04<0\xfb\b\x84\xa9l\x00\x00\u07d4R\xb4%|\xf4\x1bn(\x87\x8dP\xd5{\x99\x91O\xfa\x89\x87:\x89\xd5\r\u026a,Aw\x00\x00\u07d4R\xb8\xa9Y&4\xf70\v|\\Y\xa34[\x83_\x01\xb9\\\x89lk\x93[\x8b\xbd@\x00\x00\u07d4R\xbd\u066fYx\x85\v\xc2A\x10q\x8b7#u\x9bC~Y\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4R\xcd @;\xa7\xed\xa6\xbc0z=c\xb5\x91\x1b\x81|\x12c\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u0794R\u04c0Q\x1d\xf1\x9d^\u0080{\xbc\xb6vX\x1bg\xfd7\xa3\x88\xb9\xf6]\x00\xf6<\x00\x00\xe0\x94R\xe1s\x13P\xf9\x83\xcc,A\x89\x84/\xde\x06\x13\xfa\xd5\f\xe1\x8a\x02w\x01s8\xa3\n\xe0\x00\x00\u07d4R\xe4g\x832\x9av\x93\x01\xb1u\x00\x9d4gh\xf4\xc8~\xe4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4R\xf0X\xd4aG\xe9\x00m)\xbf,\t0J\xd1\xcd\xddn\x15\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4R\xf1T#2<$\xf1\x9a\xe2\xabg7\x17\"\x9d?t}\x9b\x897\xa04\xcb\xe8\xe3\xf3\x80\x00\u07d4R\xf8\xb5\t\xfe\xe1\xa8t\xabo\x9d\x876\u007f\xbe\xaf\x15\xac\x13\u007f\x8965\u026d\xc5\u07a0\x00\x00\u07d4R\xfbF\xac]\x00\xc3Q\x8b,:\x1c\x17}D/\x81eU_\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4S\x00w\xc9\xf7\xb9\a\xff\x9c\xec\fw\xa4\x1ap\xe9\x02\x9a\xddJ\x89lk\x93[\x8b\xbd@\x00\x00\u07d4S\x03\x19\xdb\n\x8f\x93\xe5\xbb}M\xbfH\x161O\xbe\xd86\x1b\x89lk\x93[\x8b\xbd@\x00\x00\u07d4S\x04}\u022c\x90\x83\xd9\x06r\xe8\xb3G<\x10\f\xcd'\x83#\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4S\va\xe4/9Bm$\b\xd4\bR\xb9\xe3J\xb5\xeb\xeb\u0149\x0e~\xeb\xa3A\vt\x00\x00\u07d4S\x0f\xfa\u00fc4\x12\xe2\xec\x0e\xa4{y\x81\xc7p\xf5\xbb/5\x89\a?u\u0460\x85\xba\x00\x00\u07d4S\x17\xec\xb0#\x05,\xa7\xf5e+\xe2\xfa\x85L\xfeEc\xdfM\x89\x1b\x1a\xb3\x19\xf5\xecu\x00\x00\u07d4S\x19M\x8a\xfa>\x885\x02v~\xdb\xc3\x05\x86\xaf3\xb1\x14\u04c9lk\x93[\x8b\xbd@\x00\x00\u07d4S*}\xa0\xa5\xadt\aF\x8d;\xe8\xe0~i\xc7\xddd\xe8a\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4S-2\xb0\x0f0[\xcc$\xdc\xefV\x81}b/4\xfb,$\x89a\x94\x04\x9f0\xf7 
\x00\x00\u07d4S4DX@\x82\xeb\xa6T\xe1\xad0\xe1Is\\o{\xa9\"\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4S8\xefp\xea\xc9\u075a\xf5\xa0P;^\xfa\xd1\x03\x9eg\xe7%\x89\x90\xf54`\x8ar\x88\x00\x00\xe0\x94S9oJ&\u00b4`D\x960lTB\xe7\xfc\xba'.6\x8a\x04?/\b\xd4\x0eZ\xfc\x00\x00\xe0\x94S:s\xa4\xa2\"\x8e\xee\x05\xc4\xff\xd7\x18\xbb\xf3\xf9\xc1\xb1)\xa7\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4S<\x06\x92\x8f\x19\u0429V\xcc(\x86k\xf6\xc8\xd8\xf4\x19\x1a\x94\x89\x0f\xd8\xc1C8\xe60\x00\x00\u07d4S@e6\x1c\xb8T\xfa\xc4+\xfb\\\x9f\xcd\xe0`J\xc9\x19\u0689lk\x93[\x8b\xbd@\x00\x00\u07d4SC\u007f\xec\xf3J\xb9\xd45\xf4\u07b8\xca\x18\x15\x19\xe2Y 5\x89\n1\x06+\xee\xedp\x00\x00\u07d4SR\x01\xa0\xa1\xd74\"\x80\x1fU\xde\xd4\u07ee\xe4\xfb\xaan;\x89\x02&!\x1fy\x15B\x80\x00\xe0\x94S`\x81\x05\xceK\x9e\x11\xf8k\xf4\x97\xff\xca;x\x96{_\x96\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4SnM\x80)\xb7?Uy\u0723>p\xb2N\xba\x89\xe1\x1d~\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4Sp\rS%MC\x0f\"x\x1aJv\xa4c\x93;]k\b\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94S\u007f\x9dM1\xefp\x83\x9d\x84\xb0\xd9\u0377+\x9a\xfe\xdb\xdf5\x8a\x0e\u04b5%\x84\x1a\xdf\xc0\x00\x00\xe0\x94S\x81D\x85\x03\xc0\xc7\x02T+\x1d\xe7\xcc_\xb5\xf6\xab\x1c\xf6\xa5\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94S\x94.yI\xd6x\x8b\xb7\x80\xa7\xe8\xa0y'\x81\xb1aK\x84\x8a\x03]\xebFhO\x10\xc8\x00\x00\u07d4S\x95\xa4E]\x95\xd1x\xb4S*\xa4r[\x19?\xfeQ)a\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94S\x98\x9e\xd30V?\xd5}\xfe\u027d4<7`\xb0y\x93\x90\x8a\x01P\x89N\x84\x9b9\x00\x00\x00\u07d4S\xa2Dg(\x95H\x0fJ+\x1c\xdf}\xa5\xe5\xa2B\xecM\xbc\x8965\u026d\xc5\u07a0\x00\x00\u07d4S\xa7\x14\xf9\x9f\xa0\x0f\xefu\x8e#\xa2\xe7F2m\xad$|\xa7\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4S\xaf2\xc2/\uf640?\x17\x8c\xf9\v\x80/\xb5q\xc6\x1c\xb9\x89\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4S\xc0\xbb\u007f\u020e\xa4\"\xd2\xef~T\x0e-\x8f(\xb1\xbb\x81\x83\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94S\xc5\xfe\x01\x19\xe1\xe8Hd\f\xee0\xad\ua594\x0f*]\x8b\x8a\x04\x9a\xda_\xa8\xc1\f\x88\x00\x00\u07d4S\xc9\xec\xa4\ts\xf6;\xb5\x92{\xe0\xbcj\x8a\x8b\xe1\x95\x1ft\x89lk\x93[\x8b\xbd@\x00\x00\u07d4S\u0388\xe6lZ\xf2\U0009bf4fY*V\xa3\xd1_ l2\x89\a\xa2\x8c1\xcc6\x04\x00\x00\u07d4S\xce\xc6\u0200\x92\xf7V\xef\xe5o}\xb1\x12(\xa2\xdbE\xb1\"\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4S\xe3[\x12#\x1f\x19\xc3\xfdwL\x88\xfe\xc8\xcb\xee\xdf\x14\b\xb2\x89\x1b\xc1mgN\xc8\x00\x00\x00\u07d4S\xe4\xd9im\xcb?M{?p\u072aN\xec\xb7\x17\x82\xff\\\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4S\xfa\xf1e\xbe\x03\x1e\xc1\x830\xd9\xfc\xe5\xbd\x12\x81\xa1\xaf\b\u06c9\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4T\n\x18\x19\xbd|5\x86\x1ey\x18\x04\xe5\xfb\xb3\xbc\x97\u026b\xb1\x89N\xd7\xda\xc6B0 \x00\x00\xe0\x94T\f\a(\x02\x01N\xf0\xd5a4Z\xecH\x1e\x8e\x11\xcb5p\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94T\f\xf2=\xd9\\MU\x8a'\x9dw\x8d+75\xb3\x16A\x91\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4T\x10`\xfcX\xc7P\xc4\x05\x12\xf83i\xc0\xa63@\xc1\"\xb6\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4T\x13\xc9\u007f\xfaJn*{\xba\x89a\u071f\u03850\xa7\x87\u05c965\u026d\xc5\u07a0\x00\x00\u07d4T\x1d\xb2\n\x80\xcf;\x17\xf1b\x1f\x1b?\xf7\x9b\x88/P\xde\xf3\x8965\u026d\xc5\u07a0\x00\x00\u07d4T.\x80\x96\xba\xfb\x88\x16&\x06\x00.\x8c\x8a>\u0458\x14\xae\xac\x89lk\x93[\x8b\xbd@\x00\x00\u07d4T1\v:\xa8\x87\x03\xa7%\u07e5}\xe6\xe6F\x93Qd\x80,\x89g\x8a\x93 
b\xe4\x18\x00\x00\u07d4T1\xb1\u0447Q\xb9\x8f\xc9\u220a\xc7u\x9f\x155\xa2\xdbG\x89lk\x93[\x8b\xbd@\x00\x00\u07d4T1\xcaB~ae\xa6D\xba\xe3&\xbd\tu\n\x17\x8ce\r\x89lk\x93[\x8b\xbd@\x00\x00\u07d4T5\xc6\xc1y3\x17\xd3,\xe1;\xbaLO\xfe\xb9s\xb7\x8a\u0709\r\x8ek\x1c\x12\x85\xef\x00\x00\xe0\x94T6)\xc9\\\xde\xf4(\xad7\xd4S\u02958\xa9\xf9\t\x00\xac\x8a\t(\x96R\x9b\xad\u0708\x00\x00\u07d4T9\x1bM\x17mGl\xea\x16N_\xb55\u0197\x00\xcb%5\x89\x05l\xd5_\xc6M\xfe\x00\x00\xe0\x94T:\x8c\x0e\xfb\x8b\xcd\x15\xc5C\u29a4\xf8\aYv1\xad\xef\x8a\x01?\x80\xe7\xe1O-D\x00\x00\u07d4T?\x8cgN$b\xd8\xd5\u06a0\xe8\x01\x95\xa8p\x8e\x11\xa2\x9e\x89\x03wX\x83;:z\x00\x00\xe0\x94TK[5\x1d\x1b\xc8.\x92\x97C\x99H\xcfHa\xda\u026e\x11\x8a\x04\xa8\x9fT\xef\x01!\xc0\x00\x00\u07d4TM\xdaB\x1d\xc1\xebs\xbb$\xe3\xe5j$\x80\x13\xb8|\x0fD\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4TW\\1\x14u\x1e\x14o\xfe\u00c7nE\xf2\x0e\xe8AJ\u07ba\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4T\xb4B\x9b\x18/\x03w\xbe~bi9\xc5\xdbd@\xf7]z\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4T\xbc\xb8\xe7\xf7<\xda=s\xf4\u04cb-\bG\xe6\x00\xba\r\xf8\x89:pAX\x82\xdf\x18\x00\x00\u07d4T\xc9>\x03\xa9\xb2\xe8\xe4\xc3g(5\xa9\xeev\xf9a[\xc1N\x89\x01\r:\xa56\xe2\x94\x00\x00\u07d4T\u0388'YV\xde\xf5\xf9E\x8e;\x95\xde\xca\xcdH@!\xa0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4T\xdb^\x06\xb4\x81]1\xcbV\xa8q\x9b\xa3:\xf2\xd7>rR\x89$R\x1e*0\x17\xb8\x00\x00\xe0\x94T\xe0\x12\x83\u030b8E8\xdddgp\xb3W\xc9`\xd6\xca\u034a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4T\xecs\x00\xb8\x1a\xc8C3\xed\x1b\x03<\xd5\u05e39r\xe24\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4T\xfe\xbc\xce \xfez\x90\x98\xa7U\xbd\x90\x98\x86\x02\xa4\x8c\b\x9e\x89\"\xb1\xc8\xc1\"z\x00\x00\x00\u07d4U\n\xad\xae\x12!\xb0z\xfe\xa3\x9f\xba.\xd6.\x05\u5df5\xf9\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4U\f0o\x81\xef]\x95\x80\xc0l\xb1\xab \x1b\x95\xc7H\xa6\x91\x89$\x17\xd4\xc4p\xbf\x14\x00\x00\xe0\x94U\x19\x99\xdd\xd2\x05V3'\xb9\xb50xZ\xcf\xf9\xbcs\xa4\xba\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4U\x1ew\x84w\x8e\xf8\xe0H\xe4\x95\xdfI\xf2aO\x84\xa4\xf1\u0709 \x86\xac5\x10R`\x00\x00\xe0\x94U)\x83\na\xc1\xf1<\x19~U\v\xed\xdf\u05bd\x19\\\x9d\x02\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4U)\x87\xf0e\x1b\x91[.\x1eS(\xc1!\x96\rK\xddj\xf4\x89a\t=|,m8\x00\x00\u07d4U;k\x1cW\x05\x0e\x88\xcf\f1\x06{\x8dL\xd1\xff\x80\xcb\t\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4U?7\xd9$fU\x0e\x9f\xd7u\xaet6-\xf00\x17\x912\x89lk\x93[\x8b\xbd@\x00\x00\u07d4UC6\xeeN\xa1U\xf9\xf2O\x87\xbc\xa9\xcar\xe2S\xe1,\u0489\x05k\xc7^-c\x10\x00\x00\u0794UC\xddm\x16\x9e\xec\x8a!;\xbfz\x8a\xf9\xff\xd1]O\xf7Y\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4UG\xfd\xb4\xae\x11\x95>\x01)+x\a\xfa\x92#\xd0\xe4`j\x89\x05]\x11}\xcb\x1d&\x00\x00\u07d4UR\xf4\xb3\xed>\x1d\xa7\x9a/x\xbb\x13\xe8\xaeZh\xa9\xdf;\x8965\u026d\xc5\u07a0\x00\x00\u07d4U\\\xa9\xf0\\\xc14\xabT\xae\x9b\xea\x1c?\xf8z\xa8Q\x98\u0289\x05k\xc7^-c\x10\x00\x00\xe0\x94U]\x8d<\xe1y\x8a\u0290'T\xf1d\xb8\xbe*\x022\x9cl\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4U]\xf1\x93\x90\xc1m\x01)\x87r\xba\xe8\xbc:\x11R\x19\x9c\xbd\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4U^\xbe\x84\u06a4+\xa2V\xeax\x91\x05\xce\u0136\x93\xf1/\x18\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94U\u007f^e\xe0\xda3\x99\x82\x19\xadN\x99W\x05E\xb2\xa9\xd5\x11\x8a\x02U\x9c\xbb\x98XB@\x00\x00\u07d4U\x83` 
h\x83\xdd\x1bmJYc\x9eV)\xd0\xf0\xc6u\u0409lk\x93[\x8b\xbd@\x00\x00\u07d4U\x84B0P\xe3\xc2\x05\x1f\v\xbd\x8fD\xbdm\xbc'\xec\xb6,\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4U\x85)CI)p\xf8\xd6)\xa1Sf\xcd\xda\x06\xa9OE\x13\x89lk\x93[\x8b\xbd@\x00\x00\u0794U\x86d\x86\xec\x16\x8fy\xdb\xe0\u1af1\x88d\u0649\x91\xae,\x88\xdfn\xb0\xb2\xd3\xca\x00\x00\u07d4U\x8cTd\x9a\x8an\x94r+\xd6\xd2\x1d\x14qOqx\x054\x89lk\x93[\x8b\xbd@\x00\x00\u07d4U\x91\x940O\x14\xb1\xb9:\xfeDO\x06$\xe0S\xc2:\x00\t\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4U\x93\xc9\u0536ds\x0f\xd9<\xa6\x01Q\xc2\\.\xae\xd9<;\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4U\x97\x06\xc32\xd2\ay\xc4_\x8am\x04ji\x91Y\xb7I!\x89\x14\x9bD.\x85\xa3\u03c0\x00\u07d4U\x98\xb3\xa7\x9aH\xf3+\x1f_\xc9\x15\xb8{d]\x80]\x1a\xfe\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4U\xa3\xdfW\xb7\xaa\xec\x16\xa1b\xfdS\x16\xf3[\xec\b(!\u03c9j\xcb=\xf2~\x1f\x88\x00\x00\u07d4U\xa4\xca\xc0\u02cbX-\x9f\xef8\xc5\xc9\xff\xf9\xbdS\t=\x1f\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4U\xa6\x1b\x10\x94\x80\xb5\xb2\xc4\xfc\xfd\xef\x92\xd9\x05\x84\x16\f\r5\x89\x02lVM+S\xf6\x00\x00\u07d4U\xaa]1>\xbb\bM\xa0\xe7\x80\x10\x91\u2792\xc5\xde\u00ea\x89lk\x93[\x8b\xbd@\x00\x00\u07d4U\xab\x99\xb0\xe0\xe5]{\xb8t\xb7\xcf\xe84\xdec\x1c\x97\xec#\x897\xe9\x8c\xe3h\x99\xe4\x00\x00\u07d4U\xaf\t/\x94\xbajy\x91\x8b\f\xf99\xea\xb3\xf0\x1b?Q\u01c9\b \xd5\xe3\x95v\x12\x00\x00\u07d4U\xc5dfAf\xa1\xed\xf3\x91>\x01i\xf1\xcdE\x1f\xdb]\f\x89\x82\x17\xeaIP\x8el\x00\x00\xe0\x94U\xcaj\xbey\xea$\x97\xf4o\u06f804`\x10\xfeF\x9c\xbe\x8a\x016\x9f\xb9a(\xacH\x00\x00\u07d4U\xca\xffK\xba\x04\xd2 \u0265\xd2\x01\x86r\xec\x85\xe3\x1e\xf8>\x89lk\x93[\x8b\xbd@\x00\x00\u07d4U\xd0W\xbc\xc0K\xd0\xf4\xaf\x96BQ:\xa5\t\v\xb3\xff\x93\xfe\x89;\xfeE,\x8e\xddL\x00\x00\u07d4U\xd4.\xb4\x95\xbfF\xa64\x99{_.\xa3b\x81I\x18\u2c09\x05\xc0\xd2e\xb5\xb2\xa8\x00\x00\u07d4U\u069d\xcd\xcaa\xcb\xfe\x1f\x13<{\xce\xfc\x86{\x9c\x81\"\xf9\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4U\xe2 \x87bb\xc2\x18\xafOVxG\x98\xc7\xe5]\xa0\x9e\x91\x89\a=\x99\xc1VE\xd3\x00\x00\u07d4U\xfd\b\u0440d\xbd ,\x0e\xc3\xd2\xcc\xe0\xce\v\x9d\x16\x9cM\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4V\x00s\nU\xf6\xb2\x0e\xbd$\x81\x1f\xaa=\xe9m\x16b\xab\xab\x89e\xea=\xb7UF`\x00\x00\u07d4V\x03$\x1e\xb8\xf0\x8fr\x1e4\x8c\x9d\x9a\xd9/H\u342a$\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4V\x056yJ\x9e+\x00I\xd1\x023\xc4\x1a\xdc_A\x8a&J\x8965\u026d\xc5\u07a0\x00\x00\u07d4V\aY\x00Y\xa9\xfe\xc1\x88\x11I\xa4K6\x94\x9a\xef\x85\xd5`\x89lk\x93[\x8b\xbd@\x00\x00\u07d4V\v\xec\xdfR\xb7\x1f=\x88'\xd9'a\x0f\x1a\x98\x0f3qo\x89\x17GMp_V\u0400\x00\xe0\x94V\r\xa3~\x95m\x86/\x81\xa7_\u0540\xa7\x13\\\x1b$cR\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94V\x0f\xc0\x8d\a\x9f\x04~\xd8\xd7\xdfuU\x1a\xa55\x01\xf5p\x13\x8a\x01\x9b\xff/\xf5yh\xc0\x00\x00\u07d4V\x1b\xe9)\x9b>k>c\xb7\x9b\t\x16\x9d\x1a\x94\x8a\xe6\xdb\x01\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94V \xe3\xedy-/\x185\xfe_UA}Q\x11F\fj\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4V 
\xf4m\x14Q\xc25=bC\xa5\u0534'\x13\v\xe2\xd4\a\x89\x03@\xaa\xd2\x1b;p\x00\x00\xe0\x94V!\x05\xe8+\t\x975\xdeI\xf6&\x92\u0307\xcd8\xa8\xed\u034a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94V*\x8d\u02fe\xee\xf7\xb3`h]'0;\u059e\tJ\xcc\xf6\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4V+\xce\u04ca\xb2\xabl\b\x0f;\x05A\xb8Enp\x82K?\x89\"\xca5\x87\xcfN\xb0\x00\x00\xe0\x94V+\xe9Z\xba\x17\xc57\x1f\u2e82\x87\x99\xb1\xf5]!w\u058a\b\x16\xd3~\x87\xb9\xd1\xe0\x00\x00\u07d4V/\x16\u05da\xbf\xce\u00d4>4\xb2\x0f\x05\xf9{\xdf\u0366\x05\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4V7=\xaa\xb4c\x16\xfd~\x15v\xc6\x1ej\xff\xcbeY\xdd\u05c9\v\xacq]\x14l\x9e\x00\x00\u07d4V9v8\xbb<\xeb\xf1\xf6 byK^\xb9B\xf9\x16\x17\x1d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4V:\x03\xab\x9cV\xb6\x00\xf6\xd2[f\f!\xe1c5Qzu\x8965\u026d\xc5\u07a0\x00\x00\u07d4V<\xb8\x80<\x1d2\xa2['\xb6A\x14\x85+\xd0M\x9c \u0349\v\x14\x9e\xad\n\xd9\xd8\x00\x00\u07d4VXc\x91\x04\fW\xee\xc6\xf5\xaf\xfd\x8c\u052b\xde\x10\xb5\n\u0309\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4Vl\x10\xd68\u8e0bG\xd6\xe6\xa4\x14Iz\xfd\xd0\x06\x00\u0509\x05k9Bc\xa4\f\x00\x00\u07d4Vl(\xe3L8\b\xd9vo\xe8B\x1e\xbfO+\x1cO}w\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4V\x8d\xf3\x18Vi\x9b\xb5\xac\xfc\x1f\xe1\u0580\u07d9`\xcaCY\x89J\xcfUR\xf3\xb2I\x80\x00\u07d4V\x91\xdd/gE\xf2\x0e\"\xd2\xe1\u0479U\xaa)\x03\xd6VV\x89j\xc5\xc6-\x94\x86\a\x00\x00\u07d4V\xa1\xd6\r@\xf5\u007f0\x8e\xeb\xf0\x87\xde\xe3\xb3\u007f\x1e|,\xba\x89>\u072e\xc8-\x06\xf8\x00\x00\u07d4V\xac \xd6;\xd8\x03Y\\\xec\x03m\xa7\xed\x1d\xc6n\n\x9e\a\x89\x03w*S\xcc\xdce\x80\x00\u07d4V\xb6\xc2=\xd2\uc434r\x8f;\xb2\xe7d\xc3\xc5\f\x85\xf1D\x8965\u026d\xc5\u07a0\x00\x00\u07d4V\xdf\x05\xba\xd4l?\x00\xaeGn\xcf\x01{\xb8\xc8w8?\xf1\x89\n\xb1]\xaa\xefp@\x00\x00\u07d4V\xee\x19\u007fK\xbf\x9f\x1b\x06b\xe4\x1c+\xbd\x9a\xa1\xf7\x99\xe8F\x8965\u026d\xc5\u07a0\x00\x00\u07d4V\xf4\x93\xa3\xd1\b\xaa\xa2\u044d\x98\x92/\x8e\xfe\x16b\u03f7=\x89m\x81!\xa1\x94\xd1\x10\x00\x00\u07d4V\xfc\x1a{\xad@G#|\xe1\x16\x14b\x96#\x8e\a\x8f\x93\xad\x89\t\xa6?\b\xeac\x88\x00\x00\u07d4V\xfe\xbf\x9e\x10\x03\xaf\x15\xb1\xbdI\a\xec\b\x9aJ\x1b\x91\xd2h\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4W\x17\u0313\x01Q\x1dJ\x81\xb9\xf5\x83\x14\x8b\xee\xd3\xd3\u0303\t\x89\x8c\xf2?\x90\x9c\x0f\xa0\x00\x00\u07d4W\x17\xf2\xd8\xf1\x8f\xfc\xc0\xe5\xfe$}:B\x19\x03|:d\x9c\x89\u063beI\xb0+\xb8\x00\x00\u07d4W\x19P\xea,\x90\xc1B}\x93\x9da\xb4\xf2\xdeL\xf1\u03ff\xb0\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4W\x19\xf4\x9br\r\xa6\x88V\xf4\xb9\xe7\b\xf2VE\xbd\xbcKA\x89\"\xb1\xc8\xc1\"z\x00\x00\x00\u07d4W*\xc1\xab\xa0\xde#\xaeA\xa7\xca\xe1\xdc\bB\u062b\xfc\x10;\x89g\x8a\x93 b\xe4\x18\x00\x00\xe0\x94W-\xd8\xcd?\xe3\x99\xd1\xd0\xec(\x121\xb7\xce\xfc \xb9\u4eca\x023\xc8\xfeBp>\x80\x00\x00\xe0\x94WI!\x83\x8c\xc7}l\x98\xb1}\x90::\xe0\xee\r\xa9[\u040a\vS(\x17\x8a\xd0\xf2\xa0\x00\x00\u07d4WJ\xd95S\x90\u421e\xf4*\xcd\x13\x8b*'\xe7\x8c\x00\xae\x89Tg\xb72\xa9\x134\x00\x00\u07d4WM\xe1\xb3\xf3\x8d\x91XF\xae7\x18VJZ\xda \xc2\xf3\xed\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94W\\\x00\u0081\x82\x10\u0085U\xa0\xff)\x01\x02\x89\xd3\xf8#\t\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94Ws\xb6\x02g!\xa1\xdd\x04\xb7\x82\x8c\xd6+Y\x1b\xfb4SL\x8a\x05\xb7\xacES\xdez\xe0\x00\x00\xe0\x94WwD\x1c\x83\xe0?\v\xe8\xdd4\v\xdechP\x84|b\v\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4Wx\xff\u071b\x94\u0165\x9e\"N\xb9e\xb6\u0790\xf2\"\xd1p\x89\x12-\u007f\xf3f\x03\xfc\x00\x00\u07d4Wz\xee\xe8\u053c\b\xfc\x97\xab\x15n\xd5\u007f\xb9p\x92Sf\xbe\x89\x12\r\xf1\x14rX\xbf\x00\x00\u07d4W{-\a\xe9\xcfRJ\x18\u04c9\x15Vak\x96\x06g\x00\x00\u07d4W\xd5\xfd\x0e=0I3\x0f\xfc\xdc\xd0 
Ei\x17e{\xa2\u0689k\xf2\x01\x95\xf5T\xd4\x00\x00\u07d4W\u0754q\xcb\xfa&'\t\xf5\U00106f37t\xc5\xf5'\xb8\xf8\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4W\xdf#\xbe\xbd\xc6^\xb7_\ub732\xfa\xd1\xc0si++\xaf\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4X\x00\u03410\x83\x9e\x94I]-\x84\x15\xa8\xea,\x90\xe0\xc5\u02c9\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94X\x03\xe6\x8b4\xda\x12\x1a\xef\b\xb6\x02\xba\u06ef\xb4\xd1$\x81\u028a\x03\xcf\xc8.7\xe9\xa7@\x00\x00\xe0\x94X\x16\xc2hww\xb6\xd7\u04a2C-Y\xa4\x1f\xa0Y\xe3\xa4\x06\x8a\x1cO\xe4:\xdb\n^\x90\x00\x00\u07d4X\x1a:\xf2\x97\xef\xa4Cj)\xaf\x00r\x92\x9a\xbf\x98&\xf5\x8b\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94X\x1b\x9f\xd6\xea\xe3r\xf3P\x1fB\xeb\x96\x19\xee\xc8 \xb7\x8a\x84\x8a\x04+\xe2\xc0\f\xa5;\x8d\x80\x00\u07d4X\x1b\xdf\x1b\xb2v\xdb\u0746\xae\xdc\xdb9z\x01\xef\xc0\xe0\f[\x8965\u026d\xc5\u07a0\x00\x00\u07d4X\x1f4\xb5#\xe5\xb4\x1c\t\xc8|)\x8e)\x9c\xbc\x0e)\xd0f\x89=X3\xaa\xfd9u\x80\x00\xe0\x94X$\xa7\xe2(8'q40\x8c_KP\u06b6^C\xbb1\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4X+pf\x9c\x97\xaa\xb7\u0581H\xd8\xd4\xe9\x04\x11\xe2\x81\rV\x8965f3\xeb\xd8\xea\x00\x00\u07d4X.|\xc4o\x1d{Nn\x9d\x95\x86\x8b\xfd7\x05s\x17\x8fL\x89lk\x93[\x8b\xbd@\x00\x00\u07d4X>\x83\xbaU\xe6~\x13\xe0\xe7o\x83\x92\xd8s\xcd!\xfb\xf7\x98\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4Xi\xfb\x86}q\xf18\u007f\x86;i\x8d\t\xfd\xfb\x87\u011b\\\x89\u01bb\xf8X\xb3\x16\b\x00\x00\u07d4X}hI\xb1h\xf6\xc33+z\xba\xe7\xeblB\xc3\u007fH\xbf\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4X\x87\xdcj3\xdf\xedZ\xc1\xed\xef\xe3^\xf9\x1a!b1\xac\x96\x89\r\x8drkqw\xa8\x00\x00\xe0\x94X\x8e\u0650\xa2\xaf\xf4J\x94\x10]X\xc3\x05%w5\xc8h\xac\x8a\x03h\xc8b:\x8bM\x10\x00\x00\u07d4X\xae-\xdc_L\x8a\u0697\xe0l\x00\x86\x17\x17g\xc4#\xf5\u05c9WG=\x05\u06ba\xe8\x00\x00\u07d4X\xae\xd6gJ\xff\xd9\xf6B3'*W\x8d\xd98k\x99\xc2c\x89\xb8Pz\x82\a( \x00\x00\xe0\x94X\xb8\b\xa6[Q\xe63\x89i\xaf\xb9^\xc7\a5\xe4Q\xd5&\x8a\bxK\xc1\xb9\x83z8\x00\x00\u07d4X\xb8\xae\x8fc\xef5\xed\ab\xf0\xb6#=J\xc1Nd\xb6M\x89lk\x93[\x8b\xbd@\x00\x00\u07d4X\xba\x15ie\x0e[\xbb\xb2\x1d5\xd3\xe1u\xc0\u05b0\xc6Q\xa9\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4X\xc5U\xbc)<\xdb\x16\xc66.\xd9z\xe9U\v\x92\xea\x18\x0e\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4X\xc6P\xce\xd4\v\xb6VA\xb8\xe8\xa9$\xa09\xde\xf4hT\u07c9\x01\x00\xbd3\xfb\x98\xba\x00\x00\u07d4X\xc9\aT\xd2\xf2\n\x1c\xb1\xdd3\x06%\xe0KE\xfaa\x9d\\\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94X\xe2\xf1\x12#\xfc\x827\xf6\x9d\x99\xc6(\x9c\x14\x8c\x06\x04\xf7B\x8a\x05\x15\n\xe8J\x8c\xdf\x00\x00\x00\u07d4X\xe5T\xaf=\x87b\x96 \xdaa\xd58\xc7\xf5\xb4\xb5LJ\xfe\x89FP\x9diE4r\x80\x00\u07d4X\xe5\xc9\xe3D\xc8\x06e\r\xac\xfc\x90M3\xed\xbaQ\a\xb0\u0789\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\u07d4X\xe6a\u043as\xd6\xcf$\t\x9aUb\xb8\b\xf7\xb3g;h\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94X\xf0[&%`P<\xa7a\xc6\x18\x90\xa4\x03_Lsr\x80\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4X\xfb\x94sd\xe7iWe6\x1e\xbb\x1e\x80\x1f\xfb\x8b\x95\xe6\u0409\n\u05ce\xbcZ\xc6 \x00\x00\u07d4Y\x01\x81\xd4E\x00{\u0407Z\xaf\x06\x1c\x8dQ\x159\x00\x83j\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Y\x02\xe4J\xf7i\xa8rF\xa2\x1e\a\x9c\b\xbf6\xb0n\xfe\xb3\x8965\u026d\xc5\u07a0\x00\x00\u07d4Y\n\xcb\xda7)\f\r>\xc8O\xc2\x00\rv\x97\xf9\xa4\xb1]\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94Y\f\xcbY\x11\xcfx\xf6\xf6\"\xf55\xc4t7_J\x12\xcf\u03ca\x04<3\xc1\x93ud\x80\x00\x00\u07d4Y\x10\x10m\xeb\u0491\xa1\u0340\xb0\xfb\xbb\x8d\x8d\x9e\x93\xa7\xcc\x1e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Y\x16\x17I\xfe\xdc\xf1\xc7!\xf2 
-\x13\xad\xe2\xab\xcfF\v=\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94Y\x1b\xef1q\xd1\u0155w\x17\xa4\xe9\x8d\x17\xeb\x14,!NV\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4Y <\xc3u\x99\xb6H1*|\xc9\xe0m\xac\xb5\x89\xa9\xaej\x89\b\x0fyq\xb6@\x0e\x80\x00\u07d4Y&\x81q\xb83\xe0\xaa\x13\xc5KR\xcc\xc0B.O\xa0:\ub262\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94Y'w&\x1e;\xd8R\u010e\u0295\xb3\xa4L[\u007f-B,\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4Y0Dg\x0f\xae\xff\x00\xa5[Z\xe0Q\xeb{\xe8p\xb1\x16\x94\x89\a?u\u0460\x85\xba\x00\x00\xe0\x94Y;E\xa1\x86J\xc5\xc7\xe8\xf0\u02ae\xba\r\x87<\xd5\xd1\x13\xb2\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4Y_^\xdajV\xf1N%\xe0\xc6\xf3\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4Z\x1a3ib\xd6\xe0\xc601\u0303\u01a5\u01a6\xf4G\x8e\u02c965\u026d\xc5\u07a0\x00\x00\u07d4Z\x1d--\x1dR\x03\x04\xb6 \x88IW\x047\xeb0\x91\xbb\x9f\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4Z&s1\xfa\xcb&-\xaa\xec\xd9\xddc\xa9p\f_RY\u07c9\x05k\xc7^-c\x10\x00\x00\xe0\x94Z(WU9\x1e\x91NX\x02_\xaaH\xcch_O\xd4\xf5\xb8\x8a\x05\x81v{\xa6\x18\x9c@\x00\x00\u07d4Z)\x16\xb8\xd2\xe8\xcc\x12\xe2\a\xabFMC>#p\xd8#\u0649lk\x93[\x8b\xbd@\x00\x00\u07d4Z+\x1c\x85:\xeb(\xc4U9\xafv\xa0\n\xc2\u0628$(\x96\x89\x01Z\xf1\u05cbX\xc4\x00\x00\u07d4Z-\xaa\xb2\\1\xa6\x1a\x92\xa4\xc8,\x99%\xa1\xd2\xefXX^\x89\f8\r\xa9\u01d5\f\x00\x00\u07d4Z0\xfe\xac7\xac\x9fr\u05f4\xaf\x0f+\xc79R\xc7O\xd5\u00c9lk\x93[\x8b\xbd@\x00\x00\u07d4ZTh\xfa\\\xa2&\xc7S.\xcf\x06\xe1\xbc\x1cE\"]~\u0249g\x8a\x93 b\xe4\x18\x00\x00\u07d4ZVR\x857JI\xee\xddPL\x95}Q\bt\xd0\x04U\xbc\x89\x05k\xc7^-c\x10\x00\x00\u07d4Z^\xe8\xe9\xbb\x0e\x8a\xb2\xfe\xcbK3\u0494x\xbeP\xbb\xd4K\x89*\x11)\u0413g \x00\x00\xe0\x94Z_\x85\b\xda\x0e\xbe\xbb\x90\xbe\x903\xbdM\x9e'A\x05\xae\x00\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4Z`q\xbc\xeb\xfc\xbaJ\xb5\u007fM\xb9o\u01e6\x8b\xec\xe2\xba[\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Z`\xc9$\x16(s\xfc~\xa4\xda\u007f\x97.5\x01g7`1\x89\x04\x87\xf2w\xa8\x85y\x80\x00\u07d4Zf\x86\xb0\xf1~\a\xed\xfcY\xb7Y\xc7}[\xef\x16M8y\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4Zp\x10o \xd6?\x87Re\xe4\x8e\r5\xf0\x0e\x17\xd0+\u0249\x01\x15\x8eF\t\x13\xd0\x00\x00\u0794Zt\xbab\xe7\xc8\x1a4t\xe2}\x89O\xed3\xdd$\xad\x95\xfe\x88\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x94Zw5\x00}p\xb0hD\u0699\x01\xcd\xfa\xdb\x11\xa2X,/\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4Z\x82\xf9l\u0537\xe2\xd9=\x10\xf3\x18]\xc8\xf4=Ku\xaai\x89lc?\xba\xb9\x8c\x04\x00\x00\u07d4Z\x87\xf04\xe6\xf6\x8fNt\xff\xe6\fd\x81\x946\x03l\xf7\u05c9\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94Z\x89\x11U\xf5\x0eB\aCt\xc79\xba\xad\xf7\xdf&Q\x15:\x8a\x01\x02\xdao\xd0\xf7:<\x00\x00\u07d4Z\x9c\x8bi\xfcaMiVI\x99\xb0\r\xcbB\xdbg\xf9~\x90\x89\xb9\xe6\x15\xab\xad:w\x80\x00\xe0\x94Z\xaf\x1c1%Jn\x00_\xba\u007fZ\xb0\xecy\xd7\xfc+c\x0e\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4Z\xb1\xa5aSH\x00\x1c|w]\xc7WHf\x9b\x8b\xe4\xde\x14\x89%jr\xfb)\xe6\x9c\x00\x00\xe1\x94Z\xbf\xec%\xf7L\u06047c\x1aw1\x90i2wcV\xf9\x8b\t\xd8<\xc0\u07e1\x11w\xff\x80\x00\u07d4Z\u0090\x8b\x0f9\x8c\r\xf5\xba\xc2\xcb\x13\xcas\x14\xfb\xa8\xfa=\x89\n\xd4\xc81j\v\f\x00\x00\xe0\x94Z\u025a\u05c1j\xe9\x02\x0f\xf8\xad\xf7\x9f\xa9\x86\x9b|\xeaf\x01\x8a\x04ri\x8bA;C \x00\x00\u07d4Z\xd1,^\xd4\xfa\x82~!P\u03e0\u058c\n\xa3{\x17i\xb8\x89+^:\xf1k\x18\x80\x00\x00\xe0\x94Z\xd5\xe4 
uV\x13\x88o5\xaaV\xac@>\xeb\xdf\xe4\xb0\u040a\x10\xf0\xcf\x06M\u0552\x00\x00\x00\u07d4Z\xdew\xfd\x81\xc2\\\n\xf7\x13\xb1\a\x02v\x8c\x1e\xb2\xf9u\xe7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4Z\xe6N\x85;\xa0\xa5\x12\x82\u02cd\xb5.Aa^|\x9fs?\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Z\xed\x0el\xfe\x95\xf9\u0580\xc7dr\xa8\x1a+h\n\u007f\x93\xe2\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4Z\xef\x16\xa2&\xddh\a\x1f$\x83\xe1\xdaBY\x83\x19\xf6\x9b,\x89lk\x93[\x8b\xbd@\x00\x00\u07d4Z\xf4j%\xac\t\xcbsakS\xb1O\xb4/\xf0\xa5\x1c\u0772\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4Z\xf7\xc0r\xb2\u016c\xd7\x1cv\xad\xdc\xceS\\\xf7\xf8\xf95\x85\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94Z\xfd\xa9@\\\x8e\x976QEt\u0692\x8d\xe6tV\x01\t\x18\x8a\x01E\xb8\xb0#\x9aF\x92\x00\x00\u07d4[\x06\xd1\xe6\x93\f\x10Ti+y\xe3\xdb\xe6\xec\xceS\x96d \x89\v\"\u007fc\xbe\x81<\x00\x00\u07d4[%\xca\xe8m\xca\xfa*`\xe7r61\xfc_\xa4\x9c\x1a\xd8}\x89\x87\fXQ\x0e\x85 \x00\x00\u07d4[(|~sB\x99\xe7'bo\x93\xfb\x11\x87\xa6\rPW\xfe\x89\x05|\xd94\xa9\x14\xcb\x00\x00\u07d4[)\f\x01\x96|\x81.M\xc4\xc9\v\x17L\x1b@\x15\xba\xe7\x1e\x89\b \xeb4\x8dR\xb9\x00\x00\u07d4[+d\xe9\xc0X\u30a8\xb2\x99\"N\xec\xaa\x16\xe0\x9c\x8d\x92\x89\b\xbaR\xe6\xfcE\xe4\x00\x00\xe0\x94[./\x16\x18U.\xab\r\xb9\x8a\xddUc|)Q\xf1\xfb\x19\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4[0`\x8cg\x8e\x1a\xc4d\xa8\x99L;3\xe5\xcd\xf3Iq\x12\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4[36\x96\xe0L\xca\x16\x92\xe7\x19\x86W\x9c\x92\rk)\x16\xf9\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94[C\rw\x96\x96\xa3e?\xc6\x0et\xfb\u02ec\xf6\xb9\u00ba\xf1\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4[Cse\xae:\x9a/\xf9|h\xe6\xf9\nv \x18\x8c}\x19\x89l\x87T\xc8\xf3\f\b\x00\x00\u07d4[I\xaf\xcduDx8\xf6\xe7\xce\u068d!w}O\xc1\xc3\xc0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4[L\f`\xf1\x0e\u0489K\xdbB\xd9\xdd\x1d!\x05\x87\x81\n\r\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4[N\xa1m\xb6\x80\x9b\x03R\u0536\xe8\x1c9\x13\xf7jQ\xbb2\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4[[\xe0\xd8\xc6rv\xba\xab\xd8\xed\xb3\rH\xeaud\v\x8b)\x89,\xb1\xf5_\xb7\xbe\x10\x00\x00\u07d4[]Qp)2\x15b\x11\x1bC\bm\v\x045\x91\x10\x9ap\x89\x8c\xf2?\x90\x9c\x0f\xa0\x00\x00\xe0\x94[]\x8c\x8e\xedl\x85\xac!Va\xde\x02fv\x82?\xaa\n\f\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4[mU\xf6q)g@\\e\x91)\xf4\xb1\xde\t\xac\xf2\xcb{\x89\x0e~\xeb\xa3A\vt\x00\x00\u07d4[p\u011c\u024b=\xf3\xfb\xe2\xb1Y\u007f\\\x1bcG\xa3\x88\xb7\x894\x95tD\xb8@\xe8\x00\x00\u07d4[sn\xb1\x83Sb\x9b\u0796v\xda\xdd\x16P4\xce^\xcch\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4[u\x9f\xa1\x10\xa3\x1c\x88F\x9fT\xd4K\xa3\x03\xd5}\xd3\xe1\x0f\x89[F\xdd/\x0e\xa3\xb8\x00\x00\u07d4[w\x84\xca\xea\x01y\x9c\xa3\x02'\x82vg\xce |\\\xbcv\x89lk\x93[\x8b\xbd@\x00\x00\u07d4[x\xec\xa2\u007f\xbd\xeao&\xbe\xfb\xa8\x97+)^x\x146K\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94[\x80\v\xfd\x1b>\u0525}\x87Z\xed&\xd4/\x1aw\b\xd7*\x8a\x01Z\x82\xd1\u057b\x88\xe0\x00\x00\u07d4[\x85\xe6\x0e*\xf0TO/\x01\xc6N 2\x90\x0e\xbd8\xa3\u01c9lk\x93[\x8b\xbd@\x00\x00\u07d4[\xa2\xc6\xc3]\xfa\xec)h&Y\x19\x04\xd5DFJ\xea\xbd^\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94[\xafmt\x96 \x80>\x83H\xaf7\x10\xe5\xc4\xfb\xf2\x0f\u0214\x8a\x01\x0f@\x02a]\xfe\x90\x00\x00\u07d4[\xc1\xf9U\a\xb1\x01\x86B\xe4\\\xd9\xc0\xe2'3\xb9\xb1\xa3&\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94[\xd25GG\u007fm\t\u05f2\xa0\x05\xc5\xeee\fQ\fV\u05ca\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4[\xd2J\xac6\x12\xb2\f`\x9e\xb4gy\xbf\x95i\x84\a\xc5|\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4[\u0586-Q}M\xe4U\x9dN\xec\n\x06\xca\xd0^/\x94n\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4[\xe0EQ*\x02n?\x1c\xeb\xfdZ~\xc0\xcf\xc3o-\xc1k\x89\x06\x81U\xa46v\xe0\x00\x00\xe0\x94[\xf9\xf2\"nZ\xea\xcf\x1d\x80\xae\nY\xc6\xe3\x808\xbc\x8d\xb5\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4[\xfa\xfe\x97\xb1\xdd\x1dq+\xe8mA\xdfy\x89SE\x87Z\x87\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\\\x0f.Q7\x8fk\r{\xabas1X\vn9\xad<\xa5\x8a\x02\bj\xc3Q\x05&\x00\x00\x00\u07d4\\)\xf9\xe9\xa5#\xc1\xf8f\x94H\xb5\\H\xcb\xd4|%\xe6\x10\x894F\xa0\xda\xd0L\xb0\x00\x00\xe0\x94\\0\x8b\xacHW\xd3;\xae\xa0t\xf3\x95m6!\xd9\xfa(\xe1\x8a\x01\x0f\b\xed\xa8\xe5U\t\x80\x00\u07d4\\1*V\u01c4\xb1\"\t\x9bvM\x05\x9c!\xec\xe9^\x84\u0289\x05&c\u032b\x1e\x1c\x00\x00\u07d4\\1\x99m\xca\xc0\x15\xf9\xbe\x98[a\x1fF\x870\xef$M\x90\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\\24W\xe1\x87v\x1a\x82v\xe3Y\xb7\xb7\xaf?;n=\xf6\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\\<\x1cd[\x91uC\x11;>l\x1c\x05M\xa1\xfet+\x9a\x89+^:\xf1k\x18\x80\x00\x00\u0794\\=\x19D\x1d\x19l\xb4Cf \xfc\xad\u007f\xbby\xb2\x9ex\x88\xc6s\xce<@\x16\x00\x00\u07d4\\?V\u007f\xaf\xf7\xba\u0475\x12\x00\"\xe8\xcb\u02a8+I\x17\xb3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\\Ch\x91\x8a\xced\t\u01de\u0280\u036a\xe49\x1d+bN\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\\FA\x97y\x1c\x8a=\xa3\xc9%Co'z\xb1;\xf2\xfa\xa2\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\\H\x81\x16\\\xb4+\xb8.\x979l\x8e\xf4J\xdb\xf1s\xfb\x99\x89\x05\xfe\xe2\"\x04\x1e4\x00\x00\xe0\x94\\H\x92\x90z\a \xdfo\xd3A>c\xffv}k9\x80#\x8a\x02\xcb\x00\x9f\u04f5y\x0f\x80\x00\u07d4\\O$\xe9\x94\ud3c5\x0e\xa7\x81\x8fG\x1c\x8f\xac;\xcf\x04R\x89]\x80h\x8d\x9e1\xc0\x00\x00\u07d4\\T\x19V\\:\xadNqN\a92\x8e5!\u024f\x05\u0309\x1c\x9fx\u0489>@\x00\x00\u07d4\\a6\xe2\x18\xde\na\xa17\xb2\xb3\x96-*a\x12\xb8\t\u05c9\x0f\xf3\u06f6_\xf4\x86\x80\x00\xe0\x94\\a\xaby\xb4\b\xdd2)\xf6bY7\x05\xd7/\x1e\x14{\xb8\x8a\x04\xd0$=4\x98\u0344\x00\x00\u07d4\\m\x04\x1d\xa7\xafD\x87\xb9\xdcH\xe8\xe1\xf6\af\u0425m\xbc\x89O\a\n\x00>\x9ct\x00\x00\u07d4\\o6\xaf\x90\xab\x1aeln\xc8\xc7\xd5!Q'b\xbb\xa3\xe1\x89lh\xcc\u041b\x02,\x00\x00\u07d4\\{\x9e\u01e2C\x8d\x1eD*\x86\x0f\x8a\x02\x1e\x18\x99\xf07z\xea\x00\x00\u07d4\\\xcc\xf1P\x8b\xfd5\xc2\x050\xaad%\x00\xc1\r\xeee\xea\xed\x89.\x14\x1e\xa0\x81\xca\b\x00\x00\u07d4\\\xcer\xd0h\xc7\xc3\xf5[\x1d(\x19T^w1|\xae\x82@\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\\\xd0\xe4u\xb5D!\xbd\xfc\f\x12\xea\x8e\b+\u05e5\xaf\nj\x89\x032\xca\x1bg\x94\f\x00\x00\u07d4\\\u0548\xa1N\xc6H\xcc\xf6G)\xf9\x16z\xa7\xbf\x8b\xe6\xeb=\x8965\u026d\xc5\u07a0\x00\x00\u07d4\\\u062f`\xdee\xf2M\xc3\xceW0\xba\x92e0\"\xdcYc\x89a\t=|,m8\x00\x00\u07d4\\\xdcG\b\xf1O@\xdc\xc1Zy_}\xc8\xcb\v\u007f\xaa\x9en\x89\x1d\x1c_>\xda \xc4\x00\x00\u07d4\\\u0d86,\u0391b\xe8~\bI\xe3\x87\xcb]\xf4\xf9\x11\x8c\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\xe0\x94\\\xe2\xe7\u03aa\xa1\x8a\xf0\xf8\xaa\xfa\u007f\xba\xd7L\u021e<\xd46\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\\\xe4@h\xb8\xf4\xa3\xfey\x9ej\x83\x11\xdb\xfd\xed\xa2\x9d\xee\x0e\x89lk\x93[\x8b\xbd@\x00\x00\u0794\\\xeb\xe3\v*\x95\xf4\xae\xfd\xa6ee\x1d\xc0\xcf~\xf5u\x81\x99\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\\\xf1\x8f\xa7\u0227\xc0\xa2\xb3\xd5\xef\u0459\x0fd\xdd\xc5i$,\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\\\xf4N\x10T\reqd#\xb1\xbc\xb5B\xd2\x1f\xf8:\x94\u034a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\\\xf8\xc0>\xb3\xe8r\xe5\x0f|\xfd\f/\x8d;?,\xb5\x18:\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\\\xfa\x8dV\x85ue\x8c\xa4\xc1\xa5\x93\xacL]\x0eD\xc6\aE\x89\x0f\xc6o\xae7F\xac\x00\x00\u07d4\\\xfa\x98w\xf7\x19\u01dd\x9eIJ\b\xd1\xe4\x1c\xf1\x03\xfc\x87\u0249\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4]\x1d\xc38{G\xb8E\x1eU\x10l\f\xc6}m\xc7+\u007f\v\x89lk\x93[\x8b\xbd@\x00\x00\u07d4]#\x1ap\xc1\xdf\xeb6\n\xbd\x97\xf6\x16\xe2\xd1\r9\xf3\u02b5\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4]$\xbd\xbc\x1cG\xf0\xeb\x83\xd1(\xca\xe4\x8a\xc3\xf4\xb5\x02bt\a\xda'/g\x81Jk\xec\u0509\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4]\x83\xb2\x1b\xd2q#`Ckg\xa5\x97\xee3x\xdb>z\xe4\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94]\x87+\x12.\x99N\xf2|q\xd7\u07b4W\xbfeB\x9e\xcal\x8a\x01\xb1\xad\xed\x81\u04d4\x10\x80\x00\xe0\x94]\x8d1\xfa\xa8d\xe2!Y\xcdoQu\xcc\xec\xc5?\xa5Mr\x8a\x05\xb6\x96\xb7\r\xd5g\x10\x00\x00\xe0\x94]\x95\x8a\x9b\u0449\u0098_\x86\u014a\x8ci\xa7\xa7\x88\x06\xe8\u068a\x02(\xf1o\x86\x15x`\x00\x00\u07d4]\xa2\xa9\xa4\xc2\xc0\xa4\xa9$\xcb\xe0\xa5:\xb9\xd0\xc6'\xa1\u03e0\x89'\xbf8\xc6TM\xf5\x00\x00\u07d4]\xa4\u0288\x93\\'\xf5\\1\x10H\x84\x0eX\x9e\x04\xa8\xa0I\x89\x04V9\x18$O@\x00\x00\u07d4]\xa5G\x85\u027d0W\\\x89\u07b5\x9d A\xd2\n9\xe1{\x89j\xa2\t\xf0\xb9\x1de\x80\x00\xe0\x94]\xb6\x9f\xe9>o\xb6\xfb\xd4P\x96k\x97#\x8b\x11\n\xd8'\x9a\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4]\xb7\xbb\xa1\xf9W?$\x11]\x8c\x8cb\xe9\u0388\x95\x06\x8e\x9f\x89\x02\xb5\xaa\xd7,e \x00\x00\xe0\x94]\xb8D\x00W\x00i\xa9W<\xab\x04\xb4\u6d955\xe2\x02\xb8\x8a\x02\r\u058a\xaf2\x89\x10\x00\x00\u07d4]\xc3m\xe55\x94P\xa1\xec\t\xcb\fD\xcf+\xb4+:\xe45\x89<\x94m\x89;3\x06\x00\x00\u07d4]\xc6\xf4_\xef&\xb0n3\x021?\x88M\xafH\xe2to\xb9\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94]\u0376\xb8zP\xa9\xde\x02C\x80\x00\x00\u07d4^Q\xb8\xa3\xbb\t\xd3\x03\xea|\x86\x05\x15\x82\xfd`\x0f\xb3\xdc\x1a\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u0794^X\xe2U\xfc\x19\x87\n\x040_\xf2\xa0F1\xf2\xff)K\xb1\x88\xf4?\xc2\xc0N\xe0\x00\x00\u07d4^ZD\x19t\xa8=t\u0187\xeb\xdcc?\xb1\xa4\x9e{\x1a\u05c9\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4^eE\x8b\xe9d\xaeD\x9fqw7\x04\x97\x97f\xf8\x89\x87a\x89\x1c\xa7\xccs[o|\x00\x00\u07d4^g\u07c9i\x10\x1a\u06bd\x91\xac\xcdk\xb1\x99\x12t\xaf\x8d\xf2\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4^n\x97G\xe1b\xf8\xb4\\en\x0fl\xaez\x84\xba\xc8\x0eN\x89lk\x93[\x8b\xbd@\x00\x00\u07d4^s\x1bU\xce\xd4R\xbb??\xe8q\xdd\xc3\xed~\xe6Q\n\x8f\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4^t\xed\x80\xe9eW\x88\xe1\xbb&\x97R1\x96g\xfeuNZ\x89\x03\t'\xf7L\x9d\xe0\x00\x00\u07d4^w.'\xf2\x88\x00\xc5\r\u0697;\xb3>\x10v.n\xea \x89a\t=|,m8\x00\x00\u07d4^{\x8cT\xdcW\xb0@ bq\x9d\xee~\xf5\xe3~\xa3]b\x89\x9b\xf9\x81\x0f\xd0\\\x84\x00\x00\u07d4^\u007fp7\x87uX\x9f\xc6j\x81\xd3\xf6S\xe9T\xf5U`\ub243\xf2\x89\x18\x1d\x84\xc8\x00\x00\xe0\x94^\x80n\x84W0\xf8\a>l\xc9\x01\x8e\xe9\x0f\\\x05\xf9\t\xa3\x8a\x02\x01\xe9m\xac\u03af 
\x00\x00\u07d4^\x8eM\xf1\x8c\xf0\xafw\tx\xa8\u07cd\xac\x90\x93\x15\x10\xa6y\x89lk\x93[\x8b\xbd@\x00\x00\u07d4^\x90\xc8Xw\x19\x87V\xb06l\x0e\x17\xb2\x8eR\xb4FPZ\x89\x14JJ\x18\xef\xebh\x00\x00\u07d4^\x95\xfe_\xfc\xf9\x98\xf9\xf9\xac\x0e\x9a\x81\u06b8>\xadw\x00=\x89\x1dB\xc2\r2y\u007f\x00\x00\u07d4^\xad)\x03z\x12\x89dx\xb1)j\xb7\x14\xe9\u02d5B\x8c\x81\x89\x03\xe0C\a-@n\x00\x00\u07d4^\xb3q\xc4\a@lB{;}\xe2q\xad<\x1e\x04&\x95y\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4^\u037a\xea\xb9\x10o\xfe]{Q\x96\x96`\x9a\x05\xba\ub16d\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4^\xd0\xd63\x85Y\xefD\xdcza\xed\xeb\x89?\xa5\xd8?\xa1\xb5\x89\v\xed\x1d\x02c\xd9\xf0\x00\x00\xe0\x94^\u04fb\xc0R@\xe0\u04d9\xebm\xdf\xe6\x0fb\xdeM\x95\t\xaf\x8a)\x14\xc0$u\xf9\xd6\xd3\x00\x00\u0594^\xd3\xf1\xeb\xe2\xaegV\xb5\xd8\xdc\x19\xca\xd0,A\x9a\xa5w\x8b\x80\u07d4^\xd5a\x15\xbde\x05\xa8\x82s\xdf\\V\x83\x94p\xd2J-\xb7\x89\x03\x8ee\x91\xeeVf\x80\x00\xe0\x94^\xf8\xc9a\x86\xb3y\x84\xcb\xfe\x04\u0158@n;\n\xc3\x17\x1f\x8a\x01\xfd\x934\x94\xaa_\xe0\x00\x00\u07d4^\xfb\xdf\xe58\x99\x99c<&`Z[\xfc,\x1b\xb5\x95\x93\x93\x89\x03\xc0W\xc9\\\xd9\b\x00\x00\xe0\x94_\x13\x15F1Fm\xcb\x13S\u0210\x93*|\x97\xe0\x87\x8e\x90\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4_\x16z\xa2B\xbcL\x18\x9a\xde\xcb:\u0127\xc4R\xcf\x19/\u03c9lkLM\xa6\u077e\x00\x00\xe0\x94_\x1c\x8a\x04\xc9\rs[\x8a\x15)\t\xae\xaeco\xb0\xce\x16e\x8a\x01{x'a\x8cZ7\x00\x00\u07d4_#\xba\x1f7\xa9lE\xbcI\x02YS\x8aT\u008b\xa3\xb0\u0549A\rXj \xa4\xc0\x00\x00\u07d4_&\xcf4Y\x9b\xc3n\xa6{\x9ez\x9f\x9bC0\xc9\xd5B\xa3\x8965\u026d\xc5\u07a0\x00\x00\u07d4_)\xc9\xdev]\xde%\x85*\xf0}3\xf2\xceF\x8f\xd2\t\x82\x89lk\x93[\x8b\xbd@\x00\x00\u07d4_/\a\xd2\u0597\xe8\xc5g\xfc\xfd\xfe\x02\x0fI\xf3`\xbe!9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4_2\x1b=\xaa\xa2\x96\xca\xdf)C\x9f\x9d\xab\x06*K\xff\xed\u0589\x04p%\x90>\xa7\xae\x00\x00\u07d4_3:;#\x10vZ\r\x182\xb9\xbeL\n\x03pL\x1c\t\x8965\u026d\xc5\u07a0\x00\x00\u07d4_4K\x01\xc7\x19\x1a2\xd0v*\xc1\x88\xf0\xec-\xd4`\x91\x1d\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94_6>\n\xb7G\xe0-\x1b;f\xab\xb6\x9e\xa5<{\xafR:\x8a\x02w\x01s8\xa3\n\xe0\x00\x00\u07d4_7[\x86`\f@\u0328\xb2gkz\x1a\x1d\x16D\xc5\xf5,\x89\x04F\x18\xd7Lb?\x00\x00\u07d4_>\x1eg9\xb0\xc6\"\x00\xe0\n\x006\x91\xd9\xef\xb28\u061f\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4_H?\xfb\x8fh\n\xed\xf2\xa3\x8fx3\xaf\xdc\xdeY\xb6\x1eK\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94_J\xceL\x1c\xc13\x91\xe0\x1f\x00\xb1\x98\xe1\xf2\v_\x91\xcb\xf5\x8a\x01\x0f\x0f\xa8\xb9\u04c1\x1a\x00\x00\xe0\x94_R\x12\x82\xe9\xb2x\u070c\x03Lr\xafS\xee)\xe5D=x\x8a\x01as-/\x8f:\xe0\x00\x00\u07d4_h\xa2L~\xb4\x11vgs{39?\xb3\xc2\x14\x8aS\xb6\x89\x02\xce\u0791\x8dE<\x00\x00\u07d4_p\x8e\xaf9\xd8#\x94lQ\xb3\xa3\u9df3\xc0\x03\xe2cA\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4_t.H~:\xb8\x1a\xf2\xf9J\xfd\xbe\x1b\x9b\x8f\\\u0301\xbc\x89u\xc4E\xd4\x11c\xe6\x00\x00\u07d4_t\xed\x0e$\xff\x80\u0672\u0124K\xaa\x99uB\x8c\u05b95\x89\xa1\x8b\xce\xc3H\x88\x10\x00\x00\u07d4_v\xf0\xa3\x06&\x9cx0k=e\r\xc3\xe9\xc3p\x84\xdba\x89\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4_w\xa1\a\xab\x12&\xb3\xf9_\x10\ue0ee\xfcl]\xff>\u0709\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4_{;\xba\xc1m\xab\x83\x1aJ\x0f\xc5;\fT\x9d\xc3l1\u0289i*\xe8\x89p\x81\xd0\x00\x00\xe0\x94_\x93\xff\x83't\xdbQ\x14\xc5[\xb4\xbfD\xcc\U000f53d0?\x8a(\xa9\xc9\x1a&4X)\x00\x00\u07d4_\x96\x16\xc4{Jg\xf4\x06\xb9Z\x14\xfeo\xc2h9o\x17!\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4_\x98\x109\xfc\xf5\x02%\xe2\xad\xf7bu!\x12\xd1\xcc&\xb6\xe3\x89\x1b\x1aAj!S\xa5\x00\x00\u07d4_\x99\u070eI\xe6\x1dW\xda\xef`j\xcd\xd9\x1bMp\a2j\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4_\xa6\x1f\x15-\xe6\x125\x16\xc7Q$)y(_yj\u01d1\x89\v\x0f\x11\x97)c\xb0\x00\x00\u07d4_\xa7\xbf\xe0C\x88a'\xd4\x01\x1d\x83V\xa4~\x94yc\xac\xa8\x89b\xa9\x92\xe5:\n\xf0\x00\x00\xe0\x94_\xa8\xa5Nh\x17lO\xe2\xc0\x1c\xf6q\xc5\x15\xbf\xbd\xd5(\xa8\x8aE\xe1U\xfa\x01\x10\xfa@\x00\x00\u07d4_\xad\x96\x0fk,\x84V\x9c\x9fMG\xbf\x19\x85\xfc\xb2\xc6]\xa6\x8965f3\xeb\xd8\xea\x00\x00\u07d4_\xc6\xc1\x14&\xb4\xa1\xea\xe7\xe5\x1d\xd5\x12\xad\x10\x90\xc6\xf1\xa8[\x89\x93\xfe\\W\xd7\x10h\x00\x00\u07d4_\u0344Th\x96\xdd\b\x1d\xb1\xa3 \xbdM\x8c\x1d\xd1R\x8cL\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4_\u0368G\xaa\xf8\xd7\xfa\x8b\xca\b\x02\x9c\xa2\x84\x91f\xaa\x15\xa3\x89!\u02b8\x12Y\xa3\xbf\x00\x00\u07d4_\xd1\xc3\xe3\x17x'l\xb4.\xa7@\xf5\xea\xe9\xc6A\xdb\xc7\x01\x89\n\x84Jt$\xd9\xc8\x00\x00\u07d4_\xd3\xd6w~\xc2b\n\xe8:\x05R\x8e\xd4%\a-<\xa8\xfd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4_\xd9s\xaf6j\xa5\x15|Te\x9b\u03f2|\xbf\xa5\xac\x15\u0589\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4_\xe7w\x03\x80\x8f\x82>l9\x93R\x10\x8b\xdb,R|\xb8|\x89j@v\xcfy\x95\xa0\x00\x00\xe0\x94_\xecI\xc6e\xe6N\xe8\x9d\xd4A\xeet\x05n\x1f\x01\xe9(p\x8a\x01V\x9b\x9es4t\xc0\x00\x00\u07d4_\xf3&\xcd`\xfd\x13k$^)\xe9\bzj\u04e6R\u007f\r\x89e\xea=\xb7UF`\x00\x00\u07d4_\xf9=\xe6\xee\x05L\xadE\x9b-^\xb0\xf6\x87\x03\x89\xdf\xcbt\x89\v\xed\x1d\x02c\xd9\xf0\x00\x00\u07d4`\x06\xe3m\x92\x9b\xf4]\x8f\x16#\x1b\x12j\x01\x1a\xe2\x83\xd9%\x89\t\x8a}\x9b\x83\x14\xc0\x00\x00\u07d4`!\xe8Z\x88\x14\xfc\xe1\xe8*A\xab\xd1\u04f2\xda\xd2\xfa\xef\xe0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4`8t\n\xe2\x8df\xba\x93\xb0\xbe\bH+2\x05\xa0\xf7\xa0{\x89\x11!a\x85\u009fp\x00\x00\u07d4`?/\xabz\xfbn\x01{\x94v`i\xa4\xb4;8\x96I#\x89Y\xd2\xdb$\x14\u0699\x00\x00\u07d4`B'm\xf2\x98?\xe2\xbcGY\xdc\x19C\xe1\x8f\xdb\xc3Ow\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4`B\xc6D\xba\xe2\xb9o%\xf9M1\xf6x\xc9\r\xc9f\x90\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4`L\xdf\x18b\x8d\xbf\xa82\x91\x94\xd4x\xddR\x01\xee\xccK\xe7\x89\x01?0j$\t\xfc\x00\x00\u07d4`N\x94w\xeb\xf4r|t[\u02bb\xed\xcbl\xcf)\x99@\"\x8966\x9e\xd7t}&\x00\x00\u07d4`gm\x1f\xa2\x1f\xca\x05\"\x97\xe2K\xf9c\x89\u0171*p\u05c9\r\x17|Zzh\xd6\x00\x00\u07d4`gn\x92\u044b\x00\x05\t\xc6\x1d\xe5@\xe6\xc5\u0776v\xd5\t\x89A\rXj \xa4\xc0\x00\x00\u07d4`o\x17q!\xf7\x85\\!\xa5\x06#0\xc8v\"d\xa9{1\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4`\x86B6\x93\r\x04\xd8@+]\xcb\xeb\x80\u007f<\xafa\x1e\xa2\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4`\xabq\xcd&\xeamnY\xa7\xa0\xf6'\xee\a\x9c\x88^\xbb\xf6\x89\x01s\x17\x90SM\xf2\x00\x00\u07d4`\xaf\x0e\xe1\x18D<\x9b7\xd2\xfe\xadw\xf5\xe5!\u07be\x15s\x89g\x8a\x93 
b\xe4\x18\x00\x00\u07d4`\xb3X\xcb=\xbe\xfa7\xf4}\xf2\xd76X@\u068e;\u024c\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4`\xb8\u05b7;ySO\xb0\x8b\xb8\xcb\xce\xfa\xc7\xf3\x93\xc5{\xfe\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4`\xbeo\x95?*M%\xb6%o\xfd$#\xac\x148%.N\x89\b!\xab\rD\x14\x98\x00\x00\u0794`\xc3qO\xdd\xdbcFY\u48b1\xeaB\xc4r\x8c\u01f8\xba\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4`\xcc=D^\xbd\xf7j}z\xe5q\u0197\x1d\xffh\u0305\x85\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94`\xd5fq@\xd1&\x14\xb2\x1c\x8e^\x8a3\b.2\xdf\xcf#\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4`\xde\"\xa1Pt2\xa4{\x01\xcch\xc5*\v\xf8\xa2\xe0\u0418\x89\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\u07d4`\xe0\xbd\u0422Y\xbb\x9c\xb0\x9d?7\xe5\u034b\x9d\xac\uafca\x89JD\x91\xbdm\xcd(\x00\x00\u07d4`\xe3\xccC\xbc\xdb\x02j\xadu\x9cpf\xf5U\xbb\xf2\xacf\xf5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4a\x04+\x80\xfd`\x95\u0478{\xe2\xf0\x0f\x10\x9f\xab\xaf\xd1W\xa6\x89\x05k\xc7^-c\x10\x00\x00\u07d4a\a\xd7\x1d\xd6\xd0\xee\xfb\x11\xd4\xc9\x16@L\xb9\x8cu>\x11}\x89lk\x93[\x8b\xbd@\x00\x00\u07d4a\x0f\xd6\xeeN\xeb\xab\x10\xa8\xc5]\vK\xd2\xe7\xd6\xef\x81qV\x89\x01\x15\x95a\x06]]\x00\x00\u07d4a\x14\xb0\xea\xe5Wi\x03\xf8\v\xfb\x98\x84-$\xed\x92#\u007f\x1e\x89\x05k\xc7^-c\x10\x00\x00\u07d4a!\xaf9\x8a[-\xa6\x9fe\xc68\x1a\xec\x88\u039c\xc6D\x1f\x89\"\xb1\xc8\xc1\"z\x00\x00\x00\u07d4a&g\xf1r\x13[\x95\v,\xd1\xde\x10\xaf\xde\xcehW\xb8s\x8965\u026d\xc5\u07a0\x00\x00\u07d4a,\xed\x8d\xc0\u071e\x89\x9e\xe4oyb33\x15\xf3\xf5^D\x89\x12^5\xf9\xcd=\x9b\x00\x00\u07d4a4\xd9B\xf07\xf2\xcc=BJ#\f`=g\xab\xd3\xed\xf7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4a:\xc5;\xe5e\xd4e6\xb8 q[\x9b\x8d:\xe6\x8aK\x95\x89\xcb\xd4{n\xaa\x8c\xc0\x00\x00\u07d4a?\xabD\xb1k\xbeUMD\xaf\xd1x\xab\x1d\x02\xf3z\ua949lk\x93[\x8b\xbd@\x00\x00\u07d4aN\x8b\xef=\xd2\u015bY\xa4\x14Vt@\x10\x185\x18\x84\xea\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4aQ\x84d\xfd\u0637<\x1b\xb6\xacm\xb6\x00eI8\xdb\xf1z\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4aT}7nSi\xbc\xf9x\xfc\x16,1\xc9\b\"3\xb8%\xd0%\xbe?{\x10V\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4a\x91\xdd\u0276J\x8e\b\x90\xb427\t\u05e0|H\xb9*d\x89*\x03I\x19\u07ff\xbc\x00\x00\u07d4a\x96\xc3\xd3\xc0\x90\x8d%Cf\xb7\xbc\xa5WE\"-\x9dM\xb1\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4a\x9f\x17\x14E\xd4+\x02\xe2\xe0p\x04\xad\x8a\xfeiO\xa5=j\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4a\xad\xf5\x92\x9a^)\x81hN\xa2C\xba\xa0\x1f}\x1f^\x14\x8a\x89\x05\xfa\xbfl\x98O#\x00\x00\u07d4a\xb1\xb8\xc0\x12\xcdLx\xf6\x98\xe4p\xf9\x02V\xe6\xa3\x0fH\u0749\n\u05ce\xbcZ\xc6 \x00\x00\u07d4a\xb3\xdf.\x9e\x9f\xd9h\x13\x1f\x1e\x88\xf0\xa0\xeb[\xd7eFM\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4a\xb9\x02\u0166s\x88X&\x82\r\x1f\xe1EI\xe4\x86_\xbd\u0089\x12$\xef\xed*\u1440\x00\u07d4a\xb9\x05\xdef?\xc1s\x86R;:(\xe2\xf7\xd07\xa6U\u0349\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4a\xba\x87\xc7~\x9bYm\xe7\xba\x0e2o\xdd\xfe\xec!c\xeff\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4a\xbf\x84\u056b\x02oX\xc8s\xf8o\xf0\xdf\u0282\xb5W3\xae\x89lk\x93[\x8b\xbd@\x00\x00\u07d4a\xc4\xee|\x86LMk^7\xea\x131\xc2\x03s\x9e\x82k/\x89\x01\xa15;8*\x91\x80\x00\u07d4a\xc80\xf1eG\x18\xf0u\u032b\xa3\x16\xfa\xac\xb8[}\x12\v\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4a\xc8\xf1\xfaC\xbf\x84i\x99\xec\xf4{+2M\xfbkc\xfe:\x89+^:\xf1k\x18\x80\x00\x00\u07d4a\xc9\xdc\u8c98\x1c\xb4\x0e\x98\xb0@+\xc3\xeb(4\x8f\x03\xac\x89\n\xac\xac\u0679\xe2+\x00\x00\u07d4a\u03a7\x1f\xa4d\xd6*\a\x06?\x92\v\f\xc9\x17S\x973\u0609Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4a\xd1\x01\xa03\xee\x0e.\xbb1\x00\xed\xe7f\xdf\x1a\xd0$IT\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4a\xedU\x96\u0197 
\u007f=U\xb2\xa5\x1a\xa7\xd5\x0f\a\xfa\t\xe8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4a\xff\x8eg\xb3M\x9e\xe6\xf7\x8e\xb3o\xfe\xa1\xb9\xf7\xc1W\x87\xaf\x89X\xe7\x92n\xe8X\xa0\x00\x00\u07d4b\x05\xc2\xd5dtp\x84\x8a8@\xf3\x88~\x9b\x01]4u\\\x89a\x94\x04\x9f0\xf7 \x00\x00\u07d4b(\xad\xe9^\x8b\xb1}\x1a\xe2;\xfb\x05\x18AMI~\x0e\xb8\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94b)\xdc\xc2\x03\xb1\xed\xcc\xfd\xf0n\x87\x91\fE*\x1fMzr\x8a\x06\xe1\xd4\x1a\x8f\x9e\xc3P\x00\x00\u0794b+\xe4\xb4T\x95\xfc\xd91C\xef\xc4\x12\u0599\xd6\xcd\xc2=\u0148\xf0\x15\xf2W6B\x00\x00\u07d4b3\x1d\xf2\xa3\xcb\xee5 \xe9\x11\u07a9\xf7>\x90_\x89%\x05\x89lk\x93[\x8b\xbd@\x00\x00\u07d4bVD\xc9Z\x87>\xf8\xc0l\u06de\x9fm\x8dv\x80\x04=b\x89a\x94\x04\x9f0\xf7 \x00\x00\u07d4be\xb2\xe7s\x0f6\xb7v\xb5-\f\x9d\x02\xad\xa5]\x8e<\xb6\x8965\u026d\xc5\u07a0\x00\x00\u07d4bh\n\x15\xf8\u0338\xbd\xc0/s`\xc2Z\xd8\u03f5{\x8c\u034965\u026d\xc5\u07a0\x00\x00\u07d4b\x94\xea\xe6\xe4 \xa3\xd5`\n9\xc4\x14\x1f\x83\x8f\xf8\xe7\xccH\x89\xa00\xdc\xeb\xbd/L\x00\x00\u07d4b\x97\x1b\xf2cL\xee\v\xe3\u0249\x0fQ\xa5`\x99\u06f9Q\x9b\x89#\x8f\xd4,\\\xf0@\x00\x00\u07d4b\x9b\xe7\xab\x12jS\x98\xed\xd6\u069f\x18D~x\u0192\xa4\xfd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4b\xb4\xa9\"nah\a\x1el\xbea\x11\xfe\xf0\xbcc\x8a\x03\xba\x19\x10\xbf4\x1b\x00\x00\x00\xe0\x94c\n\x91:\x901\xc9I*\xbdLA\u06f1PT\xcf\xecD\x16\x8a\x014X\xdbg\xaf5\xe0\x00\x00\xe0\x94c\fRs\x12mQ|\xe6q\x01\x81\x1c\xab\x16\xb8SL\xf9\xa8\x8a\x01\xfe\xcc\xc6%s\xbb\u04c0\x00\u07d4c\x100\xa5\xb2{\a(\x8aEio\x18\x9e\x11\x14\xf1*\x81\xc0\x89\x1b\x1azB\v\xa0\r\x00\x00\u07d4c\x10\xb0 \xfd\x98\x04IW\x99P\x92\t\x0f\x17\xf0NR\xcd\xfd\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4c+\x91I\xd7\x01x\xa7364'^\x82\u0555?'\x96{\x89%\xf2s\x93=\xb5p\x00\x00\u07d4c,\xec\xb1\f\xfc\xf3\x8e\u0246\xb4;\x87p\xad\xec\xe9 \x02!\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4c1\x02\x8c\xbbZ!H[\xc5\x1bVQB\x99;\xdb%\x82\xa9\x89\x1c\xfd\xd7F\x82\x16\xe8\x00\x00\u07d4c3O\xcf\x17E\x84\x0eK\tJ;\xb4\v\xb7o\x96\x04\xc0L\x89\u05e5\xd7\x03\xa7\x17\xe8\x00\x00\u07d4c4\nWqk\xfac\xebl\xd13r\x12\x02W[\xf7\x96\xf0\x89\va\xe0\xa2\f\x12q\x80\x00\u07d4cN\xfc$7\x11\a\xb4\xcb\xf0?y\xa9=\xfd\x93\xe41\xd5\xfd\x89B5\x82\xe0\x8e\xdc\\\x80\x00\xe0\x94c\\\x00\xfd\xf05\xbc\xa1_\xa3a\r\xf38N\x0f\xb7\x90h\xb1\x8a\x01\xe7\xe4\x17\x1b\xf4\u04e0\x00\x00\u07d4ca.xb\xc2{X|\xfbm\xaf\x99\x12\xcb\x05\x1f\x03\n\x9f\x89\x02[\x19\u053f\xe8\xed\x00\x00\u07d4cfgU\xbdA\xb5\x98i\x97x<\x13\x040\b$+<\xb5\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4c{\xe7\x1b:\xa8\x15\xffE=VB\xf70tE\vd\xc8*\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94c}g\xd8\u007fXo\nZG\x9e \xee\x13\xea1\n\x10\xb6G\x8a\n:Y&\xaf\xa1\xe70\x00\x00\u07d4c\u007fXi\xd6\xe4i_\x0e\xb9\xe2s\x11\u0107\x8a\xff33\x80\x89j\xc0Nh\xaa\xec\x86\x00\x00\u07d4c\x97|\xad}\r\xcd\xc5+\x9a\xc9\xf2\xff\xa16\xe8d(\x82\xb8\x89\x04\x10\u0546\xa2\nL\x00\x00\u07d4c\xa6\x1d\xc3\n\x8e;0\xa7c\xc4!<\x80\x1c\xbf\x98s\x81x\x8965\u026d\xc5\u07a0\x00\x00\u07d4c\xacT\\\x99\x12C\xfa\x18\xae\xc4\x1dOoY\x8eUP\x15\u0709 \x86\xac5\x10R`\x00\x00\u07d4c\xb9uMu\xd1-8@9\xeci\x06<\v\xe2\x10\xd5\xe0\u3252\v\x86\f\xc8\xec\xfd\x80\x00\u07d4c\xbbfO\x91\x17\x03v(YM\xa7\xe3\xc5\b\x9f\xd6\x18\xb5\xb5\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4c\u00a3\xd25\xe5\xee\xab\xd0\u0526\xaf\u06c9\xd9F'9d\x95\x89CN\xf0[\x9d\x84\x82\x00\x00\u07d4c\xc8\xdf\xde\v\x8e\x01\xda\xdc.t\x8c\x82L\xc06\x9d\U00010cc9\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4c\xd5Z\u065b\x917\xfd\x1b 
\xcc+O\x03\xd4,\xba\xdd\xf34\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4c\xd8\x00H\x87u\x96\xe0\u0084\x89\xe6P\xcdJ\xc1\x80\tjI\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\xe0\x94c\xe4\x14`>\x80\xd4\xe5\xa0\xf5\xc1\x87t FB%\x82\b\xe4\x8a\x01\x0f\f\xf0d\xddY \x00\x00\xe0\x94c\xe8\x8e.S\x9f\xfbE\x03\x86\xb4\xe4g\x89\xb2#\xf5GlE\x8a\x01U\x17\nw\x8e%\xd0\x00\x00\u07d4c\xef/\xbc=\xaf^\xda\xf4\xa2\x95b\x9c\xcf1\xbc\xdf@8\xe5\x89O%\x91\xf8\x96\xa6P\x00\x00\u07d4c\xf0\xe5\xa7R\xf7\x9fg\x12N\xedc:\xd3\xfd'\x05\xa3\x97\u0509\u0556{\xe4\xfc?\x10\x00\x00\xe0\x94c\xf5\xb5=y\xbf.A\x14\x89Re0\"8E\xfa\xc6\xf6\x01\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4c\xfc\x93\x00\x13\x05\xad\xfb\u0278])\xd9)\x1a\x05\xf8\xf1A\v\x8965\u026d\xc5\u07a0\x00\x00\u0794c\xfek\xccK\x8a\x98P\xab\xbeu\x8070\xc92%\x1f\x14[\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4d\x03\xd0bT\x96\x90\xc8\xe8\xb6>\xaeA\xd6\xc1\tGn%\x88\x89lk\x93[\x8b\xbd@\x00\x00\u07d4d\x04+\xa6\x8b\x12\xd4\xc1Qe\x1c\xa2\x81;sR\xbdV\xf0\x8e\x89 \x86\xac5\x10R`\x00\x00\u0794d\x05\xdd\x13\xe9:\xbc\xff7~p\x0e<\x1a\x00\x86\xec\xa2})\x88\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x94d\n\xbam\xe9\x84\xd9E\x177x\x03p^\xae\xa7\t_J\x11\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4d\v\xf8t\x15\xe0\xcf@s\x01\xe5Y\x9ah6m\xa0\x9b\xba\u0209\x1a\xbc\x9fA`\x98\x15\x80\x00\u07d4d \xf8\xbc\xc8\x16JaR\xa9\x9dk\x99i0\x05\xcc\xf7\xe0S\x8965f3\xeb\xd8\xea\x00\x00\u07d4d$\x1axD)\x0e\n\xb8U\xf1\u052au\xb5SE\x03\"$\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4d&J\xed\xd5-\xca\xe9\x18\xa0\x12\xfb\xcd\f\x03\x0e\xe6\xf7\x18!\x8965\u026d\xc5\u07a0\x00\x00\u07d4d7\x0e\x87 &E\x12Z5\xb2\a\xaf\x121\xfb`r\xf9\xa7\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4d=\x9a\xee\u0531\x80\x94~\u04b9 |\xceL=\xdcU\xe1\xf7\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4dC\xb8\xaec\x9d\xe9\x1c\xf7\xf0p\xa5G\x03\xb7\x18NH'l\\\x00w\xefK4\x89\x11X\xe4`\x91=\x00\x00\x00\xe0\x94d\xe2\xde! 
\v\x18\x99\u00e0\xc0e;P@\x13m\r\xc8B\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4d\xec\x8a[t?4y\xe7\a\xda\xe9\xee \u076aO@\xf1\u0649\n\u05ce\xbcZ\xc6 \x00\x00\u07d4e\x03\x86\v\x19\x10\b\xc1U\x83\xbf\u0201X\t\x93\x01v((\x8965\u026d\xc5\u07a0\x00\x00\u07d4e\x051\x911\x9e\x06z%\xe66\x1dG\xf3\u007fc\x18\xf84\x19\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4e\t;#\x9b\xbf\xba#\xc7w\\\xa7\xdaZ\x86H\xa9\xf5L\xf7\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4e\t\xee\xb14~\x84/\xfbA>7\x15^,\xbcs\x82s\xfd\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94e\vBUU\xe4\xe4\xc5\x17\x18\x14h6\xa2\xc1\xeew\xa5\xb4!\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4e\f\xf6}\xb0`\xcc\xe1uh\xd5\xf2\xa4#h|Idv\t\x89\x05k\xc7^-c\x10\x00\x00\u07d4e\x10\xdfB\xa5\x99\xbc\xb0\xa5\x19\u0329a\xb4\x88u\x9aogw\x89lk\x93[\x8b\xbd@\x00\x00\u07d4e6u\xb8B\xd7\u0634a\xf7\"\xb4\x11|\xb8\x1d\xac\x8ec\x9d\x89\x01\xae6\x1f\xc1E\x1c\x00\x00\u07d4eK~\x80\x87\x99\xa8=r\x87\xc6w\x06\xf2\xab\xf4\x9aId\x04\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94eORHG\xb3\xa6\xac\xc0\xd3\xd5\xf1\xf3b\xb6\x03\xed\xf6_\x96\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4eY4\u068etN\xaa=\xe3M\xbb\xc0\x89LN\xda\va\xf2\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4e]\\\xd7H\x96)\xe2ANIb?\xabb\xa1~M6\x11\x89\x05\fL\xb2\xa1\f`\x00\x00\u07d4e\xaf\x8d\x8b[\x1d\x1e\xed\xfaw\xbc\xbc\x96\xc1\xb13\xf83\x06\u07c9\x05P\x05\xf0\xc6\x14H\x00\x00\u07d4e\xaf\x90\x87\xe0QgqT\x97\u0265\xa7I\x18\x94\x89\x00M\xef\x89-C\xf3\xeb\xfa\xfb,\x00\x00\u0794e\xb4/\xae\xcc\x1e\u07f1B\x83\u0297\x9a\xf5E\xf6;0\xe6\f\x88\xfc\x93c\x92\x80\x1c\x00\x00\u0794e\xd3>\xb3\x9c\xdadS\xb1\x9ea\xc1\xfeM\xb91p\xef\x9d4\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4e\xd8\xddN%\x1c\xbc\x02\x1f\x05\xb0\x10\xf2\xd5\xdcR\f8r\xe0\x89-CW\x9a6\xa9\x0e\x00\x00\u07d4e\xea&\xea\xbb\xe2\xf6L\xcc\xcf\xe0h)\xc2]F7R\x02%\x89%\xf2s\x93=\xb5p\x00\x00\u07d4e\xeag\xad?\xb5j\xd5\xfb\x948}\u04ce\xb3\x83\x00\x1d|h\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94e\xeb\xae\xd2~\u06dd\xcc\x19W\xae\xe5\xf4R\xac!\x05\xa6\\\x0e\x8a\t7\u07ed\xae%\u26c0\x00\u07d4e\xee \xb0m\x9a\u0549\xa7\xe7\xce\x04\xb9\xf5\xf7\x95\xf4\x02\xae\u0389lk\x93[\x8b\xbd@\x00\x00\u07d4e\xf544m/\xfbx\u007f\xa9\xcf\x18]t[\xa4)\x86\xbdn\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94e\xf5\x87\x0f&\xbc\xe0\x89g}\xfc#\xb5\x00\x1e\xe4\x92H4(\x8a\x01\x12\xb1\xf1U\xaa2\xa3\x00\x00\u07d4e\xfd\x02\xd7\x04\xa1*M\xac\xe9G\x1b\x06E\xf9b\xa8\x96q\u0209\x01\x8d\x1c\xe6\xe4'\u0340\x00\u07d4e\xff\x87O\xaf\xceM\xa3\x18\xd6\xc9=W\xe2\u00ca\rs\xe8 \x8968\x02\x1c\xec\u06b0\x00\x00\xe0\x94f\x05W\xbbC\xf4\xbe:\x1b\x8b\x85\xe7\xdf{<[\xcdT\x80W\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4f\b,u\xa8\xde1\xa59\x13\xbb\xd4M\xe3\xa07O\u007f\xaaA\x89O%\x91\xf8\x96\xa6P\x00\x00\u07d4f\x11\xceY\xa9\x8b\a*\xe9Y\xdcI\xadQ\x1d\xaa\xaa\xa1\x9dk\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4f \x1b\xd2'\xaem\u01bd\xfe\xd5\xfb\u0781\x1f\xec\xfe^\x9d\u0649 
>\x9e\x84\x92x\x8c\x00\x00\u07d4f#4\x81G$\x93[y1\xdd\xcaa\x00\xe0\rFw'\u0349\"\x88&\x9d\a\x83\xd4\x00\x00\u07d4f'O\xea\x82\xcd0\xb6\u009b#5\x0eOO=1\nX\x99\x89p7\x05P\xab\x82\x98\x00\x00\u07d4f,\xfa\x03\x8f\xab7\xa0\x17E\xa3d\u1e41'\xc5\x03tm\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4f5\xb4oq\x1d-\xa6\xf0\xe1cp\u034e\xe4>\xfb,-R\x89lk\x93[\x8b\xbd@\x00\x00\u07d4f6\x04\xb0P0F\xe6$\xcd&\xa8\xb6\xfbGB\xdc\xe0*o\x89\x03\x8b\x9by~\xf6\x8c\x00\x00\u07d4f6\u05ecczH\xf6\x1d8\xb1L\xfdHe\xd3m\x14(\x05\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4f@\xcc\xf0SU\\\x13\n\xe2\xb6Vd~\xa6\xe3\x167\xb9\xab\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4fBK\xd8x[\x8c\xb4a\x10*\x90\x02\x83\xc3]\xfa\a\xefj\x89\x02.-\xb2ff\xfc\x80\x00\u07d4fL\xd6}\xcc\u026c\x82(\xb4\\U\u06cdvU\ve\x9c\u0709\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4fNC\x11\x98p\xaf\x10zD\x8d\xb1'\x8b\x04H8\xff\u036f\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4fQso\xb5\x9b\x91\xfe\xe9\xc9:\xa0\xbdn\xa2\xf7\xb2Pa\x80\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4f[\x00\x0f\vw'P\xcc\x89k\x91\x8a\xacIK\x16\x80\x00\xe0\x94g]\\\xaa`\x9b\xf7\n\x18\xac\xa5\x80F]\x8f\xb71\r\x1b\xbb\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4gc F\u0732ZT\x93i(\xa9oB?3 \xcb\ud489lk\x93[\x8b\xbd@\x00\x00\u07d4ge\xdf%(\x0e\x8eO8\u0531\xcfDo\xc5\xd7\xebe\x9e4\x89\x05k\xc7^-c\x10\x00\x00\u07d4gv\xe13\xd9\xdc5L\x12\xa9Q\b{c\x96P\xf59\xa43\x89\x06\x81U\xa46v\xe0\x00\x00\u07d4g\x85Q<\xf72\xe4~\x87g\ap\xb5A\x9b\xe1\f\xd1\xfct\x89lk\x93[\x8b\xbd@\x00\x00\u07d4g\x947\xea\xcfCxx\xdc)=H\xa3\x9c\x87\xb7B\x1a!l\x89\x03\u007f\x81\x82\x1d\xb2h\x00\x00\u07d4g\x9b\x9a\x10\x990Q~\x89\x99\t\x9c\xcf*\x91LL\x8d\xd94\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4g\xa8\x0e\x01\x90r\x1f\x949\rh\x02r\x9d\xd1,1\xa8\x95\xad\x89lk\x13u\xbc\x91V\x00\x00\u07d4g\xb8\xa6\xe9\x0f\xdf\n\x1c\xacD\x17\x930\x1e\x87P\xa9\xfayW\x890\x84\x9e\xbe\x166\x9c\x00\x00\u07d4g\xbc\x85\xe8}\xc3LN\x80\xaa\xfa\x06k\xa8\u049d\xbb\x8eC\x8e\x89\x15\xd1\xcfAv\xae\xba\x00\x00\u07d4g\xc9&\t>\x9b\x89'\x938\x10\u0642\"\xd6.+\x82\x06\xbb\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4g\xcf\xdanp\xbfvW\u04d0Y\xb5\x97\x90\xe5\x14Z\xfd\xbea\x89#\x05\r\tXfX\x00\x00\u07d4g\u0582\xa2\x82\xefs\xfb\x8dn\x90q\xe2aOG\xab\x1d\x0f^\x8965\u026d\xc5\u07a0\x00\x00\u07d4g\u05a8\xaa\x1b\xf8\xd6\xea\xf78N\x99=\xfd\xf1\x0f\n\xf6\x8aa\x89\n\xbc\xbbW\x18\x97K\x80\x00\u07d4g\u0692.\xff\xa4r\xa6\xb1$\xe8N\xa8\xf8k$\xe0\xf5\x15\xaa\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4g\xdf$-$\r\u0538\a\x1dr\xf8\xfc\xf3[\xb3\x80\x9dq\xe8\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4g\xee@n\xa4\xa7\xaej:8\x1e\xb4\xed\xd2\xf0\x9f\x17KI(\x898)c_\th\xb0\x00\x00\u07d4g\xf2\xbbx\xb8\xd3\xe1\x1f|E\x8a\x10\xb5\xc8\xe0\xa1\xd3tF}\x89a\t=|,m8\x00\x00\u07d4g\xfcR}\xce\x17\x85\xf0\xfb\x8b\xc7\xe5\x18\xb1\xc6i\xf7\xec\u07f5\x89\r\x02\xabHl\xed\xc0\x00\x00\u07d4h\x02}\x19U\x8e\xd73\x9a\b\xae\xe8\xde5Y\xbe\x06>\xc2\xea\x89lk\x93[\x8b\xbd@\x00\x00\u07d4h\x06@\x83\x8b\xd0zD{\x16\x8dm\x92;\x90\xcflC\xcd\u0289]\u0212\xaa\x111\xc8\x00\x00\u07d4h\a\xdd\u020d\xb4\x89\xb03\xe6\xb2\xf9\xa8\x15SW\x1a\xb3\xc8\x05\x89\x01\x9f\x8euY\x92L\x00\x00\xe0\x94h\rY\x11\xed\x8d\xd9\xee\xc4\\\x06\f\"?\x89\xa7\xf6 \xbb\u054a\x04<3\xc1\x93ud\x80\x00\x00\u07d4h\x11\xb5L\u0456c\xb1\x1b\x94\xda\x1d\xe2D\x82\x85\u035fh\u0649;\xa1\x91\v\xf3A\xb0\x00\x00\u07d4h\x19\f\xa8\x85\xdaB1\x87L\x1c\xfbB\xb1X\n!s\u007f8\x89\xcf\x15&@\xc5\xc80\x00\x00\xe0\x94h(\x97\xbcO\x8e\x89\x02\x91 
\xfc\xff\xb7\x87\xc0\x1a\x93\xe6A\x84\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4h)^\x8e\xa5\xaf\xd9\t?\xc0\xa4e\xd1W\x92+]*\xe24\x89\x01\x15NS!}\xdb\x00\x00\u07d4h.\x96'oQ\x8d1\xd7\xe5n0\u07f0\t\xc1!\x82\x01\xbd\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4h5\xc8\xe8\xb7J,\xa2\xae?J\x8d\x0fk\x95J>*\x83\x92\x89\x03B\x9c3]W\xfe\x00\x00\u07d4h63\x01\n\x88hk\xeaZ\x98\xeaS\xe8y\x97\xcb\xf7>i\x89\x05k9Bc\xa4\f\x00\x00\u07d4h=\xba6\xf7\xe9O@\xeaj\xea\ry\xb8\xf5!\xdeU\an\x89\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4hA\x9cm\xd2\xd3\xceo\u02f3\xc7>/\xa0y\xf0`Q\xbd\xe6\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4hG;z}\x96Y\x04\xbe\u06e5V\u07fc\x17\x13l\xd5\xd44\x89\x05k\xc7^-c\x10\x00\x00\u07d4hG\x82[\xde\xe8$\x0e(\x04,\x83\xca\xd6B\U000868fd\u0709QP\xae\x84\xa8\xcd\xf0\x00\x00\xe0\x94hJD\xc0i3\x9d\b\xe1\x9auf\x8b\u06e3\x03\xbe\x85S2\x8a\x0e\u04b5%\x84\x1a\xdf\xc0\x00\x00\u07d4hS\x1fM\u0680\x8fS vz\x03\x114(\xca\f\xe2\xf3\x89\x89\x01\r:\xa56\xe2\x94\x00\x00\u07d4hy'\xe3\x04\x8b\xb5\x16*\xe7\xc1\\\xf7k\xd1$\xf9I{\x9e\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94h\x80\x9a\xf5\xd52\xa1\x1c\x1aMn2\xaa\xc7\\LR\xb0\x8e\xad\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4h\x86\xad\xa7\xbb\xb0a{\u0684!\x91\u018c\x92.\xa3\xa8\xac\x82\x89>\xe2;\xde\x0e} \x00\x00\xe0\x94h\x88>\x15.V`\xfe\xe5\x96&\xe7\xe3\xb4\xf0Q\x10\xe6\"/\x8a\v\x94c;\xe9u\xa6*\x00\x00\u07d4h\x8aV\x9e\x96U$\xeb\x1d\n\xc3\xd3s>\xab\x90\x9f\xb3\xd6\x1e\x89G\x8e\xae\x0eW\x1b\xa0\x00\x00\xe0\x94h\x8e\xb3\x85;\xbc\xc5\x0e\xcf\xee\x0f\xa8\u007f\n\xb6\x93\u02bd\xef\x02\x8a\x06\xb1\n\x18@\x06G\xc0\x00\x00\u07d4h\xa7B_\xe0\x9e\xb2\x8c\xf8n\xb1y>A\xb2\x11\xe5{\u058d\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4h\xa8l@#\x88\xfd\xdcY\x02\x8f\xecp!\u933f\x83\x0e\xac\x89\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\xe0\x94h\xac\u06a9\xfb\x17\xd3\xc3\t\x91\x1aw\xb0_S\x91\xfa\x03N\xe9\x8a\x01\xe5.3l\xde\"\x18\x00\x00\u07d4h\xad\xdf\x01\x9dk\x9c\xabp\xac\xb1?\v1\x17\x99\x9f\x06.\x12\x89\x02\xb5\x12\x12\xe6\xb7\u0200\x00\u07d4h\xb3\x186\xa3\n\x01j\xda\x15{c\x8a\xc1]\xa7?\x18\xcf\u0789\x01h\u048e?\x00(\x00\x00\xe0\x94h\xb6\x85G\x88\xa7\xc6Il\xdb\xf5\xf8K\x9e\xc5\xef9+x\xbb\x8a\x04+\xf0kx\xed;P\x00\x00\u07d4h\xc0\x84\x90\u021b\xf0\u05b6\xf3 \xb1\xac\xa9\\\x83\x12\xc0\x06\b\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4h\xc7\xd1q\x1b\x01\x1a3\xf1o\x1fU\xb5\xc9\x02\xcc\xe9p\xbd\u05c9\b=lz\xabc`\x00\x00\u07d4h\xc8y\x1d\xc3B\xc3sv\x9e\xa6\x1f\xb7\xb5\x10\xf2Q\xd3 \x88\x8965\u026d\xc5\u07a0\x00\x00\u07d4h\u07d4|I[\ubbb8\u8273\xf9S\xd53\x87K\xf1\x06\x89\x1d\x99E\xab+\x03H\x00\x00\u07d4h\xe8\x02'@\xf4\xaf)\xebH\xdb2\xbc\xec\xdd\xfd\x14\x8d=\xe3\x8965\u026d\xc5\u07a0\x00\x00\u07d4h\xecy\u057eqUql@\x94\x1cy\u05cd\x17\u079e\xf8\x03\x89\x1b#8w\xb5 \x8c\x00\x00\u07d4h\xee\xc1\u222c1\xb6\xea\xba~\x1f\xbdO\x04\xadW\x9ak]\x89lk\x93[\x8b\xbd@\x00\x00\u07d4h\xf5%\x92\x1d\xc1\x1c2\x9buO\xbf>R\x9f\xc7#\xc84\u0349WG=\x05\u06ba\xe8\x00\x00\u07d4h\xf7\x19\xae4+\xd7\xfe\xf1\x8a\x05\u02f0/pZ\u04ce\u0572\x898\xeb\xad\\\u0710(\x00\x00\xe0\x94h\xf7W<\xd4W\xe1L\x03\xfe\xa4>0-04|\x10p\\\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4h\xf8\xf4QU\xe9\x8cP)\xa4\xeb\u0175'\xa9.\x9f\xa81 \x89\xf0{D\xb4\a\x93 
\x80\x00\u07d4h\xfe\x13W!\x8d\tXI\xcdW\x98B\u012a\x02\xff\x88\x8d\x93\x89lk\x93[\x8b\xbd@\x00\x00\u07d4i\x02(\xe4\xbb\x12\xa8\u0535\u09d7\xb0\xc5\xcf*u\t\x13\x1e\x89e\xea=\xb7UF`\x00\x00\u07d4i\x05\x94\xd3\x06a<\xd3\xe2\xfd$\xbc\xa9\x99J\u064a=s\xf8\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94i\a2ir\x9ed\x14\xb2n\xc8\xdc\x0f\xd95\xc7;W\x9f\x1e\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\xe0\x94i\x19\xdd^]\xfb\x1a\xfa@G\x03\xb9\xfa\xea\x8c\xee5\xd0\rp\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4i4\x92\xa5\xc5\x13\x96\xa4\x82\x88\x16i\xcc\xf6\xd8\xd7y\xf0\tQ\x89\x12\xbfPP:\xe3\x03\x80\x00\u07d4i=\x83\xbe\tE\x9e\xf89\v.0\xd7\xf7\u008d\xe4\xb4(N\x89lk\x93[\x8b\xbd@\x00\x00\u07d4iQp\x83\xe3\x03\xd4\xfb\xb6\xc2\x11E\x14!]i\xbcF\xa2\x99\x89\x05k\xc7^-c\x10\x00\x00\u07d4iUPel\xbf\x90\xb7]\x92\xad\x91\"\xd9\r#\xcah\xcaM\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94iX\xf8;\xb2\xfd\xfb'\xce\x04\t\xcd\x03\xf9\xc5\xed\xbfL\xbe\u074a\x04<3\xc1\x93ud\x80\x00\x00\u0794i[\x0fRBu7\x01\xb2d\xa6pq\xa2\u0708\b6\xb8\u06c8\u3601\x1b\xech\x00\x00\xe0\x94i[L\xce\bXV\xd9\xe1\xf9\xff>y\x94 #5\x9e_\xbc\x8a\x01\x0f\f\xf0d\xddY \x00\x00\xe0\x94if\x06:\xa5\xde\x1d\xb5\xc6q\xf3\xddi\x9dZ\xbe!>\xe9\x02\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4it\u0224\x14\u03ae\xfd<.M\xfd\xbe\xf40V\x8d\x9a\x96\v\x89\x12\x1e\xa6\x8c\x11NQ\x00\x00\xe0\x94iximQP\xa9\xa2cQ?\x8ft\u0196\xf8\xb19|\xab\x8a\x01g\xf4\x82\xd3\u0171\xc0\x00\x00\xe0\x94iy{\xfb\x12\u027e\u0582\xb9\x1f\xbcY5\x91\xd5\xe4\x027(\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94i\u007fUSk\xf8Z\xdaQ\x84\x1f\x02\x87b:\x9f\x0e\u041a\x17\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4i\x82\xfe\x8a\x86~\x93\xebJ\v\xd0QX\x93\x99\xf2\xec\x9aR\x92\x89lk\x93[\x8b\xbd@\x00\x00\u07d4i\x8a\x8ao\x01\xf9\xabh/c|yi\xbe\x88_lS\x02\xbf\x89\x01\r:\xa56\xe2\x94\x00\x00\u07d4i\x8a\xb9\xa2\xf33\x81\xe0|\fGC=\r!\xd6\xf36\xb1'\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4i\x94\xfb21\xd7\xe4\x1dI\x1a\x9dh\xd1\xfaL\xae,\xc1Y`\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4i\x9c\x9e\xe4q\x95Q\x1f5\xf8b\xcaL\"\xfd5\xae\x8f\xfb\xf4\x89\x04V9\x18$O@\x00\x00\u07d4i\x9f\xc6\u058aGuW<\x1d\u036e\xc80\xfe\xfdP9|N\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4i\xaf(\xb0tl\xac\r\xa1p\x84\xb99\x8c^6\xbb:\r\xf2\x896w\x03n\xdf\n\xf6\x00\x00\u07d4i\xb8\x0e\xd9\x0f\x84\x83J\xfa?\xf8.\xb9dp;V\tw\u0589\x01s\x17\x90SM\xf2\x00\x00\xe0\x94i\xb8\x1dY\x81\x14\x1e\u01e7\x14\x10`\xdf\u03cf5\x99\xff\xc6>\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4i\xbc\xfc\x1dC\xb4\xba\x19\xde{'K\xdf\xfb5\x13\x94\x12\xd3\u05c95e\x9e\xf9?\x0f\xc4\x00\x00\u07d4i\xbd%\xad\xe1\xa34lY\xc4\xe90\xdb*\x9dq^\xf0\xa2z\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4i\xc0\x8dtGT\xdep\x9c\xe9n\x15\xae\r\x1d9[:\"c\x8965\u026d\xc5\u07a0\x00\x00\u07d4i\xc2\xd85\xf1>\xe9\x05\x80@\x8ej2\x83\xc8\u0326\xa44\xa2\x89#\x8f\xd4,\\\xf0@\x00\x00\u07d4i\xc9N\a\u0129\xbe3\x84\xd9]\xfa<\xb9)\x00Q\x87;{\x89\x03\xcbq\xf5\x1f\xc5X\x00\x00\u07d4i\xcb>!S\x99\x8d\x86\xe5\xee \xc1\xfc\u0466\xba\uec86?\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4i\u04ddQ\b\x89\xe5R\xa3\x96\x13[\xfc\xdb\x06\xe3~8v3\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94i\u064f8\xa3\xba=\xbc\x01\xfa\\,\x14'\xd8b\x83//p\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94i\xe2\xe2\xe7\x040|\xcc[\\\xa3\xf1d\xfe\xce.\xa7\xb2\xe5\x12\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4i\xffB\x90t\u02dblc\xbc\x91B\x84\xbc\xe5\xf0\xc8\xfb\xf7\u0409\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4i\xff\x89\x01\xb5Av?\x81|_)\x98\xf0-\xcf\xc1\xdf)\x97\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4j\x02:\xf5}XM\x84^i\x876\xf10\u06dd\xb4\r\xfa\x9a\x89\x05[ 
\x1c\x89\x00\x98\x00\x00\u07d4j\x04\xf5\xd5?\xc0\xf5\x15\xbe\x94+\x8f\x12\xa9\xcbz\xb0\xf3\x97x\x89\xa9\xaa\xb3E\x9b\xe1\x94\x00\x00\u07d4j\x05\xb2\x1cO\x17\xf9\xd7?_\xb2\xb0\u02c9\xffSV\xa6\xcc~\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\xe0\x94j\x0f\x05`f\xc2\xd5f(\x85\x02s\xd7\xec\xb7\xf8\xe6\xe9\x12\x9e\x8a\x01\x0f\r)<\u01e5\x88\x00\x00\u07d4j\x13\xd5\xe3,\x1f\xd2m~\x91\xffn\x051`\xa8\x9b,\x8a\xad\x89\x02\xe6/ \xa6\x9b\xe4\x00\x00\u07d4j.\x86F\x9a[\xf3|\xee\x82\xe8\x8bL8c\x89](\xfc\xaf\x89\x1c\"\x92f8[\xbc\x00\x00\u07d4j6\x94BL|\u01b8\xbc\u067c\u02baT\f\xc1\xf5\xdf\x18\u05c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94jB\u0297\x1cex\u056d\xe2\x95\xc3\xe7\xf4\xad3\x1d\xd3BN\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4jD\xaf\x96\xb3\xf02\xaed\x1b\xebg\xf4\xb6\xc83B\xd3|]\x89\x01\x92t\xb2Y\xf6T\x00\x00\u07d4jL\x89\a\xb6\x00$\x80W\xb1\xe4cT\xb1\x9b\u0705\x9c\x99\x1a\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4jQNbB\xf6\xb6\x8c\x13~\x97\xfe\xa1\u73b5U\xa7\xe5\xf7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4jS\xd4\x1a\xe4\xa7R\xb2\x1a\xbe\xd57FI\x95:Q=\xe5\xe5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4jaY\aJ\xb5s\xe0\xeeX\x1f\x0f=\xf2\u05a5\x94b\x9bt\x89\x10\xce\x1d=\x8c\xb3\x18\x00\x00\u07d4jc7\x83?\x8fjk\xf1\f\xa7\xec!\xaa\x81\x0e\xd4D\xf4\u02c97\xbd$4\\\xe8\xa4\x00\x00\u07d4jcS\xb9qX\x9f\x18\xf2\x95\\\xba(\xab\xe8\xac\xcejWa\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4jc\xfc\x89\xab\xc7\xf3n(-\x80x{{\x04\xaf\xd6U>q\x89\b\xacr0H\x9e\x80\x00\x00\u07d4jg\x9e7\x8f\xdc\xe6\xbf\xd9\u007f\xe6/\x04)Z$\xb9\x8965\u026d\xc5\u07a0\x00\x00\u07d4j\x8c\xea-\xe8J\x8d\xf9\x97\xfd?\x84\xe3\b=\x93\xdeW\u0369\x89\x05k\xe0<\xa3\xe4}\x80\x00\xe0\x94j\x97Xt;`>\xea:\xa0RKB\x88\x97#\xc4\x159H\x8a\x02#\x85\xa8'\xe8\x15P\x00\x00\u07d4j\xa5s/;\x86\xfb\x8c\x81\xef\xbek[G\xb5cs\v\x06\u020965\u026d\xc5\u07a0\x00\x00\u07d4j\xb3#\xaePV\xed\nE0r\u016b\xe2\xe4/\xcf]q9\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4j\xb5\xb4\xc4\x1c\u0778)i\f/\xda\u007f \xc8^b\x9d\xd5\u0549d\u052fqL2\x90\x00\x00\u07d4j\xc4\x0fS-\xfe\xe5\x11\x81\x17\u04ad5-\xa7}Om\xa2\u0209\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4j\xc4\u053e-\xb0\u065d\xa3\xfa\xaa\xf7RZ\xf2\x82\x05\x1dj\x90\x89\x04X\xcaX\xa9b\xb2\x80\x00\u07d4j\xcd\u0723\xcd+I\x90\xe2\\\xd6\\$\x14\x9d\t\x12\t\x9ey\x89\xa2\xa1\xe0|\x9fl\x90\x80\x00\u07d4j\xd9\v\xe2R\xd9\xcdFM\x99\x81%\xfa\xb6\x93\x06\v\xa8\xe4)\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4j\u0753!\x93\xcd8IJ\xa3\xf0:\xec\xccKz\xb7\xfa\xbc\xa2\x89\x04\xdbs%Gc\x00\x00\x00\xe0\x94j\xe5\u007f'\x91|V*\x13*M\x1b\xf7\xec\n\u01c5\x83)&\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4j\xeb\x9ftt.\xa4\x91\x81=\xbb\xf0\xd6\xfc\xde\x1a\x13\x1dM\xb3\x89\x17\xe5T0\x8a\xa00\x00\x00\u07d4j\xf25\u04bb\xe0P\xe6)\x16\x15\xb7\x1c\xa5\x82\x96X\x81\x01B\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4j\xf6\xc7\xee\x99\xdf'\x1b\xa1[\xf3\x84\xc0\xb7d\xad\xcbM\xa1\x82\x8965f3\xeb\xd8\xea\x00\x00\u07d4j\xf8\xe5Yih,q_H\xadO\xc0\xfb\xb6~\xb5\x97\x95\xa3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4j\xf9@\xf6>\u0278\xd8v'*\u0296\xfe\xf6\\\xda\xce\xcd\ua262\xa1]\tQ\x9b\xe0\x00\x00\u07d4j\xf9\xf0\xdf\uebbb_d\xbf\x91\xabw\x16i\xbf\x05)US\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4j\xff\x14f\xc2b6u\xe3\xcb\x0eu\xe4#\xd3z%\xe4B\xeb\x89]\u0212\xaa\x111\xc8\x00\x00\xe0\x94k\r\xa2Z\xf2g\u05c3l\"k\xca\xe8\xd8r\xd2\xceR\xc9A\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4k\x10\xf8\xf8\xb3\xe3\xb6\r\xe9\n\xa1-\x15_\x9f\xf5\xff\xb2,P\x89lk\x93[\x8b\xbd@\x00\x00\u07d4k\x17Y\x8a\x8e\xf5Oyz\xe5\x15\u0336Q}\x18Y\xbf\x80\x11\x89\x05k\xc7^-c\x10\x00\x00\u07d4k 
\xc0\x80`jy\xc7;\xd8\xe7[\x11qzN\x8d\xb3\xf1\u00c9\x10?sX\x03\xf0\x14\x00\x00\u07d4k\"\x84D\x02!\xce\x16\xa88-\xe5\xff\x02)G\"i\xde\xec\x8965\u026d\xc5\u07a0\x00\x00\u07d4k0\xf1\x829\x10\xb8m:\xcbZj\xfc\x9d\xef\xb6\xf3\xa3\v\xf8\x89\u3bb5sr@\xa0\x00\x00\u07d4k8\u0784\x1f\xad\u007fS\xfe\x02\xda\x11[\xd8j\xaff$f\xbd\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4kK\x99\xcb?\xa9\xf7\xb7L\u3903\x17\xb1\xcd\x13\t\n\x1az\x89\x03\x1b2~i]\xe2\x00\x00\u07d4kZ\xe7\xbfx\xecu\xe9\f\xb5\x03\xc7x\xcc\u04f2KO\x1a\xaf\x89+^:\xf1k\x18\x80\x00\x00\u07d4kc\xa2\u07f2\xbc\xd0\xca\xec\x00\"\xb8\x8b\xe3\f\x14Q\xeaV\xaa\x89+\xdbk\xf9\x1f\u007fL\x80\x00\u07d4kew\xf3\x90\x9aMm\xe0\xf4\x11R-Ep8d\x004\\\x89e\xea=\xb7UF`\x00\x00\u07d4kr\xa8\xf0a\xcf\xe6\x99j\xd4G\xd3\xc7,(\xc0\xc0\x8a\xb3\xa7\x89\xe7\x8cj\u01d9\x12b\x00\x00\u07d4kv\rHw\xe6\xa6'\xc1\xc9g\xbe\xe4Q\xa8P}\xdd\u06eb\x891T\xc9r\x9d\x05x\x00\x00\u07d4k\x83\xba\xe7\xb5e$EXU[\xcfK\xa8\xda \x11\x89\x1c\x17\x89lk\x93[\x8b\xbd@\x00\x00\u07d4k\x92]\xd5\xd8\xeda2\xabm\b`\xb8,D\xe1\xa5\x1f\x1f\xee\x89P; >\x9f\xba \x00\x00\xe0\x94k\x94a]\xb7Pej\u00cc~\x1c\xf2\x9a\x9d\x13g\u007fN\x15\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4k\x95\x1aC'N\xea\xfc\x8a\t\x03\xb0\xaf.\xc9+\xf1\xef\xc89\x89\x05k\xc7^-c\x10\x00\x00\u07d4k\x99%!\xec\x85#p\x84\x8a\u0597\xcc-\xf6Nc\xcc\x06\xff\x8965\u026d\xc5\u07a0\x00\x00\u07d4k\xa8\xf7\xe2_\xc2\xd8qa\x8e$\xe4\x01\x84\x19\x917\xf9\xf6\xaa\x89\x15\xafd\x86\x9ak\xc2\x00\x00\u07d4k\xa9\xb2\x1b5\x10k\xe1Y\xd1\xc1\xc2ez\xc5l\u049f\xfdD\x89\xf2\xdc}G\xf1V\x00\x00\x00\u07d4k\xafz*\x02\xaex\x80\x1e\x89\x04\xadz\xc0Q\b\xfcV\xcf\xf6\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94k\xb2\xac\xa2?\xa1bm\x18\xef\xd6w\u007f\xb9}\xb0-\x8e\n\xe4\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4k\xb4\xa6a\xa3:q\xd4$\u051b\xb5\xdf(b.\xd4\xdf\xfc\xf4\x89\",\x8e\xb3\xfff@\x00\x00\u07d4k\xb5\b\x13\x14j\x9a\xddB\xee\"\x03\x8c\x9f\x1fti\xd4\u007fG\x89\n\xdaUGK\x814\x00\x00\u07d4k\xbc?5\x8af\x8d\u0461\x1f\x03\x80\xf3\xf71\bBj\xbdJ\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4k\xbd\x1eq\x93\x90\xe6\xb9\x10C\xf8\xb6\xb9\u07c9\x8e\xa8\x00\x1b4\x89llO\xa6\xc3\xdaX\x80\x00\u07d4k\xc8Z\xcdY(r.\xf5\tS1\xee\x88\xf4\x84\xb8\u03c3W\x89\t\xc2\x00vQ\xb2P\x00\x00\u07d4k\xd3\xe5\x9f#\x9f\xaf\xe4wk\xb9\xbd\xddk\ue0fa]\x9d\x9f\x8965\u026d\xc5\u07a0\x00\x00\u07d4k\xd4W\xad\xe0Qy]\xf3\xf2F\\89\xae\xd3\xc5\xde\xe9x\x8964\xbf9\xab\x98x\x80\x00\u07d4k\xe1c\x13d>\xbc\x91\xff\x9b\xb1\xa2\xe1\x16\xb8T\xea\x93:E\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4k\xe7Y^\xa0\xf0hH\x9a'\x01\xecFI\x15\x8d\xdcC\xe1x\x89lk\x93[\x8b\xbd@\x00\x00\u07d4k\xe9\x03\x0e\xe6\xe2\xfb\u0111\xac\xa3\xde@\"\xd3\x01w+{}\x89\x01s\x17\x90SM\xf2\x00\x00\xe0\x94k\xec1\x1a\xd0P\b\xb4\xaf5<\x95\x8c@\xbd\x06s\x9a?\xf3\x8a\x03w\xf6*\x0f\nbp\x00\x00\u07d4k\xf7\xb3\xc0e\xf2\xc1\xe7\xc6\xeb\t+\xa0\xd1Pf\xf3\x93\u0478\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4k\xf8o\x1e/+\x802\xa9\\Mw8\xa1\t\xd3\xd0\xed\x81\x04\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4l\x05\xe3N^\xf2\xf4.\u041d\xef\xf1\x02l\xd6k\xcbi`\xbb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4l\b\xa6\xdc\x01s\xc74)U\xd1\xd3\xf2\xc0e\xd6/\x83\xae\u01c9\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4l\n\xe9\xf0C\xc84\xd4Bq\xf14\x06Y=\xfe\tO8\x9f\x89RD*\xe13\xb6*\x80\x00\u07d4l\f\xc9\x17\xcb\xee}|\t\x97c\xf1Nd\xdf}4\xe2\xbf\t\x89\r\x8drkqw\xa8\x00\x00\xe0\x94l\x0eq/@\\Yr_\xe8)\xe9wK\xf4\xdf\u007fM\xd9e\x8a\f(h\x88\x9c\xa6\x8aD\x00\x00\xe0\x94l\x10\x12\x05\xb3#\xd7uD\xd6\xdcR\xaf7\xac\xa3\xce\xc6\xf7\xf1\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4l\x15\xec5 \xbf\x8e\xbb\xc8 
\xbd\x0f\xf1\x97x7T\x94\u03dd\x89l\xb7\xe7Hg\xd5\xe6\x00\x00\xe0\x94l\x1d\xdd3\xc8\x19f\u0706!w`q\xa4\x12\x94\x82\xf2\xc6_\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4l%2\u007f\x8d\u02f2\xf4^V\x1e\x86\xe3]\x88P\xe5:\xb0Y\x89;\xcd\xf9\xba\xfe\xf2\xf0\x00\x00\u07d4l.\x9b\xe6\u052bE\x0f\xd1%1\xf3?\x02\x8caFt\xf1\x97\x89\xc2\x12z\xf8X\xdap\x00\x00\u07d4l5\x9eX\xa1=Ex\xa93\x8e3\\g\xe7c\x9f_\xb4\u05c9\v\xd1[\x94\xfc\x8b(\x00\x00\u07d4l=\x18pA&\xaa\x99\xee3B\xce`\xf5\xd4\xc8_\x18g\u0349\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4lGK\xc6jTx\x00f\xaaOQ.\xef\xa7s\xab\xf9\x19\u01c9\x05\x18\x83\x15\xf7v\xb8\x00\x00\u07d4lNBn\x8d\xc0\x05\u07e3Ql\xb8\xa6\x80\xb0.\ua56e\x8e\x89Hz\x9a0E9D\x00\x00\u07d4lR\xcf\b\x95\xbb5\xe6V\x16\x1eM\xc4j\xe0\xe9m\xd3\xe6,\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4lT\"\xfbK\x14\xe6\u064b`\x91\xfd\xecq\xf1\xf0\x86@A\x9d\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4l\\:T\u0367\xc2\xf1\x18\xed\xbaCN\xd8\x1en\xbb\x11\xddz\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4lc\xf8EV\u0490\xbf\u0359\xe44\ue657\xbf\xd7yWz\x89lk\x93[\x8b\xbd@\x00\x00\u07d4lc\xfc\x85\x02\x9a&T\u05db+\xeaM\xe3I\xe4REw\u0149#\xc7W\a+\x8d\xd0\x00\x00\u07d4led\xe5\xc9\xc2N\xaa\xa7D\xc9\xc7\xc9h\xc9\xe2\xc9\xf1\xfb\xae\x89I\x9bB\xa2\x119d\x00\x00\xe0\x94lg\xd6\xdb\x1d\x03Ql\x12\x8b\x8f\xf24\xbf=I\xb2m)A\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4lg\xe0\u05f6.*\bPiE\xa5\xdf\xe3\x82c3\x9f\x1f\"\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4lj\xa0\xd3\vdr\x19\x90\xb9PJ\x86?\xa0\xbf\xb5\xe5}\xa7\x89\x92^\x06\xee\xc9r\xb0\x00\x00\u07d4lqJX\xff\xf6\xe9}\x14\xb8\xa5\xe3\x05\xeb$@eh\x8b\xbd\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4l\x80\rKI\xba\a%\x04`\xf9\x93\xb8\xcb\xe0\v&j%S\x89\x1a\xb2\xcf|\x9f\x87\xe2\x00\x00\u07d4l\x80\x8c\xab\xb8\xff_\xbbc\x12\xd9\xc8\xe8J\xf8\xcf\x12\xef\bu\x89\xd8\xd8X?\xa2\xd5/\x00\x00\xe0\x94l\x82 )!\x8a\xc8\xe9\x8a&\f\x1e\x06@)4\x889\x87[\x8a\x01\x0f\x97\xb7\x87\xe1\xe3\b\x00\x00\u07d4l\x84\u02e7|m\xb4\xf7\xf9\x0e\xf1=^\xe2\x1e\x8c\xfc\u007f\x83\x14\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94l\x86\x87\xe3Aw\x10\xbb\x8a\x93U\x90!\xa1F\x9ej\x86\xbcw\x8a\x02[-\xa2x\xd9k{\x80\x00\xe0\x94l\x88,'s,\xef\\|\x13\xa6\x86\xf0\xa2\xeawUZ\u0089\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4l\xa5\xde\x00\x81}\xe0\xce\xdc\xe5\xfd\x00\x01(\xde\xde\x12d\x8b<\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4l\xa6\xa12\xce\x1c\u0488\xbe\xe3\x0e\xc7\xcf\xef\xfb\x85\xc1\xf5\nT\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94l\xb1\x1e\xcb2\xd3\u0382\x96\x011\x066\xf5\xa1\f\xf7\u03db_\x8a\x04?\u851c8\x01\xf5\x00\x00\u07d4l\xc1\xc8x\xfal\u078a\x9a\v\x83\x11$~t\x1eFB\xfem\x895e\x9e\xf9?\x0f\xc4\x00\x00\xe0\x94l\xcb\x03\xac\xf7\xf5<\xe8z\xad\xcc!\xa9\x93-\xe9\x15\xf8\x98\x04\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4l\xd2\x12\xae\xe0N\x01?=*\xba\u04a0#`k\xfb\\j\u01c9lj\xccg\u05f1\xd4\x00\x00\u07d4l\xd2(\xdcq!i0\u007f\xe2|\xebtw\xb4\x8c\xfc\x82r\xe5\x89\x044\xea\x94\u06caP\x00\x00\u07d4l\xe1\xb0\xf6\xad\xc4pQ\xe8\xab8\xb3\x9e\xdbA\x86\xb0;\xab\u0309Ay\x97\x94\xcd$\xcc\x00\x00\u07d4l\xea\xe3s=\x8f\xa4=l\xd8\f\x1a\x96\xe8\xeb\x93\x10\x9c\x83\xb7\x89\x10'\x94\xad \xdah\x00\x00\u07d4m\x05i\xe5U\x8f\xc7\xdf'f\xf2\xba\x15\u070a\xef\xfc[\xebu\x89\xd8\xe6\x00\x1el0+\x00\x00\u07d4m\x12\x0f\f\xaa\xe4O\xd9K\xca\xfeU\xe2\xe2y\uf5ba\\z\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4m\x14V\xff\xf0\x10N\xe8D\xa31G7\x8438\xd2L\xd6l\x89\a\xb0l\xe8\u007f\xddh\x00\x00\u07d4m 
\xef\x97\x04g\nP\v\xb2i\xb5\x83.\x85\x98\x02\x04\x9f\x01\x89\a\f\x1c\xc7;\x00\xc8\x00\x00\xe0\x94m/\x97g4\xb9\xd0\a\r\x18\x83\xcfz\u02b8\xb3\xe4\x92\x0f\xc1\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4m9\xa9\u93c1\xf7i\xd7:\xad,\xea\xd2v\xac\x13\x87\xba\xbe\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4m;x6\xa2\xb9\u0619r\x1aM#{R#\x85\xdc\xe8\xdf\u034966\xc2^f\xec\xe7\x00\x00\u07d4m?+\xa8V\u033b\x027\xfava\x15k\x14\xb0\x13\xf2\x12@\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94m@\b\xb4\xa8\x88\xa8&\xf2H\xeej\v\r\xfd\xe9\xf92\x10\xb9\x8a\x01'\xfc\xb8\xaf\xae \xd0\x00\x00\u07d4m@\xca'\x82m\x97s\x1b>\x86\xef\xfc\u05f9*Aa\xfe\x89\x89lk\x93[\x8b\xbd@\x00\x00\u07d4mD\x97J1\u0447\xed\xa1m\xddG\xb9\xc7\xecP\x02\xd6\x1f\xbe\x892\xf5\x1e\u06ea\xa30\x00\x00\xe0\x94mK\\\x05\xd0j \x95~\x17H\xabm\xf2\x06\xf3C\xf9/\x01\x8a\x02\x1f6\x06\x99\xbf\x82_\x80\x00\xe0\x94mL\xbf=\x82\x84\x83:\xe9\x93D0>\b\xb4\xd6\x14\xbf\xda;\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4mY\xb2\x1c\xd0\xe2t\x88\x04\u066b\xe0d\xea\u00be\xf0\xc9_'\x89lk\x93[\x8b\xbd@\x00\x00\u07d4mc\u04ce\xe8\xb9\x0e\x0en\xd8\xf1\x92\xed\xa0Q\xb2\u05a5\x8b\xfd\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\u07d4mf4\xb5\xb8\xa4\x01\x95\xd9I\x02z\xf4\x82\x88\x02\t,\ued89\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94m}\x1c\x94\x95\x11\xf8\x83\x03\x80\x8c`\xc5\xea\x06@\xfc\xc0&\x83\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4m\x84m\xc1&W\xe9\x1a\xf2P\bQ\x9c>\x85\u007fQp}\u0589\xf8\xd3\v\xc9#B\xf8\x00\x00\u07d4m\x91\x93\x99k\x19F\x17!\x11\x06\xd1c^\xb2l\u0136ll\x89\x15\xaa\x1e~\x9d\xd5\x1c\x00\x00\u07d4m\x99\x97P\x98\x82\x02~\xa9G#\x14$\xbe\xde\xde)e\u043a\x89l\x81\u01f3\x11\x95\xe0\x00\x00\u07d4m\xa0\xed\x8f\x1di3\x9f\x05\x9f*\x0e\x02G\x1c\xb4O\xb8\u00fb\x892\xbc8\xbbc\xa8\x16\x00\x00\u07d4m\xb7+\xfdC\xfe\xf4e\xcaV2\xb4Z\xabra@N\x13\xbf\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94m\xbe\x8a\xbf\xa1t(\x06&9\x817\x1b\xf3\xd3U\x90\x80kn\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4m\xc3\xf9+\xaa\x1d!\u06b78+\x892a\xa05o\xa7\xc1\x87\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4m\xc7\x05:q\x86\x16\xcf\u01cb\xeec\x82\xeeQ\xad\xd0\xc7\x030\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94m\xcc~d\xfc\xaf\xcb\xc2\xdcl\x0e^f,\xb3G\xbf\xfc\xd7\x02\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4m\xda_x\x8alh\x8d\u07d2\x1f\xa3\x85.\xb6\xd6\xc6\xc6)f\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4m\xdb`\x92w\x9dXB\xea\xd3x\xe2\x1e\x81 
\xfdLk\xc12\x89lk\x93[\x8b\xbd@\x00\x00\u07d4m\xdf\xefc\x91U\u06ab\n\\\xb4\x95:\xa8\u016f\xaa\x88\x04S\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4m\xe0/-\xd6~\xfd\xb794\x02\xfa\x9e\xaa\xcb\xcfX\x9d.V\x89@\x13\x8b\x91~\u07f8\x00\x00\u07d4m\u4d418\\\xf7\xfc\x9f\xe8\xc7}\x13\x1f\xe2\xeew$\xc7j\x89})\x97s=\xcc\xe4\x00\x00\u07d4m\xe4\xd1R\x19\x18/\xaf:\xa2\xc5\xd4\xd2Y_\xf20\x91\xa7'\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4m\xed\xf6.t?M,*K\x87\xa7\x87\xf5BJz\xeb9<\x89\t\xc2\x00vQ\xb2P\x00\x00\u07d4m\xf2Of\x85\xa6/y\x1b\xa37\xbf?\xf6~\x91\xf3\u053c:\x89ukI\xd4\nH\x18\x00\x00\u07d4m\xf5\xc8O{\x90\x9a\xab>a\xfe\x0e\xcb\x1b;\xf2`\"*\u0489\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4m\xff\x90\xe6\xdc5\x9d%\x90\x88+\x14\x83\xed\xbc\xf8\x87\xc0\xe4#\x8965\u026d\xc5\u07a0\x00\x00\u07d4n\x01\xe4\xadV\x9c\x95\xd0\a\xad\xa3\r^-\xb1(\x88I\"\x94\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n\a;f\u0478\xc6gD\u0600\x96\xa8\u0759\xec~\x02(\u0689\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n\x0e\xe7\x06\x12\xc9v(}I\x9d\u07e6\xc0\xdc\xc1,\x06\xde\xea\x89\a\v\u0579V!F\x00\x00\xe0\x94n\x12\xb5\x1e\"[JCr\xe5\x9a\u05e2\xa1\xa1>\xa3\u04e17\x8a\x03\x00F\xc8\xccw_\x04\x00\x00\u07d4n\x1a\x04l\xaf[JW\xf4\xfdK\xc1sb!&\xb4\xe2\xfd\x86\x89a\t=|,m8\x00\x00\u07d4n\x1e\xa4\xb1\x83\xe2R\u027bwg\xa0\x06\u05346\x96\u02ca\xe9\x89\x0f\xf3x<\x85\xee\u0400\x00\u07d4n%[p\n\xe7\x13\x8aK\xac\xf2(\x88\xa9\xe2\xc0\n(^\xec\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n'\n\xd5)\xf1\xf0\xb8\xd9\xcbm$'\xec\x1b~-\xc6Jt\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4n.\xab\x85\u0709\xfe)\xdc\n\xa1\x852G\u06b4:R=V\x89\x04V9\x18$O@\x00\x00\u07d4n:Q\xdbt=3M/\xe8\x82$\xb5\xfe|\x00\x8e\x80\xe6$\x89\x05\xbf\v\xa6cOh\x00\x00\u07d4nL*\xb7\xdb\x02i9\xdb\u04fch8J\xf6`\xa6\x18\x16\xb2\x89\t\r\x97/22<\x00\x00\u07d4nM.9\u0203f)\u5d07\xb1\x91\x8af\x9a\xeb\u07556\x8965\u026d\xc5\u07a0\x00\x00\u07d4n\\-\x9b\x1cTj\x86\xee\xfd]\nQ \xc9\xe4\xe70\x19\x0e\x89\n\xd2\x01\xa6yO\xf8\x00\x00\u07d4n`\xae\u19cf\x8e\u068bBLs\xe3S5J\xe6|0B\x89\xbd5\xa4\x8d\x99\x19\xe6\x00\x00\u07d4nd\xe6\x12\x9f\"N7\x8c\x0ensj~z\x06\xc2\x11\xe9\xec\x8965\u026d\xc5\u07a0\x00\x00\u07d4nm[\xbb\xb9\x05;\x89\xd7D\xa2s\x16\u00a7\xb8\xc0\x9bT}\x891Rq\n\x02>m\x80\x00\u07d4nr\xb2\xa1\x18j\x8e)\x16T;\x1c\xb3jh\x87\x0e\xa5\u0457\x89\n\x15D\xbe\x87\x9e\xa8\x00\x00\u07d4nv\x1e\xaa\x0f4_w{TA\xb7:\x0f\xa5\xb5k\x85\xf2-\x89lk\x93[\x8b\xbd@\x00\x00\u07d4ny\xed\u0504[\anL\u060d\x18\x8bnC-\xd9?5\xaa\x893\xc5I\x901r\f\x00\x00\u07d4n\x82\x12\xb7\"\xaf\xd4\b\xa7\xa7>\xd3\xe29^\xe6EJ\x030\x89\b\x9e\x91y\x94\xf7\x1c\x00\x00\u07d4n\x84\x87m\xbb\x95\xc4\vfV\xe4+\xa9\xae\xa0\x8a\x99;T\u0709;\xbc`\xe3\xb6\u02fe\x00\x00\u07d4n\x84\xc2\xfd\x18\xd8\tW\x14\xa9h\x17\x18\x9c\xa2\x1c\xcab\xba\xb1\x89\x12{lp&!\u0340\x00\u07d4n\x86m\x03-@Z\xbd\xd6\\\xf6QA\x1d\x807\x96\xc2#\x11\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94n\x89\x9eY\xa9\xb4\x1a\xb7\xeaA\xdfu\x17\x86\x0f*\xcbY\xf4\xfd\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4n\x89\xc5\x1e\xa6\xde\x13\xe0l\xdct\x8bg\xc4A\x0f\u9f2b\x03\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n\x8a&h\x9fz/\xde\xfd\x00\x9c\xba\xaaS\x10%4P\u06ba\x89o!7\x17\xba\xd8\xd3\x00\x00\u07d4n\x96\xfa\xed\xa3\x05C\x02\xc4_X\xf1a2L\x99\xa3\xee\xbbb\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4n\xb0\xa5\xa9\xae\x96\xd2,\xf0\x1d\x8f\xd6H;\x9f8\xf0\x8c,\x8b\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n\xb3\x81\x96\x17@@X&\x8f\f<\xff5\x96\xbf\xe9\x14\x8c\x1c\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\xe0\x94n\xb5W\x8ak\xb7\xc3!S\x19[\r\x80 
\xa6\x91HR\xc0Y\x8a\x8b\u00ab\xf4\x02!\xf4\x80\x00\x00\u07d4n\xbb^iW\xaa\x82\x1e\xf6Y\xb6\x01\x8a9:PL\xaeDP\x89lk\x93[\x8b\xbd@\x00\x00\u07d4n\xbc\xf9\x95\u007f_\xc5\u916d\xd4u\";\x04\xb8\xc1Jz\xed\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4n\xc3e\x95q\xb1\x1f\x88\x9d\xd49\xbc\xd4\xd6u\x10\xa2[\xe5~\x89\x06\xaa\xf7\xc8Qm\f\x00\x00\u07d4n\u021b9\xf9\xf5'jU>\x8d\xa3\x0en\xc1z\xa4~\xef\u01c9\x18BO_\v\x1bN\x00\x00\u07d4n\xc9m\x13\xbd\xb2M\u01e5W)?\x02\x9e\x02\xddt\xb9zU\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n\xca\xef\xa6\xfc>\xe54bm\xb0,o\x85\xa0\u00d5W\x1ew\x89 \x86\xac5\x10R`\x00\x00\u07d4n\u04a1+\x02\xf8\u0188\u01f5\u04e6\xea\x14\xd66\x87\u06b3\xb6\x89lk\x93[\x8b\xbd@\x00\x00\u07d4n\u0604E\x9f\x80\x9d\xfa\x10\x16\xe7p\xed\xaf>\x9f\xefF\xfa0\x89\xb8R\xd6x \x93\xf1\x00\x00\xe0\x94n\xdf\u007fR\x83r\\\x95>\xe6C\x17\xf6a\x88\xaf\x11\x84\xb03\x8a\x01\xb4d1\x1dE\xa6\x88\x00\x00\u07d4n\xe8\xaa\xd7\xe0\xa0e\u0605-|;\x9an_\xdcK\xf5\f\x00\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4n\xef\u0705\x0e\x87\xb7\x15\xc7'\x91w<\x03\x16\xc3U\x9bX\xa4\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4n\xf9\xe8\u0276!}Vv\x9a\xf9}\xbb\x1c\x8e\x1b\x8b\xe7\x99\u0489\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4n\xfb\xa8\xfb*\u0176s\a)\xa9r\xec\"D&\xa2\x87\u00ed\x89\x0fY\x85\xfb\xcb\xe1h\x00\x00\xe0\x94n\xfd\x90\xb55\xe0\v\xbd\x88\x9f\xda~\x9c1\x84\xf8y\xa1Q\u06ca\x02#\x85\xa8'\xe8\x15P\x00\x00\u07d4o\x05\x16f\xcbO{\u04b1\x90r!\xb8)\xb5U\u05e3\xdbt\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4o\x0e\xdd#\xbc\xd8_`\x15\xf9(\x9c(\x84\x1f\xe0L\x83\xef\xeb\x89\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\u07d4o\x13zq\xa6\xf1\x97\xdf,\xbb\xf0\x10\u073d\x89a\t=|,m8\x00\x00\u07d4p\x10\xbe-\xf5{\u042b\x9a\xe8\x19l\xd5\n\xb0\xc5!\xab\xa9\xf9\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4p#\xc7\tV\xe0J\x92\xd7\x00%\xaa\u0497\xb59\xaf5Xi\x89lk\x93[\x8b\xbd@\x00\x00\u07d4p%\x96]+\x88\xda\x19}DY\xbe=\xc98cD\xcc\x1f1\x89l\xb7\xe7Hg\xd5\xe6\x00\x00\u07d4p(\x02\xf3m\x00%\x0f\xabS\xad\xbc\u0596\xf0\x17oc\x8aI\x89lk\x93[\x8b\xbd@\x00\x00\u07d4pH\x19\xd2\xe4Mn\xd1\xda%\xbf\u0384\u011f\u0322V\x13\xe5\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4pJn\xb4\x1b\xa3O\x13\xad\xdd\xe7\xd2\xdb}\xf0I\x15\u01e2!\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4pJ\xb1\x15\r^\x10\xf5\xe3I\x95\b\xf0\xbfpe\x0f\x02\x8dK\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4pJ\xe2\x1dv-n\x1d\xde(\xc25\xd11\x04Yr6\xdb\x1a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4pM$<)x\xe4l,\x86\xad\xbe\xcd$n;)_\xf63\x89m\x12\x1b\xeb\xf7\x95\xf0\x00\x00\u07d4pM]\xe4\x84m9\xb5<\xd2\x1d\x1cI\xf0\x96\xdb\\\x19\xba)\x89\b=lz\xabc`\x00\x00\u07d4p]\xdd85T\x82\xb8\xc7\u04f5\x15\xbd\xa1P\r\xd7\u05e8\x17\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94pan(\x92\xfa&\x97\x05\xb2\x04k\x8f\xe3\xe7/\xa5X\x16\u04ca\x04<3\xc1\x93ud\x80\x00\x00\u07d4pg\x0f\xbb\x05\xd30\x14DK\x8d\x1e\x8ew\x00%\x8b\x8c\xaam\x89lk\x93[\x8b\xbd@\x00\x00\u07d4p\x81\xfak\xaa\xd6\u03f7\xf5\x1b,\xca\x16\xfb\x89p\x99\x1ad\xba\x89\f\xae\xc0\x05\xf6\xc0\xf6\x80\x00\xe0\x94p\x85\xae~~M\x93!\x97\xb5\u01c5\x8c\x00\xa3gF&\xb7\xa5\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4p\x86\xb4\xbd\xe3\xe3]J\xeb$\xb8%\xf1\xa2\x15\xf9\x9d\x85\xf7E\x89lh\xcc\u041b\x02,\x00\x00\u07d4p\x8a*\xf4%\u03b0\x1e\x87\xff\xc1\xbeT\xc0\xf52\xb2\x0e\xac\u0589\aE\u0503\xb1\xf5\xa1\x80\x00\u07d4p\x8e\xa7\a\xba\xe45\u007f\x1e\xbe\xa9Y\u00e2P\xac\u05aa!\xb3\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u0794p\x8f\xa1\x1f\xe3=\x85\xad\x1b\xef\u02ee8\x18\xac\xb7\x1fj}~\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4p\x9101\x16\xd5\xf28\x9b##\x8bMej\x85\x96\u0644\u04c9;N~\x80\xaaX3\x00\x00\u07d4p\x99\xd1/n\xc6V\x89\x9b\x04\x9avW\x06]b\x99h\x92\u0209\x15\xaf\x1dx\xb5\x8c@\x00\
x00\u07d4p\x9f\xe9\xd2\xc1\xf1\xceB |\x95\x85\x04J`\x89\x9f5\x94/\x89lk\x93[\x8b\xbd@\x00\x00\u07d4p\xa05I\xaaah\xe9~\x88\xa5\b3\nZ\v\xeatq\x1a\x89Hz\x9a0E9D\x00\x00\u07d4p\xa4\x06}D\x8c\xc2]\xc8\xe7\x0ee\x1c\xea|\xf8N\x92\x10\x9e\x89\t\x8a}\x9b\x83\x14\xc0\x00\x00\u07d4p\xab4\xbc\x17\xb6o\x9c;c\xf1Q'O*r|S\x92c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4p\xc2\x13H\x8a\x02\f<\xfb9\x01N\xf5\xbad\x04rK\u02a3\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4p\xd2^\xd2\u022d\xa5\x9c\b\x8c\xf7\r\xd2+\xf2\u06d3\xac\xc1\x8a\x899GEE\u4b7c\x00\x00\u07d4p\xe5\xe9\xdas_\xf0w$\x9d\u02da\xaf=\xb2\xa4\x8d\x94\x98\xc0\x8965\u026d\xc5\u07a0\x00\x00\u07d4p\xfe\xe0\x8b\x00\xc6\xc2\xc0Jp\xc0\xce=\x92\u03ca\x01Z\xf1\u05cbX\xc4\x00\x00\x00\u0794q\v\xe8\xfd^)\x18F\x8b\u2abe\xa8\r\x82\x845\u05d6\x12\x88\xf4?\xc2\xc0N\xe0\x00\x00\u07d4q\x13]\x8f\x05\x96<\x90ZJ\a\x92)\t#Z\x89jR\ua262\xa1]\tQ\x9b\xe0\x00\x00\u07d4q\x1e\xcfw\xd7\x1b=\x0e\xa9\\\xe4u\x8a\xfe\u0379\xc11\a\x9d\x89)3\x1eeX\xf0\xe0\x00\x00\u07d4q!?\xca14\x04 N\u02e8q\x97t\x1a\xa9\xdf\xe9c8\x89\x03@\xaa\xd2\x1b;p\x00\x00\xe0\x94q+vQ\x02\x14\xdcb\x0fl:\x1d\u049a\xa2+\xf6\xd2\x14\xfb\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4q/\xf77\n\x13\xed6\ts\xfe\u071f\xf5\xd2\xc9:P^\x9e\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4q3\x84:x\xd99\u019dD\x86\xe1\x0e\xbc{`*4\x9f\xf7\x89\x11\xd5\xca\xcc\xe2\x1f\x84\x00\x00\u07d4qH\xae\xf32a\xd8\x03\x1f\xac?q\x82\xff5\x92\x8d\xafT\u0649\xdeB\xee\x15D\u0750\x00\x00\u07d4qcu\x8c\xbblLR^\x04\x14\xa4\n\x04\x9d\xcc\xcc\xe9\x19\xbb\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4qh\xb3\xbb\x8c\x16s!\u067d\xb0#\xa6\xe9\xfd\x11\xaf\u026f\u0649a\t=|,m8\x00\x00\u07d4qirN\xe7\"q\xc54\xca\xd6B\x0f\xb0N\xe6D\u02c6\xfe\x89\x16<+@\u06e5R\x00\x00\u07d4qj\xd3\xc3:\x9b\x9a\n\x18\x96sW\x96\x9b\x94\xee}*\xbc\x10\x89\x1a!\x17\xfeA*H\x00\x00\xe0\x94qk\xa0\x1e\xad*\x91'\x065\xf9_%\xbf\xaf-\xd6\x10\xca#\x8a\ty\xe7\x01 V\xaax\x00\x00\u07d4qmP\u0320\x1e\x93\x85\x00\xe6B\x1c\xc0p\xc3P|g\u04c7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4qv,cg\x8c\x18\xd1\xc67\x8c\xe0h\xe6f8\x13\x15\x14~\x89lk\x93[\x8b\xbd@\x00\x00\u07d4qxL\x10Q\x17\xc1\xf6\x895y\u007f\xe1Y\xab\xc7NC\xd1j\x89l\x81\u01f3\x11\x95\xe0\x00\x00\xe0\x94qyro\\q\xae\x1bm\x16\xa6\x84(\x17Nk4\xb26F\x8a\x01\x8e\xa2P\t|\xba\xf6\x00\x00\xe0\x94q|\xf9\xbe\xab680\x8d\xed~\x19^\f\x86\x13-\x16?\xed\x8a\x032n\xe6\xf8e\xf4\"\x00\x00\u07d4q\x80\xb8>\xe5WC\x17\xf2\x1c\x80r\xb1\x91\u0615\xd4aS\u00c9\x18\xef\xc8J\xd0\u01f0\x00\x00\u07d4q\x94kq\x17\xfc\x91^\xd1\a8_B\u065d\xda\xc62I\u0089lk\x93[\x8b\xbd@\x00\x00\xe0\x94q\x9e\x89\x1f\xbc\xc0\xa3>\x19\xc1-\xc0\xf0 
9\xca\x05\xb8\x01\u07ca\x01OU8F:\x1bT\x00\x00\u07d4q\xc7#\n\x1d5\xbd\u0581\x9e\u0539\xa8\x8e\x94\xa0\xeb\a\x86\u0749\uc80b5=$\x14\x00\x00\u07d4q\xd2\xccm\x02W\x8ce\xf7\r\xf1\x1bH\xbe\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4r\x83\xcdFu\xdaX\u0116UaQ\xda\xfd\x80\xc7\xf9\x95\xd3\x18\x89)3\x1eeX\xf0\xe0\x00\x00\u07d4r\x86\xe8\x9c\xd9\u078fz\x8a\x00\xc8o\xfd\xb59\x92\u0752Q\u0449i*\xe8\x89p\x81\xd0\x00\x00\u07d4r\x8f\x9a\xb0\x80\x15}\xb3\a1V\xdb\xca\x1a\x16\x9e\xf3\x17\x94\a\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4r\x94\xc9\x18\xb1\xae\xfbM%\x92~\xf9\u05d9\xe7\x1f\x93\xa2\x8e\x85\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94r\x94\uc763\x10\xbckK\xbd\xf5C\xb0\xefE\xab\xfc>\x1bM\x8a\x04\xa8\x9fT\xef\x01!\xc0\x00\x00\u07d4r\x9a\xadF'tNS\xf5\xd6c\t\xaatD\x8b:\xcd\xf4o\x89lk\x93[\x8b\xbd@\x00\x00\u07d4r\xa2\xfc\x86u\xfe\xb9r\xfaA\xb5\r\xff\u06fa\xe7\xfa*\u07f7\x89\x9a\xb4\xfcg\xb5(\xc8\x00\x00\u07d4r\xa8&\b&)G&\xa7[\xf3\x9c\u066a\x9e\a\xa3\xea\x14\u0349lk\x93[\x8b\xbd@\x00\x00\u07d4r\xb0Yb\xfb*\u0549\xd6Z\xd1j\"U\x9e\xba\x14X\xf3\x87\x89\a?u\u0460\x85\xba\x00\x00\u07d4r\xb5c?\xe4w\xfeT.t/\xac\xfdi\f\x13xT\xf2\x16\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4r\xb7\xa0=\xda\x14\u029cf\x1a\x1dF\x9f\xd376\xf6s\xc8\xe8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4r\xb9\x04D\x0e\x90\xe7 \u05ac\x1c*\u05dc2\x1d\xcc\x1c\x1a\x86\x89T\x06\x923\xbf\u007fx\x00\x00\xe0\x94r\xb9\nM\xc0\x97#\x94\x92\u0179w}\xcd\x1eR\xba+\xe2\u008a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4r\xbb'\u02d9\xf3\xe2\xc2\u03d0\xa9\x8fp}0\xe4\xa2\x01\xa0q\x89X\xe7\x92n\xe8X\xa0\x00\x00\xe0\x94r\xc0\x83\xbe\xad\xbd\xc2'\xc5\xfbC\x88\x15\x97\xe3.\x83\xc2`V\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4r\xcd\x04\x8a\x11\x05tH)\x83I-\xfb\x1b\xd2yB\xa6\x96\xba\x89lk\x93[\x8b\xbd@\x00\x00\u07d4r\xd0=M\xfa\xb3P\f\xf8\x9b\x86\x86o\x15\xd4R\x8e\x14\xa1\x95\x89\xf3K\x82\xfd\x8e\x91 \x00\x00\u07d4r\u06bb[n\ud799\xbe\x91X\x88\xf6V\x80V8\x16\b\xf8\x89\vL\x96\xc5,\xb4\xfe\x80\x00\u07d4r\xfbI\u009d#\xa1\x89P\u0132\xdc\r\xdfA\x0fS-oS\x89lk\x93[\x8b\xbd@\x00\x00\u07d4r\xfe\xaf\x12EyR9Td[\u007f\xaf\xff\x03x\xd1\xc8$.\x8965\u026d\xc5\u07a0\x00\x00\u07d4s\x01\xdcL\xf2mq\x86\xf2\xa1\x1b\xf8\xb0\x8b\xf2)F?d\xa3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4s\x04G\xf9|\xe9\xb2_\"\xba\x1a\xfb6\xdf'\xf9Xk\ub6c9,s\xc97t,P\x00\x00\u07d4s\x06\xde\x0e(\x8bV\xcf\u07d8~\xf0\xd3\xcc)f\a\x93\xf6\u0749\x1b\x8a\xbf\xb6.\xc8\xf6\x00\x00\xe0\x94s\r\x87c\u01a4\xfd\x82J\xb8\xb8Y\x16\x1e\xf7\xe3\xa9j\x12\x00\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4s\x12\x81sH\x95(\x01.v\xb4\x1a^(\u018b\xa4\xe3\xa9\u050965\u026d\xc5\u07a0\x00\x00\u07d4s\x13F\x12\bETUFTE\xa4Y\xb0l7s\xb0\xeb0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4s/\xea\xd6\x0f{\xfd\u05a9\xde\u0101%\xe3s]\xb1\xb6eO\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4sB#\xd2\u007f\xf2>Y\x06\xca\xed\"YW\x01\xbb4\x83\f\xa1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4sG>r\x11Q\x10\xd0\xc3\xf1\x17\b\xf8nw\xbe+\xb0\x98<\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4sRXm\x02\x1a\xd0\xcfw\xe0\xe9(@JY\xf3t\xffE\x82\x89\xb8Pz\x82\a( 
\x00\x00\u07d4sU\v\xebs+\xa9\u076f\xdaz\xe4\x06\xe1\x8f\u007f\xeb\x0f\x8b\xb2\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4s[\x97\xf2\xfc\x1b\xd2K\x12\an\xfa\xf3\xd1(\x80s\xd2\f\x8c\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4s^2\x86f\xedV7\x14+3\x06\xb7|\xccT`\xe7,=\x89j\xb8\xf3xy\u0251\x00\x00\u07d4sc\u0350\xfb\xab[\xb8\u011a\xc2\x0f\xc6,9\x8f\xe6\xfbtL\x89lk\x93[\x8b\xbd@\x00\x00\u07d4skDP=\xd2\xf6\xddTi\xffL[-\xb8\xeaO\xece\u0409\x11\x04\xeeu\x9f!\xe3\x00\x00\xe0\x94sk\xf1@,\x83\x80\x0f\x89>X1\x92X*\x13N\xb52\xe9\x8a\x02\x1e\x19\u0493\xc0\x1f&\x00\x00\xe0\x94s\x8c\xa9M\xb7\u038b\xe1\xc3\x05l\u0598\x8e\xb3v5\x9f3S\x8a\x05f[\x96\xcf5\xac\xf0\x00\x00\u07d4s\x91K\"\xfc/\x13\x15\x84$}\x82\xbeO\ucfd7\x8a\u053a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4s\x93'\t\xa9\u007f\x02\u024eQ\xb0\x911(e\x12#\x85\xae\x8e\x89M\x85<\x8f\x89\b\x98\x00\x00\u07d4s\x93\xcb\xe7\xf9\xba!e\xe5\xa7U5\x00\xb6\xe7]\xa3\xc3:\xbf\x89\x05k\xc7^-c\x10\x00\x00\u07d4s\xb4\u0519\xde?8\xbf5\xaa\xf7i\xa6\xe3\x18\xbcm\x126\x92\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94s\xbe\xddo\xda{\xa3'!\x85\b{cQ\xfc\x13=HN7\x8a\x01\x12&\xbf\x9d\xceYx\x00\x00\u07d4s\xbf\xe7q\x0f1\u02b9I\xb7\xa2`O\xbfR9\xce\xe7\x90\x15\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94s\u03c0\xae\x96\x88\xe1X\x0eh\xe7\x82\xcd\b\x11\xf7\xaaIM,\x8a\x01\xa4\xab\xa2%\xc2\a@\x00\x00\xe0\x94s\xd7&\x9f\xf0l\x9f\xfd3uL\xe5\x88\xf7J\x96j\xbb\xbb\xba\x8a\x01e\xc9fG\xb3\x8a \x00\x00\u07d4s\xd8\xfe\xe3\u02c6M\xce\"\xbb&\u029c/\bm^\x95\xe6;\x8965\u026d\xc5\u07a0\x00\x00\u07d4s\xdf<>yU\xf4\xf2\xd8Y\x83\x1b\xe3\x80\x00\xb1\ak8\x84\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4s\u48b6\f\U0010e2ef+w~\x17Z[\x1eM\f-\x8f\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94t\n\xf1\xee\xfd3e\u05cb\xa7\xb1,\xb1\xa6s\xe0j\arF\x8a\x04+\xf0kx\xed;P\x00\x00\xe0\x94t\v\xfdR\xe0\x16g\xa3A\x9b\x02\x9a\x1b\x8eEWj\x86\xa2\u06ca\x03\x8e\xba\xd5\xcd\xc9\x02\x80\x00\x00\u07d4t\x0fd\x16\x14w\x9d\u03e8\x8e\xd1\xd4%\xd6\r\xb4*\x06\f\xa6\x896\"\xc6v\b\x10W\x00\x00\u07d4t\x12\u027c0\xb4\xdfC\x9f\x021\x00\xe69$\x06j\xfdS\xaf\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4t\x16\x93\xc3\x03vP\x85\x13\b \xcc+c\xe9\xfa\x92\x13\x1b\x89A\rXj \xa4\xc0\x00\x00\u07d4t!\xce[\xe3\x81s\x8d\u0703\xf0&!\x97O\xf0hly\xb8\x89Xx\x8c\xb9K\x1d\x80\x00\x00\u07d4t1j\xdf%7\x8c\x10\xf5v\u0574\x1aoG\xfa\x98\xfc\xe3=\x89\x128\x13\x1e\\z\xd5\x00\x00\u07d4t6Q\xb5^\xf8B\x9d\xf5\f\xf8\x198\xc2P\x8d\xe5\u0207\x0f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4t=\xe5\x00&\xcag\xc9M\xf5O\x06b`\xe1\xd1J\xcc\x11\xac\x89lk\x93[\x8b\xbd@\x00\x00\u07d4tE /\ft)z\x00N\xb3rj\xa6\xa8-\xd7\xc0/\xa1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4tK\x03\xbb\xa8X*\xe5I\x8e-\xc2-\x19\x94\x94g\xabS\xfc\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4tL\fw\xba\u007f#i 
\xd1\xe44\xde]\xa3>H\xeb\xf0,\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4tP\xff\u007f\x99\xea\xa9\x11bu\u07ach\xe4(\xdf[\xbc\u0639\x89lk\x93[\x8b\xbd@\x00\x00\u07d4tV\u0172\xc5Cn>W\x10\b\x93?\x18\x05\xcc\xfe4\xe9\xec\x8965\u026d\xc5\u07a0\x00\x00\u07d4tZ\u04eb\xc6\xee\xeb$qh\x9bS\x9ex\x9c\xe2\xb8&\x83\x06\x89=A\x94\xbe\xa0\x11\x92\x80\x00\xe0\x94tZ\xec\xba\xf9\xbb9\xb7Jg\xea\x1c\xe6#\xde6\x84\x81\xba\xa6\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4t\\\xcf-\x81\x9e\u06fd\u07a8\x11{\\I\xed<*\x06n\x93\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4tb\u021c\xaa\x9d\x8dx\x91\xb2T]\xef!otd\u057b!\x89\x05\xea\xedT\xa2\x8b1\x00\x00\u07d4td\x8c\xaa\xc7H\xdd\x13\\\xd9\x1e\xa1L(\xe1\xbdM\u007f\xf6\xae\x89\xa8\r$g~\xfe\xf0\x00\x00\xe0\x94tq\xf7.\xeb0\x06$\xeb(.\xabM\x03r\x00\x00\x00\xe0\x94t\x84\xd2k\xec\xc1\xee\xa8\xc61^\xc3\xee\nE\x01\x17\u0706\xa0\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4t\x86:\xce\xc7]\x03\xd5>\x86\x0ed\x00/,\x16^S\x83w\x8965\u026d\xc5\u07a0\x00\x00\u07d4t\x89\u030a\xbeu\u0364\xef\r\x01\xce\xf2`^G\xed\xa6z\xb1\x89\a?u\u0460\x85\xba\x00\x00\u07d4t\x8c(^\xf1#?\xe4\xd3\x1c\x8f\xb17\x833r\x1c\x12\xe2z\x89lk\x93[\x8b\xbd@\x00\x00\u07d4t\x90\x87\xac\x0fZ\x97\xc6\xfa\xd0!S\x8b\xf1\xd6\u0361\x8e\r\xaa\x8965\u026d\xc5\u07a0\x00\x00\u07d4t\x95\xaex\xc0\xd9\x02a\xe2\x14\x0e\xf2\x061\x04s\x1a`\xd1\xed\x89\x01\xdbPq\x89%!\x00\x00\u07d4t\x9aJv\x8b_#rH\x93\x8a\x12\xc6#\x84{\xd4\xe6\x88\u0709\x03\xe73b\x87\x14 \x00\x00\u07d4t\x9a\xd6\xf2\xb5pk\xbe/h\x9aD\u0136@\xb5\x8e\x96\xb9\x92\x89\x05k\xc7^-c\x10\x00\x00\u07d4t\xa1\u007f\x06K4N\x84\xdbce\u0695\x91\xff\x16(%vC\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4t\xae\xec\x91]\xe0\x1c\u019b,\xb5\xa65o\xee\xa1FX\xc6\u0149\f\x9a\x95\xee)\x86R\x00\x00\u07d4t\xaf\xe5I\x02\xd6\x15x%v\xf8\xba\xac\x13\xac\x97\f\x05\x0fn\x89\t\xa1\xaa\xa3\xa9\xfb\xa7\x00\x00\u07d4t\xb7\xe0\"\x8b\xae\xd6YW\xae\xbbM\x91m3:\xae\x16O\x0e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4t\xbcJ^ E\xf4\xff\x8d\xb1\x84\xcf:\x9b\f\x06Z\xd8\a\u0489lk\x93[\x8b\xbd@\x00\x00\u07d4t\xbc\xe9\xec86-l\x94\u032c&\xd5\xc0\xe1:\x8b;\x1d@\x8965&A\x04B\xf5\x00\x00\u07d4t\xbfzZ\xb5\x92\x93\x14\x9b\\`\xcf6Bc\xe5\xeb\xf1\xaa\r\x89\x06G\f>w\x1e<\x00\x00\xe0\x94t\xc7<\x90R\x8a\x15s6\xf1\xe7\xea b\n\xe5?\xd2G(\x8a\x01\xe6:.S\x8f\x16\xe3\x00\x00\u07d4t\u0464\xd0\xc7RN\x01\x8dN\x06\xed;d\x80\x92\xb5\xb6\xaf,\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\xe0\x94t\xd3f\xb0{/VG}|pw\xaco\xe4\x97\xe0\xebeY\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4t\xd3zQt{\xf8\xb7q\xbf\xbfC\x9493\xd1\x00\xd2\x14\x83\x8965\u026d\xc5\u07a0\x00\x00\u07d4t\xd6q\u065c\xbe\xa1\xabW\x90cu\xb6?\xf4+PE\x1d\x17\x8965\u026d\xc5\u07a0\x00\x00\u07d4t\xeb\xf4BVF\xe6\u03c1\xb1\t\xce{\xf4\xa2\xa6=\x84\x81_\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4t\xed3\xac\xf4?5\xb9\x8c\x920\xb9\xe6d.\xcbS0\x83\x9e\x89$\xf6\xdf\xfbI\x8d(\x00\x00\u07d4t\xef(i\xcb\xe6\b\x85`E\xd8\xc2\x04\x11\x18W\x9f\"6\xea\x89\x03<\xd6E\x91\x95n\x00\x00\u07d4t\xfcZ\x99\xc0\xc5F\x05\x03\xa1;\x05\tE\x9d\xa1\x9c\xe7\u0350\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4u\v\xbb\x8c\x06\xbb\xbf$\bC\xccux.\xe0/\b\xa9tS\x89-C\xf3\xeb\xfa\xfb,\x00\x00\u07d4u\x14\xad\xbd\xc6?H?0M\x8e\x94\xb6\u007f\xf30\x9f\x18\v\x82\x89!\u0120n-\x13Y\x80\x00\u0794u\x17\xf1l(\xd12\xbb@\xe3\xba6\u01ae\xf11\xc4b\xda\x17\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4u\x1a,\xa3Nq\x87\xc1c\u048e6\x18\xdb(\xb1<\x19m&\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94u\x1a\xbc\xb6\xcc\x030Y\x91\x18\x15\xc9o\u04516\n\xb0D-\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4u&\xe4\x82R\x9f\n\x14\xee\u0248q\xdd\xdd\x0er\x1b\f\u0662\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4u)\xf3y{\xb6\xa2\x0f~\xa6I$\x19\xc8L\x86vA\xd8\x1c\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94u*^\xe22a,\xd3\x00_\xb2n[Y}\xe1\x9fwk\xe6\x8a\x01'\xfc\xb8\xaf\xae \xd0\x00\x00\u07d4u,\x9f\xeb\xf4/f\xc4x{\xfa~\xb1|\xf53;\xbaPp\x89j\x99\xf2\xb5O\xddX\x00\x00\u07d4u930F\u07b1\xef\x8e\u07b9\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94u\xc1\xad#\xd2?$\xb3\x84\xd0\xc3\x14\x91w\xe8f\x97a\r!\x8a\x01\\[\xcdl(\x8b\xbd\x00\x00\u07d4u\xc2\xff\xa1\xbe\xf5I\x19\xd2\t\u007fz\x14-.\x14\xf9\xb0JX\x89\x90\xf3XP@2\xa1\x00\x00\u07d4u\xd6|\xe1N\x8d)\xe8\xc2\xff\u3051{\x93\v\x1a\xff\x1a\x87\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4u\xde~\x93R\xe9\v\x13\xa5\x9aXx\xff\xec\u01c3\x1c\xacM\x82\x89\x94\x89#z\u06daP\x00\x00\u07d4u\xf7S\x9d0\x9e\x909\x98\x9e\xfe.\x8b-\xbd\x86Z\r\xf0\x88\x89\x85[[\xa6\\\x84\xf0\x00\x00\u07d4v\b\xf47\xb3\x1f\x18\xbc\vd\u04c1\xae\x86\xfd\x97\x8e\u05f3\x1f\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\xe0\x94v\x0f\xf35N\x0f\u0793\x8d\x0f\xb5\xb8,\xef[\xa1\\=)\x16\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4v\x1an6,\x97\xfb\xbd|Yw\xac\xba-\xa7F\x876_I\x89\t\xf7J\xe1\xf9S\xd0\x00\x00\u07d4v\x1el\xae\xc1\x89\xc20\xa1b\xec\x00e0\x19>g\u03dd\x19\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94v\x1f\x8a:*\U00028f7e\x1d\xa0\t2\x1f\xb2\x97d\xebb\xa1\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4v)\x98\xe1\xd7R'\xfc\xedzp\xbe\x10\x9aL\vN\xd8d\x14\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4v-o0\u06b9\x915\xe4\xec\xa5\x1dRC\xd6\xc8b\x11\x02\u0549\x0fI\x89A\xe6d(\x00\x00\u07d4v3\x1e0yl\xe6d\xb2p\x0e\rASp\x0e\u0706\x97w\x89lk\x93[\x8b\xbd@\x00\x00\u07d4v8\x86\xe33\xc5o\xef\xf8[\xe3\x95\x1a\xb0\xb8\x89\xce&.\x95\x89lk\x93[\x8b\xbd@\x00\x00\u07d4v:|\xba\xb7\rzd\u0427\xe5)\x80\xf6\x81G%\x93I\f\x89 
\x86\xac5\x10R`\x00\x00\u07d4v>\xec\u0c0a\u021e2\xbf\xa4\xbe\xcev\x95\x14\xd8\xcb[\x85\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4v@\xa3\u007f\x80R\x98\x15\x15\xbc\xe0x\u0693\xaf\xa4x\x9bW4\x89lk\x93[\x8b\xbd@\x00\x00\u0794vA\xf7\xd2j\x86\xcd\xdb+\xe10\x81\x81\x0e\x01\xc9\xc8E\x89dI\xe8NG\xa8\xa8\x00\x00\xe0\x94vO\xc4mB\x8bm\xbc\"\x8a\x0f_U\xc9P\x8cw.\xab\x9f\x8a\x05\x81v{\xa6\x18\x9c@\x00\x00\u07d4vPn\xb4\xa7\x80\xc9Q\xc7J\x06\xb0=;\x83b\xf0\x99\x9dq\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94v[\xe2\xe1/b\x9ecI\xb9}!\xb6*\x17\xb7\xc80\xed\xab\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94vb\x81P\xe2\x99[['\x9f\xc8>\r\xd5\xf1\x02\xa6q\xdd\x1c\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4vk7Y\xe8yN\x92m\xacG=\x91:\x8f\xb6\x1a\xd0\xc2\u0249\x04\xb0m\xbb\xb4\x0fJ\x00\x00\u07d4vp\xb0/,<\xf8\xfdOG0\xf38\x1aq\xeaC\x1c3\u01c9\x0e~\xeb\xa3A\vt\x00\x00\u07d4vz\x03eZ\xf3`\x84\x1e\x81\r\x83\xf5\xe6\x1f\xb4\x0fL\xd1\x13\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4vz\u0190y\x1c.#E\x10\x89\xfelp\x83\xfeU\u07b6+\x89,s\xc97t,P\x00\x00\u07d4v\u007f\xd7y}Qi\xa0_sd2\x1c\x19\x84:\x8c4\x8e\x1e\x89\x01\x04\xe7\x04d\xb1X\x00\x00\u0794v\x84o\r\xe0;Zv\x97\x1e\xad)\x8c\xdd\b\x84:K\xc6\u0188\xd7\x1b\x0f\u088e\x00\x00\xe0\x94v\x84\x98\x93N7\xe9\x05\xf1\xd0\xe7{D\xb5t\xbc\xf3\xecJ\xe8\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4v\x8c\xe0\u06a0)\xb7\xde\xd0\"\xe5\xfcWM\x11\xcd\xe3\xec\xb5\x17\x89\x11t\xa5\xcd\xf8\x8b\xc8\x00\x00\xe0\x94v\x93\xbd\xebo\xc8+[\xcar\x13U\"1u\xd4z\bKM\x8a\x04\xa8\x9fT\xef\x01!\xc0\x00\x00\u07d4v\xaa\xf8\xc1\xac\x01/\x87R\xd4\xc0\x9b\xb4f\a\xb6e\x1d\\\xa8\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4v\xab\x87\xddZ\x05\xad\x83\x9aN/\xc8\xc8Z\xa6\xba\x05d\x170\x89lk\x93[\x8b\xbd@\x00\x00\u07d4v\xaf\xc2%\xf4\xfa0}\xe4\x84U+\xbe\x1d\x9d?\x15\aLJ\x89\xa2\x90\xb5\u01ed9h\x00\x00\xe0\x94v\xbe\xca\xe4\xa3\x1d6\xf3\xcbW\u007f*CYO\xb1\xab\xc1\xbb\x96\x8a\x05C\xa9\xce\x0e\x132\xf0\x00\x00\u07d4v\xc2u5\xbc\xb5\x9c\xe1\xfa-\x8c\x91\x9c\xab\xebJk\xba\x01\u0449lk\x93[\x8b\xbd@\x00\x00\u07d4v\xca\"\xbc\xb8y\x9eS'\u012a*}\tI\xa1\xfc\xce_)\x89R\xa0?\"\x8cZ\xe2\x00\x00\u07d4v\xca\u0108\x11\x1aO\u0555\xf5h\xae:\x85\x87p\xfc\x91]_\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94v\u02dc\x8bi\xf48vu\u0102S\xe24\xcb~\rt\xa4&\x8a\x01\x90\xf4H.\xb9\x1d\xae\x00\x00\u07d4v\xf8:\xc3\xda0\xf7\t&(\xc73\x9f \x8b\xfc\x14,\xb1\ue25a\x18\xff\xe7B}d\x00\x00\xe0\x94v\xf9\xad=\x9b\xbd\x04\xae\x05\\\x14w\xc0\xc3^u\x92\xcb* \x8a\b\x83?\x11\xe3E\x8f \x00\x00\u07d4v\xff\xc1W\xadk\xf8\xd5m\x9a\x1a\u007f\u077c\x0f\xea\x01\n\xab\xf4\x8965\u026d\xc5\u07a0\x00\x00\u07d4w\x02\x8e@\x9c\xc4:;\xd3=!\xa9\xfcS\xec`n\x94\x91\x0e\x89\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4w\f/\xb2\u0128\x17S\xac\x01\x82\xeaF\x0e\xc0\x9c\x90\xa5\x16\xf8\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4w\r\x98\xd3\x1bCS\xfc\xee\xe8V\fL\u03c0>\x88\xc0\xc4\xe0\x89 \x86\xac5\x10R`\x00\x00\xe0\x94w\x13\xab\x807A\x1c\t\xbah\u007fo\x93d\xf0\xd3#\x9f\xac(\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4w\x15\a\xae\xeej%]\xc2\u035d\xf5QT\x06-\b\x97\xb2\x97\x89\x12\x1e\xa6\x8c\x11NQ\x00\x00\u07d4w\x19\x88\x87\x95\xadtY$\xc7W`\u0771\x82}\xff\xd8\u0368\x89lkLM\xa6\u077e\x00\x00\u07d4w'\xaf\x10\x1f\n\xab\xa4\xd2:\x1c\xaf\xe1|n\xb5\u06b1\xc6\u0709lk\x93[\x8b\xbd@\x00\x00\u07d4w,)\u007f\n\u0454H.\xe8\xc3\xf06\xbd\xeb\x01\xc2\x01\xd5\u0309\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94w0o\xfe.J\x8f<\xa8&\xc1\xa2I\xf7!-\xa4:\xef\xfd\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4w1A\x12}\x8c\xf3\x18\xae\xbf\x886Z\xdd=U'\xd8[j\x8966\u05ef^\u024e\x00\x00\u07d4wF\xb6\xc6i\x9c\x8f4\xca'h\xa8 
\xf1\xff\xa4\xc2\a\xfe\x05\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4wQ\xf3c\xa0\xa7\xfd\x053\x19\b\t\u076f\x93@\xd8\xd1\x12\x91\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4wW\xa4\xb9\xcc=\x02G\u032a\xeb\x99\t\xa0\xe5n\x1d\xd6\xdc\u0089\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4w\\\x10\xc9>\r\xb7 [&CE\x823\xc6O\xc3?\xd7[\x89lk\x93[\x8b\xbd@\x00\x00\u07d4wa~\xbcK\xeb\xc5\xf5\xdd\xeb\x1bzp\xcd\xebj\xe2\xff\xa0$\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4wiC\xff\xb2\xef\\\xdd5\xb8<(\xbc\x04k\xd4\xf4gp\x98\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94wp\x1e,I=\xa4|\x1bX\xf4!\xb5I]\xeeE\xbe\xa3\x9b\x8a\x01H\xf6I\xcfaB\xa5\x80\x00\u07d4wy\x8f \x12W\xb9\xc3R\x04\x95pW\xb5Ft\xae\xfaQ\u07c9\b\x13\xcaV\x90m4\x00\x00\u07d4w\x8cC\xd1\x1a\xfe;Xo\xf3t\x19-\x96\xa7\xf2=+\x9b\u007f\x89\x8b\xb4\xfc\xfa;}k\x80\x00\u07d4w\x8cy\xf4\xde\x19S\xeb\u0398\xfe\x80\x06\xd5:\x81\xfbQ@\x12\x8963\x03\"\xd5#\x8c\x00\x00\u07d4w\x92t\xbf\x18\x03\xa36\xe4\u04f0\r\u0753\xf2\xd4\xf5\xf4\xa6.\x8965\u026d\xc5\u07a0\x00\x00\u07d4w\xa1q\"\xfa1\xb9\x8f\x17\x11\xd3*\x99\xf0>\xc3&\xf3=\b\x89\\(=A\x03\x94\x10\x00\x00\u07d4w\xa3I\a\xf3\x05\xa5L\x85\xdb\t\xc3c\xfd\xe3\xc4~j\xe2\x1f\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4w\xa7i\xfa\xfd\xec\xf4\xa68v-[\xa3\x96\x9d\xf61 \xa4\x1d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4w\xbekd\xd7\xc73\xa46\xad\xec^\x14\xbf\x9a\xd7@+\x1bF\x8965\u026d\xc5\u07a0\x00\x00\u07d4w\xbf\xe9<\u0367P\x84~A\xa1\xaf\xfe\xe6\xb2\u0696\xe7!N\x89\x10CV\x1a\x88)0\x00\x00\u07d4w\u0126\x97\xe6\x03\xd4+\x12\x05l\xbb\xa7a\xe7\xf5\x1d\x04C\xf5\x89$\xdc\xe5M4\xa1\xa0\x00\x00\u07d4w\xcc\x02\xf6#\xa9\u03d8S\t\x97\xeag\xd9\\;I\x18Y\xae\x89Is\x03\xc3n\xa0\xc2\x00\x00\u07d4w\xd4?\xa7\xb4\x81\xdb\xf3\xdbS\f\xfb\xf5\xfd\xce\xd0\xe6W\x181\x89lk\x93[\x8b\xbd@\x00\x00\u07d4w\xda^lr\xfb6\xbc\xe1\xd9y\x8f{\xcd\xf1\u044fE\x9c.\x89\x016\x95\xbbl\xf9>\x00\x00\u07d4w\xf4\xe3\xbd\xf0V\x88<\xc8r\x80\xdb\xe6@\xa1\x8a\r\x02\xa2\a\x89\n\x81\x99:+\xfb[\x00\x00\u0794w\xf6\t\u0287 
\xa0#&,U\xc4o-&\xfb90\xaci\x88\xf0\x15\xf2W6B\x00\x00\u07d4w\xf8\x1b\x1b&\xfc\x84\xd6\u0797\uf2df\xbdr\xa310\xccJ\x8965\u026d\xc5\u07a0\x00\x00\u07d4x\x19\xb0E\x8e1N+S\xbf\xe0\f8I_\u0539\xfd\xf8\u0589\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4x\x1b\x15\x01dz.\x06\xc0\xedC\xff\x19\u007f\xcc\xec5\xe1p\v\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4x/R\xf0\xa6v\xc7w\x16\xd5t\xc8\x1e\xc4hO\x9a\x02\n\x97\x89.\x14\xe2\x06\xb70\xad\x80\x00\u07d4x5]\xf0\xa20\xf8=\x03,p1TAM\xe3\xee\u06b5W\x89lk\x93[\x8b\xbd@\x00\x00\u07d4x6\xf7\xefk\u01fd\x0f\xf3\xac\xafD\x9c\x84\xddk\x1e,\x93\x9f\x89\xe0\x8d\xe7\xa9,\xd9|\x00\x00\u07d4x7\xfc\xb8v\xda\x00\xd1\xeb;\x88\xfe\xb3\xdf?\xa4\x04/\xac\x82\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4x>\uc2a5\xda\xc7{.f#\xedQ\x98\xa41\xab\xba\xee\a\x89\x17\xda:\x04\u01f3\xe0\x00\x00\u07d4x\\\x8e\xa7t\xd70D\xa74\xfay\n\x1b\x1et>w\xed|\x89\f\xf1Rd\f\\\x83\x00\x00\u07d4x`\xa3\xde8\xdf8*\xe4\xa4\xdc\xe1\x8c\f\a\xb9\x8b\xce=\xfa\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94xcCq\xe1s\x04\xcb\xf39\xb1E*L\xe48\xdcvL\u038a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4xd\u0719\x9f\xe4\xf8\xe0\x03\xc0\xf4=\xec\u00da\xae\x15\"\xdc\x0f\x89\x05\x1e\x10+\xd8\xec\xe0\x00\x00\u07d4xtj\x95\x8d\xce\xd4\xc7d\xf8vP\x8cAJh4,\uce49\x02\xbe7O\xe8\xe2\xc4\x00\x00\xe0\x94x}1?\xd3k\x05>\xee\xae\xdb\xcet\xb9\xfb\x06x32\x89\x8a\x05\xc0X\xb7\x84'\x19`\x00\x00\u07d4x\x85\x9c[T\x8bp\r\x92\x84\xce\xe4\xb6c=GJ\x8a\x04{\x92\xc4\x15B$-\n\b\xc7\x0f\x99\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4x\u03c36\xb3(\xdb=\x87\x81:G+\x9e\x89\xb7^\f\xf3\xbc\x8965\u026d\xc5\u07a0\x00\x00\u07d4x\xd4\xf8\xc7\x1c\x1eh\xa6\x9a\x98\xf5/\xcbE\u068a\xf5n\xa1\xa0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4x\xdf&\x81\xd6\xd6\x02\xe2!B\xd5A\x16\u07a1]EIW\xaa\x89\x10'\x94\xad \xdah\x00\x00\u07d4x\xe0\x8b\xc53A<&\u2473\x14?\xfa|\u026f\xb9{x\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4x\xe8?\x80\xb3g\x8cz\nN>\x8c\x84\xdc\xcd\xe0dBbw\x89a\t=|,m8\x00\x00\u07d4x\xf5\xc7G\x85\xc5f\x8a\x83\x80r\x04\x8b\xf8\xb4SYM\u06ab\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4y\x0f\x91\xbd]\x1c\\\xc4s\x9a\xe9\x13\x00\u06c9\xe1\xc10<\x93\x89lk\x93[\x8b\xbd@\x00\x00\u07d4y\x17\u5f42\xa9y\x0f\xd6P\xd0C\xcd\xd90\xf7y\x963\u06c9\xd8\xd4`,&\xbfl\x00\x00\u07d4y\x19\xe7b\u007f\x9b}T\xea;\x14\xbbM\xd4d\x9fO9\xde\xe0\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4y\x1f`@\xb4\xe3\xe5\r\xcf5S\xf1\x82\u0357\xa9\x060\xb7]\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4y0\xc2\xd9\xcb\xfa\x87\xf5\x10\xf8\xf9\x87w\xff\x8a\x84H\xcaV)\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4yE)\u041d\x01rq5\x970\x02pu\xb8z\xd8=\xaen\x89\x10\xce\x1d=\x8c\xb3\x18\x00\x00\u07d4yKQ\u00deS\xd9\xe7b\xb0a;\x82\x9aD\xb4r\xf4\xff\xf3\x89$5\xe0dxA\u0300\x00\xe0\x94yU\x1c\xed\xe3v\xf7G\xe3ql\x8dy@\rvm.\x01\x95\x8a\t\xcb7\xaf\xa4\xffxh\x00\x00\u07d4y^\xbc&&\xfc9\xb0\xc8b\x94\xe0\xe87\xdc\xf5#U0\x90\x8965\u026d\xc5\u07a0\x00\x00\u07d4yn\xbb\xf4\x9b>6\xd6v\x94\xady\xf8\xff6vz\xc6\xfa\xb0\x89\x03K\xc4\xfd\xde'\xc0\x00\x00\u07d4yo\x87\xbaaz)0\xb1g\v\xe9.\xd1(\x1f\xb0\xb3F\xe1\x89\x06\xf5\xe8o\xb5((\x00\x00\u07d4yt'\xe3\xdb\xf0\xfe\xaez%\x06\xf1-\xf1\xdc@2n\x85\x05\x8965\u026d\xc5\u07a0\x00\x00\u07d4yu\x10\xe3\x86\xf5c\x93\xce\xd8\xf4w7\x8aDLHO}\xad\x8965\u026d\xc5\u07a0\x00\x00\u07d4y{\xb7\xf1W\xd9\xfe\xaa\x17\xf7m\xa4\xf7\x04\xb7M\xc1\x03\x83A\x89\xb5\x0f\u03ef\xeb\xec\xb0\x00\x00\u07d4y\x88\x90\x131\xe3\x87\xf7\x13\xfa\u03b9\x00\\\xb9\xb6Q6\xeb\x14\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4y\x89\u041f8&\xc3\u5bccu*\x81\x15r:\x84\xd8\tp\x89\x16\x86\xf8aL\xf0\xad\x00\x00\xe0\x94y\x95\xbd\x8c\xe2\xe0\xc6{\xf1\u01e51\xd4w\xbc\xa1\xb2\xb9ua\x8a\x01BH\xd6\x17\x82\x9e\xce\x00\x00\u07d4y\xae\xb3Ef\xb9t\xc3ZX\x81\xde\xc0 \x92}\xa7\xdf]%\x89lk\x93[\x8b\xbd@\x00\x00\u07d4y\xb1 \xeb\x88\x06s#!(\x8fgZ'\xa9\"_\x1c\xd2\ub245\xa0\xbf7\xde\xc9\xe4\x00\x00\u07d4y\xb4\x8d-a7\u00c5Ma\x1c\x01\xeaBBz\x0fY{\xb7\x89\nZ\xa8P\t\xe3\x9c\x00\x00\u07d4y\xb8\xaa\xd8y\xdd0V~\x87x\xd2\xd21\xc8\xf3z\xb8sN\x89lk\x93[\x8b\xbd@\x00\x00\u07d4y\xbf/{n2\x8a\xaf&\xe0\xbb\t?\xa2-\xa2\x9e\xf2\xf4q\x89a\t=|,m8\x00\x00\u07d4y\xc10\xc7b\xb8v[\x19\u04ab\u0260\x83\xab\x8f:\xady@\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4y\xc1\xbe\x19q\x1fs\xbe\xe4\xe61j\xe7T\x94Y\xaa\u03a2\xe0\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4y\xc6\x00/\x84R\xca\x15\u007f\x13\x17\xe8\n/\xaf$GUY\xb7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4y\xca\xc6IO\x11\xef'\x98t\x8c\xb52\x85\xbd\x8e\"\xf9|\u0689lk\x93[\x8b\xbd@\x00\x00\u07d4y\u03e9x\n\xe6\xd8{,1\x88?\t'i\x86\u021ag5\x8965\u026d\xc5\u07a0\x00\x00\u07d4y\u06e2VG-\xb4\xe0X\xf2\xe4\xcd\xc3\xeaN\x8aBw83\x89O%\x91\xf8\x96\xa6P\x00\x00\u07d4y\xed\x10\xcf\x1fm\xb4\x82\x06\xb5\t\x19\xb9\xb6\x97\b\x1f\xbd\xaa\xf3\x89lk\x93[\x8b\xbd@\x00\x00\u0794y\xf0\x8e\x01\xce\t\x88\xe6<\u007f\x8f)\b\xfa\xdeC\xc7\xf9\xf5\u0248\xfc\x93c\x92\x80\x1c\x00\x00\u07d4y\xfdmH1Pf\xc2\x04\xf9e\x18i\xc1\tl\x14\xfc\x97\x81\x89lk\x93[\x8b\xbd@\x00\x00\u0794y\xff\xb4\xac\x13\x81*\vx\u0123{\x82u\">\x17k\xfd\xa5\x88\xf0\x15\xf2W6B\x00\x00\u07d4z\x05\x89\xb1C\xa8\xe5\xe1\a\u026cf\xa9\xf9\xf8Yz\xb3\u7ac9Q\xe92\xd7n\x8f{\x00\x00\u07d4z\nx\xa9\xcc9?\x91\xc3\xd9\xe3\x9ak\x8c\x06\x9f\a^k\xf5\x89Hz\x9a0E9D\x00\x00\u07d4z\x13p\xa7B\xec&\x87\xe7a\xa1\x9a\u0167\x942\x9e\xe6t\x04\x89\xa2\xa12ga\xe2\x92\x00\x00\xe0\x94z-\xfcw\x0e$6\x811\xb7\x84w\x95\xf2\x03\xf3\xd5\r[V\x8a\x02i\xfe\xc7\xf06\x1d 
\x00\x00\u07d4z3\x83N\x85\x83s>-R\xae\xadX\x9b\u046f\xfb\x1d\xd2V\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94z6\xab\xa5\xc3\x1e\xa0\xca~'{\xaa2\xecF\u0393\xcfu\x06\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94z8\x11\"\xba\xday\x1az\xb1\xf6\x03}\xac\x80C'S\xba\xad\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94zH\xd8w\xb6:\x8f\x8f\x93\x83\xe9\xd0\x1eS\xe8\fR\x8e\x95_\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4zO\x9b\x85\x06\x90\xc7\xc9F\x00\xdb\xee\f\xa4\xb0\xa4\x11\xe9\xc2!\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4zc\x86\x9f\xc7g\xa4\u01b1\xcd\x0e\x06I\xf3cL\xb1!\xd2K\x89\x043\x87Oc,\xc6\x00\x00\u07d4zg\xdd\x04:PO\xc2\xf2\xfcq\x94\xe9\xbe\xcfHL\xec\xb1\xfb\x89\r\x8drkqw\xa8\x00\x00\xe0\x94zk&\xf48\u0663RD\x91U\xb8\x87l\xbd\x17\xc9\u065bd\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4zmx\x1cw\u013a\x1f\xca\xdfhsA\xc1\xe3\x17\x99\xe9='\x89\x0e\u0683\x8cI)\b\x00\x00\u07d4zph\xe1\xc37\\\x0eY\x9d\xb1\xfb\xe6\xb2\xea#\xb8\xf4\a\u0489lk\x93[\x8b\xbd@\x00\x00\u07d4zt\xce\xe4\xfa\x0fcp\xa7\x89O\x11l\xd0\f\x11G\xb8>Y\x89+^:\xf1k\x18\x80\x00\x00\u07d4zy\xe3\x0f\xf0W\xf7\n=\x01\x91\xf7\xf5?v\x157\xaf}\xff\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94zzO\x80sW\xa4\xbb\xe6\x8e\x1a\xa8\x0692\x10\xc4\x11\u0333\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4z\x85c\x86y\x01 o?+\xf0\xfa>\x1c\x81\t\u02bc\u0345\x89\amA\xc6$\x94\x84\x00\x00\xe0\x94z\x87\x97i\n\xb7{Tp\xbf|\f\x1b\xbaa%\b\xe1\xac}\x8a\x01\xe0\x92\x96\xc37\x8d\xe4\x00\x00\u07d4z\x8c\x89\xc0\x14P\x9dV\u05f6\x810f\x8f\xf6\xa3\xec\xecsp\x89\x10CV\x1a\x88)0\x00\x00\xe0\x94z\x94\xb1\x99\x92\u03b8\xcec\xbc\x92\xeeKZ\xde\xd1\fM\x97%\x8a\x03\x8d\x1a\x80d\xbbd\xc8\x00\x00\u07d4z\xa7\x9a\xc0C\x16\u030d\b\xf2\x00e\xba\xa6\xd4\x14(\x97\xd5N\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4z\xadM\xbc\u04ec\xf9\x97\u07d3XiV\xf7+d\u062d\x94\xee\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94z\xb2V\xb2\x04\x80\n\xf2\x017\xfa\xbc\xc9\x16\xa22Xu%\x01\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4z\xbaV\xf6:H\xbc\b\x17\u05b9p9\x03\x9az\xd6/\xae.\x89 
\x86\xac5\x10R`\x00\x00\xe0\x94z\xbb\x10\xf5\xbd\x9b\xc3;\x8e\xc1\xa8-d\xb5[k\x18wuA\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4z\u010d@\xc6d\u031am\x89\xf1\xc5\xf5\xc8\n\x1cp\xe7D\u6263\x10b\xbe\xee\xd7\x00\x00\x00\u07d4z\u014fo\xfcO\x81\a\xaen07\x8eN\x9f\x99\xc5\u007f\xbb$\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4z\xd3\xf3\aao\x19\u0731C\xe6DM\xab\x9c<3a\x1fR\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4z\xd8,\xae\xa1\xa8\xb4\xed\x051\x9b\x9c\x98p\x17<\x81N\x06\xee\x89!d\xb7\xa0J\u0220\x00\x00\u07d4z\xde]f\xb9D\xbb\x86\f\x0e\xfd\xc8bv\u054fFS\xf7\x11\x89lk\x93[\x8b\xbd@\x00\x00\u07d4z\xdf\xed\xb0m\x91\xf3\xccs\x90E\v\x85U\x02p\x88<{\xb7\x89\x11x\xfa@Q]\xb4\x00\x00\u07d4z\xe1\xc1\x9eS\xc7\x1c\xeeLs\xfa\xe2\xd7\xfcs\xbf\x9a\xb5\u348965\u026d\xc5\u07a0\x00\x00\u07d4z\xe6Y\xeb;\xc4hR\xfa\x86\xfa\xc4\xe2\x1cv\x8dP8\x89E\x89\x0f\x81\f\x1c\xb5\x01\xb8\x00\x00\u07d4z\xea%\xd4+&\x12(n\x99\xc56\x97\u01bcA\x00\xe2\u06ff\x89lk\x93[\x8b\xbd@\x00\x00\u07d4z\xef{U\x1f\v\x9cF\xe7U\xc0\xf3\x8e[:s\xfe\x11\x99\xf5\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4{\v1\xffn$t^\xad\x8e\u067b\x85\xfc\v\xf2\xfe\x1dU\u0509+^:\xf1k\x18\x80\x00\x00\xe0\x94{\x0f\xea\x11v\xd5!Y3:\x14<)IC\xda6\xbb\u0774\x8a\x01\xfc}\xa6N\xa1L\x10\x00\x00\u07d4{\x11g<\xc0\x19bk)\f\xbd\xce&\x04o~m\x14\x1e!\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4{\x12!b\xc9\x13\xe7\x14l\xad\v~\xd3z\xff\xc9*\v\xf2\u007f\x89Q\xaf\tk#\x01\u0440\x00\u07d4{\x1b\xf5:\x9c\xbe\x83\xa7\u07a44W\x9f\xe7*\xac\x8d*\f\u0409\n\xd4\xc81j\v\f\x00\x00\u07d4{\x1d\xaf\x14\x89\x1b\x8a\x1e\x1b\xd4)\u0633k\x9aJ\xa1\u066f\xbf\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4{\x1f\xe1\xabM\xfd\x00\x88\xcd\xd7\xf6\x01c\xefY\xec*\xee\x06\xf5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4{%\xbb\x9c\xa8\xe7\x02!~\x933\"RP\xe5<6\x80MH\x89e\xea=\xb7UF`\x00\x00\u07d4{'\xd0\xd1\xf3\xdd<\x14\x02\x94\xd0H\x8bx>\xbf@\x15'}\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94{@\a\xc4^ZW?\u06f6\xf8\xbdtk\xf9J\xd0J<&\x8a\x038!\xf5\x13]%\x9a\x00\x00\u07d4{C\xc7\xee\xa8\xd6#U\xb0\xa8\xa8\x1d\xa0\x81\xc6Dk3\xe9\xe0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4{M*8&\x90i\xc1\x85Ww\rY\x1d$\xc5\x12\x1f^\x83\x89%\xf2s\x93=\xb5p\x00\x00\xe0\x94{au\xec\x9b\xef\xc78$\x955\xdd\xde4h\x8c\xd3n\xdf%\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94{f\x12hy\x84M\xfa4\xfee\xc9\xf2\x88\x11\u007f\xef\xb4I\xad\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4{j\x84q\x8d\xd8nc3\x84)\xac\x81\x1d|\x8a\x86\x0f!\xf1\x89a\t=|,m8\x00\x00\xe0\x94{q,z\xf1\x16v\x00jf\xd2\xfc\\\x1a\xb4\xc4y\xce`7\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4{s$-u\u029a\xd5X\xd6P)\r\xf1v\x92\xd5L\u0638\x89lnY\xe6|xT\x00\x00\u07d4{v\x1f\xeb\u007f\u03e7\xde\xd1\xf0\xeb\x05\x8fJ`\v\xf3\xa7\b\u02c9\xf9]\xd2\xec'\xcc\xe0\x00\x00\xe0\x94{\x82|\xae\u007f\xf4t\t\x18\xf2\xe00\xab&\u02d8\xc4\xf4l\xf5\x8a\x01\x94hL\v9\xde\x10\x00\x00\xe0\x94{\x892\x86B~r\xdb!\x9a!\xfcM\xcd_\xbfY(<1\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4{\x92&\xd4o\xe7Q\x94\v\xc4\x16\xa7\x98\xb6\x9c\xcf\r\xfa\xb6g\x89\u3bb5sr@\xa0\x00\x00\u07d4{\x98\xe2<\xb9k\xee\xe8\n\x16\x80i\ube8f \xed\xd5\\\u03c9\v\xa0\xc9\x15\x87\xc1J\x00\x00\u07d4{\xb0\xfd\xf5\xa6c\xb5\xfb\xa2\x8d\x9c\x90*\xf0\xc8\x11\xe2R\xf2\x98\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4{\xb9W\x1f9K\v\x1a\x8e\xbaVd\xe9\u0635\xe8@g{\xea\x89\x01\x11du\x9f\xfb2\x00\x00\xe0\x94{\xb9\x84\xc6\u06f9\xe2y\x96j\xfa\xfd\xa5\x9c\x01\xd0&'\xc8\x04\x8a\x01\xb4d1\x1dE\xa6\x88\x00\x00\u07d4{\xbb\xec^p\xbd\xea\u063b2\xb4(\x05\x98\x8e\x96H\xc0\xaa\x97\x8966\u05ef^\u024e\x00\x00\u07d4{\xca\x1d\xa6\xc8\nf\xba\xa5\xdbZ\u0245A\u013e'kD}\x89$\xcf\x04\x96\x80\xfa<\x00\x00\u07d4{\u0772\xee\x98\xde\x19\xeeL\x91\xf6a\xee\x8eg\xa9\x1d\x05K\x97\x8965\u026d\xc5\u07a0\x00\x00\u0794{\xe2\xf7h\f\x80-\xa6\x15L\x92\xc0\x19J\xe72Qzqi\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4{\xe7\xf2Eiq\x88;\x9a\x8d\xbeL\x91\xde\xc0\x8a\xc3N\x88b\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4{\xe8\u0334\xf1\x1bf\xcan\x1dW\xc0\xb59b!\xa3\x1b\xa5:\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94{\xeb\x81\xfb/^\x91Rk*\xc9y^v\u019b\xcf\xf0K\xc0\x8a\x0e\xb2.yO\n\x8d`\x00\x00\u07d4|\b\x83\x05L-\x02\xbcz\x85+\x1f\x86\xc4'w\xd0\xd5\xc8V\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4|\x0f^\a C\xc9\xeet\x02B\x19~x\xccK\x98\xcd\xf9`\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4|\x1d\xf2JO\u007f\xb2\u01f4r\xe0\xbb\x00l\xb2}\xcd\x16AV\x8965\u026d\xc5\u07a0\x00\x00\u07d4|)\xd4}W\xa73\xf5k\x9b!pc\xb5\x13\xdc;1Y#\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4|+\x96\x03\x88JO.FN\u03b9}\x17\x93\x8d\x82\x8b\xc0,\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4|8,\x02\x96a.N\x97\xe4@\xe0-8q';U\xf5;\x89\n\xb6@9\x12\x010\x00\x00\u07d4|>\xb7\x13\xc4\xc9\xe08\x1c\xd8\x15L|\x9a}\xb8d\\\xde\x17\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4|D\x01\xae\x98\xf1.\xf6\xde9\xae$\u03df\xc5\x1f\x80\xeb\xa1k\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4|E\xf0\xf8D*V\xdb\u04dd\xbf\x15\x99\x95A\\R\xedG\x9b\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94|S-\xb9\xe0\xc0l&\xfd@\xac\xc5j\xc5\\\x1e\xe9-<:\x8a?\x87\bW\xa3\xe0\xe3\x80\x00\x00\u07d4|`\xa0_zJ_\x8c\xf2xC\x916.uZ\x83A\xefY\x89f\x94\xf0\x18*7\xae\x00\x00\u07d4|`\xe5\x1f\v\xe2(\xe4\xd5o\xdd)\x92\xc8\x14\xdaw@\u01bc\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4|i$\xd0|>\xf5\x89\x19f\xfe\nxV\xc8{\xef\x9d 4\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94|\x8b\xb6Zo\xbbI\xbdA3\x96\xa9\xd7\xe3\x10S\xbb\xb3z\xa9\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94|\x9a\x11\f\xb1\x1f%\x98\xb2\xb2\x0e,\xa4\x002^A\xe9\xdb3\x8a\x05\x81v{\xa6\x18\x9c@\x00\x00\u07d4|\xbc\xa8\x8f\xcaj\x00`\xb9`\x98\\\x9a\xa1\xb0%4\xdc\"\b\x89\x19\x12z\x13\x91\xea*\x00\x00\u07d4|\xbe\xb9\x992\xe9~n\x02\x05\x8c\xfcb\u0432k\xc7\u0325+\x89lk\x93[\x8b\xbd@\x00\x00\u07d4|\xc2Jj\x95\x8c \xc7\xd1$\x96`\xf7Xb&\x95\v\r\x9a\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4|\xd2\x0e\u0335\x18\xb6\f\xab\t[r\x0fW\x15p\u02aaD~\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4|\xd5\xd8\x1e\xab7\xe1\x1ebv\xa3\xa1\t\x12Q`~\r~8\x89\x03hM^\xf9\x81\xf4\x00\x00\u07d4|\xdft!9E\x95=\xb3\x9a\xd0\xe8\xa9x\x1a\xddy.M\x1d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4|\xe4hdF\U000547be\xd6r\x15\xeb\rZ\x1d\xd7,\x11\xb8\x89x9\xd3!\xb8\x1a\xb8\x00\x00\u07d4|\xefMC\xaaA\u007f\x9e\xf8\xb7\x87\xf8\xb9\x9dS\xf1\xfe\xa1\ue209g\x8a\x93 
b\xe4\x18\x00\x00\u07d4}\x03P\xe4\v3\x8d\xdasfa\x87+\xe3?\x1f\x97R\xd7U\x89\x02\xb4\xf5\xa6\U00051500\x00\xe0\x94}\x04\xd2\xed\xc0X\xa1\xaf\xc7a\xd9\u025a\xe4\xfc\\\x85\xd4\u0226\x8aB\xa9\xc4g\\\x94g\xd0\x00\x00\u07d4}\v%^\xfbW\xe1\x0fp\b\xaa\"\xd4\x0e\x97R\xdf\xcf\x03x\x89\x01\x9f\x8euY\x92L\x00\x00\xe0\x94}\x13\xd6pX\x84\xab!W\u074d\xccpF\xca\xf5\x8e\xe9K\xe4\x8a\x1d\r\xa0|\xbb>\xe9\xc0\x00\x00\u07d4}'>c~\xf1\xea\u0101\x11\x94\x13\xb9\x1c\x98\x9d\xc5\xea\xc1\"\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4}*R\xa7\xcf\f\x846\xa8\xe0\a\x97kl&\xb7\"\x9d\x1e\x15\x89\x17\xbf\x06\xb3*$\x1c\x00\x00\u07d4}4\x805i\xe0\v\u05b5\x9f\xff\b\x1d\xfa\\\n\xb4\x19zb\x89\\\xd8|\xb7\xb9\xfb\x86\x00\x00\u07d4}4\xffY\xae\x84\nt\x13\u01baL[\xb2\xba,u\xea\xb0\x18\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4}9(R\xf3\xab\xd9/\xf4\xbb[\xb2l\xb6\bt\xf2\xbeg\x95\x8966\xc2^f\xec\xe7\x00\x00\u07d4}DRg\u015a\xb8\u04a2\xd9\xe7\t\x99\x0e\th%\x80\u011f\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94}U\x13\x97\xf7\x9a)\x88\xb0d\xaf\xd0\xef\xeb\xee\x80,w!\xbc\x8a\bW\xe0\xd6\xf1\xdav\xa0\x00\x00\u07d4}Z\xa3?\xc1KQ\x84\x1a\x06\x90n\xdb+\xb4\x9c*\x11ri\x89\x10D\x00\xa2G\x0eh\x00\x00\xe0\x94}]/s\x94\x9d\xad\xda\bV\xb2\x06\x98\x9d\xf0\a\x8dQ\xa1\xe5\x8a\x02\xc4:H\x1d\xf0M\x01wb\xed\xcb\\\xaab\x9bZ\x89\x02\"\xc8\xeb?\xf6d\x00\x00\u07d4~\x8f\x96\xcc)\xf5{\tu\x12\f\xb5\x93\xb7\u0743=`kS\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4~\x97*\x8a|*D\xc9;!Cl8\xd2\x1b\x92R\xc3E\xfe\x89a\t=|,m8\x00\x00\u07d4~\x99\u07fe\x98\x9d;\xa5)\u0457Q\xb7\xf41\u007f\x89S\xa3\xe2\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4~\xa0\xf9n\xe0\xa5s\xa30\xb5h\x97v\x1f=L\x010\xa8\xe3\x89Hz\x9a0E9D\x00\x00\u0794~\xa7\x91\xeb\xab\x04E\xa0\x0e\xfd\xfcNJ\x8e\x9a~ue\x13m\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4~\xab\xa05\xe2\xaf7\x93\xfdtgK\x10%@\xcf\x19\n\u0779\x89E\x02l\x83[`D\x00\x00\xe0\x94~\xb4\xb0\x18\\\x92\xb6C\x9a\b\xe72!h\xcb5<\x8awJ\x8a\x02'\x19l\xa0I\x83\xca\x00\x00\xe0\x94~\xbd\x95\xe9\xc4p\xf7(5\x83\xdcn\x9d,M\xce\v\ua3c4\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4~\u0425\xa8G\xbe\xf9\xa9\xda|\xba\x1dd\x11\xf5\xc3\x161&\x19\x89\x02(\xeb7\xe8u\x1d\x00\x00\u07d4~\xda\xfb\xa8\x98K\xafc\x1a\x82\vk\x92\xbb\xc2\xc56U\xf6\xbd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4~\xdb\x02\xc6\x1a\"r\x87a\x1a\xd9Pici\xccNdzh\x89\x0e\u0683\x8cI)\b\x00\x00\u07d4~\xe5\u0280]\xce#\xaf\x89\xc2\xd4D\xe7\xe4\af\xc5Lt\x04\x89\r\v\xd4\x12\xed\xbd\x82\x00\x00\xe0\x94~\xe6\x04\u01e9\xdc)\t\xce2\x1d\u6e72OWgWuU\x8a\x01+\xf9\u01d8\\\xf6-\x80\x00\u07d4~\xf1o\xd8\xd1[7\x8a\x0f\xba0k\x8d\x03\u0758\xfc\x92a\x9f\x89%\xf2s\x93=\xb5p\x00\x00\u07d4~\xf9\x8bR\xbe\xe9S\xbe\xf9\x92\xf3\x05\xfd\xa0'\xf8\x91\x1cXQ\x89\x1b\xe7\" i\x96\xbc\x80\x00\u07d4~\xfc\x90vj\x00\xbcR7,\xac\x97\xfa\xbd\x8a<\x83\x1f\x8e\u0349\b\x90\xb0\xc2\xe1O\xb8\x00\x00\u07d4~\xfe\xc0\xc6%<\xaf9\u007fq(|\x1c\a\xf6\xc9X+[\x86\x89\x1a,\xbc\xb8O0\u0540\x00\u07d4\u007f\x01\xdc|7G\xca`\x8f\x98=\xfc\x8c\x9b9\xe7U\xa3\xb9\x14\x89\v8l\xad_zZ\x00\x00\u07d4\u007f\x06b\xb4\x10)\x8c\x99\xf3\x11\u04e1EJ\x1e\xed\xba/\xeav\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\u007f\x06\u021dY\x80\u007f\xa6\v\xc6\x016\xfc\xf8\x14\u02ef%C\xbd\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u007f\v\x90\xa1\xfd\u050f'\xb2h\xfe\xb3\x83\x82\xe5]\xdbP\xef\x0f\x892\xf5\x1e\u06ea\xa30\x00\x00\u07d4\u007f\x0e\xc3\u06c0F\x92\xd4\xd1\xea2E6Z\xab\x05\x90\a[\u0109\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u007f\x0f\x04\xfc\xf3zS\xa4\xe2N\xden\x93\x10Nx\xbe\x1d<\x9e\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\u007f\x13\xd7`I\x8dq\x93\xcahY\xbc\x95\xc9\x018d#\xd7l\x8a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4\u007f\x15\n\xfb\x1aw\u00b4Y(\xc2h\xc1\u9f74d\x1dG\u0609lk\x93[\x8b\xbd@\x00\x00\u07d4\u007f\x16\x19\x98\x8f7\x15\xe9O\xf1\xd2S&-\xc5X\x1d\xb3\xde\x1c\x890\xca\x02O\x98{\x90\x00\x00\u07d4\u007f\x1c\x81\xee\x16\x97\xfc\x14K|\v\xe5I;V\x15\xae\u007f\xdd\u0289\x1b\x1d\xaba\u04ead\x00\x00\u07d4\u007f#\x82\xff\xd8\xf89VFy7\xf9\xbar7F#\xf1\x1b8\x89 \x86\xac5\x10R`\x00\x00\u07d4\u007f7\t9\x1f?\xbe\xba5\x92\xd1u\xc7@\xe8z\tT\x1d\x02\x89\x1a\x05V\x90\xd9\u06c0\x00\x00\u07d4\u007f8\x9c\x12\xf3\xc6\x16OdFVlwf\x95\x03\xc2y%'\x89\x05V\xf6L\x1f\xe7\xfa\x00\x00\xe0\x94\u007f:\x1eE\xf6~\x92\u0200\xe5s\xb43y\xd7\x1e\xe0\x89\xdbT\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\u007f=r\x03\u0224G\xf7\xbf6\u060a\xe9\xb6\x06*^\xeex\xae\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\u007fF\xbb%F\r\xd7\xda\xe4!\x1c\xa7\xf1Z\xd3\x12\xfc}\xc7\\\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4\u007fI\xe7\xa4&\x98\x82\xbd\x87\"\u0526\xf5f4v)b@y\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\u007fI\xf2\a&G\x1a\xc1\u01e8>\xf1\x06\xe9w\\\xebf%f\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4\u007fK^'\x85x\xc0F\xcc\xea\xf6W0\xa0\xe0h2\x9e\u0576\x89e\xea=\xb7UF`\x00\x00\u07d4\u007fOY;a\x8c3\v\xa2\xc3\xd5\xf4\x1e\xce\xeb\x92\xe2~Bl\x89\x96n\xdcuk|\xfc\x00\x00\u07d4\u007fT\x14\x91\u04ac\x00\xd2a/\x94\xaa\u007f\v\xcb\x01FQ\xfb\u0509\x14b\fW\xdd\xda\xe0\x00\x00\u07d4\u007fZ\xe0Z\xe0\xf8\xcb\xe5\xdf\xe7!\xf0D\u05e7\xbe\xf4\xc2y\x97\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\u007f`:\xec\x17Y\xea_\a\xc7\xf8\xd4\x1a\x14(\xfb\xba\xf9\xe7b\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u007falo\x00\x8a\u07e0\x82\xf3M\xa7\xd0e\x04`6\x80u\xfb\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u007fa\xfal\xf5\xf8\x98\xb4@\xda\u016b\xd8`\rmi\x1f\xde\xf9\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\xe0\x94\u007fe\\g\x89\xed\xdfE\\\xb4\xb8\x80\x99r\x0698\x9e\ubb0a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\u007fk(\u0204!\xe4\x85~E\x92\x81\u05c4ai$\x89\xd3\xfb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u007fn\xfboC\x18\x87m.\xe6$\xe2u\x95\xf4DF\xf6\x8e\x93\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\u007fq\x92\xc0\xdf\x1c}\xb6\xd9\xede\xd7\x11\x84\xd8\xe4\x15Z\x17\xba\x89\x04Sr\x8d3\x94,\x00\x00\u07d4\u007fz:!\xb3\xf5\xa6]\x81\xe0\xfc\xb7\xd5-\xd0\n\x1a\xa3m\xba\x89\x05k\xc7^-c\x10\x00\x00\u07d4\u007f\x8d\xbc\xe1\x80\xed\x9cV65\xaa\xd2\xd9{L\xbcB\x89\x06\u0649\x90\xf54`\x8ar\x88\x00\x00\xe0\x94\u007f\x99=\xdb~\x02\u0082\xb8\x98\xf6\x15_h\x0e\xf5\xb9\xaf\xf9\a\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\u007f\x9f\x9bV\xe4(\x9d\xfbX\xe7\x0f\xd5\xf1*\x97\xb5m5\u01a5\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\u007f\xa3~\xd6x\x87u\x1aG\x1f\x0e\xb3\x06\xbeD\xe0\xdb\xcd`\x89\x899vt\u007f\xe1\x1a\x10\x00\x00\u07d4\u007f\xaa0\xc3\x15\x19\xb5\x84\xe9rP\xed*<\xf38^\xd5\xfdP\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u007f\xcf[\xa6fo\x96lTH\xc1{\xf1\xcb\v\xbc\xd8\x01\x9b\x06\x89\x05k\xc3\u042e\xbeI\x80\x00\xe0\x94\u007f\xd6y\xe5\xfb\r\xa2\xa5\xd1\x16\x19M\xcbP\x83\x18\xed\u0140\xf3\x8a\x01c\x9eI\xbb\xa1b\x80\x00\x00\u07d4\u007f\u06e01\u01cf\x9c\tmb\xd0Z6\x9e\uac3c\xccU\u5257\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\u007f\xdb\u00e8D\xe4\r\x96\xb2\xf3\xa652.`e\xf4\xca\x0e\x84\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u007f\xdf\u020dx\xbf\x1b(Z\xc6O\x1a\xdb5\xdc\x11\xfc\xb09Q\x89|\x06\xfd\xa0/\xb06\x00\x00\u07d4\u007f\xea\x19b\xe3]b\x05\x97h\xc7I\xbe\u0756\u02b90\xd3x\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u007f\xef\x8c8w\x9f\xb3\a\xeco\x04K\xeb\xe4\u007f<\xfa\xe7\x96\xf1\x89\t#@\xf8l\xf0\x9e\x80\x00\u07d4\u007f\xf0\xc6?p$\x1b\xec\xe1\x9bs~SA\xb1+\x10\x901\u0609\x12\xc1\xb6\xee\xd0=(\x00\x00\xe0\x94\u007f\xfa\xbf\xbc9\f\xbeC\u0389\x18\x8f\bh\xb2}\xcb\x0f\f\xad\x8a\x01YQ\x
82\"K&H\x00\x00\xe0\x94\u007f\xfd\x02\xed7\fp`\xb2\xaeS\xc0x\xc8\x01!\x90\u07fbu\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u0794\x80\x02*\x12\a\xe9\x10\x91\x1f\xc9(I\xb0i\xab\f\xda\xd0C\u04c8\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4\x80\t\xa7\xcb\u0452\xb3\xae\u052d\xb9\x83\xd5(ER\xc1ltQ\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x80\x0e}c\x1cnW:\x903/\x17\xf7\x1f_\u045bR\x8c\xb9\x89\b=lz\xabc`\x00\x00\u07d4\x80\x15m\x10\ufa320\u0254\x10c\r7\xe2i\xd4\t<\xea\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x80\x172\xa4\x81\u00c0\xe5~\xd6-l)\u0799\x8a\xf3\xfa;\x13\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x80\x1de\xc5\x18\xb1\x1d\x0e?OG\x02!Ap\x13\xc8\xe5>\u0149\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x80&CZ\xacr\x8dI{\x19\xb3\xe7\xe5|(\xc5c\x95O+\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\x80-\xc3\xc4\xff-}\x92^\u215fJ\x06\u05fa`\xf10\x8c\x89\x05P\x94\f\x8f\xd3L\x00\x00\u07d4\x800\xb1\x11\u0198?\x04\x85\u076c\xa7b$\xc6\x18\x064x\x9f\x89\x04V9\x18$O@\x00\x00\u07d4\x805\xbc\xff\xae\xfd\xee\xea5\x83\fI}\x14(\x9d6 #\u0789\x10CV\x1a\x88)0\x00\x00\u07d4\x805\xfeNkj\xf2z\u44a5xQ^\x9d9\xfao\xa6[\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x80C\xed\"\xf9\x97\u58a4\xc1n6D\x86\xaed\x97V\x92\u0109=I\x04\xff\xc9\x11.\x80\x00\u07d4\x80C\xfd\u043cL\x97=\x16c\xd5_\xc15P\x8e\xc5\xd4\xf4\xfa\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x80L\xa9IrcOc:Q\xf3V\v\x1d\x06\xc0\xb2\x93\xb3\xb1\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x80R-\u07d4N\xc5.'\xd7$\xedL\x93\xe1\xf7\xbe`\x83\u0589\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x80Y\x1aB\x17\x9f4\xe6M\x9d\xf7]\xcdF;(hoUt\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x80\\\xe5\x12\x97\xa0y;\x81 g\xf0\x17\xb3\xe7\xb2\u07db\xb1\xf9\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\x80]\x84o\xb0\xbc\x02\xa73r&\u0585\xbe\x9e\xe7s\xb9\x19\x8a\x8a\x04<0\xfb\b\x84\xa9l\x00\x00\u07d4\x80c7\x9a{\xf2\u02d2:\x84\xc5\t>h\xda\xc7\xf7T\x81\u0149\x11v\x10.n2\xdf\x00\x00\u07d4\x80hTX\x8e\xcc\xe5AI_\x81\u008a)\x03s\xdf\x02t\xb2\x89\x1f\x8c\xdf\\n\x8dX\x00\x00\u07d4\x80oD\xbd\xebh\x807\x01^\x84\xff!\x80I\xe3\x823*3\x89l]\xb2\xa4\xd8\x15\xdc\x00\x00\u07d4\x80tF\x18\xde9jT1\x97\xeeH\x94\xab\xd0c\x98\xdd|'\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x80w\xc3\xe4\xc4EXn\tL\xe1\x02\x93\u007f\xa0[s{V\x8c\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x80\x90\u007fY1H\xb5|F\xc1w\xe2=%\xab\u012a\xe1\x83a\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x80\x97s\x16\x94NYB\xe7\x9b\x0e:\xba\u04cd\xa7F\be\x19\x89\x02\x1auJm\xc5(\x00\x00\xe0\x94\x80\xa0\xf6\xcc\x18l\xf6 \x14\x00sn\x06Z9\x1fR\xa9\xdfJ\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x80\xab\xecZ\xa3n\\\x9d\t\x8f\x1b\x94(\x81\xbdZ\xca\u0196=\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x80\xb2=8\v\x82\\F\xe098\x99\xa8UVF-\xa0\u1309lk\x93[\x8b\xbd@\x00\x00\u07d4\x80\xb4-\xe1p\xdb\xd7#\xf4T\xe8\x8fw\x16E-\x92\x98P\x92\x89\x10F#\xc0v-\xd1\x00\x00\u07d4\x80\xb7\x9f3\x83\x90\u047a\x1b77\xa2\x9a\x02W\xe5\xd9\x1e\a1\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x80\xbf\x99^\u063a\x92p\x1d\x10\xfe\u011f\x9e}\x01M\xbe\xe0&\x89\x1f\x047\xca\x1a~\x12\x80\x00\u07d4\x80\xc0N\xfd1\x0fD\x04\x83\xc7?tK[\x9edY\x9c\xe3\xec\x89A\rXj \xa4\xc0\x00\x00\u07d4\x80\u00e9\xf6\x95\xb1m\xb1Yr\x86\u0473\xa8\xb7il9\xfa'\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x80\xc5>\xe7\xe35\u007f\x94\xce\rxh\x00\x9c \x8bJ\x13\x01%\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x80\xcc!\xbd\x99\xf3\x90\x05\u014f\xe4\xa4H\x90\x92 !\x8ff\u02c966\xc9yd6t\x00\x00\u07d4\x80\xd5\xc4\fY\xc7\xf5N\xa3\xa5_\xcf\xd1uG\x1e\xa3P\x99\xb3\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x80\xda/\u0762\x9a\x9e'\xf9\xe1\x15\x97^i\xae\x9c\xfb\xf3\xf2~\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x80\xe7\xb3 
R0\xa5f\xa1\xf0a\xd9\"\x81\x9b\xb4\xd4\u04a0\xe1\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4\x80\xea\x1a\xcc\x13n\xcaKh\xc8B\xa9Z\xdfk\u007f\xee~\xb8\xa2\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x80\xf0z\xc0\x9e{,<\n=\x1e\x94\x13\xa5D\xc7:A\xbe\u02c9\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x81\r\xb2Vu\xf4^\xa4\xc7\xf3\x17\u007f7\xce)\xe2-g\x99\x9c\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x81\x13\x9b\xfd\u0326V\xc40 ?r\x95\x8cT;e\x80\xd4\f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x81\x14a\xa2\xb0\u0290\xba\xda\xc0j\x9e\xa1nx{3\xb1\x96\u0309\b\xe3\xf5\v\x17<\x10\x00\x00\u07d4\x81\x16M\xeb\x10\x81J\xe0\x83\x91\xf3,\bf{bH\xc2}z\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4\x81\x18i1\x18A7\xd1\x19*\u020c\xd3\xe1\xe5\xd0\xfd\xb8jt\x89\x9d5\x95\xab$8\xd0\x00\x00\u0794\x81*U\xc4<\xae\xdcYr\x187\x90\x00\xceQ\rT\x886\xfd\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\x81.\xa7\xa3\xb2\xc8n\xed2\xffO,sQL\xc6;\xac\xfb\u038965\u026d\xc5\u07a0\x00\x00\u07d4\x814\xdd\x1c\x9d\xf0\xd6\u0225\x81$&\xbbU\xc7a\u0283\x1f\b\x89\x06\xa2\x16\v\xb5|\xcc\x00\x00\u07d4\x81A5\u068f\x98\x11\aW\x83\xbf\x1a\xb6pb\xaf\x8d>\x9f@\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x81I\x8c\xa0{\x0f/\x17\xe8\xbb\xc7\xe6\x1a\u007fJ\xe7\xbef\xb7\x8b\x89\x05\x81\xfb\xb5\xb3;\xb0\x00\x00\u07d4\x81Um\xb2sI\xab\x8b'\x00ID\xedP\xa4n\x94\x1a\x0f_\x89\u063beI\xb0+\xb8\x00\x00\u07d4\x81U\xfalQ\xeb1\xd8\bA-t\x8a\xa0\x86\x10P\x18\x12/\x89e\xea=\xb7UF`\x00\x00\xe0\x94\x81V6\v\xbd7\ta\xce\xcakf\x91\xd7P\x06\xad L\xf2\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\x81a\xd9@\xc3v\x01\x00\xb9\b\x05)\xf8\xa6\x03%\x03\x0fn\u0709\x10CV\x1a\x88)0\x00\x00\xe0\x94\x81d\xe7\x83\x14\xae\x16\xb2\x89&\xccU=,\xcb\x16\xf3V'\r\x8a\x01\xca\x13N\x95\xfb2\xc8\x00\x00\u07d4\x81e\u02b0\xea\xfbZ2\x8f\xc4\x1a\xc6M\xaeq[.\xef,e\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x81h\xed\xce\u007f)a\xcf)[\x9f\xcdZE\xc0l\xde\xdan\xf5\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x81m\x97r\xcf\x119\x91\x16\xcc\x1er\xc2lgt\xc9\xed\xd79\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x81s\xc85djg.\x01R\xbe\x10\xff\xe8Ab\xdd%nL\x89\x1a\xab\xdf!E\xb40\x00\x00\u07d4\x81t\x93\u035b\xc6#p*$\xa5o\x9f\x82\xe3\xfdH\xf3\xcd1\x89\x9eK#\xf1-L\xa0\x00\x00\u07d4\x81y\xc8\tp\x18,\u0177\xd8*M\xf0n\xa9M\xb6:%\xf3\x89'o%\x9d\xe6k\xf4\x00\x00\u07d4\x81z\xc3;\xd8\xf8GVsr\x95\x1fJ\x10\u05e9\x1c\xe3\xf40\x89\n\xd7\xc4\x06\xc6m\xc1\x80\x00\xe0\x94\x81\x8f\xfe'\x1f\u00d75e\xc3\x03\xf2\x13\xf6\xd2\u0689\x89~\xbd\x8a\x016\xe0SB\xfe\u1e40\x00\u07d4\x81\x97\x94\x81!s.c\xd9\xc1H\x19N\xca\xd4n0\xb7I\u0209\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x81\x9a\xf9\xa1\xc2s2\xb1\xc3i\xbb\xda\x1b=\xe1\xc6\xe93\xd6@\x89\x11\t\xe6T\xb9\x8fz\x00\x00\xe0\x94\x81\x9c\u06a506x\xef|\xecY\u050c\x82\x16:\xcc`\xb9R\x8a\x03\x13QT_y\x81l\x00\x00\u07d4\x81\x9e\xb4\x99\vZ\xbaUG\t=\xa1+k<\x10\x93\xdfmF\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x81\xa8\x81\x96\xfa\xc5\xf2<>\x12\xa6\x9d\xecK\x88\x0e\xb7\xd9s\x10\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x81\xbc\xcb\xff\x8fD4~\xb7\xfc\xa9['\xce|\x95$\x92\xaa\xad\x89\b@\xc1!e\xddx\x00\x00\u07d4\x81\xbdu\xab\xd8e\xe0\xc3\xf0J\vO\xdb\xcbt\xd3@\x82\xfb\xb7\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x81\xc1\x8c*#\x8d\xdcL\xba#\n\a-\xd7\xdc\x10\x1eb\x02s\x89Hz\x9a0E9D\x00\x00\u07d4\x81\xc9\xe1\xae\xe2\xd36]S\xbc\xfd\u0356\xc7\xc58\xb0\xfd~\xec\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\x81\u03edv\t\x13\xd3\xc3\"\xfc\xc7{I\u00ae9\a\xe7On\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94\x81\xd6\x19\xffW&\xf2@_\x12\x90Lr\xeb\x1e$\xa0\xaa\xeeO\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x81\xef\u25aev\xc8`\xd1\xc5\xfb\xd3=G\xe8\u0399\x96\xd1W\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x81\xf8\xde,(=_\u052f\xbd\xa8]\xed\xf9v\x0e\xab\xbb\xb5r\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\x82\f\x19)\x11\x96P[e\x05\x9d\x99\x14\xb7\t\v\xe1\u06c7\u0789\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4\x82\x1c\xb5\xcd\x05\xc7\uf41f\xe1\xbe`s=\x89c\xd7`\xdcA\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x82\x1dy\x8a\xf1\x99\x89\u00ee[\x84\xa7\xa7(<\xd7\xfd\xa1\xfa\xbe\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\x82\x1e\xb9\t\x94\xa2\xfb\xf9K\xdc23\x91\x02\x96\xf7o\x9b\xf6\xe7\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\x82$\x9f\xe7\x0fa\u01b1o\x19\xa3$\x84\x0f\xdc\x02\x021\xbb\x02\x8a\x02\x036\xb0\x8a\x93c[\x00\x00\u07d4\x82(\xeb\xc0\x87H\x0f\xd6EG\xca(\x1f^\xac\xe3\x04\x14S\xb9\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94\x82)\u03b9\xf0\xd7\b9I\x8dD\xe6\xab\xed\x93\xc5\xca\x05\x9f]\x8a\x1a\x1c\x1b<\x98\x9a \x10\x00\x00\u07d4\x82.\xdf\xf66V:a\x06\xe5.\x9a%\x98\xf7\xe6\xd0\xef'\x82\x89\x01\xf4\xf9i=B\u04c0\x00\u07d4\x822\x19\xa2Yv\xbb*\xa4\xaf\x8b\xadA\xac5&\xb4\x936\x1f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x822\xd1\xf9t.\u07cd\xd9'\xda5;*\xe7\xb4\xcb\xceu\x92\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\x824\xf4c\u0444\x85P\x1f\x8f\x85\xac\xe4\x97,\x9bc-\xbc\u0309lk\x93[\x8b\xbd@\x00\x00\u07d4\x827htg7\xcem\xa3\x12\xd5>TSN\x10o\x96|\xf3\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x82;\xa7dr8\xd1\x13\xbc\xe9\x96JC\u0420\x98\x11\x8b\xfeM\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x82@t1(\x06\xdaGHCBf\xee\x00!@\u305a\u0089Q\xb1\u04c3\x92a\xac\x00\x00\u07d4\x82C\x8f\u04b3*\x9b\xddgKI\xd8\xcc_\xa2\xef\xf9x\x18G\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x82HW(\xd0\xe2\x81V7X\xc7Z\xb2~\xd9\u80a0\x00-\x89\a\xf8\b\xe9)\x1el\x00\x00\u07d4\x82K<\x19)]~\xf6\xfa\xa7\xf3t\xa4y\x84\x86\xa8\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x82Q5\x8c\xa4\xe0`\u0775Y\xcaX\xbc\v\u077e\xb4\a\x02\x03\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x82Q5\xb1\xa7\xfc\x16\x05aL\x8a\xa4\u042cm\xba\u040fH\x0e\x89M\x85<\x8f\x89\b\x98\x00\x00\u07d4\x82S\t\xa7\xd4]\x18\x12\xf5\x1en\x8d\xf5\xa7\xb9ol\x90\x88\x87\x89\x804\xf7\u0671f\xd4\x00\x00\u07d4\x82Z\u007fN\x10\x94\x9c\xb6\xf8\x96Bh\xf1\xfa_W\xe7\x12\xb4\u0109\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x82a\xfa#\f\x90\x1dC\xffW\x9fG\x80\u04d9\xf3\x1e`v\xbc\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x82b\x16\x9baXp\x13N\xb4\xacl_G\x1ck\xf2\xf7\x89\xfc\x89\x19\x12z\x13\x91\xea*\x00\x00\u07d4\x82c\xec\xe5\xd7\t\xe0\u05eeq\u0328h\xed7\xcd/\xef\x80{\x895\xab\x02\x8a\xc1T\xb8\x00\x00\xe0\x94\x82l\xe5y\x052\xe0T\x8ca\x02\xa3\r>\xac\x83k\xd68\x8f\x8a\x03\xcf\xc8.7\xe9\xa7@\x00\x00\u07d4\x82n\xb7\xcds\x19\xb8-\xd0z\x1f;@\x90q\xd9n9g\u007f\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x82u1\xa6\u0141z\xe3_\x82\xb0\v\x97T\xfc\xf7LU\xe22\x89\xc3(\t>a\xee@\x00\x00\u0794\x82u\xcdhL6y\u0548}\x03fN3\x83E\xdc<\xdd\xe1\x88\xdbD\xe0I\xbb,\x00\x00\u07d4\x82\x84\x92;b\u62ff|+\x9f4\x14\xd1>\xf6\xc8\x12\xa9\x04\x89\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4\x82\x8b\xa6Q\u02d3\x0e\xd9xqV)\x9a=\xe4L\u040br\x12\x89Hz\x9a0E9D\x00\x00\u07d4\x82\xa1\\\xef\x1dl\x82`\xea\xf1Y\xea?\x01\x80\xd8g}\xce\x1c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x82\xa8\xb9kl\x9e\x13\xeb\xec\x1e\x9f\x18\xac\x02\xa6\x0e\xa8\x8aH\xff\x89lk\x8c@\x8es\xb3\x00\x00\u07d4\x82\xa8\u02ff\xdf\xf0+.8\xaeK\xbf\xca\x15\xf1\xf0\xe8;\x1a\xea\x89\x04\x9b\x99\x1c'\xefm\x80\x00\u07d4\x82\xe4F\x1e\xb9\xd8I\xf0\x04\x1c\x14\x04!\x9eBr\u0110\n\xb4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x82\xe5w\xb5\x15\xcb+\b`\xaa\xfe\x1c\xe0\x9aY\xe0\x9f\xe7\xd0@\x89 \x86\xac5\x10R`\x00\x00\u07d4\x82\xea\x01\xe3\xbf.\x83\x83nqpN\"\xa2q\x93w\xef\xd9\u00c9\xa4\xccy\x95c\u00c0\x00\x00\u07d4\x82\xf2\xe9\x91\xfd2L_]\x17v\x8e\x9fa3]\xb61\x9dl\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\x82\xf3\x9b'X\xaeB'{\x86\u059fu\xe6(\xd9X\xeb\u02b0\x8a\bxg\x83&\xea\xc9\x00\x00\x00\xe0\x94\x82\xf8T\xc9\xc2\xf0\x87\xdf\xfa\x98Z\xc8 \x1ebl\xa5Fv\x86\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\x82\xffqo\xdf\x03>\xc7\xe9B\xc9\t\u0643\x18g\xb8\xb6\xe2\xef\x89a\t=|,m8\x00\x00\u07d4\x83\b\xed\n\xf7\xf8\xa3\xc1u\x1f\xaf\xc8w\xb5\xa4*\xf7\xd3X\x82\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x83\x1cD\xb3\b@G\x18K*\xd2\x18h\x06@\x907P\xc4]\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x83!\x05\x83\xc1jN\x1e\x1d\xac\x84\xeb\xd3~=\x0f|W\ub909lk\x93[\x8b\xbd@\x00\x00\u07d4\x83,T\x17k\xdfC\xd2\u027c\u05f8\b\xb8\x95V\xb8\x9c\xbf1\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x833\x16\x98]Gt+\xfe\xd4\x10`J\x91\x95<\x05\xfb\x12\xb0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x834vK{9zNW\x8fP6M`\xceD\x89\x9b\xff\x94\x89\x05\x03\xb2\x03\xe9\xfb\xa2\x00\x00\xe0\x94\x83;j\x8e\xc8\xda@\x81\x86\xac\x8a}*m\xd6\x15#\xe7\u0384\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4\x83=?\xaeT*\xd5\xf8\xb5\f\xe1\x9b\xde+\xecW\x91\x80\u020c\x89\x12\xc1\xb6\xee\xd0=(\x00\x00\xe0\x94\x83=\xb4,\x14\x16<{\xe4\u02b8j\u0153\xe0bf\u0599\u054a$\xe4\r+iC\xef\x90\x00\x00\xe0\x94\x83V;\xc3d\ud060\xc6\xda;V\xffI\xbb\xf2g\x82z\x9c\x8a\x03\xab\x91\xd1{ \xdeP\x00\x00\u07d4\x83zd]\xc9\\IT\x9f\x89\x9cN\x8b\u03c7S$\xb2\xf5|\x89 
\x8c9J\xf1\u0208\x00\x00\u07d4\x83\x8b\xd5e\xf9\x9f\xdeH\x05?y\x17\xfe3<\xf8J\xd5H\xab\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x83\x90\x8a\xa7G\x8am\x1c\x9b\x9b\x02\x81\x14\x8f\x8f\x9f$+\x9f\u0709lk\x93[\x8b\xbd@\x00\x00\u07d4\x83\x92\xe57vq5x\x01[\xffI@\xcfC\x84\x9d}\u02e1\x89\bM\xf05]V\x17\x00\x00\xe0\x94\x83\x97\xa1\xbcG\xac\xd6GA\x81Y\xb9\x9c\xeaW\xe1\xe6S-n\x8a\x01\xf1\x0f\xa8'\xb5P\xb4\x00\x00\u07d4\x83\x98\xe0~\xbc\xb4\xf7_\xf2\x11m\xe7|\x1c*\x99\xf3\x03\xa4\u03c9\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x83\xa3\x14\x883\xd9dI\x84\xf7\xc4u\xa7\x85\a\x16\ufd00\xff\x89\xb8Pz\x82\a( \x00\x00\u07d4\x83\xa4\x02C\x8e\x05\x19w=TH2k\xfba\xf8\xb2\f\xf5-\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d4\x83\xa9;[\xa4\x1b\xf8\x87 \xe4\x15y\f\xdc\vg\xb4\xaf4\u0109\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x83\xc2=\x8aP!$\xee\x15\x0f\b\xd7\x1d\xc6rt\x10\xa0\xf9\x01\x8a\a3\x1f;\xfef\x1b\x18\x00\x00\u07d4\x83\u0217\xa8Ki^\xeb\xe4fy\xf7\xda\x19\xd7vb\x1c&\x94\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x83\xd52\u04cdm\xee?`\xad\u018b\x93a3\u01e2\xa1\xb0\u0749\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x83\xdb\xf8\xa1(S\xb4\n\xc6\x19\x96\xf8\xbf\x1d\xc8\xfd\xba\xdd\xd3)\x894\x95tD\xb8@\xe8\x00\x00\u07d4\x83\xdb\xfd\x8e\xda\x01\xd0\u078e\x15\x8b\x16\u0413_\xc28\n]\u01c9 \x86\xac5\x10R`\x00\x00\u07d4\x83\xe4\x80U2|(\xb5\x93o\xd9\xf4D~s\xbd\xb2\xdd3v\x89\x90\xf54`\x8ar\x88\x00\x00\xe0\x94\x83\xfeZ\x1b2\x8b\xaeD\a\x11\xbe\xafj\xad`&\xed\xa6\xd2 \x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x84\x00\x8ar\xf8\x03o?\xeb\xa5B\xe3Px\xc0W\xf3*\x88%\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x84\x0e\xc8>\xa96!\xf04\xe7\xbb7b\xbb\x8e)\xde\xd4\xc4y\x89\x87\x86x2n\xac\x90\x00\x00\xe0\x94\x84\x11E\xb4H@\xc9F\xe2\x1d\xbc\x19\x02d\xb8\xe0\xd5\x02\x93i\x8a?\x87\bW\xa3\xe0\xe3\x80\x00\x00\u07d4\x84#!\a\x93+\x12\xe01\x86X5%\xce\x02:p>\xf8\u0649lk\x93[\x8b\xbd@\x00\x00\u07d4\x84$O\xc9ZiW\xed|\x15\x04\xe4\x9f0\xb8\xc3^\xcaKy\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x841'}{\xdd\x10E}\xc0\x17@\x8c\x8d\xbb\xbdAJ\x8d\xf3\x89\x02\"\xc8\xeb?\xf6d\x00\x00\u07d4\x847Z\xfb\xf5\x9b:\x1da\xa1\xbe2\xd0u\xe0\xe1ZO\xbc\xa5\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x84;\xd3P/E\xf8\xbcM\xa3p\xb3#\xbd\xac?\xcf_\x19\xa6\x89P\x03\x9dc\xd1\x1c\x90\x00\x00\u07d4\x84P34c\rw\xf7AG\xf6\x8b.\bf\x13\xc8\xf1\xad\xe9\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4\x84R\x03u\x0fqH\xa9\xaa&)!\xe8mC\xbfd\x19t\xfd\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x84a\xec\u0126\xa4^\xb1\xa5\xb9G\xfb\x86\xb8\x80i\xb9\x1f\xcdo\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x84g^\x91wrmE\xea\xa4k9\x92\xa3@\xba\u007fq\f\x95\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x84hl{\xadv,T\xb6g\u055f\x90\x94<\xd1M\x11z&\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x84\x89\xf6\xad\x1d\x9a\x94\xa2\x97x\x91V\x89\x9d\xb6AT\xf1\u06f5\x89\x13t\a\xc0<\x8c&\x80\x00\u07d4\x84\x8c\x99Jy\x00?\xe7\xb7\xc2l\xc62\x12\xe1\xfc/\x9c\x19\xeb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x84\x8f\xbd)\xd6|\xf4\xa0\x13\xcb\x02\xa4\xb1v\xef$N\x9e\u6349\x01\x17*ck\xbd\xc2\x00\x00\u07d4\x84\x94\x9d\xbaU\x9ac\xbf\xc8E\xde\xd0n\x9f-\x9b\u007f\x11\xef$\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x84\x9a\xb8\a\x90\xb2\x8f\xf1\xff\u05ba9N\xfctc\x10\\6\xf7\x89\x01\xe0+\xe4\xael\x84\x00\x00\u07d4\x84\x9b\x11oYc\x01\xc5\u063bb\xe0\xe9z\x82H\x12n9\xf3\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x84\xa7L\xee\xcf\xf6\\\xb9;/\x94\x9dw>\xf1\xad\u007f\xb4\xa2E\x89\x05\n\x9bDF\x85\xc7\x00\x00\u07d4\x84\xaa\xc7\xfa\x19\u007f\xf8\\0\xe0;zS\x82\xb9W\xf4\x1f:\xfb\x89\b\x8b#\xac\xff\u0650\x00\x00\u07d4\x84\xaf\x1b\x15sB\xd5Ch&\r\x17\x87b0\xa54\xb5K\x0e\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\x84\xb0\xeek\xb87\u04e4\xc4\xc5\x01\x1c:\"\x8c\x0e\u06b4cJ\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x84\xb4\xb7Nf#\xba\x9d\x15\x83\xe0\u03feId?\x168AI\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x84\xb6\xb6\xad\xbe/[>-h,f\xaf\x1b\u0110S@\xc3\xed\x89!\x92\xf8\xd2\"\x15\x00\x80\x00\xe0\x94\x84\xb9\x1e.)\x02\xd0^+Y\x1bA\b;\u05fe\xb2\xd5,t\x8a\x02\x15\xe5\x12\x8bE\x04d\x80\x00\u07d4\x84\xbc\xbf\"\xc0\x96\a\xac\x844\x1d.\xdb\xc0;\xfb\x179\xd7D\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x84\xbf\xce\xf0I\x1a\n\xe0iK7\u03ac\x02E\x84\xf2\xaa\x04g\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\x84\xcb}\xa0P-\xf4\\\xf5a\x81{\xbd#b\xf4Q\xbe\x02\u0689Hz\x9a0E9D\x00\x00\u07d4\x84\xccxx\xda`_\xdb\x01\x9f\xab\x9bL\xcf\xc1Wp\x9c\u0765\x89Hy\x85\x13\xaf\x04\xc9\x00\x00\u07d4\x84\xdb\x14Y\xbb\x00\x81.\xa6~\xcb=\xc1\x89\xb7!\x87\xd9\xc5\x01\x89\b\x11\xb8\xfb\u0685\xab\x80\x00\u07d4\x84\u9516\x80\xbe\xcehA\xb9\xa7\xe5%\r\b\xac\xd8}\x16\u0349\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x84\xe9\u03c1f\xc3j\xbf\xa4\x90S\xb7\xa1\xad@6 &\x81\xef\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x84\xec\x06\xf2G\x00\xfeBAL\xb9\x89|\x15L\x88\xde/a2\x89Hz\x9a0E9D\x00\x00\xe0\x94\x84\xf5\"\xf0R\x0e\xbaR\xdd\x18\xad!\xfaK\x82\x9f+\x89\u02d7\x8a\x01\fQ\x06\xd5\x13O\x13\x00\x00\u07d4\x85\v\x9d\xb1\x8f\xf8K\xf0\xc7\xdaI\xea7\x81\xd9 
\x90\xad~d\x89\x8c\xf2?\x90\x9c\x0f\xa0\x00\x00\u07d4\x85\x10\xee\x93O\f\xbc\x90\x0e\x10\a\xeb8\xa2\x1e*Q\x01\xb8\xb2\x89\x05\xbf\v\xa6cOh\x00\x00\u07d4\x85\x16\xfc\xafw\u0213\x97\x0f\xcd\x1a\x95\x8b\xa9\xa0\x0eI\x04@\x19\x89\n\xa3\xeb\x16\x91\xbc\xe5\x80\x00\u07d4\x85\x1a\xa9\x1c\x82\xf4/\xad]\xd8\xe8\xbb^\xa6\x9c\x8f:Yw\u0449\b\x0eV\x1f%xy\x80\x00\u07d4\x85\x1c\rb\xbeF5\xd4w~\x805\xe3~K\xa8Q|a2\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x85\x1d\u00ca\xdbE\x93r\x9av\xf3:\x86\x16\u06b6\xf5\xf5\x9aw\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x852I\b\x97\xbb\xb4\u038b\u007fk\x83~L\xba\x84\x8f\xbe\x99v\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x85>j\xba\xf4Di\xc7/\x15\x1dN\"8\x19\xac\xedN7(\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x85F\x91\xceqO2\\\xedU\xceY(\u039b\xa1/\xac\u0478\x89\xedp\xb5\xe9\xc3\xf2\xf0\x00\x00\u07d4\x85L\fF\x9c$k\x83\xb5\u0473\xec\xa4C\xb3\x9a\xf5\xee\x12\x8a\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4\x85]\x9a\xef,9\xc6#\r\t\u025e\xf6II\x89\xab\u61c5\x89\b\xbaR\xe6\xfcE\xe4\x00\x00\u07d4\x85c\u0113a\xb6%\xe7hw\x1c\x96\x15\x1d\xbf\xbd\x1c\x90iv\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x85fa\t\x01\xaa\xce8\xb82D\xf3\xa9\xc810jg\xb9\u0709\xb0\x82\x13\xbc\xf8\xff\xe0\x00\x00\xe0\x94\x85j\xa2<\x82\xd7![\xec\x8dW\xf6\n\xd7^\xf1O\xa3_D\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x85nZ\xb3\xf6L\x9a\xb5k\x00\x93\x93\xb0\x16d\xfc\x03$\x05\x0e\x89a\t=|,m8\x00\x00\u07d4\x85n\xb2\x04$\x1a\x87\x83\x0f\xb2)\x03\x13C\xdc0\x85OX\x1a\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x85s,\x06\\\xbdd\x11\x99A\xae\xd40\xacYg\vlQ\u0109'\xa5sb\xab\n\x0e\x80\x00\xe0\x94\x85x\xe1\x02\x12\xca\x14\xff\a2\xa8$\x1e7F}\xb8V2\xa9\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x85y\xda\xdf\x1a9Z4q\xe2\vov=\x9a\x0f\xf1\x9a?o\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x85\u007f\x10\v\x1aY0\"^\xfc~\x90 \u05c3'\xb4\x1c\x02\u02c9lk\x93[\x8b\xbd@\x00\x00\u07d4\x85\x94mV\xa4\xd3q\xa93hS\x96\x90\xb6\x0e\xc8%\x10tT\x89]\u0212\xaa\x111\xc8\x00\x00\xe0\x94\x85\x99\xcb\u0566\xa9\xdc\u0539f\xbe8}iw]\xa5\xe3C'\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x86$_Yf\x91\t>\xce?=<\xa2&>\xac\xe8\x19A\u0649\n1\x06+\xee\xedp\x00\x00\u07d4\x86%i!\x1e\x8cc'\xb5A^:g\xe5s\x8b\x15\xba\xafn\x89\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4\x86)}s\x0f\xe0\xf7\xa9\xee$\xe0\x8f\xb1\b{1\xad\xb3\x06\xa7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x86D\xcc(\x1b\xe32\xcc\xce\xd3m\xa4\x83\xfb*\aF\u067a.\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x86I\x9a\x12(\xff-~\xe3\au\x93dPo\x8e\x8c\x83\a\xa5\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x86K\xecPi\xf8U\xa4\xfdX\x92\xa6\xc4I\x1d\xb0|\x88\xff|\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x86W\n\xb2Y\u0271\xc3,\x97) /w\xf5\x90\xc0}\xd6\x12\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94\x86c\xa2A\xa0\xa8\x9ep\xe1\x82\xc8E\xe2\x10\\\x8a\xd7&K\u03ca\x03#\xb1=\x83\x98\xf3#\x80\x00\u07d4\x86g\xfa\x11U\xfe\xd72\u03f8\u0725\xa0\xd7e\xce\r\a\x05\xed\x89\x04n\xc9e\u00d3\xb1\x00\x00\u07d4\x86h\xaf\x86\x8a\x1e\x98\x88_\x93\u007f&\x15\xde\xd6u\x18\x04\xeb-\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\x86t\nFd\x8e\x84Z]\x96F\x1b\x18\t\x1f\xf5{\xe8\xa1o\x8a\x14\xc0\x974\x85\xbf9@\x00\x00\xe0\x94\x86~\xbaVt\x8aY\x045\r,\xa2\xa5\u039c\xa0\vg\n\x9b\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\x86\x80dt\xc3X\x04}\x94\x06\xe6\xa0\u007f@\x94[\xc82\x8eg\x8a\x01u.\xb0\xf7\x01=\x10\x00\x00\u07d4\x86\x88=T\xcd9\x15\xe5I\tU0\xf9\xab\x18\x05\xe8\xc5C-\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x86\x8c#\xbe\x874f\xd4\xc7L\"\n\x19\xb2E\xd1x~\x80\u007f\x89J\x13\xbb\xbd\x92\u020e\x80\x00\xe0\x94\x86\x92O\xb2\x11\xaa\xd2<\xf5\xce`\x0e\n\xae\x80c\x96D@\x87\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x86\x93\u9e3e\x94B^\xefyi\xbci\xf9\xd4/|\xadg\x1e\x8967\tlK\xcci\x00\x00\xe0\x94\x86\x9f\x1a\xa3\x0eDU\xbe\xb1\x82 \x91\xde\\\xad\xecy\xa8\xf9F\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x86\xa1\xea\xde\xeb0F\x13E\xd9\xefk\xd0R\x16\xfa$|\r\f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x86\xa5\xf8%\x9e\u0570\x9e\x18\x8c\xe3F\xee\x92\xd3J\xa5\u0753\xfa\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x86\xb7\xbdV<\uad86\xf9bD\xf9\xdd\xc0*\u05f0\xb1K\u008a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x86\u008bVx\xaf7\xd7'\xec\x05\xe4Dw\x90\xf1_q\xf2\xea\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x86\xc4\xce\x06\u066c\x18[\xb1H\xd9o{z\xbes\xf4A\x00m\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\x86\xc8\xd0\u0642\xb59\xf4\x8f\x980\xf9\x89\x1f\x9d`z\x94&Y\x8a\x02\xce\xd3wa\x82O\xb0\x00\x00\u07d4\x86\xc94\xe3\x8eS\xbe;3\xf2t\xd0S\x9c\xfc\xa1Y\xa4\xd0\u04494\x95tD\xb8@\xe8\x00\x00\xe0\x94\x86\xca\x01E\x95~k\r\xfe6\x87_\xbez\r\xecU\xe1z(\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\x86\u02af\xac\xf3*\xa01|\x03*\xc3k\xab\xed\x97G\x91\xdc\x03\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\x86\u0377\xe5\x1a\xc4Gr\xbe6\x90\xf6\x1d\x0eYvn\x8b\xfc\x18\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x86\xdfs\xbd7\u007f,\t\xdec\xc4]g\xf2\x83\xea\xef\xa0\xf4\xab\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x86\xe3\xfe\x86\xe9=\xa4\x86\xb1Bf\xea\xdf\x05l\xbf\xa4\xd9\x14C\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x86\xe8g\x0e'Y\x8e\xa0\x9c8\x99\xabw\x11\u04f9\xfe\x90\x1c\x17\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x86\xefd&!\x19I\xcc7\xf4\xc7^xP6\x9d\f\xf5\xf4y\x8a\x02\xd6_2\xea\x04Z\xf6\x00\x00\u07d4\x86\xf0]\x19\x06>\x93i\xc6\x00N\xb3\xf1#\x94:|\xffN\xab\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\x86\xf2>\x9c\n\xaf\u01cb\x9c@M\xcd`3\x9a\x92[\xff\xa2f\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x86\xf4\xf4\n\u0644\xfb\xb8\t3\xaebn\x0eB\xf93?\xddA\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\x86\xf9\\[\x11\xa2\x93\x94\x0e5\xc0\xb8\x98\u0637_\b\xaa\xb0m\x8a\x06D\xe3\xe8u\xfc\xcft\x00\x00\u07d4\x86\xff\xf2 \xe5\x93\x05\xc0\x9fH8`\xd6\xf9N\x96\xfb\xe3/W\x89\x02S[j\xb4\xc0B\x00\x00\u07d4\x87\a\x96\xab\xc0\u06c4\xaf\x82\xdaR\xa0\xedhsM\xe7\xe66\xf5\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x87\x0f\x15\xe5\u07cb\x0e\xab\xd0%iSz\x8e\xf9;Vx\\B\x89\x15\b\x94\xe8I\xb3\x90\x00\x00\u07d4\x87\x181`\xd1r\xd2\xe0\x84\xd3'\xb8k\xcb|\x1d\x8eg\x84\xef\x89\xd8\xd8X?\xa2\xd5/\x00\x00\xe0\x94\x87\x1b\x8a\x8bQ\u07a1\x98\x9aY!\xf1>\xc1\xa9U\xa5\x15\xadG\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x87%\xe8\xc7S\xb3\xac\xbf\u0725_I\x13\\3\x91\x99\x10`)\n\xa7\xf6\u0338\xf8Zx\u06c9\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x87Pa\xee\x12\xe8 
\x04\x1a\x01\x94,\xb0\xe6[\xb4'\xb0\x00`\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\x87XJ?a;\xd4\xfa\xc7L\x1ex\v\x86\xd6\xca\xeb\x89\f\xb2\x89\\(=A\x03\x94\x10\x00\x00\u07d4\x87d\xd0'\"\x00\t\x96\xec\xd4u\xb43)\x8e\x9fT\v\x05\xbf\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x87l?!\x8bGv\xdf<\xa9\xdb\xfb'\r\xe1R\xd9N\xd2R\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x87u\xa6\x10\xc5\x02\xb9\xf1\xe6\xadL\xda\u06cc\xe2\x9b\xffu\xf6\xe4\x89 \x86\xac5\x10R`\x00\x00\u07d4\x87vN6w\xee\xf6\x04\xcb\u015a\xed$\xab\xdcVk\t\xfc%\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94\x87\x87\xd1&w\xa5\xec)\x1eW\xe3\x1f\xfb\xfa\xd1\x05\xc32K\x87\x8a\x02\xa2N\xb52\b\xf3\x12\x80\x00\u07d4\x87\x94\xbfG\xd5E@\xec\xe5\xc7\"7\xa1\xff\xb5\x11\u0777Gb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x87\xa5>\xa3\x9fY\xa3[\xad\xa85%!dU\x94\xa1\xa7\x14\u02c9g\x8a\x93 b\xe4\x18\x00\x00\u07d4\x87\xa7\xc5\b\xefqX-\u0665Cr\xf8\x9c\xb0\x1f%/\xb1\x80\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x87\xaf%\xd3\xf6\xf8\xee\xa1S\x13\xd5\xfeEW\xe8\x10\xc5$\xc0\x83\x8a\x04+\xf0kx\xed;P\x00\x00\u07d4\x87\xb1\x0f\x9c(\x00\x98\x17\x9a+v\xe9\u0390\xbea\xfc\x84M\r\x89Hz\x9a0E9D\x00\x00\u07d4\x87\xbf|\xd5\u0629)\xe1\u01c5\xf9\xe5D\x91\x06\xac#$c\u0249\x047\xb1\x1f\xccEd\x00\x00\u07d4\x87\u0118\x17\t4\xb8#=\x1a\xd1\xe7i1}\\G_/@\x897\b\xba\xed=h\x90\x00\x00\u07d4\x87\xcf6\xad\x03\xc9\xea\xe9\x05:\xbbRB\u0791\x17\xbb\x0f*\v\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\x87\u05ec\x06S\xcc\xc6z\xa9\xc3F\x9e\xefCR\x19?}\xbb\x86\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\xe0\x94\x87\xe3\x06+#!\xe9\u07f0\x87\\\u311c\x9b.5\"\xd5\n\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x87\xe6\x03N\xcf#\xf8\xb5c\x9d_\x0e\xa7\n\"S\x8a\x92\x04#\x89\x11\xc7\xea\x16.x \x00\x00\u07d4\x87\xefm\x8bj|\xbf\x9b\\\x8c\x97\xf6~\xe2\xad\u00a7;?w\x89\n\xdd\x1b\xd2<\x00L\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x88F\x92\x8dh2\x89\xa2\xd1\x1d\xf8\xdbz\x94t\x98\x8e\xf0\x13H\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x88I\x80\xebEe\xc1\x04\x83\x17\xa8\xf4\u007f\u06f4a\x96[\u4049\xd8\xd6\x11\x9a\x81F\x05\x00\x00\xe0\x94\x88Jz9\u0411n\x05\xf1\xc2B\xdfU`\u007f7\u07cc_\u068a\x04\xf4\x84<\x15|\x8c\xa0\x00\x00\u07d4\x88T\x93\xbd\xa3j\x042\x97eF\xc1\xdd\xceq\xc3\xf4W\x00!\x89\v\xbfQ\r\xdf\xcb&\x00\x00\xe0\x94\x88`\x9e\nF[n\x99\xfc\xe9\a\x16mW\xe9\xda\b\x14\xf5\u020a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x88m\n\x9e\x17\xc9\xc0\x95\xaf.\xa25\x8b\x89\xecpR\x12\ue509\x01\x84\x93\xfb\xa6N\xf0\x00\x00\u07d4\x88y~Xg^\xd5\xccL\x19\x98\a\x83\xdb\xd0\xc9V\bQS\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x88|\xacA\xcdpo3E\xf2\xd3J\xc3N\x01u*nY\t\x89 F\\\ue7617\x00\x00\u07d4\x88\x88\x8aW\xbd\x96\x87\xcb\xf9P\xae\xea\u03d7@\xdc\xc4\xd1\xefY\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u0794\x88\x89D\x83\x16\xcc\xf1N\xd8m\xf8\xe2\xf4x\xdcc\xc43\x83@\x88\xd2\xf1?w\x89\xf0\x00\x00\u07d4\x88\x8c\x16\x14I3\x19|\xac&PM\xd7n\x06\xfdf\x00\u01c9\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x88\x8e\x94\x91p\x83\xd1R 
+S\x1699\x86\x9d'\x11u\xb4\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x88\x90\x87\xf6o\xf2\x84\xf8\xb5\xef\xbd)I;pg3\xab\x14G\x8a\x02\x15\xf85\xbcv\x9d\xa8\x00\x00\u07d4\x88\x95\xebrb&\xed\xc3\xf7\x8c\u01a5\x15\a{2\x96\xfd\xb9^\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\x88\x97Z_\x1e\xf2R\x8c0\v\x83\xc0\xc6\a\xb8\xe8}\u0593\x15\x89\x04\x86\u02d7\x99\x19\x1e\x00\x00\u07d4\x88\x9d\xa4\x0f\xb1\xb6\x0f\x9e\xa9\xbdzE>XL\xf7\xb1\xb4\xd9\xf7\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x88\x9d\xa6b\xebJ\n*\x06\x9d+\xc2K\x05\xb4\xee.\x92\xc4\x1b\x89Z,\x8cTV\xc9\xf2\x80\x00\u07d4\x88\xa1\"\xa28,R91\xfbQ\xa0\u032d;\xeb[rY\u00c9lk\x93[\x8b\xbd@\x00\x00\u07d4\x88\xa2\x15D0\xc0\xe4\x11G\xd3\xc1\xfe\u3cf0\x06\xf8Q\xed\xbd\x8965f3\xeb\xd8\xea\x00\x00\u07d4\x88\xb2\x17\u0337\x86\xa2T\xcfM\xc5\u007f]\x9a\xc3\xc4U\xa3\x04\x83\x892$\xf4'#\xd4T\x00\x00\xe0\x94\x88\xbcC\x01.\xdb\x0e\xa9\xf0b\xacCxC%\n9\xb7\x8f\xbb\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x88\xc2Qj|\xdb\t\xa6'mr\x97\xd3\x0fZM\xb1\xe8K\x86\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x88\xc3ad\rki7;\b\x1c\xe0\xc43\xbdY\x02\x87\xd5\xec\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\x88\xd5A\xc8@\xceC\xce\xfb\xafm\x19\xafk\x98Y\xb5s\xc1E\x89\t79SM(h\x00\x00\u07d4\x88\xde\x13\xb0\x991\x87|\x91\rY1e\xc3d\u0221d\x1b\u04c9\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\x88\xde\u017d?N\xba-\x18\xb8\xaa\xce\xfa{r\x15H\xc3\x19\xba\x89JD\x91\xbdm\xcd(\x00\x00\u07d4\x88\xe6\xf9\xb2G\xf9\x88\xf6\xc0\xfc\x14\xc5o\x1d\xe5>\u019dC\u0309\x05k\xc7^-c\x10\x00\x00\u07d4\x88\xee\u007f\x0e\xfc\x8fw\x8ckh~\xc3+\xe9\xe7\xd6\xf0 \xb6t\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x88\xf1\x04_\x19\xf2\xd3\x19\x18\x16\xb1\xdf\x18\xbbn\x145\xad\x1b8\x89\r\x02\xabHl\xed\xc0\x00\x00\xe0\x94\x89\x00\x9e\a\xe3\xfahc\xa7x\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x89Wr~r\xcfb\x90 \xf4\xe0^\xdfy\x9a\xa7E\x80b\u0409wC\"\x17\xe6\x83`\x00\x00\u07d4\x89]iN\x88\v\x13\xcc\u0404\x8a\x86\xc5\xceA\x1f\x88Gk\xbf\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4\x89^\xc5TVD\u0dc30\xff\xfa\xb8\xdd\xea\xc9\xe83\x15l\x89 \x86\xac5\x10R`\x00\x00\u07d4\x89`\tRj,{\f\t\xa6\xf6:\x80\xbd\U0009d707\u079c\x89\xbb\xb8k\x82#\xed\xeb\x00\x00\u07d4\x89g\u05f9\xbd\xb7\xb4\xae\xd2.e\xa1]\xc8\x03\xcbz!?\x10\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x89n3\\\xa4z\xf5yb\xfa\x0fM\xbf>E\xe6\x88\u02e5\x84\x89J/\xc0\xab`R\x12\x00\x00\u07d4\x89s\xae\xfd^\xfa\xee\x96\t]\x9e(\x8fj\x04l\x977KC\x89\a\xa4\u0120\xf32\x14\x00\x00\u07d4\x89\x8cr\xddseX\xef\x9eK\xe9\xfd\xc3O\xefT\xd7\xfc~\b\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x89\x9b<$\x9f\fK\x81\xdfu\xd2\x12\x00M=m\x95/\xd2#\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x89\xab\x13\xee&mw\x9c5\xe8\xbb\x04\u034a\x90\xcc!\x03\xa9[\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4\x89\xc43\xd6\x01\xfa\xd7\x14\xdaci0\x8f\xd2l\x1d\u0254+\xbf\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x89\xd7[\x8e\b1\xe4o\x80\xbc\x17A\x88\x18N\x00o\xde\x0e\xae\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x89\u3d5a\x15\x86G7\u0513\xc1\xd2<\xc5=\xbf\x8d\xcb\x13b\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x89\xfc\x8eM8k\r\v\xb4\xa7\a\xed\xf3\xbdV\r\xf1\xad\x8fN\x89\xa00\xdc\xeb\xbd/L\x00\x00\u07d4\x89\xfe\xe3\r\x17(\xd9l\xec\xc1\u06b3\xda.w\x1a\xfb\u03eaA\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94\x8a\x1c\u016c\x11\x1cI\xbf\xcf\xd8H\xf3}\xd7h\xaae\u0208\x02\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x8a 
\xe5\xb5\xce\xe7\xcd\x1fU\x15\xba\xce;\xf4\xf7\u007f\xfd\xe5\xcc\a\x89\x04V9\x18$O@\x00\x00\xe0\x94\x8a!}\xb3\x8b\xc3_!_\xd9)\x06\xbeBCo\xe7\xe6\xed\x19\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x8a$:\n\x9f\xeaI\xb89TwE\xff-\x11\xaf?K\x05\"\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\x8a$}\x18e\x10\x80\x9fq\xcf\xfcEYG\x1c9\x10\x85\x81!\x89a\t=|,m8\x00\x00\u07d4\x8a4p(-^**\xef\u05e7P\x94\xc8\"\xc4\xf5\xae\uf289\r(\xbc`dx\xa5\x80\x00\u07d4\x8a6\x86\x9a\xd4x\x99|\xbfm\x89$\xd2\n<\x80\x18\xe9\x85[\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x8aC\x14\xfba\u0353\x8f\xc3>\x15\xe8\x16\xb1\x13\U000ac267\xfb\x89\x17vNz\xede\x10\x00\x00\u07d4\x8aOJ\u007fR\xa3U\xba\x10_\xca r\xd3\x06_\xc8\xf7\x94K\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x8aX1(,\xe1Jezs\r\xc1\x88&\xf7\xf9\xb9\x9d\xb9h\x89\uaf8a[A\xc16\x00\x00\u07d4\x8a_\xb7W\x93\xd0C\xf1\xbc\xd48\x85\xe07\xbd0\xa5(\xc9'\x89\x13Snm.\x9a\xc2\x00\x00\u07d4\x8af\xab\xbc-0\xce!\xa83\xb0\u06ceV\x1dQ\x05\xe0\xa7,\x89%\xf1\xde\\v\xac\xdf\x00\x00\u07d4\x8atl]g\x06G\x11\xbf\xcah[\x95\xa4\xfe)\x1a'\x02\x8e\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x8ax\n\xb8z\x91E\xfe\x10\xed`\xfaGjt\n\xf4\u02b1\u0489\x12\x1b.^ddx\x00\x00\u07d4\x8az\x06\xbe\x19\x9a:X\x01\x9d\x84j\xc9\xcb\xd4\xd9]\xd7W\u0789\xa2\xa4#\x94BV\xf4\x00\x00\u07d4\x8a\x81\x01\x14\xb2\x02]\xb9\xfb\xb5\x00\x99\xa6\xe0\u02de.\xfak\u0709g\x8a\x93 b\xe4\x18\x00\x00\u07d4\x8a\x86\xe4\xa5\x1c\x01;\x1f\xb4\xc7k\xcf0f|x\xd5.\xed\xef\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8a\x9e\u029cZ\xba\x8e\x13\x9f\x80\x03\xed\xf1\x16:\xfbp\xaa:\xa9\x89#\xc7W\a+\x8d\xd0\x00\x00\u07d4\x8a\xb89\xae\xaf*\xd3|\xb7\x8b\xac\xbb\xb63\xbc\xc5\xc0\x99\xdcF\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8a\u021b\u06780\x1ek\x06w\xfa%\xfc\xf0\xf5\x8f\f\u01f6\x11\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\x8a\xdcS\xef\x8c\x18\xed0Qx]\x88\xe9\x96\xf3\xe4\xb2\x0e\xcdQ\x8a\b\xe4\xd3\x16\x82v\x86@\x00\x00\u07d4\x8a\xe6\xf8\vp\xe1\xf2<\x91\xfb\u0569f\xb0\xe4\x99\xd9]\xf82\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\x8a\xe9\uf30a\x8a\u07e6\xaby\x8a\xb2\xcd\xc4\x05\b*\x1b\xbbp\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8a\xf6&\xa5\xf3'\xd7Pe\x89\xee\xb7\x01\x0f\xf9\xc9D` \u0489K\xe4\xe7&{j\xe0\x00\x00\xe0\x94\x8b\x01\xda4\xd4p\xc1\xd1\x15\xac\xf4\xd8\x11\xe1\x01\xdb\x1e\x14\xec\xc7\xd3\"\xc7+\x8c\x04s\x89\x18\xb2j1>\x8a\xe9\x00\x00\xe0\x94\x8bH\xe1\x9d9\xdd5\xb6nn\x1b\xb6\xb9\xc6W\xcb,\xf5\x9d\x04\x8a\x03\xc7U\xac\x9c\x02J\x01\x80\x00\xe0\x94\x8bP^(q\xf7\u07b7\xa68\x95 \x8e\x82'\u072a\x1b\xff\x05\x8a\f\xf6\x8e\xfc0\x8dy\xbc\x00\x00\u07d4\x8bW\xb2\xbc\x83\u030dM\xe31 N\x89?/;\x1d\xb1\a\x9a\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x8b\\\x91K\x12\x8b\xf1i\\\b\x89#\xfaF~y\x11\xf3Q\xfa\x89\x05V\xf6L\x1f\xe7\xfa\x00\x00\xe0\x94\x8b_)\xcc/\xaa&,\xde\xf3\x0e\xf5T\xf5\x0e\xb4\x88\x14n\xac\x8a\x01;hp\\\x97 \x81\x00\x00\u07d4\x8bpV\xf6\xab\xf3\xb1\x18\xd0&\xe9D\xd5\xc0sC<\xa4Q\u05c965\xc6 G9\u0640\x00\u07d4\x8bqE\"\xfa(9b\x04p\xed\xcf\fD\x01\xb7\x13f=\xf1\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94\x8bt\xa7\xcb\x1b\xb8\u014f\xce&tf\xa3\x03X\xad\xafR\u007fa\x8a\x02\xe2WxN%\xb4P\x00\x00\u07d4\x8b~\x9fo\x05\xf7\xe3dv\xa1n>q\x00\xc9\x03\x1c\xf4\x04\xaf\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x8b\x81\x15ni\x869\x94<\x01\xa7Rr\xad=5\x85\x1a\xb2\x82\x89\x12\xb3\x16_e\xd3\xe5\x00\x00\u07d4\x8b\x95w\x92\x00S\xb1\xa0\x01\x890M\x88\x80\x10\xd9\xef,\xb4\xbf\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x8b\x98A\x86.w\xfb\xbe\x91\x94p\x93U\x83\xa9<\xf0'\xe4P\x89llS4B\u007f\x1f\x00\x00\u07d4\x8b\x99}\xbc\a\x8a\xd0)a5]\xa0\xa1Y\xf2\x92~\xd4=d\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94\x8b\x9f\xda}\x98\x1f\xe9\xd6B\x87\xf8\\\x94\xd8?\x90t\x84\x9f\u030a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4\x8b\xb0!/2\x95\xe0)\u02b1\xd9a\xb0A3\xa1\x80\x9e{\x91\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8b\xbe\xac\xfc)\xcf\xe94\x02\xdb\xd6j\x1a\xcbvv\x85c7\xb9;\xf0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8b\xf3s\xd0v\x81L\xbcW\xe1\xc6\xd1j\x82\u017e\x13\xc7=7\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x8c\x10#\xfd\xe1WM\xb8\xbbT\xf1s\x96p\x15|\xa4}\xa6R\x8a\x01y\u03da\u00e1\xb1w\x00\x00\u07d4\x8c\x1f\xbe_\n\xea5\x9cZ\xa1\xfa\b\u0209T\x12\u028e\x05\xa6\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x8c\"B`U\xb7o\x11\xf0\xa2\xde\x1a\u007f\x81\x9aa\x96\x85\xfe`\x89kV\x05\x15\x82\xa9p\x00\x00\u07d4\x8c+}\x8b`\x8d(\xb7\u007f\\\xaa\x9c\xd6E$*\x82>L\u0649b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\x8c/\xbe\ue3ac\xc5\xc5\xd7|\x16\xab\xd4b\ue701E\xf3K\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\x8c:\x9e\xe7\x1fr\x9f#l\xba8g\xb4\u05dd\x8c\xee\xe2]\xbc\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x8cP\xaa*\x92\x12\xbc\xdeVA\x8a\xe2a\xf0\xb3^z\x9d\xbb\x82\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x8cT\xc7\xf8\xb9\x89nu\xd7\xd5\xf5\xc7`%\x86\x99\x95qB\xad\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x8c]\x16\xede\xe3\xed~\x8b\x96\u0297+\xc8as\xe3P\v\x03\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8cj\xa8\x82\xee2,\xa8HW\x8c\x06\xcb\x0f\xa9\x11\xd3`\x83\x05\x89 \x86\xac5\x10R`\x00\x00\xe0\x94\x8cj\xe7\xa0Z\x1d\xe5u\x82\xae'h Bv\xc0\xffG\xed\x03\x8a,\v\xb3\xdd0\xc4\xe2\x00\x00\x00\u07d4\x8co\x9fN[z\xe2v\xbfXI{\u05ff*}%$_d\x89\x93\xfe\\W\xd7\x10h\x00\x00\u07d4\x8cu\x95n\x8f\xedP\xf5\xa7\xdd|\xfd'\xda \x0fgF\xae\xa6\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x8c|\xb4\xe4\x8b%\x03\x1a\xa1\xc4\xf9)%\xd61\xa8\xc3\xed\xc7a\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x8c\u007f\xa5\xca\xe8/\xed\xb6\x9a\xb1\x89\xd3\xff'\xae \x92\x93\xfb\x93\x89\x15\xaf\x88\r\x8c\u06c3\x00\x00\xe0\x94\x8c\x81A\x0e\xa85L\xc5\xc6\\A\xbe\x8b\xd5\xdes<\v\x11\x1d\x8a\x02\x05\xb4\u07e1\xeetx\x00\x00\u07d4\x8c\x83\xd4$\xa3\xcf$\xd5\x1f\x01\x92=\xd5J\x18\u05b6\xfe\xde{\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8c\x90\n\x826\xb0\x8c+e@]9\xd7_ \x06*ua\xfd\x89X\xe7\x92n\xe8X\xa0\x00\x00\u07d4\x8c\x93\xc3\xc6\u06dd7q}\xe1e\u00e1\xb4\xfeQ\x95,\b\u0789\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x8c\x99\x95\x91\xfdr\xefq\x11\xef\xcaz\x9e\x97\xa25k;\x00\n\x89\xddd\xe2\xaa\ngP\x00\x00\u07d4\x8c\xa6\x98\x97F\xb0n2\xe2Hta\xb1\u0399j':\xcf\u05c9\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x8c\xb3\xaa?\xcd!(T\xd7W\x8f\xcc0\xfd\xed\xe6t*1*\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x8c\xc0\xd7\xc0\x16\xfaz\xa9P\x11J\xa1\xdb\tH\x82\xed\xa2t\xea\x89\b\xa9\xab\xa5W\xe3l\x00\x00\u07d4\x8c\xc6R\xdd\x13\xe7\xfe\x14\u06bb\xb3m]2\r\xb9\xff\xee\x8aT\x89a\t=|,m8\x00\x00\u07d4\x8c\u02bf%\a\u007f:\xa4\x15E4MS\xbe\x1b+\x9c3\x90\x00\x89[\xe8f\xc5b\xc5D\x00\x00\u07d4\x8c\xcf:\xa2\x1a\xb7BWj\xd8\xc4\"\xf7\x1b\xb1\x88Y\x1d\ua28965\u026d\xc5\u07a0\x00\x00\u07d4\x8c\xd0\xcd\"\xe6 
\xed\xa7\x9c\x04a\xe8\x96\xc9\xd1b)\x12K_z\xfb\xec\x89\a?u\u0460\x85\xba\x00\x00\u07d4\x8c\xe2/\x9f\xa3rD\x9aB\x06\x10\xb4z\xe0\xc8\xd5eH\x122\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x8c\u451d\x8a\x16T-B<\x17\x98Ng9\xfar\u03b1w\x8a\x05K@Y&\xf4\xa6=\x80\x00\u07d4\x8c\xe5\xe3\xb5\xf5\x91\xd5\uc8ca\xbf\"\x8f.<5\x13K\xda\xc0\x89}\xc3[\x84\x89|8\x00\x00\xe0\x94\x8c\xee8\xd6YW\x88\xa5n?\xb9F4\xb3\xff\xe1\xfb\xdb&\u058a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x8c\xee\xa1^\xec;\xda\xd8\x02?\x98\xec\xf2[+\x8f\xef'\xdb)\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8c\xf3To\xd1\u0363=X\x84_\xc8\xfc\xfe\u02bc\xa7\xc5d*\x89\x1f\x1e9\x93,\xb3'\x80\x00\u07d4\x8c\xf6\xda\x02\x04\xdb\u0106\vF\xad\x97?\xc1\x11\x00\x8d\x9e\fF\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x8c\xfe\xde\xf1\x98\xdb\n\x91C\xf0\x91)\xb3\xfdd\u073b\x9bIV\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8d\x04\xa5\xeb\xfb]\xb4\t\xdb\x06\x17\xc9\xfaV1\xc1\x92\x86\x1fJ\x894\x95tD\xb8@\xe8\x00\x00\u07d4\x8d\x06\xe4d$\\\xadaI9\xe0\xaf\bE\xe6\xd70\xe2\x03t\x89\n\u070a(\xf3\xd8}\x80\x00\u07d4\x8d\a\xd4-\x83\x1c-|\x83\x8a\xa1\x87+:\xd5\xd2w\x17h#\x89\x12\xee\x1f\x9d\xdb\xeeh\x00\x00\u07d4\x8d\v\x9e\xa5?\xd2cA^\xac\x119\x1f|\xe9\x12V\xb9\xfb\x06`\xf6\xf0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8dy\\_JV\x89\xadb\u0696\x16q\xf0(\x06R\x86\xd5T\x89o\x05\xb5\x9d; \x00\x00\x00\u07d4\x8d\u007f>a)\x9c-\xb9\xb9\xc0H|\xf6'Q\x9e\xd0\n\x91#\x89^t\xa8P^\x80\xa0\x00\x00\xe0\x94\x8d\x89\x17\v\x92\xb2\xbe,\b\xd5|H\xa7\xb1\x90\xa2\xf1Fr\x0f\x8a\x04+\xf0kx\xed;P\x00\x00\u07d4\x8d\x93\xda\u01c5\xf8\x8f\x1a\x84\xbf\x92}Se+E\xa1T\xcc\u0749\b\x90\xb0\xc2\xe1O\xb8\x00\x00\u07d4\x8d\x99R\u043bN\xbf\xa0\xef\xd0\x1a:\xa9\xe8\xe8\u007f\x05%t.\x89\xbb\x91%T\"c\x90\x00\x00\u07d4\x8d\x9a\fp\xd2& B\xdf\x10\x17\xd6\xc3\x03\x13 $w'\x12\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8d\x9e\xd7\xf4U0X\xc2ox6\xa3\x80-0d\xeb\x1b6=\x89\x04\xe1\x00;(\xd9(\x00\x00\u07d4\x8d\xa1\x17\x8fU\xd9wr\xbb\x1d$\x11\x1a@JO\x87\x15\xb9]\x89/\x9a\xc3\xf6\xde\x00\x80\x80\x00\u07d4\x8d\xa1\xd3Y\xbal\xb4\xbc\xc5}zCw \xd5]\xb2\xf0\x1cr\x89\x04V9\x18$O@\x00\x00\u07d4\x8d\xab\x94\x8a\xe8\x1d\xa3\x01\xd9r\xe3\xf6\x17\xa9\x12\xe5\xa7Sq.\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x8d\xad\xdfR\xef\xbdt\u0695\xb9i\xa5GoO\xbb\xb5c\xbf\u0489-C\xf3\xeb\xfa\xfb,\x00\x00\u07d4\x8d\xb1\x85\xfe\x1bp\xa9Jj\b\x0e~#\xa8\xbe\xdcJ\xcb\xf3K\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\x8d\xb5\x8e@n -\xf9\xbcpl\xb43\xe1\x94\xf4\x0f\x82\xb4\x0f\xaa\xdb\x1f\x8b\x85a\x16\x89g\x8a\x93 
b\xe4\x18\x00\x00\u07d4\x8d\xc1\xd5\x11\x1d\t\xaf%\xfd\xfc\xacE\\|\xec(>mgu\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8d\u0504\xff\x8a0sd\xebf\xc5%\xa5q\xaa\xc7\x01\xc5\xc3\x18\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8d\u05a9\xba\xe5\u007fQ\x85I\xad\xa6wFo\ua2b0O\u0674\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8d\xde<\xb8\x11\x85h\xefE\x03\xfe\x99\x8c\xcd\xf56\xbf\x19\xa0\x98\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8d\xde`\xeb\b\xa0\x99\xd7\u06a3V\u06aa\xb2G\r{\x02Zk\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94\x8d\xf39!Kj\u0472Fc\xceq`4t\x9dn\xf88\u064a\x02TO\xaaw\x80\x90\xe0\x00\x00\xe0\x94\x8d\xf5=\x96\x19\x14q\xe0Y\xdeQ\xc7\x18\xb9\x83\xe4\xa5\x1d*\xfd\x8a\x06\u01b95\xb8\xbb\xd4\x00\x00\x00\u07d4\x8d\xfb\xaf\xbc\x0e[\\\x86\xcd\x1a\u0597\xfe\xea\x04\xf41\x88\u0796\x89\x15%+\u007f_\xa0\xde\x00\x00\u07d4\x8e\a;\xad%\xe4\"\x18a_J\x0ek.\xa8\xf8\xde\"0\xc0\x89\x82=b\x9d\x02k\xfa\x00\x00\u07d4\x8e\x0f\xee8hZ\x94\xaa\xbc\xd7\u0385{k\x14\t\x82Ou\xb8\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x8e#\xfa\xcd\x12\xc7e\xc3j\xb8\x1am\xd3M\x8a\xa9\xe6\x89\x18\xae\x89\t\x11\u418d\xba\x9b\x00\x00\xe0\x94\x8e/\x904\xc9%G\x19\u00ceP\u026ad0^\u0596\xdf\x1e\x8a\x01\x00N.E\xfb~\xe0\x00\x00\u07d4\x8e2@\xb0\x81\x0e\x1c\xf4\a\xa5\x00\x80G@\u03cdad2\xa4\x89\x02/fU\xef\v8\x80\x00\u07d4\x8eHj\x04B\xd1q\xc8`[\xe3H\xfe\xe5~\xb5\b^\xff\r\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8eaV3k\xe2\u037e2\x14\r\xf0\x8a+\xa5_\u0425\x84c\x89\x04\t\x9e\x1dcW\x18\x00\x00\u07d4\x8eg\b\x15\xfbg\xae\xae\xa5{\x86SN\xdc\x00\xcd\xf5d\xfe\u5272\xe4\xb3#\xd9\xc5\x10\x00\x00\u07d4\x8emt\x85\xcb\u942c\xc1\xad\x0e\xe9\xe8\xcc\xf3\x9c\f\x93D\x0e\x893\xc5I\x901r\f\x00\x00\xe0\x94\x8et\xe0\u0477~\xbc\x82:\xca\x03\xf1\x19\x85L\xb1 '\xf6\u05ca\x16\xb3R\xda^\x0e\xd3\x00\x00\x00\u07d4\x8ex\xf3QE}\x01oJ\xd2u^\xc7BN\\!\xbamQ\x89\a\xea(2uw\b\x00\x00\u07d4\x8ey6\u0552\x00\x8f\xdcz\xa0N\xde\xebuZ\xb5\x13\u06f8\x9d\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x8e\u007f\xd28H\xf4\xdb\a\x90j}\x10\xc0K!\x80;\xb0\x82'\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x8e\x92\xab\xa3\x8er\xa0\x98\x17\v\x92\x95\x92FSz.UV\xc0\x89\x0e~\xeb\xa3A\vt\x00\x00\u07d4\x8e\x98ve$\xb0\xcf'G\xc5\r\xd4;\x95gYM\x971\u0789lD\xb7\xc2a\x82(\x00\x00\u07d4\x8e\x9b5\xadJ\n\x86\xf7XDo\xff\xde4&\x9d\x94\f\xea\u0349\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8e\x9c\b\xf78f\x1f\x96v#n\xff\x82\xbaba\xdd?H\"\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x8e\x9cB\x92f\xdf\x05~\xfax\xdd\x1d_w\xfc@t*\xd4f\x89\x10D.\u0475l|\x80\x00\u07d4\x8e\xa6V\xe7\x1e\xc6Q\xbf\xa1|ZWY\xd8`1\xcc5\x99w\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x8e\xae)CU\x98\xba\x8f\x1c\x93B\x8c\xdb>+M1\a\x8e\x00\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8e\xb1\xfb\xe4\xe5\xd3\x01\x9c\xd7\xd3\r\xae\x9c\r[Lv\xfbc1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8e\xb5\x17t\xaf k\x96k\x89\t\xc4Z\xa6r'H\x80,\f\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x8e\xb8\xc7\x19\x82\xa0\x0f\xb8Bu)2S\xf8\x04ED\xb6kI\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\x8e\xcb\u03ec\xbf\xaf\xe9\xf0\f9\"\xa2N,\xf0\x02gV\xca \x8a\x011\xbe\xb9%\xff\xd3 
\x00\x00\u07d4\x8e\u03b2\xe1$Sl[_\xfcd\x0e\xd1O\xf1^\u0668\xcbq\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8e\u042f\x11\xff(p\xda\x06\x81\x00J\xfe\x18\xb0\x13\xf7\xbd8\x82\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u0794\x8e\xd1Cp\x1f/r(\x0f\xd0J{Ad(\x19y\xea\x87\u0248\xc2I\xfd\xd3'x\x00\x00\u07d4\x8e\xd1R\x8bD~\xd4)y\x02\xf69\xc5\x14\u0414J\x88\xf8\u0209\n\xc6\xe7z\xb6c\xa8\x00\x00\u07d4\x8e\xd4(L\x0fGD\x9c\x15\xb8\u0673$]\u8fb6\u0380\xbf\x89+^:\xf1k\x18\x80\x00\x00\xe0\x94\x8e\xde~=\xc5\aI\xc6\xc5\x0e.(\x16\x84x\xc3M\xb8\x19F\x8a\x04<0\xfb\b\x84\xa9l\x00\x00\u07d4\x8e\xe5\x843}\xdb\xc8\x0f\x9e4\x98\xdfU\xf0\xa2\x1e\xac\xb5\u007f\xb1\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x8e\xeb\xec\x1ab\xc0\x8b\x05\xa7\xd1\u0551\x80\xaf\x9f\xf0\u044e?6\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x8e\xf4\u0622\xc2o\xf7\x14\xb6u\x89\x19\x80\x1c\x83\xb6\xc7\xc0\x00\x00\u07d4\x8fM\x1dAi>F,\xf9\x82\xfd\x81\u042ap\x1d:St\u0249\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8fM\x1e~Ea(J4\xfe\xf9g<\r4\xe1*\xf4\xaa\x03\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8fO\xb1\xae\xa7\xcd\x0fW\x0e\xa5\xe6\x1b@\xa4\xf4Q\vbd\xe4\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x8fV\x1bA\xb2\t\xf2H\u0229\x9f\x85\x87\x887bP`\x9c\xf3\x89\\(=A\x03\x94\x10\x00\x00\xe0\x94\x8fX\xd84\x8f\xc1\xdcN\r\xd84;eC\xc8W\x04^\xe9@\x8a\x02\xe3\x03\x8d\xf4s\x03(\x00\x00\u07d4\x8f`\x89_\xbe\xbb\xb5\x01\u007f\xcb\xff<\u0763\x97)+\xf2[\xa6\x89\x17D\x06\xff\x9fo\u0480\x00\u07d4\x8fd\xb9\xc1$m\x85x1d1\a\xd3U\xb5\xc7_\xef]O\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94\x8ff\x0f\x8b.L|\u00b4\xac\x9cG\xed(P\x8d_\x8f\x86P\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x8fi\xea\xfd\x023\xca\xdb@Y\xabw\x9cF\xed\xf2\xa0PnH\x89`\xf0f \xa8IE\x00\x00\xe0\x94\x8fq~\xc1U/LD\x00\x84\xfb\xa1\x15J\x81\xdc\x00>\xbd\xc0\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\x8f\x8a\xcb\x10v\a8\x84y\xf6K\xaa\xab\xea\x8f\xf0\a\xad\xa9}\x8a\x05\xc6\xf3\b\n\xd4#\xf4\x00\x00\u07d4\x8f\x8c\xd2n\x82\xe7\xc6\xde\xfd\x02\u07ed\a\x97\x90!\xcb\xf7\x15\f\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\x8f\x8f7\u042d\x8f3]*q\x01\xb4\x11V\xb6\x88\xa8\x1a\x9c\xbe\x89\x03\xcbq\xf5\x1f\xc5X\x00\x00\u07d4\x8f\x92\x84O(*\x92\x99\x9e\u5d28\xd7s\xd0kiM\xbd\x9f\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\x8f\xact\x8fxJ\x0f\xedh\u06e43\x19\xb4*u\xb4d\x9cn\x891T\xc9r\x9d\x05x\x00\x00\u07d4\x8f\u0665\xc3:}\x9e\xdc\xe0\x99{\xdfw\xab0d$\xa1\x1e\xa9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x8f\xef\xfa\xdb8z\x15G\xfb(M\xa9\xb8\x14\u007f>|m\xc6\u0689-b{\xe4S\x05\b\x00\x00\u07d4\x8f\xf4`Ehw#\xdc3\xe4\u0419\xa0i\x04\xf1\ubd44\u0709lk\x93[\x8b\xbd@\x00\x00\u07d4\x8f\xfa\x06!\"\xac0t\x18\x82\x1a\u06d3\x11\aZ7\x03\xbf\xa3\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x8f\xfe2)\x97\xb8\xe4\x04B-\x19\xc5J\xad\xb1\x8f[\xc8\u9dc9\u0556{\xe4\xfc?\x10\x00\x00\u07d4\x90\x01\x94\u0131\aC\x05\u045d\xe4\x05\xb0\xacx(\x0e\xca\xf9g\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x90\x03\xd2p\x89\x1b\xa2\xdfd=\xa84\x15\x83\x195E\xe3\xe0\x00\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x90\x05z\xf9\xaaf0~\xc9\xf03\xb2\x97$\u04f2\xf4\x1e\xb6\xf9\x8a\x19\xd1\u05aa\xdb,R\xe8\x00\x00\u07d4\x90\x0f\v\x8e5\xb6h\xf8\x1e\xf2R\xb18U\xaaP\a\xd0\x12\xe7\x89\x17\n\x0fP@\xe5\x04\x00\x00\u07d4\x90\x18\xcc\x1fH\xd20\x8e%*\xb6\b\x9f\xb9\x9a|\x1dV\x94\x10\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x90\x1d\x99\xb6\x99\xe5\u0191\x15\x19\xcb 
v\xb4\xc7c0\xc5M\"\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x90-t\xa1W\xf7\u04b9\xa37\x8b\x1fVp70\xe0:\x17\x19\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x904\x13\x87\x8a\xea;\xc1\bc\t\xa3\xfev\x8beU\x9e\x8c\xab\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x90If\xcc\"\x13\xb5\xb8\xcb[\xd6\b\x9e\xf9\xcd\xdb\xef~\xdf\u0309lk\x93[\x8b\xbd@\x00\x00\u07d4\x90L\xaaB\x9ca\x9d\x94\x0f\x8egA\x82j\r\xb6\x92\xb1\x97(\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x90R\xf2\xe4\xa3\xe3\xc1-\xd1\xc7\x1b\xf7\x8aN\xc3\x04=\u020b~\x89\x0e~\xeb\xa3A\vt\x00\x00\u0794\x90U&V\x8a\xc1#\xaf\xc0\xe8J\xa7\x15\x12O\xeb\xe8=\xc8|\x88\xf8i\x93)g~\x00\x00\u07d4\x90\x92\x91\x87\a\xc6!\xfd\xbd\x1d\x90\xfb\x80\xebx\u007f\xd2osP\x89\x85[[\xa6\\\x84\xf0\x00\x00\u07d4\x90\x9b^v:9\xdc\u01d5\"=s\xa1\u06f7\xd9L\xa7Z\u0209lk\x93[\x8b\xbd@\x00\x00\u07d4\x90\xac\xce\xd7\xe4\x8c\b\u01b94dm\xfa\n\xdf)\u0714\aO\x89\x03\vK\x15{\xbdI\x00\x00\u07d4\x90\xb1\xf3p\xf9\xc1\xeb\v\xe0\xfb\x8e+\x8a\xd9jAcq\u074a\x890\xca\x02O\x98{\x90\x00\x00\u07d4\x90\xb6/\x13\x1a_)\xb4UqQ>\xe7\xa7J\x8f\v#\"\x02\x89\b\x90\xb0\xc2\xe1O\xb8\x00\x00\u07d4\x90\xbdb\xa0P\x84Ra\xfaJ\x9f|\xf2A\xeac\v\x05\ufe09\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x90\xc4\x1e\xba\x00\x8e \xcb\xe9'\xf3F`?\u0206\x98\x12Yi\x89\x02F\xdd\xf9yvh\x00\x00\u07d4\x90\u0480\x9a\xe1\xd1\xff\xd8\xf6>\xda\x01\xdeI\xddU-\xf3\u047c\x89\u063beI\xb0+\xb8\x00\x00\u07d4\x90\xdc\t\xf7\x17\xfc*[i\xfd`\xba\b\xeb\xf4\v\xf4\xe8$l\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4\x90\xe3\x00\xacqE\x1e@\x1f\x88\u007fnw(\x85\x16G\xa8\x0e\a\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x90\xe3Z\xab\xb2\xde\xef@\x8b\xb9\xb5\xac\xefqDW\xdf\xdebr\x89\x05l\xd5_\xc6M\xfe\x00\x00\u07d4\x90\xe7\a\x0fM\x03?\xe6\x91\f\x9e\xfeZ'\x8e\x1f\xc6#M\xef\x89\x05q8\b\x19\xb3\x04\x00\x00\u07d4\x90\xe9>M\xc1q!HyR36\x14\x00+\xe4#VI\x8e\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4\x90\u9a68.\u06a8\x14\u0084\xd22\xb6\u9e90p\x1dIR\x89\x05k\xe0<\xa3\xe4}\x80\x00\u07d4\x90\xf7t\xc9\x14}\u0790\x85=\xdcC\xf0\x8f\x16\xd4U\x17\x8b\x8c\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x90\xfcS{!\x06Xf\n\x83\xba\xa9\xacJ\x84\x02\xf6WF\xa8\x89e\xea=\xb7UF`\x00\x00\u07d4\x91\x05\n\\\xff\xad\xed\xb4\xbbn\xaa\xfb\xc9\xe5\x014(\xe9l\x80\x89\\(=A\x03\x94\x10\x00\x00\u07d4\x91\x05\x17d\xafk\x80\x8eB\x12\xc7~0\xa5W.\xaa1pp\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\x91\v}Wz~9\xaa#\xac\xf6*\xd7\xf1\xef4)4\xb9h\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x91\x0e\x99eC4Lh\x15\xfb\x97\u0367\xafK\x86\x98vZ[\x89\x05\x9a\xf6\x98)\xcfd\x00\x00\u07d4\x91\x1f\xee\xa6\x1f\xe0\xedP\u0179\xe5\xa0\xd6`q9\x9d(\xbd\u0189\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\x91\x1f\xf23\xe1\xa2\x11\xc0\x17,\x92\xb4l\xf9\x97\x03\x05\x82\xc8:\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x91 \xe7\x11s\xe1\xba\x19\xba\x8f\x9fO\xdb\u072a4\xe1\u05bbx\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x91!\x17\x12q\x9f+\bM;8u\xa8Pi\xf4f61A\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x91#\x04\x11\x8b\x80G=\x9e\x9f\xe3\xeeE\x8f\xbea\x0f\xfd\xa2\xbb\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x91Tky\xec\xf6\x9f\x93kZV\x15\b\xb0\xd7\xe5\f\u0159/\x89\x0e~\xeb\xa3A\vt\x00\x00\u07d4\x91V\u0440)5\x0eG\x04\b\xf1_\x1a\xa3\xbe\x9f\x04\ng\u018965\u026d\xc5\u07a0\x00\x00\u07d4\x91b\x0f>\xb3\x04\xe8\x13\u048b\x02\x97Ume\xdcN]\u5a89\xcf\x15&@\xc5\xc80\x00\x00\xe0\x94\x91k\xf7\xe3\xc5E\x92\x1d2\x06\xd9\x00\xc2O\x14\x12|\xbd^p\x8a\x03\xd0\u077c}\xf2\xbb\x10\x00\x00\u0794\x91l\xf1}qA(\x05\xf4\xaf\xc3DJ\v\x8d\xd1\xd93\x9d\x16\x88\xc6s\xce<@\x16\x00\x00\u07d4\x91{\x8f\x9f:\x8d\t\xe9 
,R\u009erA\x96\xb8\x97\xd3^\x89\b\xbaR\xe6\xfcE\xe4\x00\x00\u07d4\x91\x89g\x91\x8c\u0617\xdd\x00\x05\xe3m\xc6\u0203\xefC\x8f\xc8\u01c9\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4\x91\x89\x8e\xab\x8c\x05\xc0\"(\x83\xcdM\xb2;w\x95\xe1\xa2J\u05c9lk\x93[\x8b\xbd@\x00\x00\u0794\x91\x91\xf9F\x98!\x05\x16\xcfc!\xa1B\a\x0e Yvt\xed\x88\xee\x9d[\xe6\xfc\x11\x00\x00\u07d4\x91\xa4\x14\x9a,{\x1b:g\xea(\xaf\xf3G%\u0fcdu$\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\x91\xa7\x87\xbcQ\x96\xf3HW\xfe\f7/M\xf3v\xaa\xa7f\x13\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x91\xa8\xba\xae\xd0\x12\xea.c\x80;Y=\r\f*\xabL[\n\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4\x91\xac\\\xfeg\xc5J\xa7\xeb\xfb\xa4HflF\x1a;\x1f\xe2\xe1\x89\x15\xc94\x92\xbf\x9d\xfc\x00\x00\u07d4\x91\xbb?y\x02+\xf3\xc4S\xf4\xff%n&\x9b\x15\xcf,\x9c\xbd\x89RX\\\x13\xfe:\\\x00\x00\u07d4\x91\xc7^<\xb4\xaa\x89\xf3F\x19\xa1d\xe2\xa4x\x98\xf5gM\x9c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x91\xc8\f\xaa\b\x1b85\x1d*\x0e\x0e\x00\xf8\n4\xe5dt\xc1\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x91\xccF\xaa7\x9f\x85jf@\xdc\xcdZd\x8ay\x02\xf8I\u0649\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x91\u04a9\xee\x1am\xb2\x0fS\x17\u0327\xfb\xe218\x95\u06ce\xf8\x8a\x01\xcc\u00e5/0n(\x00\x00\u07d4\x91\xd6n\xa6(\x8f\xaaK=`l*\xa4\\{k\x8a%'9\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x91\u06f6\xaa\xad\x14\x95\x85\xbeG7\\]m\xe5\xff\t\x19\x15\x18\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x91\xe8\x81\x06R\xe8\xe6\x16\x15%\xd6;\xb7u\x1d\xc2\x0fg`v\x89'Mej\xc9\x0e4\x00\x00\u07d4\x91\xf5\x16\x14l\xda (\x17\x19\x97\x80`\u01beAI\x06|\x88\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x91\xf6$\xb2J\x1f\xa5\xa0V\xfeW\x12)\xe77\x9d\xb1K\x9a\x1e\x8a\x02\x8a\x85\x17\xc6i\xb3W\x00\x00\xe0\x94\x91\xfe\x8aLad\u07cf\xa6\x06\x99]k\xa7\xad\xca\xf1\u0213\u038a\x03\x99\x92d\x8a#\u0220\x00\x00\u07d4\x92\x1fRa\xf4\xf6\x12v\a\x06\x89&%\xc7^{\u0396\xb7\b\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92!\xc9\xce\x01#&et\x10\x96\xac\a#Y\x03\xad\x1f\xe2\xfc\x89\x06\xdbc3U\"b\x80\x00\u07d4\x92%\x988`\xa1\xcbF#\xc7$\x80\xac\x16'+\f\x95\xe5\xf5\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x92%\xd4jZ\x80\x949$\xa3\x9e[\x84\xb9m\xa0\xacE\x05\x81\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\x92* \u01da\x1d:&\xdd8)g{\xf1\xd4\\\x8fg+\xb6\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x92C\x8eR\x03\xb64o\xf8\x86\xd7\xc3b\x88\xaa\xcc\xccx\xce\u028965\u026d\xc5\u07a0\x00\x00\u07d4\x92C\xd7v-w({\x12c\x86\x88\xb9\x85N\x88\xa7i\xb2q\x8965\u026d\xc5\u07a0\x00\x00\u0794\x92K\xcez\x85<\x97\v\xb5\xec{\xb7Y\xba\xeb\x9ct\x10\x85{\x88\xbe -j\x0e\xda\x00\x00\u07d4\x92N\xfam\xb5\x95\xb7\x93\x13'~\x881\x96%\akX\n\x10\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92U\x82&\xb3\x84bl\xadH\xe0\x9d\x96k\xf19^\xe7\xea]\x89\x12\x1e\xa6\x8c\x11NQ\x00\x00\u07d4\x92`\x82\xcb~\xedK\x19\x93\xad$ZGrg\xe1\xc3<\xd5h\x89\x14Jt\xba\u07e4\xb6\x00\x00\u07d4\x92b\t\xb7\xfd\xa5N\x8d\u06dd\x9eM=\x19\xeb\u070e\x88\u009f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92h\xd6&FV6\x11\xdc;\x83*0\xaa#\x94\xc6F\x13\xe3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92i\x8e4Sx\xc6-\x8e\xda\x18M\x946j\x14K\f\x10[\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\x92y:\u0173rhwJq0\xde+\xbd3\x04\x05f\x17s\x89\x02,\xa3X|\xf4\xeb\x00\x00\xe0\x94\x92y\xb2\"\x8c\xec\x8f{M\xda?2\x0e\x9a\x04f\xc2\xf5\x85\u028a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4\x92|\xb7\xdc\x18p6\xb5B{\xc7\xe2\x00\xc5\xecE\f\x1d'\u0509\v\xb5\x9a'\x95<`\x00\x00\u07d4\x92|\u00bf\xda\x0e\b\x8d\x02\xef\xf7\v8\xb0\x8a\xa5<\xc3\tA\x89do`\xa1\xf9\x866\x00\x00\xe0\x94\x92\x84\xf9m\xdbG\xb5\x18n\xe5X\xaa12M\xf56\x1c\x0fs\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4\x92\x9d6\x8e\xb4j-\x1f\xbd\xc8\xff\xa0`~\xdeK\xa8\x8fY\xad\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92\xa7\u0166Cb\xe9\xf8B\xa2=\xec\xa2\x105\x85\u007f\x88\x98\x00\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\x92\xa8\x98\xd4o\x19q\x9c8\x12j\x8a<'\x86z\xe2\xce\u5589lk\x93[\x8b\xbd@\x00\x00\u07d4\x92\xa9q\xa79y\x9f\x8c\xb4\x8e\xa8G]r\xb2\xd2GAr\xe6\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\x92\xaa\xe5\x97h\xed\xdf\xf8<\xfe`\xbbQ.s\n\x05\xa1a\u05c9\\\x97xA\fv\u0440\x00\u07d4\x92\xad\x1b=u\xfb\xa6}Tf=\xa9\xfc\x84\x8a\x8a\xde\x10\xfag\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92\xae[|~\xb4\x92\xff\x1f\xfa\x16\xddB\xad\x9c\xad@\xb7\xf8\u0709.\xe4IU\b\x98\xe4\x00\x00\u07d4\x92\xc0\xf5s\xec\xcfb\xc5H\x10\xeek\xa8\xd1\xf1\x13T+0\x1b\x89\xb7ro\x16\u0331\xe0\x00\x00\u07d4\x92\xc1?\xe0\xd6\u0387\xfdP\xe0=\uf7e6@\x05\t\xbdps\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x92\xc9L( \xdf\xcfqV\xe6\xf10\x88\xec\u754b6v\xfd\x89\x05-T(\x04\xf1\xce\x00\x00\u07d4\x92\xcf\xd6\x01\x88\xef\u07f2\xf8\xc2\xe7\xb1i\x8a\xbb\x95&\xc1Q\x1f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x92\u062d\x9aMah;\x80\u0526g.\x84\xc2\rbB\x1e\x80\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x92\u0725\xe1\x02\xb3\xb8\x1b`\xf1\xa5\x04cIG\xc3t\xa8\x8c\u02c9lk\x93[\x8b\xbd@\x00\x00\u07d4\x92\xe454\x0e\x9d%<\x00%c\x89\xf5+\x06}U\x97Nv\x89\x0e\x87?D\x13<\xb0\x00\x00\xe0\x94\x92\xe49(\x16\xe5\xf2\xef_\xb6X7\xce\xc2\xc22\\\xc6I\"\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x92\xe6X\x1e\x1d\xa1\xf9\xb8F\xe0\x93G3=\xc8\x18\xe2\u04acf\x89\xc5S%\xcat\x15\xe0\x00\x00\u07d4\x93\x1d\xf3M\x12%\xbc\xd4\"Nch\r\\L\t\xbc\xe75\xa6\x89\x03\xaf\xb0\x87\xb8v\x90\x00\x00\u07d4\x93\x1f\xe7\x12\xf6B\a\xa2\xfdP\"r\x88CT\x8b\xfb\x8c\xbb\x05\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x93#_4\r(c\xe1\x8d/LR\x99e\x16\x13\x8d\"\x02g\x89\x04\x00.D\xfd\xa7\xd4\x00\x00\u07d4\x93%\x82U\xb3|\u007fX\xf4\xb1\x06s\xa92\xdd:\xfd\x90\xf4\xf2\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x93(\xd5\\\xcb?\xceS\x1f\x19\x93\x823\x9f\x0eWn\xe8@\xa3\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x93)\xff\xdc&\x8b\xab\u0788t\xb3f@l\x81D[\x9b-5\x89\x16\xe6/\x8cs\f\xa1\x80\x00\u07d4\x93+\x9c\x04\xd4\r*\xc80\x83\xd9B\x98\x16\x9d\xae\x81\xab.\u0409lk\x93[\x8b\xbd@\x00\x00\u07d4\x9346\xc8G&U\xf6L:\xfa\xaf|Lb\x1c\x83\xa6+8\x8965\u026d\xc5\u07a0\x00\x00\u0794\x93;\xf3?\x82\x99p+:\x90&B\xc3>\v\xfa\xea\\\x1c\xa3\x88\xd2\xf1?w\x89\xf0\x00\x00\u07d4\x93@4\\\xa6\xa3\uaf77sc\xf2X`C\xf2\x948\xce\v\x89\x1c\xc8\x05\xda\r\xff\xf1\x00\x00\xe0\x94\x93@\xb5\xf6x\xe4^\xe0^\xb7\b\xbbz\xbbn\xc8\xf0\x8f\x1bk\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\x93J\xf2\x1b~\xbf\xa4g\xe2\xce\xd6Z\xa3N\xdd:\x0e\xc7\x132\x8a\a\x80\x1f>\x80\xcc\x0f\xf0\x00\x00\xe0\x94\x93PiDJj\x98M\xe2\bNFi*\xb9\x9fg\x1f\xc7'\x8a\x01\xe7\xe4\x17\x1b\xf4\u04e0\x00\x00\xe0\x94\x93P~\x9e\x81\x19\xcb\xce\u068a\xb0\x87\xe7\xec\xb0q8=i\x81\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4\x93g\x8a\x00\x00\xe0\x94\x93m\xcf\x00\x01\x94\xe3\xbf\xf5\n\u0174$:;\xa0\x14\xd6a\u060a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x93o8\x13\xf5\xf6\xa1;\x8eO\xfe\xc8?\xe7\xf8&\x18jq\u0349\x1c0s\x1c\xec\x03 \x00\x00\u07d4\x93t\x86\x9dJ\x99\x11\xee\x1e\xafU\x8b\xc4\u00b6>\xc6:\xcf\u074965\u026d\xc5\u07a0\x00\x00\u07d4\x93uc\u0628\x0f\u05657\xb0\xe6m 
\xa0%%\xd5\u0606`\x89\x87\x86x2n\xac\x90\x00\x00\u07d4\x93v\xdc\xe2\xaf.\xc8\xdc\xdat\x1b~sEfF\x81\xd96h\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x93\x86\x8d\xdb*yM\x02\xeb\xda/\xa4\x80|v\xe3`\x98X\u0709m\xee\x15\xfc|$\xa7\x80\x00\xe0\x94\x93\x9cC\x13\xd2(\x0e\xdf^\a\x1b\xce\xd8F\x06?\n\x97]T\x8a\x19i6\x89t\xc0[\x00\x00\x00\xe0\x94\x93\xa6\xb3\xabB0\x10\xf9\x81\xa7H\x9dJ\xad%\xe2b\\WA\x8a\x04F\x80\xfej\x1e\xdeN\x80\x00\u07d4\x93\xaa\x8f\x92\xeb\xff\xf9\x91\xfc\x05^\x90ne\x1a\xc7h\xd3+\u02092\xf5\x1e\u06ea\xa30\x00\x00\u07d4\x93\xb4\xbf?\xdf\xf6\xde?NV\xbamw\x99\xdcK\x93\xa6T\x8f\x89\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\u07d4\x93\xbc}\x9aJ\xbdD\u023b\xb8\xfe\x8b\xa8\x04\xc6\x1a\xd8\xd6Wl\x89\xd8\xd6\x11\x9a\x81F\x05\x00\x00\u07d4\x93\xc2\xe6N]\xe5X\x9e\xd2P\x06\xe8C\x19n\xe9\xb1\xcf\v>\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\xe0\x94\x93\u020e-\x88b\x1e0\xf5\x8a\x95\x86\xbe\xd4\t\x89\x99\xebg\u074a\x06\x9bZ\xfa\xc7P\xbb\x80\x00\x00\u07d4\x93\xe0\xf3~\xcd\xfb\x00\x86\xe3\xe8b\xa9p4D{\x1eM\xec\x1a\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\xe0\x94\x93\xe3\x03A\x1a\xfa\xf6\xc1\a\xa4A\x01\u026c[6\xe9\xd6S\x8b\x8a\r\xf9\xdd\xfe\xcd\x03e@\x00\x00\u07d4\x93\xf1\x8c\xd2R`@v\x14\x88\xc5\x13\x17M\x1eycv\x8b,\x89\x82\xff\xac\x9a\u0553r\x00\x00\u07d4\x94\x0fqQ@P\x9f\xfa\xbf\x97EF\xfa\xb3\x90\"\xa4\x19R\u0489K\xe4\xe7&{j\xe0\x00\x00\u07d4\x94,k\x8c\x95[\xc0\u0608\x12g\x8a#g%\xb3'9\xd9G\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\x94=7\x86JJS}5\xc8\u0657#\xcdd\x06\xce%b\xe6\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x94C\x9c\xa9\xcc\x16\x9ay\u0520\x9c\xae^gvJo\x87\x1a!\x89\r\x02\xabHl\xed\xc0\x00\x00\xe0\x94\x94D\x9c\x01\xb3*\u007f\xa5Z\xf8\x10OB\xcd\xd8D\xaa\x8c\xbc@\x8a\x03\x81\x11\xa1\xf4\xf0<\x10\x00\x00\xe0\x94\x94E\xba\\0\xe9\x89a\xb8`$a\xd08]@\xfb\xd8\x03\x11\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x94O\a\xb9o\x90\xc5\xf0\xd7\xc0\u0140S1I\xf3\xf5\x85\xa0x\x89\x04\x02\xf4\xcf\xeeb\xe8\x00\x00\u07d4\x94T\xb3\xa8\xbf\xf9p\x9f\xd0\u1407~l\xb6\u0219t\xdb\u0589\x90\xf54`\x8ar\x88\x00\x00\u07d4\x94]\x96\xeaW>\x8d\xf7&+\xbf\xa5r\"\x9bK\x16\x01k\x0f\x89\vX\x9e\xf9\x14\xc1B\x00\x00\u07d4\x94^\x18v\x9d~\xe7'\xc7\x01?\x92\xde$\xd1\x17\x96\u007f\xf3\x17\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x94a'\x81\x03;W\xb1F\xeet\xe7S\xc6r\x01\u007fS\x85\xe4\x89\xc3(\t>a\xee@\x00\x00\xe0\x94\x94dJ\xd1\x16\xa4\x1c\xe2\xca\u007f\xbe\xc6\t\xbd\xefs\x8a*\xc7\u01ca\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\x94p\xcc6YE\x86\x82\x18!\xc5\u0256\xb6\xed\xc8;mZ2\x89\x01M\x11 \u05f1`\x00\x00\xe0\x94\x94u\xc5\x10\xec\x9a&\x97\x92GtL=\x8c;\x0e\v_D\u04ca\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x94~\x11\xe5\xea)\ro\u00f3\x80H\x97\x9e\f\xd4N\xc7\xc1\u007f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x94\x83\u064f\x14\xa3?\xdc\x11\x8d@9U\u00995\xed\xfc_p\x89\x18\xea;4\xefQ\x88\x00\x00\u07d4\x94\x911\xf2\x89C\x92\\\xfc\x97\xd4\x1e\f\xea\v&)s\xa70\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\x94\x9f\x84\xf0\xb1\xd7\u0127\xcfI\xee\u007f\x8b,J\x13M\xe3(x\x89%\"H\u07b6\xe6\x94\x00\x00\u07d4\x94\x9f\x8c\x10{\xc7\xf0\xac\xea\xa0\xf1pR\xaa\xdb\xd2\xf9s+.\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x94\xa7\u0368\xf4\x81\xf9\u061dB\xc3\x03\xae\x162\xb3\xb7\t\xdb\x1d\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x94\xa9\xa7\x16\x911| d'\x1bQ\xc95?\xbd\xed5\x01\xa8\x89\xb5\x0f\u03ef\xeb\xec\xb0\x00\x00\u07d4\x94\xadK\xad\x82K\xd0\ub7a4\x9cX\u03bc\xc0\xff^\b4k\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\x94\xbb\xc6}\x13\xf8\x9e\xbc\xa5\x94\xbe\x94\xbcQp\x92\f0\xd9\xf3\x89\x04X\xff\xa3\x15\nT\x00\x00\u07d4\x94\xbe:\xe5Ob\xd6c\xb0\xd4\u031e\x1e\xa8\xfe\x95V\ua7bf\x89\x01C\x13,\xa8C\x18\x00\x00\xe0\x94\x94\xc0U\xe8X5z\xaa0\xcf 
A\xfa\x90Y\xce\x16J\x1f\x91\x8a\x04<%\xe0\xdc\xc1\xbd\x1c\x00\x00\xe0\x94\x94\xc7B\xfdz\x8by\x06\xb3\xbf\xe4\xf8\x90O\xc0\xbe\\v\x803\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x94\xcaV\xdew\u007f\xd4S\x17\u007f^\x06\x94\xc4x\xe6j\xff\x8a\x84\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\x94\xd8\x10t\xdbZ\xe1\x97\u04bb\x13s\xab\x80\xa8}\x12\x1cK\u04ca\x01\xfd\x934\x94\xaa_\xe0\x00\x00\u07d4\x94\u06c0xs\x86\n\xac=Z\xea\x1e\x88^R\xbf\xf2\x86\x99T\x89\xae\x8ez\v\xb5u\xd0\x00\x00\u07d4\x94\xe1\xf5\u02db\x8a\xba\xce\x03\xa1\xa6B\x82VU;i\f#U\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x94\xef\x8b\xe4Pw\xc7\xd4\xc5e'@\u0794jbbOq?\x89\x05l\xf5Y:\x18\xf8\x80\x00\u07d4\x94\xf1?\x9f\b6\xa3\xee$7\xa8I\"\u0498M\xc0\xf7\xd5;\x89\xa2\xa02\x9b\u00ca\xbe\x00\x00\u07d4\x94\xf8\xf0W\xdb~`\xe6u\xad\x94\x0f\x15X\x85\u0464w4\x8e\x89\x15\xbeat\xe1\x91.\x00\x00\xe0\x94\x94\xfc\u03ad\xfe\\\x10\x9c^\xae\xafF-C\x871B\u020e\"\x8a\x01\x045a\xa8\x82\x93\x00\x00\x00\u07d4\x95\x03N\x16!\x86Q7\xcdG9\xb3F\xdc\x17\xda:'\xc3N\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\x95\fh\xa4\t\x88\x15M#\x93\xff\xf8\xda|\u0369\x96\x14\xf7,\x89\xf9AF\xfd\x8d\xcd\xe5\x80\x00\xe0\x94\x95\x0f\xe9\xc6\xca\xd5\f\x18\xf1\x1a\x9e\xd9\xc4W@\xa6\x18\x06\x12\u040a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x95!\x83\xcf\u04ce5.W\x9d6\xde\xce\u0171\x84P\xf7\xfb\xa0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x95'\x8b\b\xde\xe7\xc0\xf2\xc8\xc0\xf7\"\xf9\xfc\xbb\xb9\xa5$\x1f\u0689\x82\x93\t\xf6O\r\xb0\x00\x00\u07d4\x95,W\xd2\xfb\x19Q\a\xd4\xcd\\\xa3\x00wA\x19\u07ed/x\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x955r\xf0\xeam\xf9\xb1\x97\xca\xe4\x0eK\x8e\xcc\x05lCq\u014965\u026d\xc5\u07a0\x00\x00\u07d4\x95>\xf6R\xe7\xb7i\xf5=nxjX\x95/\xa9>\xe6\xab\u725b\ny\x1f\x12\x110\x00\x00\u07d4\x95DpF1;/:^\x19\xb9H\xfd;\x8b\xed\xc8,q|\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\x95]\xb3\xb7C`\xb9\xa2hg~s\u03a8!f\x8a\xf6\xfa\u038a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4\x95`\xe8\xacg\x18\xa6\xa1\xcd\xcf\xf1\x89\xd6\x03\xc9\x06>A=\xa6\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x95g\xa0\u0781\x1d\xe6\xff\t[~\xe6N\u007f\x1b\x83\xc2a[\x80\x89\x0e~\xeb\xa3A\vt\x00\x00\u07d4\x95h\x1c\xda\xe6\x9b I\xce\x10\x1e2\\u\x98\x92\xca\xc3\xf8\x11\x89\x9a\xe9*\x9b\xc9L@\x00\x00\xe0\x94\x95h\xb7\xdeuV(\xaf5\x9a\x84T=\xe25\x04\xe1^A\xe6\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\x95i\xc6:\x92\x84\xa8\x05bm\xb3\xa3.\x9d#c\x93GaQ\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x95\x80\x9e\x8d\xa3\xfb\xe4\xb7\xf2\x81\xf0\xb8\xb1q_B\x0f}}c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x95\x9fW\xfd\xedj\xe3y\x13\xd9\x00\xb8\x1e_H\xa7\x93\"\xc6'\x89\r\xdb&\x10GI\x11\x80\x00\u07d4\x95\x9f\xf1\u007f\x1dQ\xb4s\xb4@\x10\x05'U\xa7\xfa\x8cu\xbdT\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x95\xa5w\xdc.\xb3\xael\xb9\xdf\xc7z\xf6\x97\xd7\xef\xdf\xe8\x9a\x01\x89\a_a\x0fp\xed \x00\x00\u07d4\x95\xcbm\x8acy\xf9J\xba\x8b\x88ViV,MD\x8eV\xa7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x95\xd5PB{ZQLu\x1ds\xa0\xf6\u049f\xb6]\"\xed\x10\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x95\u064d\f\x10i\x90\x8f\x06zR\xac\xac+\x8bSM\xa3z\xfd\x89oY\xb60\xa9)p\x80\x00\xe0\x94\x95\xdfN4E\xd7f&$\u010e\xbat\u03de\nS\xe9\xf72\x8a\v\xdb\xc4\x1e\x03H\xb3\x00\x00\x00\u07d4\x95\xe6\xa5K-_g\xa2JHu\xafu\x10|\xa7\xea\x9f\xd2\xfa\x89Hz\x9a0E9D\x00\x00\xe0\x94\x95\xe6\xf9=\xac\"\x8b\xc7XZ%sZ\xc2\xd0v\xcc:@\x17\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x95\xe7ad$\xcd\ta\xa7\x17'$t7\xf0\x06\x92r(\x0e\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x95\xe8\n\x82\xc2\f\xbe= 
`$,\xb9-sX\x10\xd04\xa2\x89\x01\xc3.F?\u0539\x80\x00\u07d4\x95\xf6-\x02C\xed\xe6\x1d\xad\x9a1e\xf59\x05'\rT\xe2B\x89WG=\x05\u06ba\xe8\x00\x00\u07d4\x95\xfbZ\xfb\x14\xc1\uf6b7\xd1y\xc5\xc3\x00P?\xd6j^\xe2\x89\x01\xda\xf7\xa0+\r\xbe\x80\x00\u07d4\x96\x10Y\"\x02\u0082\xab\x9b\u0628\x84Q\x8b>\v\xd4u\x817\x89\x0e\x87?D\x13<\xb0\x00\x00\xe0\x94\x96\x1cY\xad\xc7E\x05\u0446M\x1e\xcf\u02ca\xfa\x04\x12Y<\x93\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\x96,\r\xec\x8a=FK\xf3\x9b\x12\x15\xea\xfd&H\n\xe4\x90\u0349l\x82\xe3\xea\xa5\x13\xe8\x00\x00\u07d4\x96,\xd2*\x8e\xdf\x1eONU\xb4\xb1]\xdb\xfb]\x9dT\x19q\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x963K\xfe\x04\xff\xfaY\x02\x13\xea\xb3e\x14\xf38\xb8d\xb76\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x967\xdc\x12r=\x9cxX\x85B\uac02fO?\x03\x8d\x9d\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x96N\xabK'kL\u0618>\x15\xcar\xb1\x06\x90\x0f\xe4\x1f\u0389\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x96b\xee\x02\x19&h+1\xc5\xf2\x00\xceEz\xbe\xa7ll\xe9\x89$Y\x0e\x85\x89\xebj\x00\x00\xe0\x94\x96l\x04x\x1c\xb5\xe6}\xde25\xd7\xf8b\x0e\x1a\xb6c\xa9\xa5\x8a\x10\r P\xdacQ`\x00\x00\u07d4\x96pv\xa8w\xb1\x8e\xc1ZA[\xb1\x16\xf0n\xf3&E\u06e3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x96{\xfa\xf7bC\u0379@\t\xae<\x8d5\x05\xe9\xc0\x80EK\xe0\xe8\x19\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x96\x92A\x91\xb7\xdfe[3\x19\xdcma7\xf4\x81\xa7:\x0f\xf3\x89\xd9\xec\xb4\xfd \x8eP\x00\x00\u07d4\x96\x96\x05!83\x8cr/\x11@\x81\\\xf7t\x9d\r;:t\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x96\xa5_\x00\xdf\xf4\x05\xdcM\xe5\xe5\x8cW\xf6\xf6\xf0\xca\xc5]/\x89jf\x167\x9c\x87\xb5\x80\x00\u07d4\x96\xaaW?\xed/#4\x10\u06eeQ\x80\x14[#\xc3\x1a\x02\xf0\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\x96\xadW\x9b\xbf\xa8\u06ce\xbe\xc9\u0486\xa7.Fa\xee\xd8\xe3V\x89:\v\xa4+\xeca\x83\x00\x00\u07d4\x96\xb44\xfe\x06W\xe4*\u0302\x12\xb6\x86Q9\xde\xde\x15\x97\x9c\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x96\xb9\x06\xear\x9fFU\xaf\xe3\xe5}5'|\x96}\xfa\x15w\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x96\xd6-\xfdF\b\u007fb@\x9d\x93\xdd`a\x88\xe7\x0e8\x12W\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x96\xd9\u0328\xf5^\xea\x00@\xecn\xb3H\xa1wK\x95\xd9>\xf4\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x96\xe7\xc0\xc9\u057f\x10\x82\x1b\xf1@\xc5X\xa1E\xb7\xca\xc2\x13\x97\x899>\xf1\xa5\x12|\x80\x00\x00\u07d4\x96\xeaj\u021a+\xac\x954{Q\u06e6=\x8b\xd5\xeb\xde\xdc\xe1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x96\xea\xfb\xf2\xfboM\xb9\xa46\xa7LE\xb5eDR\xe28\x19\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x96\xebR>\x83/P\n\x01}\xe1>\xc2\u007f]6lV\x0e\xff\x89\x10\xac\u03baC\xee(\x00\x00\u07d4\x96\xf0F*\xe6\xf8\xb9`\x88\xf7\xe9\u018ct\xb9\u062d4\xb3G\x89a\t=|,m8\x00\x00\u07d4\x96\xf8 P\vp\xf4\xa3\xe3#\x9da\x9c\xff\x8f\" u\xb15\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x96\xfeY\xc3\u06f3\xaa|\xc8\xcbbH\fe\xe5nb\x04\xa7\xe2\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x96\xffoP\x99h\xf3l\xb4,\xbaH\xdb2\xf2\x1fVv\xab\xf8\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x97\t8R*\xfb^\x8f\x99Hs\xc9\xfb\xdc&\xe3\xb3~1L\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x97\n\xbdS\xa5O\xcaJd) |\x18-MW\xbb9\u0520\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x97\r\x8b\x8a\x00\x16\xd1C\x05O\x14\x9f\xb3\xb8\xe5P\xdc\a\x97\u01c965\u026d\xc5\u07a0\x00\x00\u07d4\x97,/\x96\xaa\x00\u03ca/ Z\xbc\xf8\x93|\fu\xf5\xd8\u0649\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x97?N6\x1f\xe5\xde\u0358\x9dL\x8f}|\xc9y\x908]\xaf\x89\x15\x0f\x85C\xa3\x87B\x00\x00\u07d4\x97M\x05A\xabJG\xec\u007fu6\x9c\x00i\xb6J\x1b\x81w\x10\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u0794\x97M/\x17\x89_)\x02\x04\x9d\xea\xae\xcf\t\xc3\x04e\a@-\x88\xcc\x19\u00947\xab\x80\x00\u07d4\x97R\xd1O^\x10\x93\xf0qq\x1c\x1a\xdb\xc4\xe3\xeb\x1e\\W\xf3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x97V\xe1v\xc9\xefi>\xe1\xee\u01b9\xf8\xb1Q\xd3\x13\xbe\xb0\x99\x89A\rXj \xa4\xc0\x00\x00\u07d4\x97_7d\xe9{\xbc\xcfv|\xbd;y[\xa8m\x8b\xa9\x84\x0e\x89\x12\xc1\xb6\xee\xd0=(\x00\x00\xe0\x94\x97j\x18Sj\xf4\x18tBc\b\x87\x1b\xcd\x15\x12\xa7u\xc9\xf8\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x97n<\xea\xf3\xf1\xafQ\xf8\u009a\xff]\u007f\xa2\x1f\x03\x86\xd8\xee\x89\r\x02\xabHl\xed\xc0\x00\x00\xe0\x94\x97w\xcca\xcfuk\xe3\xb3\xc2\f\xd4I\x1ci\xd2u\xe7\xa1 \x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x97\x81\v\xaf\xc3~\x840c2\xaa\xcb5\xe9*\xd9\x11\xd2=$\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x97\x8cC\f\xe45\x9b\x06\xbc,\xdf\\)\x85\xfc\x95\x0eP\xd5\u0209\x1a\x05V\x90\xd9\u06c0\x00\x00\u07d4\x97\x95\xf6C\x19\xfc\x17\xdd\x0f\x82a\xf9\xd2\x06\xfbf\xb6L\xd0\u0249\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x97\x99\xca!\xdb\xcfi\xbf\xa1\xb3\xf7+\xacQ\xb9\xe3\xcaX|\xf9\x89\\(=A\x03\x94\x10\x00\x00\u07d4\x97\x9c\xbf!\xdf\xec\x8a\xce?\x1c\x19m\x82\u07d6%4\xdf9O\x89\x99\x91\xd4x\xddM\x16\x00\x00\u07d4\x97\x9dh\x1ca}\xa1o!\xbc\xac\xa1\x01\xed\x16\xed\x01Z\xb6\x96\x89e\xea=\xb7UF`\x00\x00\u07d4\x97\x9f0\x15\x8bWK\x99\x9a\xab4\x81\a\xb9\xee\xd8[\x1f\xf8\xc1\x894\x95tD\xb8@\xe8\x00\x00\u07d4\x97\xa8o\x01\xce?|\xfdDA3\x0e\x1c\x9b\x19\xe1\xb1\x06\x06\xef\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x97\xb9\x1e\xfesP\xc2\xd5~~@k\xab\x18\xf3a{\xcd\xe1J\x8a\x02\x1e\x19\x99\xbb\xd5\u04be\x00\x00\u07d4\x97\xd0\xd9r^;p\xe6u\x841s\x93\x8e\xd3q\xb6,\u007f\xac\x89\t79SM(h\x00\x00\u07d4\x97\xd9\xe4jv\x04\u05f5\xa4\xeaN\xe6\x1aB\xb3\xd25\x0f\xc3\xed\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x97\xdc&\xecg\n1\xe0\"\x1d*u\xbc]\xc9\xf9\f\x1fo\u0509\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\xe0\x94\x97\xde!\xe4!\xc3\u007f\xe4\xb8\x02_\x9aQ\xb7\xb3\x90\xb5\xdfx\x04\x8a\x10\xf0\xcf\x06M\u0552\x00\x00\x00\u07d4\x97\xe2\x89s\xb8`\xc5g@(\x00\xfb\xb6<\xe3\x9a\x04\x8a=y\x89\x05B%:\x12l\xe4\x00\x00\u07d4\x97\xe5\xcca'\xc4\xf8\x85\xbe\x02\xf4KB\xd1\u0230\xac\x91\u44c9\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x97\xf1\xfeL\x80\x83\xe5\x96!*\x18w(\xdd\\\xf8\n1\xbe\u0149\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\x97\xf7v\x06W\xc1\xe2\x02u\x90\x86\x96>\xb4!\x1c_\x819\xb9\x8a\n\x8a\t\u007f\xcb=\x17h\x00\x00\xe0\x94\x97\xf9\x9bk\xa3\x13F\u0358\xa9\xfeL0\x8f\x87\u0165\x8cQQ\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x98\n\x84\xb6\x86\xfc1\xbd\xc8<\"\x10XTjq\xb1\x1f\x83\x8a\x89*AUH\xaf\x86\x81\x80\x00\u07d4\x98\x10\xe3J\x94\xdbn\xd1V\xd08\x9a\x0e+\x80\xf4\xfdk\n\x8a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x98\x1d\xdf\x04\x04\xe4\xd2-\xdaUj\a&\xf0\v-\x98\xab\x95i\x8965f3\xeb\xd8\xea\x00\x00\xe0\x94\x98\x1fq'u\xc0\xda\xd9u\x18\xff\xed\xcbG\xb9\xad\x1dl'b\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4\x984h!\x80\xb9\x82\xd1f\xba\u06dd\x9d\x1d\x9b\xbf\x01m\x87\xee\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x986\xb4\xd3\x04sd\x1a\xb5j\xee\xe1\x92Bv\x1drrQx\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x989sB\xec_=L\xb8w\xe5N\xf5\xd6\xf1\xd3fs\x1b\u050a\x01@a\xb9\xd7z^\x98\x00\x00\xe0\x94\x98Fd\x886\xa3\a\xa0W\x18O\xd5\x1fb\x8a_\x8c\x12B|\x8a\x04\vi\xbfC\xdc\xe8\xf0\x00\x00\xe0\x94\x98Jy\x85\xe3\xcc~\xb5\xc96\x91\xf6\xf8\xcc{\x8f$]\x01\xb2\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\x98]p\xd2\a\x89+\xed9\x85\x90\x02N$!\xb1\xcc\x11\x93Y\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\x98m\xf4~v\xe4\u05e7\x89\xcd\xee\x91<\u0243\x16P\x93l\x9d\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\x98t\x80?\xe1\xf3\xa06^y\"\xb1Bp\xea\xeb\x03,\xc1\xb5\x89<\xf5\x92\x88$\xc6\xc2\x00\x00\u07d4\x98ub4\x95\xa4l\xdb\xf2YS\x0f\xf88\xa1y\x9e\u00c9\x91\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x98v\x18\xc8VV |{\xac\x15\a\xc0\xff\xef\xa2\xfbd\xb0\x92\x89\x03}\xfeC1\x89\xe3\x80\x00\u07d4\x98|\x9b\xcdn?9\x90\xa5+\xe3\xed\xa4q\f'Q\x8fOr\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x98\x82\x96|\xeeh\u04a89\xfa\u062bJ|=\xdd\xf6\xc0\xad\u0209Hx\xbe\x1f\xfa\xf9]\x00\x00\u07d4\x98\x85\\}\xfb\xee3SD\x90J\x12\xc4\fs\x17\x95\xb1:T\x899\xfb\xae\x8d\x04-\xd0\x00\x00\u07d4\x98\x9c\f\xcf\xf6T\xda\x03\xae\xb1\x1a\xf7\x01\x05Ea\xd6)~\x1d\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x98\xa0\xe5Lm\x9d\u023e\x96'l\xeb\xf4\xfe\xc4`\xf6#]\x85\x89j\u0202\x10\tR\u01c0\x00\u07d4\x98\xb7i\xcc0\\\xec\xfbb\x9a\x00\xc9\a\x06\x9d~\xf9\xbc:\x12\x89\x01h\u048e?\x00(\x00\x00\xe0\x94\x98\xbaN\x9c\xa7/\xdd\xc2\fi\xb49ov\xf8\x18?z*N\x8a\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\x00\u07d4\x98\xbeimQ\xe3\x90\xff\x1cP\x1b\x8a\x0fc1\xb6(\xdd\u016d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x98\xbe\u04e7.\xcc\xfb\xaf\xb9#H\x92\x93\xe4)\xe7\x03\xc7\xe2[\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x98\xbfJ\xf3\x81\v\x84#\x87\xdbp\xc1MF\t\x96&\x00=\x10\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x98\xc1\x0e\xbf,O\x97\u02e5\xa1\xab?*\xaf\xe1\xca\xc4#\xf8\u02c9\x10CV\x1a\x88)0\x00\x00\u07d4\x98\xc1\x9d\xba\x81\v\xa6\x11\xe6\x8f/\x83\xee\x16\xf6\xe7tO\f\x1f\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x98\xc5IJ\x03\xac\x91\xa7h\xdf\xfc\x0e\xa1\xdd\u0b3f\x88\x90\x19\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\x98\xd2\x04\xf9\b_\x8c\x8e}\xe2>X\x9bd\xc6\xef\xf6\x92\xccc\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x98\xd3s\x19\x92\xd1\xd4\x0e\x12\x11\xc7\xf75\xf2\x18\x9a\xfa\a\x02\xe0\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x98\xe2\xb6\xd6\x06\xfd-i\x91\xc9\xd6\xd4\a\u007f\xdf?\xddE\x85\u06890\xdf\x1ao\x8a\xd6(\x00\x00\u07d4\x98\xe3\xe9\v(\xfc\xca\ue087y\xb8\xd4\nUh\xc4\x11n!\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\x98\xe6\xf5G\u06c8\xe7_\x1f\x9c\x8a\xc2\xc5\xcf\x16'\xbaX\v>\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\x98\xf4\xaf:\xf0\xae\xde_\xaf\xdcB\xa0\x81\xec\xc1\xf8\x9e<\xcf 
\x8a\x01\xfd\x934\x94\xaa_\xe0\x00\x00\u07d4\x98\xf6\xb8\xe6!=\xbc\x9aU\x81\xf4\xcc\xe6e_\x95%+\xdb\a\x89\x11Xr\xb0\xbc\xa40\x00\x00\u07d4\x99\te\r\u05719{\x8b\x8b\x0e\xb6\x94\x99\xb2\x91\xb0\xad\x12\x13\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x99\x11s`\x19G\xc2\bJb\xd69R~\x96\x15\x12W\x9a\xf9\x89 \x86\xac5\x10R`\x00\x00\u07d4\x99\x12\x9d[<\f\xdeG\xea\r\xefM\xfc\a\r\x1fJY\x95'\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x99\x17\u058dJ\xf3A\xd6Q\xe7\xf0\a\\m\xe6\xd7\x14Nt\t\x8a\x012\xd4Gl\b\xe6\xf0\x00\x00\u07d4\x99\x1a\xc7\xcap\x97\x11_& ^\xee\x0e\xf7\xd4\x1e\xb4\xe3\x11\xae\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u0794\x99#e\xd7d\xc5\xce5@9\xdd\xfc\x91.\x02:u\xb8\xe1h\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\x99&F\xac\x1a\u02ab\xf5\u076b\xa8\xf9B\x9a\xa6\xa9Nt\x96\xa7\x8967Pz0\xab\xeb\x00\x00\u07d4\x99&\x83'\xc3s3.\x06\xc3\xf6\x16B\x87\xd4U\xb9\xd5\xfaK\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x99(\xffqZ\xfc:+`\xf8\xebL\u013aN\xe8\u06b6\u5749\x17\xda:\x04\u01f3\xe0\x00\x00\u07d4\x992\xef\x1c\x85\xb7Z\x9b*\x80\x05}P\x874\xc5\x10\x85\xbe\u0309\x02\xb8?\xa50\x1dY\x00\x00\xe0\x94\x99?\x14ax`^f\xd5\x17\xbex.\xf0\xb3\xc6\x1aN\x19%\x8a\x01|\x1f\x055\u05e5\x83\x00\x00\xe0\x94\x99A7\x04\xb1\xa3.p\xf3\xbc\ri\u0748\x1c8VkT\u02ca\x05\xcckiF1\xf7\x12\x00\x00\u07d4\x99AR\xfc\x95\xd5\xc1\u028b\x88\x11:\xbb\xadMq\x0e@\xde\xf6\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x99D\xfe\xe9\xd3JJ\x88\x00#\u01c92\xc0\vY\xd5\xc8*\x82\x89(\xa8\xa5k6\x90\a\x00\x00\u07d4\x99L\u00b5\"~\xc3\xcf\x04\x85\x12F|A\xb7\xb7\xb7H\x90\x9f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x99q\xdf`\xf0\xaef\xdc\xe9\xe8\xc8N\x17\x14\x9f\t\xf9\xc5/d\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x99v\x94~\xff_j\xe5\xda\b\xddT\x11\x92\xf3x\xb4(\xff\x94\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x99}e\x92\xa3\x15\x89\xac\xc3\x1b\x99\x01\xfb\xeb<\xc3\xd6[2\x15\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x99\x82\xa5\x89\x0f\xfbT\x06\u04ec\xa8\u04bf\xc1\xddp\xaa\xa8\n\xe0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x99\x87\x8f\x9dn\n~\u066e\u01c2\x97\xb78y\xa8\x01\x95\xaf\xe0\x89\xd7\xc1\x98q\x0ef\xb0\x00\x00\u07d4\x99\x8c\x1f\x93\xbc\xdbo\xf2<\x10\xd0\u0712G(\xb7;\xe2\xff\x9f\x896[\xf3\xa43\xea\xf3\x00\x00\u07d4\x99\x91aL[\xaaG\xddl\x96\x87FE\xf9z\xdd,=\x83\x80\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x99\x92J\x98\x16\xbb}\xdf?\xec\x18D\x82\x8e\x9a\xd7\xd0k\xf4\xe6\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\x99\x99vh\xf7\xc1\xa4\xff\x9e1\xf9\x97z\xe3\"K\u02c8z\x85\x89\x0f\xc969(\x01\xc0\x00\x00\u07d4\x99\x9cI\xc1t\xca\x13\xbc\x83l\x1e\n\x92\xbf\xf4\x8b'\x15C\u0289\xb1\xcf$\xdd\u0431@\x00\x00\u07d4\x99\xa4\xde\x19\xde\u05d0\b\xcf\xdc\xd4]\x01M.XK\x89\x14\xa8\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4\x99\xa9k\xf2$.\xa1\xb3\x9e\xceo\xcc\r\x18\xae\xd0\f\x01y\xf3\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x99\xb0\x18\x93+\xca\xd3U\xb6y+%]\xb6p-\xec\x8c\xe5\u0749\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4\x99\xb7C\xd1\xd9\xef\xf9\r\x9a\x194\xb4\xdb!\xd5\x19\u061bJ8\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x99\xb8\xc8$\x86\x9d\xe9\xed$\xf3\xbf\xf6\x85L\xb6\xddE\xcc?\x9f\x89e\xea=\xb7UF`\x00\x00\u07d4\x99\xc0\x17L\xf8N\a\x83\xc2 \xb4\xebj\xe1\x8f\xe7\x03\x85J\u04c9py\xa2W=\fx\x00\x00\u07d4\x99\xc1\xd9\xf4\fj\xb7\xf8\xa9/\xce/\xdc\xe4zT\xa5\x86\xc5?\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\x99\xc26\x14\x1d\xae\xc87\xec\xe0O\xda\xee\x1d\x90\u03cb\xbd\xc1\x04\x89ve\x16\xac\xac\r \x00\x00\u07d4\x99\xc3\x1f\xe7HX7\x87\xcd\xd3\xe5%\xb2\x81\xb2\x18\x96\x179\xe3\x897\b\xba\xed=h\x90\x00\x00\u07d4\x99\xc4u\xbf\x02\xe8\xb9!J\xda_\xad\x02\xfd\xfd\x15\xba6\\\f\x89 
\t\xc5\u023fo\xdc\x00\x00\u07d4\x99\u0203%\x85F\xcc~N\x97\x1fR.8\x99\x18\xda^\xa6:\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x99\xc9\xf9>E\xfe<\x14\x18\xc3S\xe4\u016c8\x94\xee\xf8\x12\x1e\x89\x05\x85\xba\xf1E\x05\v\x00\x00\xe0\x94\x99\xd1W\x9c\xd4&\x82\xb7dN\x1dOq(D\x1e\xef\xfe3\x9d\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\x99\u0475\x85\x96_@jB\xa4\x9a\x1c\xa7\x0fv\x9evZ?\x98\x8a\x03\x89O\x0eo\x9b\x9fp\x00\x00\u07d4\x99\xdf\xd0PL\x06\xc7C\xe4e4\xfd{U\xf1\xf9\xc7\xec3)\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x99\xf4\x14|\xcck\u02c0\u0304.i\xf6\xd0\x0e0\xfaA3\u0649\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\x99\xf7\u007f\x99\x8b \xe0\xbc\xdc\xd9\xfc\x83\x86ARl\xf2Y\x18\xef\x89a\t=|,m8\x00\x00\u07d4\x99\xfa\xd5\x008\xd0\xd9\xd4\xc3\xfb\xb4\xbc\xe0V\x06\xec\xad\xcdQ!\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x99\xfe\r \x12(\xa7S\x14VU\xd4(\xeb\x9f\xd9I\x85\xd3m\x89i \xbf\xf3QZ:\x00\x00\u07d4\x9a\a\x9c\x92\xa6)\xca\x15\xc8\xca\xfa.\xb2\x8d[\xc1z\xf8(\x11\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x9a\r<\xee=\x98\x92\xea;7\x00\xa2\u007f\xf8A@\xd9\x02T\x93\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\x9a$\u038dH\\\xc4\xc8nI\u07b3\x90\"\xf9,t0\xe6~\x89Fy\x1f\xc8N\a\xd0\x00\x00\u07d4\x9a,\xe4;]\x89\u0593k\x8e\x8c5G\x91\xb8\xaf\xff\x96$%\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9a9\x01bS^9\x88w\xe4\x16x}b9\xe0uN\x93|\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9a=\xa6P#\xa10 \xd2!E\xcf\xc1\x8b\xab\x10\xbd\x19\xceN\x89\x18\xbfn\xa3FJ:\x00\x00\xe0\x94\x9a>+\x1b\xf3F\xdd\a\v\x02sW\xfe\xacD\xa4\xb2\xc9}\xb8\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9aL\xa8\xb8!\x17\x89NC\xdbr\xb9\xfax\xf0\xb9\xb9:\xce\t\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\x9aR.R\xc1\x95\xbf\xb7\xcf_\xfa\xae\u06d1\xa3\xbath\x16\x1d\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9aZ\xf3\x1c~\x063\x9a\u0234b\x8d|M\xb0\xce\x0fE\u0224\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u0794\x9ac?\xcd\x11,\xce\xebv_\xe0A\x81ps*\x97\x05\u7708\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\x9ac\u0445\xa7\x91)\xfd\xab\x19\xb5\x8b\xb61\xea6\xa4 TN\x89\x02F\xdd\xf9yvh\x00\x00\u07d4\x9ag\b\u0778\x90<(\x9f\x83\xfe\x88\x9c\x1e\xdc\xd6\x1f\x85D#\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9ao\xf5\xf6\xa7\xaf{z\xe0\xed\x9c \xec\xecP#\u0481\xb7\x86\x89\x8a\x12\xb9\xbdjg\xec\x00\x00\xe0\x94\x9a\x82\x82m<)H\x1d\xcc+\u0495\x00G\xe8\xb6\x04\x86\xc38\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x9a\x8e\xcaA\x89\xffJ\xa8\xff~\u0536\xb7\x03\x9f\t\x02!\x9b\x15\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x9a\x95;[\xccp\x93y\xfc\xb5Y\u05f9\x16\xaf\u06a5\f\xad\u0309\x05k\xc7^-c\x10\x00\x00\u07d4\x9a\x99\v\x8a\xebX\x8d~\xe7\xec.\xd8\xc2\xe6Os\x82\xa9\xfe\xe2\x89\x01\xd1'\xdbi\xfd\x8b\x00\x00\u07d4\x9a\x9d\x1d\xc0\xba\xa7}n \xc3\xd8I\u01c8b\xdd\x1c\x05L\x87\x89/\xb4t\t\x8fg\xc0\x00\x00\xe0\x94\x9a\xa4\x8cf\xe4\xfbJ\u0419\x93N2\x02.\x82t'\xf2w\xba\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9a\xa80\x8fB\x91\x0eZ\xde\t\xc1\xa5\xe2\x82\xd6\xd9\x17\x10\xbd\xbf\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x9a\xaa\xfa\x00gd~\u0659\x06kzL\xa5\xb4\xb3\xf3\xfe\xaao\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9a\xb9\x88\xb5\x05\xcf\xee\x1d\xbe\x9c\u044e\x9bTs\xb9\xa2\xd4\xf56\x89\x11X\xe4`\x91=\x00\x00\x00\u07d4\x9a\xb9\x8dm\xbb\x1e\xaa\xe1mE\xa0EhT\x1a\xd3\xd8\xfe\x06\u0309\x0e\xc5\x04d\xfe#\xf3\x80\x00\xe0\x94\x9a\xba+^'\xffx\xba\xaa\xb5\xcd\u0248\xb7\xbe\x85\\\xeb\xbd\u038a\x02\x1e\f\x00\x13\a\n\xdc\x00\x00\u07d4\x9a\xc4\xdaQ\xd2x\"\xd1\xe2\b\xc9n\xa6J\x1e[U)\x97#\x89\x05lUy\xf7\"\x14\x00\x00\u0794\x9a\xc8S\x97y*i\u05cf(k\x86C*\a\xae\u03b6\x0ed\x88\xc6s\xce<@\x16\x00\x00\xe0\x94\x9a\xc9\a\xee\x85\xe6\xf3\xe2#E\x99\x92\xe2V\xa4?\xa0\x8f\xa8\xb2\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9a\xd4\u007f\xdc\xf9\u0354-(\xef\xfd[\x84\x11[1\xa6X\xa1>\x89\xb2Y\xec\x00\xd5;(\x00\x00\u07d4\x9a\xdb\u04fc{\n\xfc\x05\xd1\xd2\xed\xa4\x9f\xf8c\x93\x9cH\xdbF\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4\x9a\xdfE\x8b\xff5\x99\xee\xe1\xa2c\x98\x85\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\x9a\xf9\xdb\xe4t\"\xd1w\xf9E\xbd\xea\xd7\xe6\xd8)05b0\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\x9a\xfaSkLf\xbc8\xd8u\u0133\x00\x99\xd9&\x1f\xdb8\xeb\x89\v*\x8f\x84*w\xbc\x80\x00\u07d4\x9b\x06\xad\x84\x1d\xff\xbeL\xcfF\xf1\x03\x9f\u00c6\xf3\xc3!Dn\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9b\x11h\u078a\xb6KGU/3\x89\x80\n\x9c\xc0\x8bFf\u03c9]\u0212\xaa\x111\xc8\x00\x00\u07d4\x9b\x18\x11\xc3\x05\x1fF\xe6d\xaeK\xc9\xc8$\u0445\x92\xc4WJ\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4\x9b\x18G\x86U\xa4\x85\x1c\xc9\x06\xe6`\xfe\xaca\xf7\xf4\u023f\xfc\x89\xe2G\x8d8\x90}\x84\x00\x00\u07d4\x9b\"\xa8\r\\{3t\xa0[D`\x81\xf9}\n4\a\x9e\u007f\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\x9b+\xe7\xf5gT\xf5\x05\xe3D\x1a\x10\xf7\xf0\xe2\x0f\xd3\xdd\xf8I\x89\x12nr\xa6\x9aP\xd0\x00\x00\u07d4\x9b2\xcfOQ\x15\xf4\xb3J\x00\xa6La}\xe0c\x875C#\x89\x05\xb8\x1e\u0608 |\x80\x00\u07d4\x9bC\u0739_\xde1\x80u\xa5g\xf1\xe6\xb5v\x17\x05^\xf9\xe8\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\x9bDO\xd37\xe5\xd7R\x93\xad\xcf\xffp\xe1\xea\x01\xdb\x022\"\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x9bH$\xff\x9f\xb2\xab\xdaUM\xeeO\xb8\xcfT\x91eW\x061\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x9bL'\x15x\f\xa4\xe9\x9e`\xeb\xf2\x19\xf1Y\f\x8c\xadP\n\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4\x9bY\xeb!;\x1eue\xe4PG\xe0N\xa07O\x10v-\x16\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9b\\9\xf7\xe0\xac\x16\x8c\x8e\xd0\xed4\x04w\x11}\x1bh.\xe9\x89\x05P\x05\xf0\xc6\x14H\x00\x00\u07d4\x9b^\xc1\x8e\x83\x13\x88}\xf4a\u0490.\x81\xe6z\x8f\x11;\xb1\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x9bd\xd3\u034d+s\xf6hA\xb5\xc4k\xb6\x95\xb8\x8a\x9a\xb7]\x89\x01 
:Ov\f\x16\x80\x00\u07d4\x9be\x8f\xb3a\xe0F\xd4\xfc\xaa\x8a\xefm\x02\xa9\x91\x11\"6%\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9bfA\xb1>\x17/\xc0r\xcaK\x83'\xa3\xbc(\xa1[f\xa9\x89\x06\x81U\xa46v\xe0\x00\x00\xe0\x94\x9bh\xf6t\x16\xa6;\xf4E\x1a1\x16L\x92\xf6r\xa6\x87Y\xe9\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4\x9bw6i\xe8}v\x01\x8c\t\x0f\x82U\xe5D\t\xb9\u0728\xb2\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x9bw\xeb\xce\xd7\xe2\x15\xf0\x92\x0e\x8c+\x87\x00$\xf6\xec\xb2\xff1\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9b|\x88\x10\xcc|\u021e\x80Nm>8\x12\x18PG(w\xfe\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9b\xa5=\xc8\xc9^\x9aG/\xeb\xa2\xc4\xe3,\x1d\xc4\xdd{\xabF\x89Hz\x9a0E9D\x00\x00\xe0\x94\x9b\xac\xd3\xd4\x0f;\x82\xac\x91\xa2d\xd9\u060d\x90\x8e\xac\x86d\xb9\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x9b\xb7`\xd5\u0089\xa3\xe1\xdb\x18\xdb\tSE\xcaA;\x9aC\u0089\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\x9b\xb7b\x04\x18j\xf2\xf6;\xe7\x91h`\x16\x87\xfc\x9b\xadf\x1f\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x9b\xb9\xb0*&\xbf\xe1\xcc\xc3\xf0\xc6!\x9e&\x1c9\u007f\xc5\xcax\x89Hz\x9a0E9D\x00\x00\u07d4\x9b\xc5s\xbc\xda#\xb8\xb2o\x90s\xd9\f#\x0e\x8eq\xe0'\v\x896/u\xa40]\f\x00\x00\u07d4\x9b\xd7\u00caB\x100JMe>\xde\xff\x1b<\xe4_\xcexC\x89\x0fI\x89A\xe6d(\x00\x00\xe0\x94\x9b\u0600h\xe10u\xf3\xa8\xca\xc4d\xa5\xf9I\xd6\xd8\x18\xc0\xf6\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x9b\xd9\x05\xf1q\x9f\u01ec\xd0\x15\x9dM\xc1\xf8\xdb/!G#8\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9b\xdb\u071b\x9741\xd1<\x89\xa3\xf9u~\x9b;bu\xbf\u01c9\x1b\x1a}\u03caD\u04c0\x00\u07d4\x9b\xe3\xc3)\xb6*(\xb8\xb0\x88l\xbd\x8b\x99\xf8\xbc\x93\f\xe3\xe6\x89\x04\t\xe5+H6\x9a\x00\x00\xe0\x94\x9b\xf5\x8e\xfb\xea\a\x84\xeb\x06\x8a\xde\u03e0\xbb!P\x84\xc7:5\x8a\x01:k+VHq\xa0\x00\x00\u07d4\x9b\xf6r\xd9y\xb3fR\xfcR\x82Tzjk\xc2\x12\xaeCh\x89#\x8f\xd4,\\\xf0@\x00\x00\xe0\x94\x9b\xf7\x03\xb4\x1c6$\xe1_@T\x96#\x90\xbc\xba0R\xf0\xfd\x8a\x01H>\x01S<.<\x00\x00\u07d4\x9b\xf7\x1f\u007f\xb57\xacT\xf4\xe5\x14\x94\u007f\xa7\xffg(\xf1m/\x89\x01\u03c4\xa3\n\n\f\x00\x00\u07d4\x9b\xf9\xb3\xb2\xf2<\xf4a\xebY\x1f(4\v\xc7\x19\x93\x1c\x83d\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\x9b\xfce\x9c\x9c`\x1e\xa4*k!\xb8\xf1p\x84\xec\x87\xd7\x02\x12\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9b\xff\xf5\r\xb3jxUU\xf0vR\xa1S\xb0\xc4+\x1b\x8bv\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9c\x05\xe9\xd0\xf0u\x8eyS\x03q~1\xda!<\xa1W\u618965\u026d\xc5\u07a0\x00\x00\u07d4\x9c\x1bw\x1f\t\xaf\x88*\xf0d0\x83\xde*\xa7\x9d\xc0\x97\xc4\x0e\x89\x86p\xe9\xece\x98\xc0\x00\x00\u07d4\x9c(\xa2\xc4\b`\x91\xcb]\xa2&\xa6W\xce2H\xe8\xea{o\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\u07d4\x9c/\xd5@\x89\xaff]\xf5\x97\x1ds\xb8\x04a`9dsu\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9c4@\x98\xbaaZ9\x8f\x11\xd0\t\x90[\x17|D\xa7\xb6\x02\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9c=\x06\x92\xce\xee\xf8\n\xa4\x96\\\xee\xd2b\xff\xc7\xf0i\xf2\u0709\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x9c@\\\xf6\x97\x95a8\x06^\x11\xc5\xf7U\x9eg$[\u0465\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x9cE *%\xf6\xad\x00\x11\xf1\x15\xa5\xa7\"\x04\xf2\xf2\x19\x88f\x8a\x01\x0f\xcf:b\xb0\x80\x98\x00\x00\xe0\x94\x9cI\xde\xffG\b_\xc0\x97\x04\u02a2\u0728\u0087\xa9\xa17\u068a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\x9cK\xbc\xd5\xf1dJo\aX$\xdd\xfe\x85\xc5q\u05ab\xf6\x9c\x89a\x94\x04\x9f0\xf7 
\x00\x00\u07d4\x9cRj\x14\x06\x83\xed\xf1C\x1c\xfa\xa1(\xa95\xe2\xb6\x14\u060b\x89\x06\x04o7\xe5\x94\\\x00\x00\xe0\x94\x9cT\xe4\xedG\x9a\x85h)\u01bbB\u069f\vi*u\xf7(\x8a\x01\x97\xa8\xf6\xddU\x19\x80\x00\x00\xe0\x94\x9cX\x1a`\xb6\x10(\xd94\x16y)\xb2-p\xb3\x13\xc3O\u040a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\x9c\\\xc1\x11\t,\x12!\x16\xf1\xa8_N\xe3\x14\bt\x1a}/\x89\x1a\xb2\xcf|\x9f\x87\xe2\x00\x00\u07d4\x9ck\u0264k\x03\xaeT\x04\xf0C\xdf\xcf!\x88>A\x10\xcc3\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x9cx\x96?\xbc&<\t\xbdr\xe4\xf8\xde\xf7J\x94u\xf7\x05\\\x8a\x02\ub3b1\xa1r\u0738\x00\x00\u07d4\x9cx\xfb\xb4\xdfv\x9c\xe2\xc1V\x92\f\xfe\xdf\xda\x03:\x0e%J\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x9c{m\xc5\x19\x0f\xe2\x91)c\xfc\xd5yh>\xc79Q\x16\xb0\x89*\x11)\u0413g \x00\x00\u07d4\x9c\x80\xbc\x18\xe9\xf8\u0516\x8b\x18]\xa8\u01df\xa6\xe1\x1f\xfc>#\x89\r\x02\xabHl\xed\xc0\x00\x00\xe0\x94\x9c\x98\xfd\xf1\xfd\u034b\xa8\xf4\u0170L:\xe8X~\xfd\xf0\xf6\xe6\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\x9c\x99\xa1\u0691\u0552\v\xc1N\f\xb9\x14\xfd\xf6+\x94\u02c3X\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\x9c\x99\xb6&\x06(\x1b\\\xef\xab\xf3aV\xc8\xfeb\x83\x9e\xf5\xf3\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x9c\x9a\a\xa8\xe5|1r\xa9\x19\xefdx\x94tI\x0f\r\x9fQ\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9c\x9d\xe4G$\xa4\x05M\xa0\xea\xa6\x05\xab\u0300&hw\x8b\xea\x89\n\xd7\xd5\xca?\xa5\xa2\x00\x00\u07d4\x9c\x9f;\x8a\x81\x1b!\xf3\xff?\xe2\x0f\xe9p\x05\x1c\xe6j\x82O\x89>\xc2\u07bc\a\u053e\x00\x00\xe0\x94\x9c\x9f\x89\xa3\x91\x0fj*\xe8\xa9\x10G\xa1z\xb7\x88\xbd\xde\xc1p\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9c\xa0B\x9f\x87O\x8d\xce\xe2\xe9\xc0b\xa9\x02\n\x84*Xz\xb9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9c\xa4.\u7838\x98\xf6\xa5\xcc`\xb5\xa5\u05f1\xbf\xa3\xc321\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9c\xb2\x8a\xc1\xa2\n\x10o\u007f76\x92\xc5\xceLs\xf172\xa1\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9c\xcd\u0732\xcf\u00b2[\br\x9a\n\x98\xd9\xe6\xf0 .\xa2\xc1\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x9c\xe2\u007f$^\x02\xd1\xc3\x12\xc1\xd5\x00x\x8c\x9d\xefv\x90E;\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x9c\xe56;\x13\xe8#\x8a\xa4\xdd\x15\xac\u0432\xe8\xaf\xe0\x872G\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\x9c\xf2\x92\x8b\xee\xf0\x9a@\xf9\xbf\xc9S\xbe\x06\xa2Q\x11a\x82\xfb\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x9d\x06\x91\x97\xd1\xdeP\x04Z\x18o^\xc7D\xac@\u8bd1\u0189lk\x93[\x8b\xbd@\x00\x00\u07d4\x9d\x0e}\x92\xfb0XS\u05d8&;\xf1^\x97\xc7+\xf9\xd7\xe0\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9d\x0f4~\x82k}\u03aa\xd2y\x06\n5\xc0\x06\x1e\xcf3K\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9d u\x17B,\xc0\xd6\r\xe7\xc27\tzMO\xce \x94\f\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\x9d%\n\xe4\xf1\x10\xd7\x1c\xaf\u01f0\xad\xb5.\x8d\x9a\xcbfy\xb8\x8a\x02\x15mn\x99r\x13\xc0\x00\x00\xe0\x94\x9d+\xfc6\x10o\x03\x82P\xc0\x18\x01hW\x85\xb1l\x86\xc6\r\x8aPw\xd7]\xf1\xb6u\x80\x00\x00\xe0\x94\x9d0\xcb#{\xc0\x96\xf1p6\xfc\x80\xdd!\xcah\x99,\xa2\u064a\x06n\xe71\x8f\u070f0\x00\x00\u07d4\x9d2\x96.\xa9\x97\x00\xd92(\xe9\xdb\xda\xd2\xcc7\xbb\x99\xf0~\x89\xb4c+\xed\xd4\xde\xd4\x00\x00\u07d4\x9d4\xda\xc2[\xd1X(\xfa\xef\xaa\xf2\x8fq\aS\xb3\x9e\x89\u0709;\x1cV\xfe\xd0-\xf0\x00\x00\u07d4\x9d6\x91e\xfbp\xb8\x1a:v_\x18\x8f\xd6\f\xbe^{\th\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9d@\xe0\x12\xf6\x04%\xa3@\xd8-\x03\xa1\xc7W\xbf\xab\xc7\x06\xfb\x89\t4o:\xdd\u020d\x80\x00\u07d4\x9dAt\xaaj\xf2\x84v\xe2)\xda\xdbF\x18\b\b\xc6u\x05\xc1\x89B\x1a\xfd\xa4.\u0597\x00\x00\u07d4\x9dB\x133\x9a\x01U\x18avL\x87\xa9<\xe8\xf8_\x87\x95\x9a\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\x9dF\f\x1b7\x9d\xdb\x19\xa8\xc8[LgG\x05\r\xdf\x17\xa8u\x89\xb5\x0f\u03ef\xeb\xec\xb0\x00\x00\u07d4\x9dG\xba[L\x85\x05\xad\x8d\xa4)4(\va\xa0\xe1\xe8\xb9q\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x9dM2\x11w%n\xbd\x9a\xfb\xda0A5\xd5\x17\xc3\xdcV\x93\x89!d\xb7\xa0J\u0220\x00\x00\u07d4\x9dO\xf9\x89\xb7\xbe\u066b\x10\x9d\x10\xc8\xc7\xe5_\x02\xd7g4\xad\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\x9dQ\x15C\xb3\xd9\xdc`\xd4\u007f\t\u051d\x01\xb6\u0118\xd8 x\x8a\x02a\x97\xb9Qo\u00d4\x00\x00\u07d4\x9dn\u03e0:\xf2\xc6\xe1D\xb7\xc4i*\x86\x95\x1e\x90.\x9e\x1f\x89\xa2\xa5\xaa`\xad$?\x00\x00\u07d4\x9dvU\xe9\xf3\xe5\xba]n\x87\xe4\x12\xae\xbe\x9e\xe0\u0512G\ue24e\t1\x1c\x1d\x80\xfa\x00\x00\u07d4\x9dx1\xe84\xc2\v\x1b\xaaiz\xf1\xd8\xe0\xc6!\u016f\xff\x9a\x89\x04\xb0m\xbb\xb4\x0fJ\x00\x00\u07d4\x9dx\xa9u\xb7\xdb^M\x8e(\x84\\\xfb\xe7\xe3\x14\x01\xbe\r\u0649H\xa40k\xa2\u5e5c\x8ahX\u02f5,\f\xf75\x89\x10CV\x1a\x88)0\x00\x00\xe0\x94\x9d\u007f\xdapp\xbf>\xe9\xbb\u0664\x1fU\xca\u0505J\xe6\xc2,\x8a\x02U\u02e3\xc4o\xcf\x12\x00\x00\u07d4\x9d\x81\xae\xa6\x9a\xedj\xd0p\x89\xd6\x14E4\x8c\x17\xf3K\xfc[\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x9d\x91\x1f6\x82\xf3/\xe0y.\x9f\xb6\xff<\xfcG\xf5\x89\xfc\xa5\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9d\x91;]3\x9c\x95\xd8wEV%c\xfe\xa9\x8b#\xc6\f\u0109\tA0,\u007fM#\x00\x00\u07d4\x9d\x93\xfa\xb6\xe2(E\xf8\xf4Z\aIo\x11\xdeqS\r\xeb\u01c9lO\xd1\xee$nx\x00\x00\u07d4\x9d\x99\xb1\x89\xbb\u0664\x8f\xc2\xe1n\x8f\u0363;\xb9\x9a1{\xbb\x89=\x16\xe1\vm\x8b\xb2\x00\x00\u07d4\x9d\x9cN\xfe\x9fC9\x89\xe2;\xe9@I!S)\xfaU\xb4\u02c9\r\u3c89\x03\u01b5\x80\x00\u07d4\x9d\x9eW\xfd\xe3\x0ePh\xc0>I\x84\x8e\xdc\xe3C\xb7\x02\x83X\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\x9d\xa30\"@\xaf\x05\x11\xc6\xfd\x18W\xe6\u07779Ow\xabk\x89\xa8\r$g~\xfe\xf0\x00\x00\u07d4\x9d\xa4\xec@pw\xf4\xb9p{-\x9d.\xde^\xa5(+\xf1\u07c9\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9d\xa6\t\xfa:~l\xf2\xcc\x0ep\u036b\xe7\x8d\xc4\xe3\x82\xe1\x1e\x89A\rXj \xa4\xc0\x00\x00\xe0\x94\x9d\xa6\x1c\xcdb\xbf\x86\x06V\xe02]qW\xe2\xf1`\xd9;\xb5\x8a\x01\x0f\f\xa9V\xf8y\x9e\x00\x00\xe0\x94\x9d\xa6\xe0u\x98\x9ct\x19\tL\xc9\xf6\xd2\u44d3\xbb\x19\x96\x88\x8a\x02Y\xbbq\u056d\xf3\xf0\x00\x00\u07d4\x9d\xa8\xe2,\xa1\x0eg\xfe\xa4NR^GQ\xee\xac6\xa3\x11\x94\x89\x0e\x189\x8ev\x01\x90\x00\x00\u07d4\x9d\xb2\xe1\\\xa6\x81\xf4\xc6`H\xf6\xf9\xb7\x94\x1e\u040b\x1f\xf5\x06\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9d\xc1\x0f\xa3\x8f\x9f\xb0h\x10\xe1\x1f`\x17>\xc3\xd2\xfdju\x1e\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\x9d\xd2\x19f$\xa1\xdd\xf1J\x9d7^_\a\x15+\xaf\"\xaf\xa2\x89A\xb0^$c\xa5C\x80\x00\u07d4\x9d\xd4k\x1cm?\x05\u279co\x03~\xed\x9aYZ\xf4\xa9\xaa\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x9d\xdd5^cN\xe9\x92~K\u007fl\x97\xe7\xbf:/\x1ehz\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\x9d\xe2\n\xe7j\xa0\x82c\xb2\x05\xd5\x14$a\x96\x1e$\b\xd2f\x89\r\xa93\xd8\xd8\xc6p\x00\x00\u07d4\x9d\xe2\v\xc3~\u007fH\xa8\x0f\xfdz\xd8O\xfb\xf1\xa1\xab\xe1s\x8c\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x9d\xe78m\xde@\x1c\xe4\xc6{q\xb6U?\x8a\xa3N\xa5\xa1}\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\x9d\xeb9\x02z\xf8w\x99+\x89\xf2\xecJ\x1f\x82.\xcd\xf1&\x93\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x9d\xef\xe5j\x0f\xf1\xa1\x94}\xba\t#\xf7\xdd%\x8d\x8f\x12\xfaE\x8a\x05\xb1*\ufbe8\x04\x00\x00\x00\u07d4\x9d\xf0W\xcd\x03\xa4\xe2~\x8e\x03/\x85y\x85\xfd\u007f\x01\xad\xc8\u05c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x9d\xf3*P\x1c\vx\x1c\x02\x81\x02/B\xa1)?\xfd{\x89*\x8a\x01\xe7\xe4\x17\x1b\xf4\u04e0\x00\x00\u07d4\x9e\x01vZ\xff\b\xbc\"\x05P\xac\xa5\xea.\x1c\xe8\u5c19#\x8965\u026d\xc5\u07a0\x00\x00\u07d4\x9e 
\xe5\xfd6\x1e\xab\xcfc\x89\x1f[\x87\xb0\x92h\xb8\xeb7\x93\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x9e#,\b\xc1M\xc1\xa6\xed\v\x8a;(h\x97{\xa5\xc1}\x10\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x9e#\xc5\u4dc2\xb0\n_\xad\U0006eb47\xda\xcf[\x03g\xa1\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x9e59\x90q\xa4\xa1\x01\xe9\x19M\xaa?\t\xf0J\v_\x98p\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9e>\xb5\t'\x8f\xe0\xdc\xd8\xe0\xbb\xe7\x8a\x19N\x06\xb6\x809C\x892\xf5\x1e\u06ea\xa30\x00\x00\u07d4\x9eBrrQk>g\xd4\xfc\xbf\x82\xf5\x93\x90\xd0L\x8e(\xe5\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\x9eL\xec5:\xc3\u3043^<\t\x91\xf8\xfa\xa5\xb7\u0428\xe6\x8a\x02\x1e\x18\xb9\xe9\xabE\xe4\x80\x00\u07d4\x9eX\x11\xb4\v\xe1\xe2\xa1\xe1\u048c;\at\xac\xde\n\t`=\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\x9eZ1\x1d\x9fi\x89\x8a|j\x9dc`h\x048\xe6z{/\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4\x9e| P\xa2'\xbb\xfd`\x93~&\x8c\xea>h\xfe\xa8\xd1\xfe\x89\x05k\xc7^-c\x10\x00\x00\u07d4\x9e\u007fe\xa9\x0e\x85\b\x86{\xcc\xc9\x14%j\x1e\xa5t\xcf\a\xe3\x89C8t\xf62\xcc`\x00\x00\xe0\x94\x9e\x81D\xe0\x8e\x89dx\x11\xfekr\xd4E\u05a5\xf8\n\xd2D\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9e\x8fd\xdd\xcd\u9e34Q\xba\xfa\xa25\xa9\xbfQ\x1a%\xac\x91\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4\x9e\x95\x1fm\xc5\xe3R\xaf\xb8\xd0B\x99\xd2G\x8aE\x12Y\xbfV\x89\x03\xe7A\x98\x81\xa7:\x00\x00\u07d4\x9e\x96\r\xcd\x03\u057a\x99\xcb\x11]\x17\xffL\t$\x8a\xd4\u043e\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x9e\xafj2\x8a@v\x02N\xfakg\xb4\x8b!\xee\xdc\xc0\xf0\xb8\x89\b\x90\xb0\xc2\xe1O\xb8\x00\x00\u07d4\x9e\xb1\xffqy\x8f(\xd6\xe9\x89\xfa\x1e\xa0X\x8e'\xba\x86\xcb}\x89\a\xa1\xfe\x16\x02w\x00\x00\x00\u07d4\x9e\xb2\x81\xc3'\x19\xc4\x0f\xdb>!m\xb0\xf3\u007f\xbcs\xa0&\xb7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\x9e\xb3\xa7\xcb^g&Bz:6\x1c\xfa\x8dad\xdb\u043a\x16\x89+\x95\xbd\xcc9\xb6\x10\x00\x00\u07d4\x9e\xb7\x83N\x17\x1dA\xe0i\xa7yG\xfc\xa8v\"\xf0\xbaNH\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\x9e\xc0>\x02\u51f7v\x9d\xefS\x84\x13\xe9\u007f~U\xbeq\u060a\x04+\xf0kx\xed;P\x00\x00\u07d4\x9e\u02eb\xb0\xb2'\x82\xb3uD)\xe1uz\xab\xa0K\x81\x18\x9f\x89,\xa7\xbb\x06\x1f^\x99\x80\x00\u07d4\x9e\xce\x14\x00\x80\t6\xc7\xc6H_\xcd\xd3b`\x17\u041a\xfb\xf6\x89\x10\xce\x1d=\x8c\xb3\x18\x00\x00\u07d4\x9e\xd4\xe6?ReB\xd4O\xdd\xd3MY\xcd%8\x8f\xfdk\u0689\u049b4\xa4cH\x94\x00\x00\u07d4\x9e\xd8\x0e\xda\u007fU\x05M\xb9\xfbR\x82E\x16\x88\xf2k\xb3t\xc1\x89\x10CV\x1a\x88)0\x00\x00\u07d4\x9e\u0710\xf4\xbe!\be!J\xb5\xb3^Z\x8d\xd7t\x15'\x9d\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9e\u07acL\x02k\x93\x05M\u0171\xd6a\fo9`\xf2\xads\x89A\rXj \xa4\xc0\x00\x00\u07d4\x9e\xe9?3\x9eg&\xece\xee\xa4O\x8aK\xfe\x10\xda=2\x82\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\x9e\xe9v\f\xc2s\xd4pj\xa0\x83u\xc3\xe4o\xa20\xaf\xf3\u054a\x01\xe5.3l\xde\"\x18\x00\x00\u07d4\x9e\xeb\a\xbd+x\x90\x19^}F\xbd\xf2\a\x1bf\x17QM\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4\x9e\xefD-)\x1aD}t\xc5\xd2S\u011e\xf3$\xea\xc1\xd8\xf0\x89\xb9f\b\xc8\x10;\xf0\x00\x00\u07d4\x9e\xf1\x89k\x00|2\xa1Q\x14\xfb\x89\xd7=\xbdG\xf9\x12+i\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9f\x01w\x06\xb80\xfb\x9c0\ufc20\x9fPk\x91WEu4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9f\x10\xf2\xa0F;e\xae0\xb0p\xb3\xdf\x18\xcfF\xf5\x1e\x89\xbd\x89g\x8a\x93 
b\xe4\x18\x00\x00\u07d4\x9f\x19\xfa\u0223$7\xd8\n\u0183z\v\xb7\x84\x17)\xf4\x97.\x89#=\xf3)\x9far\x00\x00\u07d4\x9f\x1a\xa8\xfc\xfc\x89\xa1\xa52\x8c\xbdcD\xb7\x1f'\x8a,\xa4\xa0\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\x9f!0,\xa5\tk\xeat\x02\xb9\x1b\x0f\xd5\x06%O\x99\x9a=\x89C\x97E\x1a\x00=\xd8\x00\x00\u07d4\x9f'\x1d(U\x00\xd78F\xb1\x8fs>%\u074bO]J\x8b\x89'#\xc3F\xae\x18\b\x00\x00\u07d4\x9f4\x97\xf5\xef_\xe60\x95\x83l\x00N\xb9\xce\x02\xe9\x01;K\x89\"V\x86\x1b\xf9\xcf\b\x00\x00\xe0\x94\x9f:t\xfd^~\xdc\xc1\x16)\x93\x17\x13\x81\u02f62\xb7\xcf\xf0\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\x9fF\xe7\xc1\xe9\a\x8c\xae\x860Z\xc7\x06\v\x01F}f\x85\xee\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\x9fIl\xb2\x06\x95c\x14M\b\x11g{\xa0\xe4q:\nAC\x89<\xd2\xe0\xbfc\xa4H\x00\x00\u07d4\x9fJq\x95\xac|\x15\x1c\xa2X\xca\xfd\xa0\u02b0\x83\xe0I\xc6\x02\x89SS\x8c2\x18\\\xee\x00\x00\u07d4\x9fJ\xc9\xc9\xe7\xe2L\xb2DJ\x04T\xfa[\x9a\xd9\xd9-8S\x89-C\xf3\xeb\xfa\xfb,\x00\x00\u07d4\x9f_D\x02kWjJ\xdbA\xe9YaV\x1dA\x03\x9c\xa3\x91\x89\r\x8drkqw\xa8\x00\x00\u07d4\x9f`{?\x12F\x9fDa!\u03bf4u5kq\xb42\x8c\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\x9fa\xbe\xb4o^\x85=\n\x85!\xc7Dnh\xe3L}\ts\x89\x1e[\x8f\xa8\xfe*\xc0\x00\x00\u07d4\x9fd\xa8\xe8\xda\xcfJ\xde0\xd1\x0fMY\xb0\xa3\u056b\xfd\xbft\x8966\x9e\xd7t}&\x00\x00\u07d4\x9ff.\x95'A!\xf1wVncm#\x96L\xf1\xfdho\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9fj2*mF\x99\x81Bj\xe8D\x86]~\xe0\xbb\x15\u01f3\x89\x02\xb5\xeeW\x92\x9f\u06c0\x00\u07d4\x9fy\x86\x92J\xeb\x02h|\xd6A\x89\x18\x9f\xb1g\xde\xd2\xdd\\\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\x9fz\x03\x92\xf8Ws.0\x04\xa3u\xe6\xb1\x06\x8dI\xd801\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9f\x82E\u00eb}\x171d\x86\x1c\u04d9\x1b\x94\xf1\xba@\xa9:\x89\x9b\ny\x1f\x12\x110\x00\x00\u07d4\x9f\x83\xa2\x93\xc3$\xd4\x10l\x18\xfa\xa8\x88\x8fd\u0499\x05L\xa0\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\x9f\x86\xa0f\xed\xb6\x1f\xcbXV\u0793\xb7\\\x8cy\x18d\xb9{\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\x9f\x98\xeb4\xd4iy\xb0\xa6\u078b\x05\xaaS:\x89\xb8%\xdc\xf1\x89\x04\xb0m\xbb\xb4\x0fJ\x00\x00\xe0\x94\x9f\x9f\xe0\xc9_\x10\xfe\xe8z\xf1\xaf r6\xc8\xf3aN\xf0/\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\x9f\xae\xa1\xc5\xe8\x1ez\xcb?\x17\xf1\xc3Q\xee.\u0649\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xa0\b\x01\x98c\xc1\xa7|\x14\x99\xeb9\xbb\u05ff-\u05e3\x1c\xb9\x89\amA\xc6$\x94\x84\x00\x00\u07d4\xa0\t\xbf\ao\x1b\xa3\xfaW\u04a7!r\x18\xbe\xd5VZzz\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xa0\x1e\x94v\u07c4C\x18%\xc86\xe8\x80:\x97\xe2/\xa5\xa0\u034a\x01EB\xba\x12\xa37\xc0\x00\x00\u0794\xa0\x1f\x12\xd7\x0fD\xaa{\x11;(\\\"\xdc\xdbE\x874T\xa7\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xa0\x1f\u0450j\x90\x85\x06\xde\xda\xe1\xe2\b\x12\x88r\xb5n\u7489\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xa0\"\x82@\xf9\x9e\x1d\xe9\xcb2\xd8,\x0f/\xa9\xa3\xd4K\v\xf3\x89V\xbcu\xe2\xd61\x00\x00\x00\xe0\x94\xa0+\xdedahn\x19\xace\f\x97\r\x06r\xe7m\xcbO\u008a\x01\xe0\x92\x96\xc37\x8d\xe4\x00\x00\u07d4\xa0,\x1e4\x06O\x04u\xf7\xfa\x83\x1c\xcb%\x01L:\xa3\x1c\xa2\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\xa0-\u01aa2\x8b\x88\r\u97acTh#\xfc\xcfw@G\xfb\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xa0.?\x8fYY\xa7\xaa\xb7A\x86\x12\x12\x9bp\x1c\xa1\xb8\x00\x10\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa04\u007f\n\x98wc\x90\x16\\\x16m2\x96;\xf7M\xcd\n/\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa05\xa3e$x\xf8-\xbdm\x11_\xaa\x8c\xa9F\xec\x9eh\x1d\x89\x05\xf4\xe4-\u052f\xec\x00\x00\u07d4\xa0:=\xc7\xc53\xd1tB\x95\xbe\x95]a\xaf?R\xb5\x1a\xf5\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xa0E\x9e\xf3i:\xac\xd1d|\xd5\u0612\x989 
L\xefS\xbe\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xa0O*\xe0*\xdd\x14\xc1/\xafe\xcb%\x90\"\u0403\n\x8e&\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xa0l\xd1\xf3\x969l\ndFFQ\xd7\xc2\x05\xef\xaf8|\xa3\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xa0ri\x1c\x8d\xd7\xcdB7\xffr\xa7\\\x1a\x95\x06\xd0\xce[\x9e\x89\x14\x0e\xc8\x0f\xa7\xee\x88\x00\x00\u07d4\xa0r\u03beb\xa9\xe9\xf6\x1c\xc3\xfb\xf8\x8a\x9e\xfb\xfe>\x9a\x8dp\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xa0v\x82\x00\v\x1b\xcf0\x02\xf8\\\x80\xc0\xfa)I\xbd\x1e\x82\xfd\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xa0z\xa1mt\xae\u8a63(\x8dR\xdb\x15Q\u0553\x882\x97\x89 \x86\xac5\x10R`\x00\x00\u07d4\xa0\x8d![[j\xacHa\xa2\x81\xac~@\vx\xfe\xf0L\xbf\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa0\x95\x19p\xdf\u0403/\xb8;\xda\x12\xc25E\xe7\x90Aul\x89 \x86\xac5\x10R`\x00\x00\u07d4\xa0\x9fM^\xaae\xa2\xf4\xcbu\nI\x924\x01\xda\u5410\xaf\x89\a\x96\xe3\xea?\x8a\xb0\x00\x00\xe0\x94\xa0\xa0\xe6R\x04T\x1f\u029b/\xb2\x82\u0355\x13\x8f\xae\x16\xf8\t\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa0\xaa_\x02\x01\xf0M;\xbe\xb8\x98\x13/|\x11g\x94f\xd9\x01\x89\x01\xfb\xedR\x15\xbbL\x00\x00\u07d4\xa0\xaa\xdb\xd9P\x97\"p_m#X\xa5\u01df7\x97\x0f\x00\xf6\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa0\xb7q\x95\x1c\xe1\xde\xee6:\xe2\xb7q\xb7>\a\u0135\xe8\x00\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xa0\xde\\`\x1eif5\u0198\xb7\xae\x9c\xa4S\x9f\u01f9A\xec\x89\x12\xc3\xcb\xd7\x04\xc9w\x00\x00\u07d4\xa0\xe8\xbaf\x1bH\x15L\xf8C\xd4\u00a5\xc0\xf7\x92\xd5(\xee)\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xa0\xfc~S\xc5\xeb\xd2z*\xbd\xacE&\x1f\x84\xab;Q\xae\xfb\x89\xa3\x13\xda\xec\x9b\xc0\xd9\x00\x00\xe0\x94\xa0\xff[L\xf0\x16\x02~\x83#I}D(\xd3\xe5\xa8;\x87\x95\x8a\x01e\x98\xd3\xc8>\xc0B\x00\x00\u07d4\xa1\x06F[\xbd\x19\u1dbc\xe5\r\x1b\x11W\xdcY\tZ60\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xa1\x06\xe6\x92>\xddS\u028e\xd6P\x96\x8a\x91\b\xd6\xcc\xfd\x96p\x8a\x02\x02\xfe\x15\x05\xaf\uc240\x00\u07d4\xa1\t\u12f0\xa3\x9c\x9e\xf8/\xa1\x95\x97\xfc^\xd8\xe9\xebmX\x89X\xe7\x92n\xe8X\xa0\x00\x00\u07d4\xa1\x1a\x03\u013b&\xd2\x1e\xffg}]U\\\x80\xb2TS\xeez\x89\x03\xcb'Y\xbcA\x0f\x80\x00\u07d4\xa1\x1e\xff\xabl\xf0\xf5\x97,\xff\xe4\xd5e\x96\xe9\x89h\x14J\x8f\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4\xa1 M\xad_V\a(\xa3\\\r\x8f\u01d4\x81\x05{\xf7s\x86\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xa1&#\xe6)\u07d3\tg\x04\xb1`\x84\xbe,\u061dV-\xa4\x8a\x01\xcc\xc92E\x11\xe4P\x00\x00\xe0\x94\xa1*l-\x98]\xaf\x0eO_ z\xe8Q\xaa\xf7)\xb32\u034a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xa13m\xfb\x96\xb6\xbc\xbeK>\xdf2\x05\xbeW#\xc9\x0f\xadR\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xa1;\x9d\x82\xa9\x9b<\x9b\xbaZ\xe7.\xf2\x19\x9e\xdc};\xb3l\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94\xa1<\xfe\x82mm\x18A\u072eD;\xe8\u00c7Q\x816\xb5\xe8\x8a\x1d\xa5jK\b5\xbf\x80\x00\x00\xe0\x94\xa1C.\xd2\u01b7wz\x88\xe8\xd4m8\x8epG\u007f \x8c\xa5\x8a\x01\xb1\xa7\xe4\x13\xa1\x96\xc5\x00\x00\u07d4\xa1D\xf6\xb6\x0fr\xd6J!\xe30\xda\xdbb\u0619\n\xde+\t\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa1P%\xf5\x95\xac\xdb\xf3\x11\x0fw\u017f$G~eH\xf9\xe8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa1X\x14\x8a.\x0f>\x92\xdc,\xe3\x8f\xeb\xc2\x01\a\xe3%<\x96\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa1a`\x85\x1d+\x9c4\x9b\x92\xe4o\x82\x9a\xbf\xb2\x10\x945\x95\x89a\t=|,m8\x00\x00\u07d4\xa1f\xf9\x11\xc6D\xac2\x13\u049e\x0e\x1a\xe0\x10\xf7\x94\u056d&\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa1m\x9e=c\x98aY\xa8\x00\xb4h7\xf4^\x8b\xb9\x80\xee\v\x89n\x11u\xdaz\xd1 
\x00\x00\u07d4\xa1pp\xc2\xe9\u0169@\xa4\xec\x0eIT\xc4\xd7\xd6C\xbe\x8fI\x89lk\x17\x03;6\x1c\x80\x00\u07d4\xa1|\x9eC#\x06\x95\x18\x18\x9dR\a\xa0r\x8d\u02d20j?\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa1\x83`\xe9\x85\xf2\x06.\x8f\x8e\xfe\x02\xad,\xbc\x91\xad\x9aZ\xad\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xa1\x91\x14\x05\xcfn\x99\x9e\xd0\x11\xf0\xdd\xcd*O\xf7\u008f%&\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xa1\x92i\x80\a\xcc\x11\xaa`=\"\x1d_\xee\xa0v\xbc\xf7\xc3\r\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa1\x92\xf0j\xb0R\xd5\xfd\u007f\x94\xee\xa81\x8e\x82x\x15\xfegz\x89\a\x1f\x8a\x93\xd0\x1eT\x00\x00\u07d4\xa1\x99\x81D\x96\x8a\\p\xa6AUT\xce\xfe\u0082F\x90\u0125\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa1\xa1\xf0\xfam \xb5\nyO\x02\xefR\b\\\x9d\x03j\xa6\u028965\u026d\xc5\u07a0\x00\x00\u07d4\xa1\xae\x8dE@\xd4\xdbo\xdd\xe7\x14oA[C\x1e\xb5\\y\x83\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xa1\xb4|M\x0e\xd6\x01\x88B\xe6\xcf\xc8c\n\u00e3\x14.^k\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xa1\xc4\xf4Z\x82\xe1\xc4x\xd8E\b.\xb1\x88u\xc4\xeae9\xab\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\xa1\xdc\xd0\xe5\xb0Z\x97|\x96#\xe5\xae/Y\xb9\xad\xa2\xf3>1\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xa1\xe48\n;\x1ft\x96s\xe2p\"\x99\x93\xeeU\xf3Vc\xb4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa1\xf1\x93\xa0Y/\x1f\xeb\x9f\xdf\xc9\n\xa8\x13xN\xb8\x04q\u0249K\xe4\xe7&{j\xe0\x00\x00\u07d4\xa1\xf2\x85@P\xf8re\x8e\xd8.R\xb0\xad{\xbc\x1c\xb9!\xf6\x89m\x03\x17\xe2\xb3&\xf7\x00\x00\u07d4\xa1\xf5\xb8@\x14\rZ\x9a\xce\xf4\x02\xac<\u00c8jh\xca\xd2H\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa1\xf7e\xc4O\xe4_y\x06w\x94HD\xbeO-B\x16_\xbd\x89\xc7\xe9\xcf\xdev\x8e\xc7\x00\x00\u07d4\xa1\xf7\xdd\xe1\xd78\xd8\xcdg\x9e\xa1\xee\x96[\xee\"K\xe7\xd0M\x89=\x18DP\xe5\xe9<\x00\x00\u07d4\xa1\xf8\u063c\xf9\x0ew\u007f\x19\xb3\xa6Iu\x9a\xd9P'\xab\xdf\u00c9\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa2\x02TrB\x80onp\xe7@X\xd6\xe5)-\xef\xc8\xc8\u0509l\x87T\xc8\xf3\f\b\x00\x00\u07d4\xa2\r\a\x1b\x1b\x000cI}y\x90\xe1$\x9d\xab\xf3l5\xf7\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa2\r\x8f\xf6\f\xaa\xe3\x1d\x02\xe0\xb6e\xfaC]v\xf7|\x94B\x89\x1a\x8a\x90\x9d\xfc\xef@\x00\x00\u07d4\xa2\x11\xda\x03\xcc\x0e1\xec\xceS\t\x99\x87\x18QU(\xa0\x90\u07c9\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xa2\x14B\xab\x054\n\xdeh\xc9\x15\xf3\xc39\x9b\x99U\xf3\xf7\xeb\x89*\x03I\x19\u07ff\xbc\x00\x00\u07d4\xa2\"\"Y\u075c>=\xed\x12p\x84\xf8\b\xe9*\x18\x870,\x89\b\xc83\x9d\xaf\xedH\x00\x00\u07d4\xa2*\xde\r\xdb\\n\xf8\xd0\u034d\xe9M\x82\xb1\x10\x82\xcb.\x91\x897KW\xf3\xce\xf2p\x00\x00\u07d4\xa2L:\xb6!\x81\xe9\xa1[x\xc4b\x1eL|X\x81'\xbe&\x89\b\xcd\xe4:\x83\xd31\x00\x00\u07d4\xa2W\xadYK\u0603(\xa7\xd9\x0f\xc0\xa9\a\u07d5\xee\xca\xe3\x16\x89\x1c7\x86\xff8F\x93\x00\x00\u07d4\xa2[\bd7\xfd!\x92\u0420\xf6On\xd0D\xf3\x8e\xf3\xda2\x89\x12)\x0f\x15\x18\v\xdc\x00\x00\u07d4\xa2v\xb0X\u02d8\u060b\xee\xdbg\xe5CPl\x9a\r\x94p\u0609\x90\xaa\xfcv\xe0/\xbe\x00\x00\u07d4\xa2\x82\xe9i\xca\xc9\xf7\xa0\xe1\xc0\u0350\xf5\xd0\xc48\xacW\r\xa3\x89\"\a\xeb\x89\xfc'8\x00\x00\xe0\x94\xa2\x91\xe9\u01d9\rU-\u046e\x16\u03bc?\xca4,\xba\xf1\u044a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xa2\x93\x19\xe8\x10i\xe5\xd6\r\xf0\x0f=\xe5\xad\xee5\x05\xec\xd5\xfb\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xa2\x96\x8f\xc1\xc6K\xac\vz\xe0\u058b\xa9I\x87Mm\xb2S\xf4\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xa2\x9d[\xdat\xe0\x03GHr\xbdX\x94\xb8\x853\xffd\u00b5\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa2\x9df\x1acv\xf6m\vt\xe2\xfe\x9d\x8f&\xc0$~\xc8L\x89\xdf3\x04\a\x9c\x13\xd2\x00\x00\u07d4\xa2\xa45\xdeD\xa0\x1b\xd0\ucc9eD\xe4vD\xe4j\f\xdf\xfb\x89\x1b\x1dDZz\xff\xe7\x80\x00\u07d4\xa2\xac\xe4\u0253\xbb\x1eS\x83\xf8\xact\xe1y\x06n\x81O\x05\x91\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xa2\xb7\x01\xf9\xf5\xcd\u041eK\xa6+\xae\xba\u3a02W\x10X\x85\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa2\u0145O\xf1Y\x9f\x98\x89,W%\xd2b\xbe\x1d\xa9\x8a\xad\xac\x89\x11\t\xff30\x10\xe7\x80\x00\u07d4\xa2\xc7\xea\xff\xdc,\x9d\x93sE l\x90\x9aR\u07f1LG\x8f\x89\a\xc0\x86\x0eZ\x80\xdc\x00\x00\u07d4\xa2\u04aabk\t\xd6\xd4\xe4\xb1?\u007f\xfcZ\x88\xbdz\xd3gB\x89\xfb\x80xPuS\x83\x00\x00\u07d4\xa2\u04cd\xe1\xc79\x06\xf6\xa7\xcan\xfe\xb9|\xf6\xf6\x9c\xc4!\xbe\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xa2\xdce\xee%kY\xa5\xbdy)wO\x90K5\x8d\U000ed84a\x04\x83\xbc\xe2\x8b\xeb\t\xf8\x00\x00\u07d4\xa2\xe0h:\x80]\xe6\xa0^\xdb/\xfb\xb5\xe9o\x05p\xb67\u00c9\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa2\u1e2a\x90\x0e\x9c\x13\x9b?\xa1\"5OaV\xd9*\x18\xb1\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xa2\u2d54\x1e\f\x01\x94K\xfe\x1d_\xb4\xe8\xa3K\x92,\u03f1\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa2\xe4`\xa9\x89\xcb\x15V_\x9e\u0327\xd1!\xa1\x8eN\xb4\x05\xb6\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa2\xec\xce,I\xf7*\t\x95\xa0\xbd\xa5z\xac\xf1\xe9\xf0\x01\xe2*\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xa2\xf4r\xfeO\"\xb7}\xb4\x89!\x9e\xa4\x02=\x11X*\x93)\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xa2\xf7\x98\xe0w\xb0}\x86\x12N\x14\a\xdf2\x89\r\xbbKcy\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa2\xf8k\xc0a\x88N\x9e\xef\x05d\x0e\xddQ\xa2\xf7\xc0Yli\x89llD\xfeG\xec\x05\x00\x00\u07d4\xa2\xfa\x17\xc0\xfbPl\xe4\x94\x00\x8b\x95W\x84\x1c?d\x1b\x8c\xae\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa3\x04X\x8f\r\x85\f\xd8\u04cfv\xe9\xe8<\x1b\xf6>3>\u0789\x02(V\x01!l\x8c\x00\x00\u07d4\xa3\x05\x8cQszN\x96\xc5_.\xf6\xbd{\xb3X\x16~\u00a7\x89 \xdb:\xe4H\x1a\u0500\x00\u07d4\xa3\t\xdfT\u02bc\xe7\f\x95\xec03\x14\x9c\xd6g\x8ao\xd4\u03c9\f\x1f\x12\xc7Q\x01X\x00\x00\u07d4\xa3\nER\x0eR\x06\xd9\x00@p\xe6\xaf>{\xb2\xe8\xddS\x13\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xa3\x0e\n\xcbSL\x9b0\x84\xe8P\x1d\xa0\x90\xb4\xeb\x16\xa2\xc0\u0349lk\x93[\x8b\xbd@\x00\x00\u07d4\xa3 
0\x95\xed\xb7\x02\x8ehq\xce\n\x84\xf5HE\x9f\x830\n\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xa3!\t\x1d0\x18\x06By\xdb9\x9d+*\x88\xa6\xf4@\xae$\x89\xadx\xeb\u016cb\x00\x00\x00\u07d4\xa3#-\x06\x8dP\x06I\x03\xc9\xeb\xc5c\xb5\x15\xac\u0237\xb0\x97\x89l\x87T\xc8\xf3\f\b\x00\x00\xe0\x94\xa3$\x1d\x89\n\x92\xba\xf5)\b\xdcJ\xa0Irk\xe4&\xeb\u04ca\x04<-\xa6a\xca/T\x00\x00\u07d4\xa3)F&\xec)\x84\xc4;C\xdaM]\x8eFi\xb1\x1dKY\x896\xa4\xcfcc\x19\xc0\x00\x00\u07d4\xa3,\xf7\xdd\xe2\f=\xd5g\x9f\xf5\xe3%\x84\\p\u0156&b\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa39\xa3\xd8\xca(\x0e'\xd2A[&\xd1\xfcy2(\xb6`C\x896\U00086577\x8f\xf0\x00\x00\u07d4\xa3<\xb4P\xf9[\xb4n%\xaf\xb5\x0f\xe0_\xee\xe6\xfb\x8c\xc8\xea\x89*\x11)\u0413g \x00\x00\u07d4\xa3?p\xdaru\xef\x05q\x04\u07e7\xdbd\xf4r\xe9\xf5\xd5S\x89\x04YF\xb0\xf9\xe9\xd6\x00\x00\u07d4\xa3@v\xf8K\xd9\x17\xf2\x0f\x83B\u024b\xa7\x9eo\xb0\x8e\xcd1\x89\u3bb5sr@\xa0\x00\x00\u07d4\xa3C\x0e\x1fd\u007f2\x1e\xd3G9V##\xc7\xd6#A\vV\x8964\xfb\x9f\x14\x89\xa7\x00\x00\u07d4\xa3O\x9dV\x8b\xf7\xaf\xd9L*[\x8a_\xf5\\f\xc4\by\x99\x89\x84}P;\"\x0e\xb0\x00\x00\u07d4\xa3V\x06\xd5\x12 \xee\u007f!F\xd4\x11X.\xe4\xeeJEYn\x89\u062a\xbe\b\v\xc9@\x00\x00\u07d4\xa3VU\x1b\xb7}OE\xa6\xd7\xe0\x9f\n\b\x9ey\u0322I\u02c9\x12nr\xa6\x9aP\xd0\x00\x00\u07d4\xa3\\\x19\x13,\xac\x195Wj\xbf\xedl\x04\x95\xfb\a\x88\x1b\xa0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa3e\x91\x8b\xfe?&'\xb9\xf3\xa8gu\xd8un\x0f\u0629K\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xa3n\r\x94\xb9Sd\xa8&q\xb6\b\xcb-72Ea)\t\x89\b!\xd2!\xb5)\x1f\x80\x00\u07d4\xa3u\xb4\xbc$\xa2N\x1fyu\x93\xcc0+/3\x10c\xfa\\\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa3v\"\xac\x9b\xbd\xc4\xd8+u\x01]t[\x9f\x8d\xe6Z(\uc25d\xc0\\\xce(\u00b8\x00\x00\xe0\x94\xa3y\xa5\a\fP=/\xac\x89\xb8\xb3\xaf\xa0\x80\xfdE\xedK\xec\x8a\x04+\xf0kx\xed;P\x00\x00\u07d4\xa3\x80-\x8ae\x9e\x89\xa2\xc4~\x90T0\xb2\xa8'\x97\x89P\xa7\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa3\x83\x06\xcbp\xba\xa8\u4446\xbdh\xaap\xa8=$/)\a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa3\x84vi\x1d4\x94.\xeak/v\x88\x92#\x04}\xb4az\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa3\x87\xceN\x96\x1axG\xf5`\a\\d\xe1YkVA\xd2\x1c\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\xa3\x87\xec\xde\x0e\xe4\xc8\a\x94\x99\xfd\x8e\x03G;\u060a\xd7R*\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xa3\x88:$\xf7\xf1f 
_\x1aj\x99I\al&\xa7nqx\x89b\xa9\x92\xe5:\n\xf0\x00\x00\xe0\x94\xa3\x8b[\xd8\x1a\x9d\xb9\u04b2\x1d^\xc7\xc6\x05R\xcd\x02\xedV\x1b\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xa3\x90\xca\x12+\x85\x01\xee>^\a\xa8\xcaKA\x9f~M\xae\x15\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xa3\x93*1\xd6\xffu\xfb;\x12q\xac\xe7\u02a7\xd5\xe1\xff\x10Q\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xa3\x94\xadO\xd9\xe6S\x0eo\\S\xfa\xec\xbe\u0781\xcb\x17-\xa1\x8a\x01/\x93\x9c\x99\xed\xab\x80\x00\x00\u07d4\xa3\x97\x9a\x92v\n\x13Z\xdfi\xd7/u\xe1gu_\x1c\xb8\u00c9\x05k\xc7^-c\x10\x00\x00\xe0\x94\xa3\x9b\xfe\xe4\xae\u027du\xbd\"\u01b6r\x89\x8c\xa9\xa1\xe9]2\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa3\xa2b\xaf\u0493h\x19#\b\x92\xfd\xe8O-ZYJ\xb2\x83\x89e\xea=\xb7UF`\x00\x00\u07d4\xa3\xa2\xe3\x19\xe7\u04e1D\x8bZ\xa2F\x89S\x16\f-\xbc\xbaq\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa3\xa5{\a\x16\x13(\x04\xd6\n\xac(\x11\x97\xff+=#{\x01\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xa3\xa9>\xf9\xdb\xea&6&=\x06\xd8I/jA\u0790|\"\x89\x03@\xaa\xd2\x1b;p\x00\x00\xe0\x94\xa3\xae\x18y\x00}\x80\x1c\xb5\xf3RqjM\u063a'!\xde=\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\xa3\xba\r:6\x17\xb1\xe3\x1bNB,\xe2i\xe8s\x82\x8d]i\x89.\x14\x1e\xa0\x81\xca\b\x00\x00\u07d4\xa3\xbc\x97\x9bp\x80\t/\xa1\xf9/n\x0f\xb3G\u2359PE\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\xa3\xbf\xf1\u07e9\x97\x16h6\f\r\x82\x82\x842\xe2{\xf5Ng\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xa3\xc1J\xce(\xb1\x92\u02f0b\x14_\u02fdXi\xc6rq\xf6\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xa3\xc3:\xfc\x8c\xb4pN#\x15=\xe2\x04\x9d5\xaeq3$r\x89+X\xad\u06c9\xa2X\x00\x00\u07d4\xa3\u0430<\xff\xbb&\x9fyj\u009d\x80\xbf\xb0}\xc7\u01ad\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa3\u0543\xa7\xb6[#\xf6\vy\x05\xf3\xe4\xaab\xaa\xc8\u007fB'\x898\xbe\xfa\x12mZ\x9f\x80\x00\u07d4\xa3\xdb6J3-\x88K\xa9;&\x17\xaeM\x85\xa1H\x9b\xeaG\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xa3\xe0Q\xfbtJ\xa3A\f;\x88\xf8\x99\xf5\xd5\u007f\x16\x8d\xf1-\x89\xa00\xdc\xeb\xbd/L\x00\x00\u07d4\xa3\xe3\xa6\xeaP\x95s\xe2\x1b\xd0#\x9e\xce\x05#\xa7\xb7\u061b/\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xa3\xf4\xad\x14\xe0\xbbD\xe2\xce,\x145\x9cu\xb8\xe72\xd3pT\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa3\xfa\xccP\x19\\\vI3\xc8X\x97\xfe\xcc[\xbd\x99\\4\xb8\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa4\x03Z\xb1\xe5\x18\b!\xf0\xf3\x80\xf1\x13\x1bs\x87\xc8\u0641\u0349\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa4\n\xa2\xbb\xce\fr\xb4\xd0\xdf\xff\xccBq[+T\xb0\x1b\xfa\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa4\x19\xa9\x84\x14#c&uuV`\x894\x0e\xea\x0e\xa2\b\x19\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94\xa4!\u06f8\x9b:\aA\x90\x84\xad\x10\xc3\xc1]\xfe\x9b2\xd0\u008a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xa4\"\xe4\xbf\v\xf7AG\u0309[\xed\x8f\x16\xd3\xce\xf3BaT\x89\x12\xef?b\xee\x116\x80\x00\u07d4\xa4%\x9f\x83E\xf7\u3a37+\x0f\xec,\xf7^2\x1f\xdaM\u0089g\x8a\x93 
b\xe4\x18\x00\x00\u07d4\xa4)\b\xe7\xfeS\x98\n\x9a\xbf@D\xe9W\xa5Kp\u973e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa4)\xfa\x88s\x1f\xdd5\x0e\x8e\xcdn\xa5B\x96\xb6HO\u6549j\xc5\xc6-\x94\x86\a\x00\x00\xe0\x94\xa40\x99]\xdb\x18[\x98e\xdb\xe6%9\xad\x90\xd2.Ks\u008a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa46\xc7TS\xcc\xcaJ\x1f\x1bb\xe5\u0123\r\x86\xdd\xe4\xbeh\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xa47\xfen\xc1\x03\u028d\x15\x8fc\xb34\"N\u032c[>\xa3\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xa4;m\xa6\xcbz\xacW\x1d\xff'\xf0\x9d9\xf8F\xf57i\xb1\x89\x14\x99\x8f2\xacxp\x00\x00\u07d4\xa4;\x81\xf9\x93V\xc0\xaf\x14\x1a\x03\x01\rw\xbd\x04,q\xc1\xee\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa4>\x19G\xa9$+5Ua\xc3\n\x82\x9d\xfe\uc881Z\xf8\x89\xd2=\x99\x96\x9f\u0591\x80\x00\u07d4\xa4H\x9aP\xea\xd5\xd5DZ{\xeeM-U6\u00a7lA\xf8\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa4O\xe8\x00\xd9o\xca\xd7;qp\xd0\xf6\x10\u02cc\x06\x82\xd6\u0389\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xa4T2\xa6\xf2\xac\x9dVW{\x93\x8a7\xfa\xba\xc8\xcc|F\x1c\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa4f\xd7p\u0618\xd8\xc9\xd4\x05\xe4\xa0\xe5Q\xef\xaf\xcd\xe5<\xf9\x89\x1a\xb2\xcf|\x9f\x87\xe2\x00\x00\xe0\x94\xa4g\a1\x17X\x93\xbb\xcf\xf4\xfa\x85\u0397\xd9O\xc5\x1cK\xa8\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xa4kC\x87\xfbM\xcc\xe0\x11\xe7nMsT}D\x81\xe0\x9b\xe5\x89Hz\x9a0E9D\x00\x00\u07d4\xa4l\xd27\xb6>\xeaC\x8c\x8e;e\x85\xf6y\xe4\x86\b2\xac\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa4wy\u063c\x1c{\xce\x0f\x01\x1c\xcb9\xefh\xb8T\xf8\u078f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa4\x82kl8\x82\xfa\xd0\xed\\\x8f\xbb%\xcc@\xccO3u\x9f\x89p\x1bC\xe3D3\xd0\x00\x00\u07d4\xa4\x87Y(E\x8e\xc2\x00]\xbbW\x8c\\\xd35\x80\xf0\xcf\x14R\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa4\x9fR:\xa5\x13d\xcb\xc7\u0655\x16=4\xebY\r\xed/\b\x89\x90'B\x1b*\x9f\xbc\x00\x00\u07d4\xa4\xa4\x9f\v\xc8h\x8c\xc9\xe6\xdc\x04\xe1\xe0\x8dR\x10&\xe6Ut\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa4\xa7\xd3\x06\xf5\x10\xcdX5\x94(\xc0\xd2\xf7\xc3`\x9dVt\u05c9\xb5\x8c\xb6\x1c<\xcf4\x00\x00\u07d4\xa4\xa8:\a8y\x9b\x97\x1b\xf2\xdep\x8c.\xbf\x91\x1c\xa7\x9e\xb2\x89 \x86\xac5\x10R`\x00\x00\u07d4\xa4\xb0\x9d\xe6\xe7\x13\xdciTnv\xef\n\xcf@\xb9O\x02A\xe6\x89\x11}\xc0b~\xc8p\x00\x00\xe0\x94\xa4\u04b4)\xf1\xadSI\xe3\x17\x04\x96\x9e\xdc_%\xee\x8a\xca\x10\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\xa4\xd6\xc8.\u076eYG\xfb\xe9\xcd\xfb\xd5H\xae3\xd9\x1aq\x91\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xa4\xda4E\r\"\xec\x0f\xfc\xed\xe0\x00K\x02\xf7\x87.\xe0\xb7:\x89\x05\x0fafs\xf0\x83\x00\x00\xe0\x94\xa4\xddY\xab^Q}9\x8eI\xfaS\u007f\x89\x9f\xedL\x15\xe9]\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xa4\xe6#E\x1e~\x94\xe7\u86e5\xed\x95\u0228:b\xff\xc4\xea\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa4\xed\x11\xb0r\u061f\xb16u\x9f\u019bB\x8cH\xaa]L\xed\x89\x0e?\x15'\xa0<\xa8\x00\x00\u07d4\xa4\xfb\x14@\x9ag\xb4V\x88\xa8Y>\\\xc2\xcfYl\xedo\x11\x89a\t=|,m8\x00\x00\xe0\x94\xa5\x14\xd0\x0e\xddq\b\xa6\xbe\x83\x9ac\x8d\xb2AT\x18\x17A\x96\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\xe0\x94\xa5\"\xde~\xb6\xae\x12PR*Q13\xa9;\xd4(IG\\\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xa5$\xa8\xcc\xccIQ\x8d\x17\n2\x82p\xa2\xf8\x813\xfb\xaf]\x89\x0f\xf7\x02-\xac\x10\x8a\x00\x00\u07d4\xa59\xb4\xa4\x01\xb5\x84\xdf\xe0\xf3D\xb1\xb4\"\xc6UC\x16~.\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xa5>\xadT\xf7\x85\n\xf2\x148\xcb\xe0z\xf6\x86'\x9a1[\x86\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa5C\xa0f\xfb2\xa8f\x8a\xa0sj\f\x9c\xd4\rx\t\x87'\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa5gw\vj\xe3 
\xbd\xdeP\xf9\x04\xd6c\xe7F\xa6\x1d\xac\xe6\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa5h\xdbMW\xe4\xd6tb\xd73\u019a\x9e\x0f\xe2n!\x83'\x89;k\xff\x92f\xc0\xae\x00\x00\u07d4\xa5i\x8059\x1eg\xa4\x90\x13\xc0\x00 yY1\x14\xfe\xb3S\x89\r\x02\xabHl\xed\xc0\x00\x00\u07d4\xa5p\":\xe3\u02a8QA\x8a\x98C\xa1\xacU\xdbH$\xf4\xfd\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa5s`\xf0\x02\xe0\xd6M-tE}\x8c\xa4\x85~\xe0\v\xcd\u07c9\x123\xe22\xf6\x18\xaa\x00\x00\u07d4\xa5u\xf2\x89\x1d\xcf\u0368<\\\xf0\x14t\xaf\x11\xee\x01\xb7-\u0089\x05l\xd5_\xc6M\xfe\x00\x00\u07d4\xa5x;\xf342\xff\x82\xacI\x89\x85\xd7\xd4`\xaeg\xec6s\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xa5\x87MuF5\xa7b\xb3\x81\xa5\xc4\u01d2H:\xf8\xf2=\x1d\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\xe0\x94\xa5\xa4\"\u007fl\xf9\x88%\xc0\u057a\xffS\x15u,\xcc\x1a\x13\x91\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa5\xabK\xd3X\x8fF\xcb'.V\xe9=\xee\u04c6\xba\x8bu=\x89HB\xf0A\x05\x87,\x80\x00\xe0\x94\xa5\xba\xd8e\t\xfb\xe0\xe0\xe3\xc0\xe9?m8\x1f\x1a\xf6\xe9\u0501\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xa5\xc36\b;\x04\xf9G\x1b\x8cn\xd76y\xb7Mf\xc3c\uc263e\nL\x9d \xe2\x00\x00\u07d4\xa5\xcd\x129\x92\x19K4\xc4x\x13\x140;\x03\xc5IH\xf4\xb9\x89l\xfc\xc3\xd9\x1d\xa5c\x00\x00\u07d4\xa5\u0578\xb6-\x00-\xef\x92A7\x10\xd1;o\xf8\xd4\xfc}\u04c9\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\xa5\xd9ni}F5\x8d\x11\x9a\xf7\x81\x9d\xc7\b\u007fj\xe4\u007f\xef\x8a\x03\x17\xbe\xe8\xaf3\x15\xa7\x80\x00\u07d4\xa5\xde^CO\xdc\xddh\x8f\x1c1\xb6\xfbQ,\xb1\x96rG\x01\x89+^:\xf1k\x18\x80\x00\x00\u07d4\xa5\xe0\xfc<:\xff\xed=\xb6q\tG\xd1\xd6\xfb\x01\u007f>'m\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa5\xe9;I\xea|P\x9d\xe7\xc4Ml\xfe\xdd\xefY\x10\u07aa\xf2\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa5\xe9\xcdKt%]\"\xb7\u0672z\xe8\xddC\xedn\xd0%+\x89)\x8d\xb2\xf5D\x11\u0640\x00\xe0\x94\xa5\xf0\a{5\x1flP\\\xd5\x15\u07e6\xd2\xfa\u007f\\L\u0487\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xa5\xf0u\xfd@\x135W{f\x83\u0081\xe6\xd1\x01C-\xc6\xe0\x89\x91Hx\xa8\xc0^\xe0\x00\x00\u07d4\xa5\xfe,\xe9\u007f\x0e\x8c8V\xbe\r\xe5\xf4\u0732\xce]8\x9a\x16\x89\x01=\xb0\xb8\xb6\x86>\x00\x00\u07d4\xa5\xffb\"-\x80\xc0\x13\xce\xc1\xa0\xe8\x85\x0e\xd4\xd3T\xda\xc1m\x89\vA\a\\\x16\x8b\x18\x00\x00\u07d4\xa6\t\xc2m\xd3P\xc25\xe4K+\x9c\x1d\xdd\xcc\u0429\xd9\xf87\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa6\f\x12\tuO]\x87\xb1\x81\xdaO\b\x17\xa8\x18Y\xef\x9f\u0609\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\xe0\x94\xa6\x10\x1c\x96\x1e\x8e\x1c\x15y\x8f\xfc\xd0\xe3 \x1dw\x86\xec7:\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xa6\x13Ei\x96@\x8a\xf1\xc2\xe9>\x17w\x88\xabU\x89^+2\x8a\x01Y\x19\xffG|\x88\xb8\x00\x00\u07d4\xa6\x18\x87\x81\x8f\x91J 
\xe3\x10w)\v\x83qZk-n\xf9\x89e\xea=\xb7UF`\x00\x00\u07d4\xa6\x1aT\xdfxJD\xd7\x1bw\x1b\x871u\t!\x13\x81\xf2\x00\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa6\x1c\u06ed\xf0K\x1eT\u0203\xde`\x05\xfc\xdf\x16\xbe\xb8\xeb/\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xa69\xac\xd9k1\xbaS\xb0\u0407c\"\x9e\x1f\x06\xfd\x10^\x9d\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xa6BP\x10\x04\xc9\x0e\xa9\xc9\xed\x19\x98\xba\x14\nL\xd6,o_\x89\r\x94\xfb\x8b\x10\xf8\xb1\x80\x00\u07d4\xa6D\xed\x92,\xc27\xa3\xe5\u0117\x9a\x99Tw\xf3nP\xbcb\x89\x1f\xa7=\x84]~\x96\x00\x00\u07d4\xa6F\xa9\\moY\xf1\x04\xc6T\x1dw`uz\xb3\x92\xb0\x8c\x89\u3bb5sr@\xa0\x00\x00\xe0\x94\xa6HL\u0184\xc4\xc9\x1d\xb5>\xb6\x8aM\xa4Zjk\xda0g\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xa6N_\xfbpL,\x919\xd7~\xf6\x1d\x8c\u07e3\x1dz\x88\xe9\x89\a\xc0\x86\x0eZ\x80\xdc\x00\x00\xe0\x94\xa6T&\xcf\xf3x\xed#%5\x13\xb1\x9fIm\xe4_\xa7\u13ca\x01\x86P\x12|\xc3\u0700\x00\x00\u07d4\xa6jIc\xb2\u007f\x1e\xe1\x93+\x17+\xe5\x96N\r:\xe5KQ\x89\t`\xdbwh\x1e\x94\x00\x00\u07d4\xa6\u007f8\x81\x95eB:\xa8_>:\xb6\x1b\xc7c\u02eb\x89\u0749sw\xb0\"\u01be\b\x00\x00\u07d4\xa6\x8c14E\xc2-\x91\x9e\xe4l\xc2\xd0\xcd\xff\x04:uX%\x89\x04\x13t\xfd!\xb0\u0600\x00\u07d4\xa6\x8e\f0\u02e3\xbcZ\x88>T\x03 \xf9\x99\xc7\xcdU\x8e\\\x89a\x9237b\xa5\x8c\x80\x00\u07d4\xa6\x90\xf1\xa4\xb2\n\xb7\xba4b\x86 \u079c\xa0@\xc4<\x19c\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xa6\x9d|\xd1}HB\xfe\x03\xf6*\x90\xb2\xfb\xf8\xf6\xaf{\xb3\x80\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xa6\xa0\x82R\xc8YQw\xcc.`\xfc'Y>#y\xc8\x1f\xb1\x89\x01\x16Q\xac>zu\x80\x00\u07d4\xa6\xa0\xdeB\x1a\xe5Om\x17(\x13\b\xf5dm/9\xf7w]\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa6\xb2\xd5s)s`\x10,\a\xa1\x8f\xc2\x1d\xf2\xe7I\x9f\xf4\xeb\x89\xd9o\u0390\u03eb\xcc\x00\x00\xe0\x94\xa6\xc9\x10\xceMIJ\x91\x9c\u036a\xa1\xfc;\x82\xaat\xba\x06\u03ca\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xa6\u3ea3\x8e\x10J\x1e'\xa4\xd8(i\xaf\xb1\xc0\xaen\xff\x8d\x89\x01\x11@\ueb4bq\x00\x00\u07d4\xa6\xee\xbb\xe4d\u04d1\x87\xbf\x80\u029c\x13\xd7 '\xec[\xa8\xbe\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xa6\xf6+\x8a=\u007f\x11\"\a\x01\xab\x9f\xff\xfc\xb3'\x95\x9a'\x85\x89\x1bn)\x1f\x18\u06e8\x00\x00\u07d4\xa6\xf93\a\xf8\xbc\xe01\x95\xfe\u0387 C\xe8\xa0?{\xd1\x1a\x89\x9csK\xadQ\x11X\x00\x00\u07d4\xa7\x01\xdfy\xf5\x94\x90\x1a\xfe\x14DH^k \u00fd\xa2\xb9\xb3\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xa7\x02L\xfdt,\x1e\xc1<\x01\xfe\xa1\x8d0B\xe6_\x1d]\xee\x8a\x02c\x11\x9a(\xab\u0430\x80\x00\u07d4\xa7\x18\xaa\xadY\xbf9\\\xba+#\xe0\x9b\x02\xfe\f\x89\x81bG\x8960<\x97\xe4hx\x00\x00\u07d4\xa7$|S\xd0Y\xeb|\x93\x10\xf6(\xd7\xfclj\nw?\b\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\xa7%7c\xcfJu\u07d2\xca\x1evm\xc4\xee\x8a'E\x14{\x8a\x02F7p\xe9\n\x8fP\x00\x00\u07d4\xa7.\xe6f\u0133^\x82\xa5\x06\x80\x8bD<\xeb\xd5\xc62\xc7\u0749+^:\xf1k\x18\x80\x00\x00\u07d4\xa7DD\xf9\x0f\xbbT\xe5o:\u0276\xcf\u032aH\x19\xe4aJ\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa7GC\x9a\xd0\u04d3\xb5\xa08a\xd7r\x962m\u8edd\xb9\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xa7`{BW;\xb6\xf6\xb4\xd4\xf2<~*&\xb3\xa0\xf6\xb6\xf0\x89WG=\x05\u06ba\xe8\x00\x00\xe0\x94\xa7i)\x89\n{G\xfb\x85\x91\x96\x01lo\u0742\x89\u03b7U\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u0794\xa7kt?\x98\x1bi0r\xa11\xb2+\xa5\x10\x96\\/\xef\u05c8\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xa7m?\x15bQ\xb7,\f\xcfKG\xa39<\xbdoI\xa9\u0149Hz\x9a0E9D\x00\x00\u07d4\xa7t(\xbc\xb2\xa0\xdbv\xfc\x8e\xf1\xe2\x0eF\x1a\n2\u016c\x15\x89\x15\xbeat\xe1\x91.\x00\x00\u07d4\xa7u\x8c\xec\xb6\x0e\x8faL\u0396\x13~\xf7+O\xbd\awJ\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xa7w^J\xf6\xa2:\xfa 
\x1f\xb7\x8b\x91^Q\xa5\x15\xb7\xa7(\x89\x06\x81U\xa46v\xe0\x00\x00\u07d4\xa7\u007f>\u1793\x88\xbb\xbb\"\x15\xc6#\x97\xb9e`\x13#`\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xa7\x85\x9f\xc0\u007fun\xa7\xdc\xeb\xbc\xcdB\xf0X\x17X-\x97?\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa7\x96lH\x9fLt\x8az\u902a'\xa5t%\x17g\xca\xf9\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xa7\xa3\xbba9\xb0\xad\xa0\f\x1f\u007f\x1f\x9fV\u0654\xbaM\x1f\xa8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa7\xa3\xf1S\xcd\u00c8!\xc2\f]\x8c\x82A\xb2\x94\xa3\xf8+$\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xa7\xa5\x17\u05ed5\x82\v\t\u0517\xfa~U@\xcd\xe9IXS\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xa7\xc9\u04c8\xeb\xd8s\xe6k\x17\x13D\x83\x97\xd0\xf3\u007f\x8b\u04e8\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xa7\u073b\xa9\xb9\xbfgb\xc1EAlPjq\u3d17 \x9c\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xa7\xe7O\v\xdb'\x8f\xf0\xa8\x05\xa6Ha\x8e\xc5+\x16o\xf1\xbe\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xa7\xe87r\xbc \x0f\x90\x06\xaa*&\r\xba\xa8H=\xc5+0\x89\vB\xd56f7\xe5\x00\x00\xe0\x94\xa7\xef5\u0387\xed\xa6\u008d\xf2HxX\x15\x05>\xc9zPE\x8a\x01\x0f\f\xe9I\xe0\x0f\x93\x00\x00\u07d4\xa7\xf9\"\f\x80G\x82k\xd5\xd5\x18?Ngjmw\xbf\xed6\x89\bPh\x97k\xe8\x1c\x00\x00\u07d4\xa8\a\x10O'\x03\xd6y\xf8\u07af\xc4B\xbe\xfe\x84\x9eB\x95\v\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa8\f\xb1s\x8b\xac\b\xd4\xf9\xc0\x8bM\xef\xf5\x15T_\xa8XO\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xa8\x19\xd2\xec\xe1\"\xe0(\xc8\xe8\xa0J\x06M\x02\xb9\x02\x9b\b\xb9\x8965\u026d\xc5\u07a0\x00\x00\u0794\xa8%\xfdZ\xbby&\xa6|\xf3k\xa2F\xa2K\xd2{\xe6\xf6\xed\x88\xf4?\xc2\xc0N\xe0\x00\x00\u07d4\xa8(U9\x86\x9d\x88\xf8\xa9aS7Uq}~\xb6Uv\xae\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xa83\x82\xb6\xe1Rg\x97J\x85P\xb9\x8fqv\xc1\xa3S\xf9\xbe\x89\xbf\xfd\xaf/\xc1\xb1\xa4\x00\x00\xe0\x94\xa8DlG\x81\xa77\xacC(\xb1\xe1[\x8a\v?\xbb\x0f\xd6h\x8a\x04\x87\x94\xd1\xf2F\x19*\x00\x00\u07d4\xa8E[A\x17e\u0590\x1e1\x1erd\x03\t\x1eB\xc5f\x83\x89\xb7:\xec;\xfe\x14P\x00\x00\xe0\x94\xa8f\x13\xe6\u0124\xc9\xc5_\\\x10\xbc\xda2\x17]\u02f4\xaf`\x8a\x02C\xd6\xc2\xe3k\xe6\xae\x00\x00\u07d4\xa8m\xb0}\x9f\x81/G\x96b-@\xe0=\x13Xt\xa8\x8at\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa8\u007fz\xbdo\xa3\x11\x94(\x96x\xef\xb6<\xf5\x84\xee^*a\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xa8\x80\u2a3f\x88\xa1\xa8&H\xb4\x01W\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa8\x9d\xf3HY\xed\xd7\xc8 \u06c8w@\xd8\xff\x9e\x15\x15|{\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa8\xa4<\x00\x91\x00al\xb4\xaeN\x03?\x1f\xc5\xd7\xe0\xb6\xf1R\x89\u0548\xd0x\xb4?M\x80\x00\u07d4\xa8\xa7\b\xe8O\x82\u06c6\xa3U\x02\x19;Ln\xe9\xa7n\xbe\x8f\x897\b\xba\xed=h\x90\x00\x00\xe0\x94\xa8\xa7\xb6\x8a\u06b4\xe3\xea\xdf\xf1\x9f\xfaX\xe3J?\xce\xc0\xd9j\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xa8\xa8\xdb\xdd\x1a\x85\u047e\xee%i\xe9\x1c\xccM\t\xae\u007fn\xa1\x8a\x01:k+VHq\xa0\x00\x00\u07d4\xa8\xac\xa7H\xf9\xd3\x12\xect\u007f\x8bex\x14&\x94\xc7\xe9\xf3\x99\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa8\xb6[\xa3\x17\x1a?w\xa65\v\x9d\xaf\x1f\x8dU\xb4\xd2\x01\xeb\x89(b\xf3\xb0\xd2\"\x04\x00\x00\u07d4\xa8\xbe\xb9\x1c+\x99\u0216J\xa9[kJ\x18K\x12i\xfc4\x83\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u0794\xa8\xc0\xb0/\xaf\x02\xcbU\x19\u0768\x84\xde{\xbc\x8c\x88\xa2\u0681\x88\xe7\xc2Q\x85\x05\x06\x00\x00\u07d4\xa8\xc1\u05aaA\xfe=e\xf6{\xd0\x1d\xe2\xa8f\xed\x1e\u066eR\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\u07d4\xa8\xca\xfa\xc3\"\x80\xd0!\x02\v\xf6\xf2\xa9x(\x83\u05ea\xbe\x12\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xa8\xdb\v\x9b 
\x14S3A<;\fb\xf5\xf5.\u0544\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa8\xf3\u007f\n\xb3\xa1\xd4H\xa9\xe3\xce@\x96_\x97\xa6F\b:4\x89\x11\xe0\xe4\xf8\xa5\v\xd4\x00\x00\u07d4\xa8\xf8\x9d\xd5\xccnd\u05f1\xee\xac\xe0\a\x02\x02,\xd7\xd2\xf0=\x89%\xf2s\x93=\xb5p\x00\x00\xe0\x94\xa9\x04v\xe2\xef\xdf\xeeO8{\x0f2\xa5\x06x\xb0\xef\xb5s\xb5\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa9\x14PF\xfa6(\xcf_\xd4\xc6\x13\x92{\xe51\xe6\xdb\x1f\u0749\x06\x12O\xee\x99;\xc0\x00\x00\u07d4\xa9\x14\u0375q\xbf\xd9=d\xdaf\xa4\xe1\b\xea\x13NP\xd0\x00\x89M\x878\x99G\x13y\x80\x00\xe0\x94\xa9\x1aZ{4\x1f\x99\xc55\x14N \xbe\x9ck;\xb4\u008eM\x8a\x01&u:\xa2$\xa7\v\x00\x00\u07d4\xa9%%Q\xa6$\xaeQ7\x19\u06beR\a\xfb\xef\xb2\xfdwI\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xa9'\u050b\xb6\u02c1K\xc6\t\xcb\u02a9\x15\x1f]E\x9a'\xe1\x89\x0e\xb95\t\x00d\x18\x00\x00\u07d4\xa9)\u023dq\xdb\f0\x8d\xac\x06\b\n\x17G\xf2\x1b\x14e\xaa\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xa9K\xbb\x82\x14\u03cd\xa0\xc2\xf6h\xa2\xacs\xe8bHR\x8dK\x894\n\xad!\xb3\xb7\x00\x00\x00\u07d4\xa9Q\xb2D\xffP\u03eeY\x1d^\x1a\x14\x8d\xf6\xa98\xef*\x1a\x89^\x00\x15\x84\xdf\xcfX\x00\x00\u07d4\xa9`\xb1\xca\xdd;\\\x1a\x8el\xb3\xab\xca\xf5.\xe7\xc3\xd9\xfa\x88\x89R\x8b\xc3T^Rh\x00\x00\u07d4\xa9a\x17\x1fSB\xb1s\xddp\xe7\xbf\xe5\xb5\xca#\x8b\x13\xbc\u0749\xb8'\x94\xa9$O\f\x80\x00\u07d4\xa9u\xb0w\xfc\xb4\u030e\xfc\xbf\x83\x84Y\xb6\xfa$:AY\u0589\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94\xa9{\xeb:H\xc4_\x15((L\xb6\xa9_}\xe4S5\x8e\u018a\x06\x90\x83l\n\xf5\xf5`\x00\x00\u07d4\xa9~\a!DI\x9f\xe5\xeb\xbd5J\xcc~~\xfbX\x98]\b\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4\xa9\x86v/zO)O.\v\x172y\xad,\x81\xa2\"4X\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xa9\x8f\x10\x985\xf5\xea\xcd\x05Cd|4\xa6\xb2i\xe3\x80/\xac\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xa9\x97\xdf\u01d8j'\x05\bH\xfa\x1cd\u05e7\xd6\xe0z\u0322\x89\a\xc0\x86\x0eZ\x80\xdc\x00\x00\u07d4\xa9\x99\x91\u03bd\x98\xd9\xc88\xc2_zt\x16\xd9\xe2D\xca%\r\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xa9\xa1\xcd\xc3;\xfd7o\x1c\rv\xfbl\x84\xb6\xb4\xac'Mh\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xa9\xa8\xec\xa1\x1a#\xd6F\x89\xa2\xaa>A}\xbb=3k\xb5\x9a\x89\x0e4S\xcd;g\xba\x80\x00\u07d4\xa9\xac\xf6\x00\b\x1b\xb5[\xb6\xbf\xba\xb1\x81_\xfcN\x17\xe8Z\x95\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xa9\xad\x19&\xbcf\xbd\xb31X\x8e\xa8\x197\x88SM\x98,\x98\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4\xa9\xaf!\xac\xbeH/\x811\x89j\"\x806\xbaQ\xb1\x94S\u00c9\x02\xb5\xe0!\x98\f\xc1\x80\x00\xe0\x94\xa9\xb2\xd2\xe0IN\xab\x18\xe0}7\xbb\xb8V\xd8\x0e\x80\xf8L\u04ca\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xa9\xbaoA;\x82\xfc\xdd\xf3\xaf\xfb\xbd\u0412\x87\xdc\xf5\x04\x15\u0289\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xa9\xbe\x88\xad\x1eQ\x8b\v\xbb\x02J\xb1\xd8\xf0\xe7?y\x0e\fv\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\xa9\xbf\xc4\x10\xdd\xdb q\x1eE\xc0s\x87\xea\xb3\n\x05N\x19\xac\x89>\x99`\x1e\xdfNS\x00\x00\u07d4\xa9\u0522\xbc\xbe[\x9e\bi\xd7\x0f\x0f\xe2\xe1\u05aa\xcdE\xed\u0149\n\xc6\xe7z\xb6c\xa8\x00\x00\xe0\x94\xa9\xd6KO;\xb7\x85\a\"\xb5\x8bG\x8b\xa6\x917^\"NB\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xa9\xd6\xf8q\xcax\x1au\x9a 
\xac:\u06d7,\xf1()\xa2\b\x892$\xf4'#\xd4T\x00\x00\xe0\x94\xa9\xdc\x04$\u0196\x9dy\x83X\xb3\x93\xb1\x93:\x1fQ\xbe\xe0\n\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xa9\xe1\x94f\x1a\xacpN\xe9\u07a0C\x97N\x96\x92\xde\xd8J]\x89\x1a&\xa5\x14\"\xa0p\x00\x00\u07d4\xa9\xe2\x837\xe65q\x93\xd9\xe2\xcb#k\x01\xbeD\xb8\x14'\u07c9wC\"\x17\xe6\x83`\x00\x00\u07d4\xa9\xe6\xe2^ekv%Xa\x9f\x14z!\x98[\x88t\xed\xfe\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xa9\xe9\xdb\xcez,\xb06\x94y\x98\x97\xbe\xd7\xc5M\x15_\u06a8\x89\n\xb5\xae\x8f\u025de\x80\x00\u07d4\xa9\xed7{}n\xc2Yq\xc1\xa5\x97\xa3\xb0\xf3\xbe\xadW\u024f\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xaa\x02\x00\xf1\xd1~\x9cT\xda\x06G\xbb\x969]W\xa7\x858\u06099>\xf1\xa5\x12|\x80\x00\x00\u07d4\xaa\f\xa3ss7\x17\x8a\f\xaa\xc3\t\x9cXK\x05lV0\x1c\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4\xaa\x13kG\x96+\xb8\xb4\xfbT\r\xb4\xcc\xf5\xfd\xd0B\xff\xb8\u03c9\x1b\x1bk\u05efd\xc7\x00\x00\xe0\x94\xaa\x14B-o\n\xe5\xa7X\x19N\xd1W\x80\xc88\xd6\u007f\x1e\xe1\x8a\x06\t2\x05lD\x9d\xe8\x00\x00\u07d4\xaa\x16&\x9a\xac\x9c\r\x800h\xd8/\u01d1Q\xda\xdd3Kf\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xaa\x16p&\u04da\xb7\xa8V5\x94N\xd9\xed\xb2\xbf\xeb\xa1\x18P\x8a\x01\xc1\xd5\xe2\x1bO\xcfh\x00\x00\u07d4\xaa\x1b7h\xc1m\x82\x1fX\x0ev\xc8\xe4\xc8\xe8m}\u01c8S\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xaa\x1d\xf9.Q\xdf\xf7\v\x19s\xe0\xe9$\xc6b\x87\xb4\x94\xa1x\x89\x1c\xf8J0\xa0\xa0\xc0\x00\x00\u07d4\xaa,g\x00\x96\xd3\xf990S%B~\xb9U\xa8\xa6\r\xb3\u0149l\x95Y\x06\x99#-\x00\x00\u07d4\xaa15\xcbT\xf1\x02\xcb\xef\xe0\x9e\x96\x10:\x1ayg\x18\xffT\x89\x03\"\"\xd9\xc31\x94\x00\x00\u07d4\xaa2\x1f\xdb\xd4I\x18\r\xb8\xdd\xd3O\x0f\xe9\x06\xec\x18\xee\t\x14\x89%\"H\u07b6\xe6\x94\x00\x00\xe0\x94\xaa9%\xdc\"\v\xb4\xae!w\xb2\x880x\xb6\xdc4l\xa1\xb2\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xaa?)`\x1a\x131t^\x05\xc4(0\xa1^q\x93\x8ab7\x89\\(=A\x03\x94\x10\x00\x00\xe0\x94\xaaG\xa4\xff\xc9y622\u025b\x99\xfa\xda\x0f'4\xb0\xae\xee\x8a\x01\xb8H\x9d\xf4\xdb\xff\x94\x00\x00\u07d4\xaaI=?O\xb8fI\x1c\xf8\xf8\x00\xef\xb7\xe22N\xd7\xcf\xe5\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xaaV\xa6]\u012b\xb7/\x11\xba\xe3+o\xbb\aDG\x91\xd5\u0249(\x94\xe9u\xbfIl\x00\x00\xe0\x94\xaaZ\xfc\xfd\x83\t\xc2\u07dd\x15\xbe^jPN}pf$\u014a\x01<\xf4\"\xe3\x05\xa17\x80\x00\u07d4\xaa\x8e\xb0\x82;\a\xb0\xe6\xd2\n\xad\xda\x0e\x95\xcf85\xbe\x19.\x89\x01\xbc\x16\xd6t\xec\x80\x00\x00\u07d4\xaa\x91#~t\r%\xa9/\u007f\xa1F\xfa\xa1\x8c\xe5m\xc6\xe1\xf3\x892$\xf4'#\xd4T\x00\x00\u07d4\xaa\x96\x0e\x10\xc5#\x91\xc5N\x158|\xc6z\xf8'\xb51m\u0309lk\x93[\x8b\xbd@\x00\x00\u07d4\xaa\x9b\xd4X\x955\xdb'\xfa+\xc9\x03\xca\x17\xd6y\xddeH\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xaa\xa8\xde\xfe\x11\xe3a?\x11\x06\u007f\xb9\x83bZ\b\x99Z\x8d\xfc\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xaa\xaa\xe6\x8b2\x14\x02\xc8\xeb\xc14h\xf3A\xc6<\f\xf0?\u0389Rf<\u02b1\xe1\xc0\x00\x00\u07d4\xaa\xad\x1b\xaa\xdeZ\xf0N+\x17C\x9e\x93Y\x87\xbf\x8c+\xb4\xb9\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xaa\xb0\n\xbfX(\xd7\xeb\xf2kG\u03ac\u0378\xba\x032Qf\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xaa\xbd\xb3\\\x15\x14\x98J\x03\x92\x13y?3E\xa1h\xe8\x1f\xf1\x89\x10\xca\u0216\xd29\x00\x00\x00\u07d4\xaa\xca`\xd9\xd7\x00\u7156\xbb\xbb\xb1\xf1\xe2\xf7\x0fF'\xf9\u060965\xbbw\xcbK\x86\x00\x00\u07d4\xaa\xce\u0629V;\x1b\xc3\x11\xdb\xdf\xfc\x1a\xe7\xf5u\x19\xc4D\f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xaa\u04b7\xf8\x10f\x95\a\x8el\x13\x8e\xc8\x1at\x86\xaa\xca\x1e\xb2\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xaa\xe6\x1eC\xcb\r\f\x96\xb3\x06\x99\xf7~\x00\xd7\x11\u0423\x97\x9b\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xaa\xe72\xed\xa6Y\x88\u00e0\f\u007fG/5\x1cF;\x1c\x96\x8e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xaa\xf0#\xfe\U0009091b\xb7\x8b\xb7\xab\xc9]f\x9cP\xd5(\xb0\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xaa\xf5\xb2\a\xb8\x8b\r\xe4\xac@\xd7G\xce\xe0n\x17-\xf6\xe7E\x8a\x06\xa7\xb7\x1d\u007fQ\u0410\x00\x00\u07d4\xaa\xf9\xeeK\x88lm\x1e\x95Io\xd2t#[\xf4\xec\xfc\xb0}\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xaa\xfb{\x01:\xa1\xf8T\x1c~2{\xf6P\xad\xbd\x19L \x8f\x89I\x9e\t-\x01\xf4x\x00\x00\xe0\x94\xab\t\x863\xee\xee\f\xce\xfd\xf62\xf9WTV\xf6\u0740\xfc\x86\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\xab\f\xedv.\x16a\xfa\xe1\xa9*\xfb\x14\b\x88\x94\x13yH%\x89g\x8a\x93 b\xe4\x18\x00\x00\xe0\x94\xab\x14\xd2!\xe3=TF)\x19\x8c\u0416\xedc\u07e2\x8d\x9fG\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xab \x9f\u0729y\u0426G\x01\n\xf9\xa8\xb5/\xc7\xd2\r\x8c\u044a\x01\xee\xe2S,|-\x04\x00\x00\u07d4\xab'\xbax\xc8\xe5\xe3\xda\xef1\xad\x05\xae\xf0\xff\x03%r\x1e\b\x89\x19^\xce\x00n\x02\xd0\x00\x00\u07d4\xab(q\xe5\a\u01fe9eI\x8e\x8f\xb4b\x02Z\x1a\x1cBd\x89*\x03I\x19\u07ff\xbc\x00\x00\u07d4\xab8a\"o\xfe\xc1(\x91\x87\xfb\x84\xa0\x8e\xc3\xed\x042d\xe8\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xab=\x86\xbc\x82\x92~\f\xd4!\xd1F\xe0\u007f\x91\x93'\xcd\xf6\xf9\x89g\x8a\x93 b\xe4\x18\x00\x00\xe0\x94\xab>b\xe7z\x8b\"^A\x15\x92\xb1\xaf0\aR\xfeA$c\x8a\x02\x15\xf85\xbcv\x9d\xa8\x00\x00\u07d4\xab>x)K\xa8\x86\xa0\xcf\xd5\xd3H\u007f\xb3\xa3\a\x8d3\x8dn\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xab@\x04\xc0@?~\xab\xb0\xeaXo!!V\xc4 =g\xf1\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xabAo\xe3\rX\xaf\xe5\xd9EL\u007f\xce\u007f\x83\v\xccu\x03V\x89\x0657\x01\xc6\x05\u06c0\x00\u07d4\xabEr\xfb\xb1\xd7+W]i\xecj\xd1s3\x87>\x85R\xfc\x89lj\xc5L\xdahG\x00\x00\u07d4\xabZy\x01av2\ts\xe8\xcd8\xf67U0\x02%1\xc0\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xab]\xfc\x1e\xa2\x1a\xdcB\u03cc?n6\x1e$?\xd0\xdaa\xe5\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xabke\xea\xb8\xdf\xc9\x17\xec\x02Q\xb9\xdb\x0e\u03e0\xfa\x03(I\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xabp\x91\x93.K\u00dd\xbbU#\x80\u0293O\xd7\x16m\x1en\x89\xb5\x0f\u03ef\xeb\xec\xb0\x00\x00\u07d4\xabt\x16\xff2%IQ\u02fcbN\xc7\xfbE\xfc~\u02a8r\x89\x12nr\xa6\x9aP\xd0\x00\x00\u07d4\xab|B\xc5\xe5-d\x1a\a\xadu\t\x9cb\x92\x8b\u007f\x86b/\x89\x126\x1a\xa2\x1d\x14\xba\x00\x00\u07d4\xab}T\xc7\xc6W\x0e\xfc\xa5\xb4\xb8\xcep\xf5*Ws\xe5\xd5;\x89\x0f(:\xbe\x9d\x9f8\x00\x00\u07d4\xab~\v\x83\xed\x9aBLm\x1ejo\x87\xa4\xdb\xf0d\t\xc7\u0589\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4\xab\x84\xa0\xf1G\xad&T\x00\x00+\x85\x02\x9aA\xfc\x9c\xe5\u007f\x85\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xab\x93\xb2n\xce\n\n\xa2\x13e\xaf\xed\x1f\xa9\xae\xa3\x1c\xd5Dh\x89W+{\x98sl \x00\x00\u07d4\xab\x94\x8aJ\xe3y\\\xbc\xa11&\xe1\x92S\xbd\xc2\x1d:\x85\x14\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xab\x9a\xd3n\\t\xce.\x969\x9fW\x83\x941\xd0\u77d6\xab\x89\b\xe3\xf5\v\x17<\x10\x00\x00\u07d4\xab\xb2\xe6\xa7*@\xban\xd9\b\u037c\xec\x91\xac\xfdwx0\xd1dcG\x8a\xe0\xfcw 
\x89\a?u\u0460\x85\xba\x00\x00\xe0\x94\xab\u071f\x1b\xcfM\x19\xee\x96Y\x100\xe7r\xc340/}\x83\x8a\b~^\x11\xa8\x1c\xb5\xf8\x00\x00\u07d4\xab\xde\x14{*\xf7\x89\ua946T~f\xc4\xfa&d\xd3(\xa4\x89\rk`\x81\xf3L\x12\x80\x00\xe0\x94\xab\xe0|\xedj\xc5\xdd\xf9\x91\xef\xf6\xc3\xda\"jt\x1b\xd2C\xfe\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xab\xf1/\xa1\x9e\x82\xf7lq\x8f\x01\xbd\xca\x00\x03gE#\xef0\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xab\xf7(\u03d3\x12\xf2!(\x02NpF\xc2Q\xf5\xdcY\x01\xed\x8a\x06A\xe8\xa15c\xd8\xf8\x00\x00\u07d4\xab\xf8\xff\xe0p\x8a\x99\xb5(\xcc\x1e\xd4\xe9\xceK\r\x060\xbe\x8c\x89z\xb5\u00ae\xee\xe68\x00\x00\u07d4\xab\xfc\xf5\xf2P\x91\xceW\x87_\xc6t\xdc\xf1\x04\xe2\xa7=\xd2\xf2\x89\x01\x11du\x9f\xfb2\x00\x00\u07d4\xab\xfe\x93d%\xdc\u01f7K\x95P\x82\xbb\xaa\xf2\xa1\x1dx\xbc\x05\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xac\x02OYO\x95X\xf0ICa\x8e\xb0\xe6\xb2\xeeP\x1d\xc2r\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xac\x12*\x03\xcd\x05\x8c\x12._\xe1{\x87/Hw\xf9\u07d5r\x89j\xc5\xc6-\x94\x86\a\x00\x00\u07d4\xac\x14.\xda\x11W\xb9\xa9\xa6C\x90\xdf~j\xe6\x94\xfa\u0249\x05\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xac\x1d\xfc\x98Kq\xa1\x99)\xa8\x1d\x81\xf0J|\xbb\x14\a7\x03\x89 \x86\xac5\x10R`\x00\x00\xe0\x94\xac!\xc1\xe5\xa3\xd7\xe0\xb5\x06\x81g\x9d\xd6\u01d2\xdb\u0287\xde\u02ca\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xac(\x89\xb5\x96o\f\u007f\x9e\xdbB\x89\\\xb6\x9d\x1c\x04\xf9#\xa2\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xac(\xb5\xed\xea\x05\xb7o\x8c_\x97\bEA'|\x96ijL\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xac,\x8e\t\xd0d\x93\xa68XC{\xd2\v\xe0\x19bE\x03e\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4\xac.vm\xac?d\x8fcz\xc6q?\u0770h\xe4\xa4\xf0M\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94\xac9\x00)\x8d\xd1M|\xc9mJ\xbbB\x8d\xa1\xba\xe2\x13\xff\xed\x8a\x05<\xa1)t\x85\x1c\x01\x00\x00\u07d4\xac=\xa5&\xcf\u0388)s\x02\xf3LI\xcaR\r\xc2q\xf9\xb2\x89+^:\xf1k\x18\x80\x00\x00\u07d4\xacD`\xa7nm\xb2\xb9\xfc\xd1R\xd9\xc7q\x8d\x9a\xc6\xed\x8co\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xacJ\xcf\xc3n\xd6\tJ'\xe1\x18\xec\xc9\x11\xcdG>\x8f\xb9\x1f\x89a\x91>\x14@<\f\x00\x00\u07d4\xacL\xc2V\xaet\xd6$\xac\xe8\r\xb0x\xb2 \u007fW\x19\x8fk\x89lyt\x12?d\xa4\x00\x00\u07d4\xacN\xe9\xd5\x02\xe7\xd2\xd2\xe9\x9eY\xd8\xca}_\x00\xc9KM\u058965\u026d\xc5\u07a0\x00\x00\u07d4\xacR\xb7~\x15fH\x14\xf3\x9eO'\x1b\xe6A0\x8d\x91\xd6\u0309\v\xed\x1d\x02c\xd9\xf0\x00\x00\u07d4\xacY\x99\xa8\x9d-\u0486\u0568\fm\xee~\x86\xaa\xd4\x0f\x9e\x12\x89\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4\xac_br1H\r\r\x950.m\x89\xfc2\xcb\x1dO\xe7\xe3\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xac`\x8e+\xac\x9d\xd2\a(\u0494~\xff\xbb\xbf\x90\n\x9c\xe9K\x8a\x01EK\r\xb3uh\xfc\x00\x00\u07d4\xacm\x02\xe9\xa4k7\x9f\xacJ\u0271\u05f5\xd4{\xc8P\xce\x16\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xacoh\xe87\xcf\x19a\xcb\x14\xabGDm\xa1h\xa1m\u0789\x89Hz\x9a0E9D\x00\x00\u07d4\xacw\xbd\xf0\x0f\u0558[]\xb1+\xbe\xf8\x008\n\xbc*\x06w\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xac~\x03p'#\xcb\x16\xee'\xe2-\u0438\x15\xdc-\\\xae\x9f\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4\xac\x8bP\x9a\xef\xea\x1d\xbf\xaf+\xb35\x00\xd6W\vo\xd9mQ\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xac\x8e\x87\xdd\xda^x\xfc\xbc\xb9\xfa\u007f\xc3\xce\x03\x8f\x9f}.4\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xac\x9f\xffh\xc6\x1b\x01\x1e\xfb\xec\xf08\xedr\u06d7\xbb\x9er\x81\x8a\x02\x05\xb4\u07e1\xeetx\x00\x00\u07d4\xac\xa1\xe6\xbcd\xcc1\x80\xf6 
\xe9M\u0171\xbc\xfd\x81X\xe4]\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xac\xa2\xa883\v\x170-\xa71\xd3\r\xb4\x8a\x04\xf0\xf2\a\xc1\x89Hz\x9a0E9D\x00\x00\u07d4\xac\xaa\xdd\xcb\xf2\x86\xcb\x0e!]\xdaUY\x8f\u007f\xf0\xf4\xad\xa5\u018965\u026d\xc5\u07a0\x00\x00\u07d4\xac\xb9C8UK\u0108\u0308\xae-\x9d\x94\b\rk\u07c4\x10\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xac\xbc-\x19\xe0l;\xab\xbb[o\x05+k\xf7\xfc7\xe0r)\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xac\xbd\x18U\x89\xf7\xa6\x8ag\xaaK\x1b\xd6Pw\xf8\xc6NN!\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xac\xc0bp,Ya]4D\xefb\x14\xb8\x86+\x00\x9a\x02\xed\x89QO\xcb$\xff\x9cP\x00\x00\u07d4\xac\xc0\x90\x9f\xda.\xa6\xb7\xb7\xa8\x8d\xb7\xa0\xaa\xc8h\t\x1d\xdb\xf6\x89\x013v_\x1e&\u01c0\x00\u07d4\xac\xc1\u01c7\x86\xabM+;'q5\xb5\xba\x12>\x04\x00Hk\x89\x04E\x91\xd6\u007f\xec\xc8\x00\x00\u07d4\xac\xc4j*U\\t\xde\u0522\xbd\tN\x82\x1b\x97\x84;@\xc0\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xac\u015f;0\xce\xff\xc5da\xcc[\x8d\xf4\x89\x02$\x0e\x0e{\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xac\xce\x01\xe0\xa7\x06\x10\xdcp\xbb\x91\xe9\x92o\xa9\x95\u007f7/\xba\x89\x1d\x1c_>\xda \xc4\x00\x00\u07d4\xac\xd8\u0751\xf7\x14vLEg|c\xd8R\xe5n\xb9\xee\xce.\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xac\u2af6;\x06\x04@\x9f\xbd\xe3\xe7\x16\u0487mD\xe8\xe5\u0749\b=lz\xabc`\x00\x00\xe0\x94\xac\xec\x91\xefiA\xcfc\v\xa9\xa3\u71e0\x12\xf4\xa2\xd9\x1d\u050a\x10\xf0\xcf\x06M\u0552\x00\x00\x00\u07d4\xad\nJ\xe4x\xe9cn\x88\xc6\x04\xf2B\xcfT9\xc6\xd4V9\x89\xbe\xd1\xd0&=\x9f\x00\x00\x00\u07d4\xad\x17\x99\xaa\xd7`+E@\u0343/\x9d\xb5\xf1\x11P\xf1hz\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xad\x1dh\xa08\xfd%\x86\x06~\xf6\xd15\xd9b\x8ey\xc2\xc9$\x89\xfe\t\xa5'\x9e*\xbc\x00\x00\u07d4\xad*\\\x00\xf9#\xaa\xf2\x1a\xb9\xf3\xfb\x06n\xfa\n\x03\xde/\xb2\x8965\xbbw\xcbK\x86\x00\x00\u07d4\xad5e\xd5+h\x8a\xdd\xed\b\x16\x8b-8r\xd1}\n&\xae\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xad7|\xd2^\xb5>\x83\xae\t\x1a\n\x1d+E\x16\xf4\x84\xaf\u0789i*\xe8\x89p\x81\xd0\x00\x00\xe0\x94\xadAM)\xcb~\xe9s\xfe\xc5N\"\xa3\x88I\x17\x86\xcfT\x02\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4\xadD5~\x01~$OGi1\u01f8\x18\x9e\xfe\xe8\n]\n\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xadW\xaa\x9d\x00\xd1\fC\x9b5\xef\xcc\v\xec\xac.9U\xc3\x13\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xadY\xa7\x8e\xb9\xa7J\u007f\xbd\xae\xfa\xfa\x82\xea\u0684u\xf0\u007f\x95\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xadZ\x8dV\x01L\xfc\xb3`\xf4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xadr\x81!\x87?\x04V\xd0Q\x8b\x80\xabe\x80\xa2\x03pe\x95\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xads,\x97e\x93\xee\xc4x;N.\xcdy9yx\v\xfe\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xad}\xd0S\x85\x9e\xdf\xf1\xcbo\x9d*\xcb\xedm\xd5\xe32Bo\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xad\x80\xd8e\xb8\\4\xd2\xe6IK.z\xef\xeak\x9a\xf1\x84\u06c9\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xad\x8b\xfe\xf8\u018aH\x16\xb3\x91o5\xcb{\xfc\xd7\xd3\x04\tv\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xad\x8eH\xa3wi]\xe0\x146:R:(\xb1\xa4\fx\xf2\b\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xad\x91\n#\u0585\x06\x13eJ\xf7\x863z\u04a7\bh\xacm\x89lh\xcc\u041b\x02,\x00\x00\u07d4\xad\x92~\x03\xd1Y\x9ax\xca+\xf0\xca\u04a1\x83\xdc\xebq\xea\xc0\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94\xad\x92\xca\x06n\xdb|q\x1d\xfc[\x16a\x92\xd1\xed\xf8\xe7q\x85\x8a\a\x9f\x90\\o\xd3N\x80\x00\x00\u07d4\xad\x94#_\u00f3\xf4z$\x13\xaf1\u8111I\b\xef\fE\x89\x1b\x1b\x01B\xd8\x15\x84\x00\x00\u07d4\xad\x9e\x97\xa0H/5:\x05\xc0\xf7\x92\xb9w\xb6\xc7\xe8\x11\xfa_\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xad\x9fL\x89\n;Q\x1c\xeeQ\xdf\xe6\xcf\xd7\xf1\t;vA,\x89\x1bv|\xbf\xeb\f\xe4\x00\x00\u07d4\xad\xaa\x0eT\x8c\x03Z\xff\xedd\xcag\x8a\x96?\xab\xe9\xa2k\xfd\x89\x03\xcbq\xf5\x1f\xc5X\x00\x00\u07d4\xad\xb9H\xb1\xb6\xfe\xfe }\xe6^\x9b\xbc-\xe9\x8e`]\vW\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xad\xc1\x9e\xc85\xaf\xe3\u5347\u0713\xa8\xa9!<\x90E\x13&\x89j\xdb\xe54\"\x82\x00\x00\x00\u07d4\xad\xc8\"\x8e\xf9(\xe1\x8b*\x80}\x00\xfb1\xfcgX\x15\xaa\x00\x00\u07d4\xad\xff\r\x1d\v\x97G\x1ev\u05c9\xd2\u470at\xf9\xbdT\xff\x89e\xea=\xb7UF`\x00\x00\u07d4\xae\x06,D\x86\x18d0u\xdez\x0004-\xce\xd6=\xba\u05c9,\xc6\u034c\u0082\xb3\x00\x00\xe0\x94\xae\x10\xe2z\x01O\r0k\xaf&mH\x97\u021a\xee\xe2\xe9t\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xae\x12k8,\xf2W\xfa\xd7\xf0\xbc}\x16)~T\xccrg\u0689\x10CV\x1a\x88)0\x00\x00\u07d4\xae\x13\xa0\x85\x11\x11\x0f2\xe5;\xe4\x12xE\xc8C\xa1\xa5|{\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\xae\x17\x9aF\r\xb6c&t=$\xe6u#\xa5{$m\xaf\u007f\x8a\x01\x00\a\xae|\xe5\xbb\xe4\x00\x00\u07d4\xae\"(ey\x90y\xaa\xf4\xf0gJ\f\u06ab\x02\xa6\xd5p\xff\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xae#\x9a\xcf\xfdN\xbe.\x1b\xa5\xb4\x17\x05r\xdcy\xcce3\xec\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xae/\x9c\x19\xacv\x13e\x94C#\x93\xb0G\x1d\b\x90!d\u04c9%\xdf\x05\u01a8\x97\xe4\x00\x00\u07d4\xae4\x86\x1d4\"S\x19O\xfcfR\xdf\xdeQ\xabD\xca\xd3\xfe\x89\x19F\bhc\x16\xbd\x80\x00\u07d4\xae6\xf7E!!\x91>\x80\x0e\x0f\xcd\x1ae\xa5G\x1c#\x84o\x89\b\xe3\xf5\v\x17<\x10\x00\x00\u07d4\xae?\x98\xa4C\xef\xe0\x0f>q\x1dR]\x98\x94\u071aa\x15{\x89\x10\x04\xe2\xe4_\xb7\xee\x00\x00\xe0\x94\xaeG\xe2`\x9c\xfa\xfe6\x9df\xd4\x15\xd99\xde\x05\b\x1a\x98r\x8a\x05\xba\xec\xf0%\xf9\xb6P\x00\x00\u07d4\xaeO\x12.5\xc0\xb1\xd1\xe4\x06\x92\x91E|\x83\xc0\u007f\x96_\xa3\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xaePU\x81L\xb8\xbe\f\x11{\xb8\xb1\xc8\u04b6;F\x98\xb7(\x89\x01\xbc\x93.\xc5s\xa3\x80\x00\u07d4\xaeS\x8cs\u0173\x8d\x8dXM~\xbd\xad\xef\xb1\\\xab\xe4\x83W\x896'\xe8\xf7\x127<\x00\x00\u07d4\xaeW\xcc\x12\x9a\x96\xa8\x99\x81\xda\xc6\r/\xfb\x87}]\xc5\xe42\x89<:#\x94\xb3\x96U\x00\x00\u07d4\xaeZ\xa1\xe6\u00b6\x0fo\xd3\xef\xe7!\xbbJq\x9c\xbe=o]\x89+$\u01b5Z^b\x00\x00\u07d4\xae\\\x9b\xda\xd3\xc5\u0221\"\x04D\xae\xa5\xc2)\xc1\x83\x9f\x1dd\x89\x19\xe2\xa4\xc8\x18\xb9\x06\x00\x00\u07d4\xae\\\xe35Z{\xa9\xb32v\f\tP\u00bcE\xa8_\xa9\xa0\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\xae]\"\x1a\xfc\xd3\u0493U\xf5\b\xea\xdf\xca@\x8c\xe3<\xa9\x03\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xaec[\xf781\x11\x9d-)\xc0\xd0O\xf8\xf8\xd8\u0425zF\x89Hz\x9a0E9D\x00\x00\xe0\x94\xaed\x81U\xa6X7\x0f\x92\x9b\xe3\x84\xf7\xe0\x01\x04~I\xddF\x8a\x02\xdf$\xae2\xbe 
D\x00\x00\xe0\x94\xaeo\fs\xfd\xd7|H\x97'Q!t\u0675\x02\x96a\x1cL\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xaep\xe6\x9d,J\n\xf8\x18\x80{\x1a'\x05\xf7\x9f\u0435\xdb\u01095e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\xaew9\x12N\xd1S\x05%\x03\xfc\x10\x14\x10\xd1\xff\xd8\xcd\x13\xb7\x8964\xfb\x9f\x14\x89\xa7\x00\x00\u07d4\xaex\xbb\x84\x919\xa6\xba8\xae\x92\xa0\x9ai`\x1c\xc4\xcbb\u0449\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xae\x84\"\x10\xf4M\x14\u0124\u06d1\xfc\x9d;;P\x01O{\xf7\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xae\x84.\x81\x85\x8e\xcf\xed\xf6Plhm\xc2\x04\xac\x15\xbf\x8b$\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xae\x89T\xf8\xd6\x16m\xe5\a\xcfa)}\x0f\xc7\xcak\x9eq(\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xae\x9e\xcdk\u0755.\xf4\x97\xc0\x05\n\u0aca\x82\xa9\x18\x98\u0389\x01\xa0Ui\r\x9d\xb8\x00\x00\u07d4\xae\x9f\\?\xbb\xe0\u027c\xbf\x1a\xf8\xfft\xea(\v:]\x8b\b\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\xae\xad\x88\u0589Ak\x1c\x91\xf26D!7[}\x82\xd0RR\n\xfb\\Wm\x9f~\xb9>\u048a\r\xd0A \xba\t\xcf\xe6\x00\x00\u07d4\xae\xc2\u007f\xf5\xd7\xf9\xdd\u0691\x18?F\xf9\xd5%C\xb6\xcd+/\x89\x18e\x01'\xcc=\xc8\x00\x00\u07d4\xae\xe4\x9dh\xad\xed\xb0\x81\xfdCpZ_x\xc7x\xfb\x90\xdeH\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xae\xf5\xb1\"X\xa1\x8d\xec\a\xd5\xec.1et\x91\x9dy\xd6\u0589lk\x93[\x8b\xbd@\x00\x00\u07d4\xae\xfc\xfe\x88\xc8&\xcc\xf11\xd5N\xb4\ua7b8\x0ea\xe1\xee%\x89\x12nr\xa6\x9aP\xd0\x00\x00\u07d4\xaf\x06\xf5\xfam\x12\x14\xecC\x96}\x1b\xd4\xdd\xe7J\xb8\x14\xa98\x89\x04\xc5>\xcd\xc1\x8a`\x00\x00\u07d4\xaf\x11H\xefl\x8e\x10=u0\xef\xc9\x16y\u026c'\x00\t\x93\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xaf >\"\x9d~mA\x9d\xf47\x8e\xa9\x87\x15Q_c\x14\x85\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94\xaf X\xc7(,\xf6|\x8c<\xf90\x13<\x89a|\xe7])\x8a\x01w\"J\xa8D\xc7 \x00\x00\u07d4\xaf&\xf7\u01bfE> x\xf0\x89S\u4c80\x04\xa2\xc1\xe2\t\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xaf0\x87\xe6.\x04\xbf\x90\rZT\xdc>\x94bt\u0692B;\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xaf6\x14\u0736\x8a6\xe4ZN\x91\x1ebybG\"-Y[\x89z\x81\x06_\x11\x03\xbc\x00\x00\u07d4\xaf6\x15\u01c9\u0431\x15*\xd4\xdb%\xfe]\xcf\"(\x04\xcfb\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xaf<\xb5\x96Y3\xe7\xda\u0603i;\x9c>\x15\xbe\xb6\x8aHs\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xafD\x93\xe8R\x1c\xa8\x9d\x95\xf5&|\x1a\xb6?\x9fEA\x1e\x1b\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xafL\xf4\x17\x85\x16\x1fW\x1d\f\xa6\x9c\x94\xf8\x02\x1fA)N\u028a\x02\x15\xf85\xbcv\x9d\xa8\x00\x00\u07d4\xafR\x9b\xdbE\x9c\xc1\x85\xbe\xe5\xa1\u014b\xf7\xe8\xcc\xe2\\\x15\r\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xafg\xfd>\x12\u007f\xd9\xdc6\xeb?\xcdj\x80\u01feOu2\xb2\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4\xafw\x1094Z40\x01\xbc\x0f\x8aY#\xb1&\xb6\rP\x9c\x895e\x9e\xf9?\x0f\xc4\x00\x00\xe0\x94\xaf\u007fy\xcbAZ\x1f\xb8\u06fd\tF\a\xee\x8dA\xfb|Z;\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xaf\x87\xd27\x1e\xf3x\x95\u007f\xbd\x05\xba/\x1df\x93\x1b\x01\u2e09%\xf2s\x93=\xb5p\x00\x00\u07d4\xaf\x88\x0f\xc7V}U\x95\xca\xcc\xe1\\?\xc1L\x87B\xc2l\x9e\x89\a?u\u0460\x85\xba\x00\x00\u07d4\xaf\x8e\x1d\xcb1L\x95\r6\x87CM0\x98X\xe1\xa8s\x9c\u0509\x0e~\xeb\xa3A\vt\x00\x00\u07d4\xaf\x99-\xd6i\xc0\x88>U\x15\xd3\xf3\x11*\x13\xf6\x17\xa4\xc3g\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xaf\xa1\u056d8\xfe\xd4GY\xc0[\x89\x93\xc1\xaa\r\xac\xe1\x9f@\x89\x04V9\x18$O@\x00\x00\xe0\x94\xaf\xa59XnG\x19\x17J;F\xb9\xb3\xe6c\xa7\u0475\xb9\x87\x8a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4\xaf\xa6\x94n\xff\xd5\xffS\x15O\x82\x01\x02S\xdfG\xae(\f\u0309j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xaf\xc8\xeb\u860b\xd4\x10Z\xccL\x01\x8eTj\x1e\x8f\x9cx\x88\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\xaf\xcc}\xbb\x83V\xd8B\xd4:\xe7\xe2<\x84\"\xb0\"\xa3\b\x03\x8a\x06o\xfc\xbf\xd5\xe5\xa3\x00\x00\x00\u07d4\xaf\xd0\x19\xff6\xa0\x91U4ki\x97H\x15\xa1\xc9\x12\xc9\n\xa4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xaf\xda\xc5\xc1\xcbV\xe2E\xbfp3\x00f\xa8\x17\uabecL\u0449\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xaf\xdd\x1bxab\xb81~ \xf0\xe9y\xf4\xb2\xceHmv]\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xaf\xf1\x04Z\xdf'\xa1\xaa2\x94a\xb2M\xe1\xba\u950ai\x8b\x89\x01\u03c4\xa3\n\n\f\x00\x00\u07d4\xaf\xf1\a\x96\v~\xc3N\u0590\xb6e\x02M`\x83\x8c\x19\x0fp\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xaf\xf1\x1c\xcfi\x93\x04\xd5\xf5\x86*\xf8`\x83E\x1c&\xe7\x9a\xe5\x89l]\xb2\xa4\xd8\x15\xdc\x00\x00\u07d4\xaf\xf1at\nm\x90\x9f\xe9\x9cY\xa9\xb7yE\xc9\x1c\xc9\x14H\x89\x03@\xaa\xd2\x1b;p\x00\x00\xe0\x94\xaf\xfc\x99\xd5\ubd28O\xe7x\x8d\x97\xdc\xe2t\xb08$\x048\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xaf\xfe\xa0G7\"\xcb\u007f\x0e\x0e\x86\xb9\xe1\x18\x83\xbfB\x8d\x8dT\x89i*\xe8\x89p\x81\xd0\x00\x00\xe0\x94\xb0\t\x96\xb0Vn\xcb>rC\xb8\"y\x88\u0733R\xc2\x18\x99\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xb0\x1e8\x9b(\xa3\x1d\x8eI\x95\xbd\xd7\xd7\xc8\x1b\xee\xab\x1eA\x19\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb0-\x06(s3EE\u03a2\x92\x18\xe4\x05w`Y\x0ft#\x89\xac\xb6\xa1\xc7\xd9:\x88\x00\x00\u07d4\xb0/\xa2\x93\x87\xec\x12\xe3\u007fi\"\xacL\xe9\x8c[\t\xe0\xb0\x0f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb06\x91k\xda\u03d4\xb6\x9eZ\x8ae`)u\xeb\x02a\x04\u0749\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xb0A1\x0f\xe9\xee\u0586L\xed\u053e\xe5\x8d\xf8\x8e\xb4\xed<\xac\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xb0U\xafL\xad\xfc\xfd\xb4%\xcfe\xbad1\a\x8f\a\xec\u056b\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xb0W\x11S\xdb\x1cN\u05ec\xae\xfe\x13\xec\xdf\xdbr\xe7\xe4\xf0j\x8a\x11\f\xffyj\xc1\x95 \x00\x00\u07d4\xb0n\xab\t\xa6\x10\u01a5=V\xa9F\xb2\xc44\x87\xac\x1d[-\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb0rI\xe0U\x04J\x91U5\x9a@)7\xbb\xd9T\xfeH\xb6\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xb0v\x182\x8a\x90\x13\a\xa1\xb7\xa0\xd0X\xfc\xd5xn\x9er\xfe\x8a\x06gI]JC0\xce\x00\x00\u07d4\xb0y\xbbM\x98f\x14:m\xa7*\xe7\xac\x00\"\x06)\x811\\\x89)3\x1eeX\xf0\xe0\x00\x00\u07d4\xb0{\xcc\bZ\xb3\xf7)\xf2D\x00Ah7\xb6\x996\xba\x88s\x89lm\x84\xbc\xcd\xd9\xce\x00\x00\u07d4\xb0{\xcf\x1c\xc5\xd4F.Q$\xc9e\xec\xf0\xd7\r\xc2z\xcau\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4\xb0|\xb9\xc1$\x05\xb7\x11\x80uC\u0113De\xf8\u007f\x98\xbd-\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xb0\u007f\u07af\xf9\x1dD`\xfel\xd0\u8870\xbd\x8d\"\xa6.\x87\x8a\x01\x1d%)\xf3SZ\xb0\x00\x00\xe0\x94\xb0\x9f\xe6\xd44\x9b\x99\xbc7\x93\x80T\x02-T\xfc\xa3f\xf7\xaf\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\xe0\x94\xb0\xaa\x00\x95\f\x0e\x81\xfa2\x10\x17>r\x9a\xaf\x16:'\xcdq\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xb0\xacN\xfff\x80\xee\x14\x16\x9c\xda\xdb\xff\xdb0\x80Om%\xf5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb0\xb3j\xf9\xae\xee\u07d7\xb6\xb0\"\x80\xf1\x14\xf19\x84\xea2`\x895e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\xb0\xb7y\xb9K\xfa<.\x1fX{\u031c~!x\x92\"7\x8f\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\xb0\xba\xeb0\xe3\x13wlLm$t\x02\xbaAg\xaf\u0361\u0309j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xb0\xbb)\xa8a\xea\x1dBME\xac\u053f\u0112\xfb\x8e\xd8\t\xb7\x89\x04V9\x18$O@\x00\x00\xe0\x94\xb0\xc1\xb1w\xa2 
\xe4\x1f|t\xd0|\u0785i\xc2\x1cu\xc2\xf9\x8a\x01/\x93\x9c\x99\xed\xab\x80\x00\x00\u07d4\xb0\xc7\xceL\r\xc3\u00bb\xb9\x9c\xc1\x85{\x8aE_a\x17\x11\u0389\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb0\xce\xf8\xe8\xfb\x89\x84\xa6\x01\x9f\x01\xc6y\xf2r\xbb\xe6\x8f\\w\x89\b=lz\xabc`\x00\x00\xe0\x94\xb0\xd3+\xd7\xe4\u6577\xb0\x1a\xa3\xd0Ao\x80U}\xba\x99\x03\x8a\x03s\x9f\xf0\xf6\xe6\x130\x00\x00\xe0\x94\xb0\xd3\u0247+\x85\x05n\xa0\xc0\xe6\xd1\xec\xf7\xa7~<\u6ac5\x8a\x01\x0f\b\xed\xa8\xe5U\t\x80\x00\u07d4\xb0\xe4i\u0206Y8\x15\xb3IV8Y]\xae\xf0f_\xaeb\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xb0\xe7`\xbb\a\xc0\x81wsE\xe0W\x8e\x8b\u0218\"mN;\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb1\x040\x04\xec\x19A\xa8\xcfO+\x00\xb1W\x00\u076co\xf1~\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb1\x05\xdd=\x98|\xff\xd8\x13\xe9\xc8P\n\x80\xa1\xad%}V\u0189lj\xccg\u05f1\xd4\x00\x00\u07d4\xb1\x0f\u04a6G\x10/\x88\x1ft\xc9\xfb\xc3}\xa62\x94\x9f#u\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94\xb1\x15\xee:\xb7d\x1e\x1a\xa6\xd0\x00\xe4\x1b\xfc\x1e\xc7!\f/2\x8a\x02\xc0\xbb=\xd3\fN \x00\x00\u07d4\xb1\x17\x8a\xd4s\x83\xc3\x1c\x814\xa1\x94\x1c\xbc\xd4t\xd0bD\xe2\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xb1\x17\x95\x89\u1779\xd4\x15W\xbb\xec\x1c\xb2L\xcc-\xec\x1c\u007f\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xb1\x19\u76a9\xb9\x16Re\x81\xcb\xf5!\xefGJ\xe8M\xcf\xf4\x89O\xba\x10\x01\xe5\xbe\xfe\x00\x00\u07d4\xb1\x1f\xa7\xfb'\n\xbc\xdfZ.\xab\x95\xaa0\u013566\uffc9+^:\xf1k\x18\x80\x00\x00\u07d4\xb1$\xbc\xb6\xff\xa40\xfc\xae.\x86\xb4_'\xe3\xf2\x1e\x81\xee\b\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb1)\xa5\xcbq\x05\xfe\x81\v\u0615\xdcr\x06\xa9\x91\xa4TT\x88\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\xe0\x94\xb1.\xd0{\x8a8\xadU\x066?\xc0z\vmy\x996\xbd\xaf\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xb14\xc0\x049\x1a\xb4\x99(x3zQ\xec$/B(WB\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb1?\x93\xaf0\xe8\xd7fs\x81\xb2\xb9[\xc1\xa6\x99\xd5\xe3\xe1)\x89\x16\u012b\xbe\xbe\xa0\x10\x00\x00\u07d4\xb1E\x92\x85\x86>\xa2\xdb7Y\xe5F\u03b3\xfb7a\xf5\x90\x9c\x89<\xd7*\x89@\x87\xe0\x80\x00\u07d4\xb1F\xa0\xb9%U<\xf0o\xca\xf5J\x1bM\xfe\xa6!)\aW\x89lnY\xe6|xT\x00\x00\xe0\x94\xb1Jz\xaa\x8fI\xf2\xfb\x9a\x81\x02\u05bb\xe4\u010a\xe7\xc0o\xb2\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xb1K\xbe\xffpr\tu\xdca\x91\xb2\xa4O\xf4\x9f&r\x87<\x89\a\xc0\x86\x0eZ\x80\xdc\x00\x00\xe0\x94\xb1L\xc8\xde3\xd63\x826S\x9aH\x90 \xceFU\xa3+\u018a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xb1M\xdb\x03\x86\xfb`c\x98\xb8\xccGVZ\xfa\xe0\x0f\xf1\xd6j\x89\xa1*\xff\b>f\xf0\x00\x00\u07d4\xb1S\xf8(\xdd\amJ|\x1c%t\xbb-\xee\x1aD\xa3\x18\xa8\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb1T\x0e\x94\xcf\xf3F\\\xc3\u0447\xe7\xc8\u3f6f\x98FY\u2262\x15\xe4C\x90\xe33\x00\x00\u07d4\xb1X\xdbC\xfab\xd3\x0ee\xf3\u041b\xf7\x81\u01f6sr\uba89l]\xb2\xa4\xd8\x15\xdc\x00\x00\u07d4\xb1ar_\xdc\xed\xd1yR\xd5{#\xef([~K\x11i\xe8\x89\x02\xb6\xdf\xed6d\x95\x80\x00\u07d4\xb1dy\xba\x8e}\xf8\xf6>\x1b\x95\xd1I\u0345)\xd75\xc2\u0689-\xe3:j\xac2T\x80\x00\u07d4\xb1f\xe3}.P\x1a\xe7<\x84\x14+_\xfbZ\xa6U\xddZ\x99\x89l]\xb2\xa4\xd8\x15\xdc\x00\x00\u07d4\xb1\x83\xeb\xeeO\xcbB\xc2 \xe4wt\xf5\x9dlT\xd5\xe3*\xb1\x89V\xf7\xa9\xc3<\x04\xd1\x00\x00\u07d4\xb1\x88\a\x84D\x02~8g\x98\xa8\xaehi\x89\x19\xd5\xcc#\r\x89\x0e~\xeb\xa3A\vt\x00\x00\u07d4\xb1\x89j7\xe5\u0602Z-\x01vZ\xe5\xdeb\x99w\u0783R\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xb1\x8eg\xa5\x05\n\x1d\xc9\xfb\x19\t\x19\xa3=\xa88\xefDP\x14\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb1\xa2\xb4:t3\xdd\x15\v\xb8\"'\xedQ\x9c\u05b1B\u04c2\x89\x94mb\rtK\x88\x00\x00\u07d4\xb1\xc0\u040b6\xe1\x84\xf9\x95*@7\xe3\xe5:f}\a\nN\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb1\xc3(\xfb\x98\xf2\xf1\x9a\xb6do\n|\x8cVo\xdaZ\x85@\x89\x87\x86x2n\xac\x90\x00\x00\xe0\x94\xb1\xc7Qxi9\xbb\xa0\xd6q\xa6w\xa1X\u01ab\xe7&^F\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\xb1\xcdK\xdf\xd1\x04H\x9a\x02n\u025dYs\a\xa0By\xf1s\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xb1\u03d4\xf8\t\x15\x05\x05_\x01\n\xb4\xba\u0196\xe0\xca\x0fg\xa1\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xb1\u05b0\x1b\x94\xd8T\xfe\x8b7J\xa6^\x89\\\xf2*\xa2V\x0e\x892\xf5\x1e\u06ea\xa30\x00\x00\xe0\x94\xb1\u06e5%\v\xa9bWU$n\x06yg\xf2\xad/\a\x91\u078a\x10\xf0\xcf\x06M\u0552\x00\x00\x00\u07d4\xb1\xe2\u0755\xe3\x9a\xe9w\\U\xae\xb1?\x12\xc2\xfa#0S\xba\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb1\xe6\xe8\x10\xc2J\xb0H\x8d\xe9\xe0\x1eWH7\x82\x9f|w\u0409\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb1\xe9\xc5\xf1\xd2\x1eauzk.\xe7Y\x13\xfcZ\x1aA\x01\u00c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xb2\x03\u049elV\xb9&\x99\u0139-\x1fo\x84d\x8d\xc4\u03fc\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb2\x16\xdcY\xe2|=ry\xf5\xcd[\xb2\xbe\u03f2`n\x14\u0649\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb2\x1byy\xbf|\\\xa0\x1f\xa8-\xd6@\xb4\x1c9\xe6\u01bcu\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xb2#\xbf\x1f\xbf\x80H\\\xa2\xb5V}\x98\xdb{\xc3SM\xd6i\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb2-PU\xd9b15\x96\x1ej\xbd'<\x90\xde\xea\x16\xa3\xe7\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xb2-\xad\xd7\xe1\xe0R2\xa927\xba\xed\x98\xe0\u07d2\xb1\x86\x9e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb24\x03_uDF<\xe1\xe2+\xc5S\x06F\x84\xc5\x13\xcdQ\x89\r\x89\xfa=\u010d\xcf\x00\x00\u07d4\xb2G\u03dcr\xecH*\xf3\xea\xa7Ye\x8fy=g\nW\f\x891p\x8a\xe0\x04T@\x00\x00\u07d4\xb2ghA\xee\x9f-1\xc1r\xe8#\x03\xb0\xfe\x9b\xbf\x9f\x1e\t\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb2y\xc7\xd3U\u0088\x03\x92\xaa\u046a!\xee\x86|;5\a\u07c9D[\xe3\xf2\uf1d4\x00\x00\u07d4\xb2|\x1a$ L\x1e\x11\x8du\x14\x9d\xd1\t1\x1e\a\xc0s\xab\x89\xa8\r$g~\xfe\xf0\x00\x00\u07d4\xb2\x81\x81\xa4X\xa4@\xf1\u01bb\x1d\xe8@\x02\x81\xa3\x14\x8fL5\x89\x14b\fW\xdd\xda\xe0\x00\x00\xe0\x94\xb2\x82E\x03|\xb1\x92\xf7W\x85\u02c6\xcb\xfe|\x93\r\xa2X\xb0\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4\xb2\x87\xf7\xf8\xd8\u00c7,\x1bXk\xcd}\n\xed\xbf~s'2\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb2\x8b\xb3\x9f4fQ|\xd4o\x97\x9c\xf5\x96S\xee}\x8f\x15.\x89\x18e\x01'\xcc=\xc8\x00\x00\u07d4\xb2\x8d\xbf\xc6I\x98\x94\xf7:q\xfa\xa0\n\xbe\x0fK\xc9\u045f*\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xb2\x96\x8f}5\xf2\b\x87\x161\xc6h{?=\xae\xab\xc6al\x89\bu\xc4\u007f(\x9fv\x00\x00\u07d4\xb2\x9f[|\x190\xd9\xf9z\x11^\x06pf\xf0\xb5M\xb4K;\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb2\xa1D\xb1\xeag\xb9Q\x0f\"g\xf9\xda9\xd3\xf9=\xe2fB\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb2\xa2\xc2\x11\x16\x12\xfb\x8b\xbb\x8e}\xd97\x8dg\xf1\xa3\x84\xf0P\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xb2\xa4\x98\xf0;\xd7\x17\x8b\u0627\x89\xa0\x0fR7\xafy\xa3\xe3\xf8\x8a\x04\x1b\xad\x15^e\x12 
\x00\x00\u07d4\xb2\xaa/\x1f\x8e\x93\xe7\x97\x13\xd9,\xea\x9f\xfc\xe9\xa4\n\xf9\xc8-\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb2\xb5\x16\xfd\u045e\u007f8d\xb6\xd2\xcf\x1b%*AV\xf1\xb0;\x89\x02\xe9\x83\xc7a\x15\xfc\x00\x00\u07d4\xb2\xb7\u0374\xffKa\u0577\xce\v\"p\xbb\xb5&\x97C\xec\x04\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb2\xbd\xbe\u07d5\x90\x84v\xd7\x14\x8a7\f\u0193t6(\x05\u007f\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb2\xbf\xaaX\xb5\x19l\\\xb7\xf8\x9d\xe1_G\x9d\x188\xdeq=\x89\x01#n\xfc\xbc\xbb4\x00\x00\u07d4\xb2\xc5>\xfa3\xfeJ:\x1a\x80 \\s\xec;\x1d\xbc\xad\x06\x02\x89h\x01\u06b3Y\x18\x93\x80\x00\xe0\x94\xb2\xd06\x05\x15\xf1}\xab\xa9\x0f\u02ec\x82\x05\xd5i\xb9\x15\u05ac\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xb2\xd1\xe9\x9a\xf9\x121\x85\x8epe\xdd\x19\x183\r\xc4\xc7G\u054a\x03\x89O\x0eo\x9b\x9fp\x00\x00\u07d4\xb2\u066b\x96d\xbc\xf6\xdf <4o\u0192\xfd\x9c\xba\xb9 ^\x89\x17\xbex\x97`e\x18\x00\x00\u07d4\xb2\u0777\x86\xd3yN'\x01\x87\xd0E\x1a\xd6\u0237\x9e\x0e\x87E\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb2\xe0\x85\xfd\xdd\x14h\xba\aA['NsN\x11#\u007f\xb2\xa9\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xb2\xe9\xd7k\xf5\x0f\xc3k\xf7\u04d4Kc\xe9\u0288\x9bi\x99h\x89\x902\xeab\xb7K\x10\x00\x00\xe0\x94\xb2\xf9\xc9r\xc1\xe9swU\xb3\xff\x1b0\x88s\x83\x969[&\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xb2\xfc\x84\xa3\xe5\nP\xaf\x02\xf9M\xa08>\u055fq\xff\x01\u05ca\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4\xb3\x05\v\xef\xf9\xde3\xc8\x0e\x1f\xa1R%\xe2\x8f,A:\xe3\x13\x89%\xf2s\x93=\xb5p\x00\x00\u07d4\xb3\x11\x96qJH\xdf\xf7&\xea\x943\xcd)\x12\xf1\xa4\x14\xb3\xb3\x89\x91Hx\xa8\xc0^\xe0\x00\x00\xe0\x94\xb3\x14[tPm\x1a\x8d\x04|\xdc\xdcU9*{SPy\x9a\x8a\x1bb)t\x1c\r=]\x80\x00\u07d4\xb3 \x83H6\xd1\xdb\xfd\xa9\xe7\xa3\x18M\x1a\xd1\xfdC \xcc\xc0\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb3#\u073f.\xdd\xc58.\u4efb \x1c\xa3\x93\x1b\xe8\xb48\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xb3$\x00\xfd\x13\xc5P\t\x17\xcb\x03{)\xfe\"\xe7\xd5\"\x8f-\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xb3%gL\x01\xe3\xf7)\rR&3\x9f\xbe\xacg\xd2!'\x9f\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\xb3(%\xd5\xf3\xdb$\x9e\xf4\xe8\\\xc4\xf31S\x95\x89v\u8f09\x1b-\xf9\xd2\x19\xf5y\x80\x00\u07d4\xb3*\xf3\xd3\xe8\xd0u4I&To.2\x88{\xf9;\x16\xbd\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb3/\x1c&\x89\xa5\xcey\xf1\xbc\x97\v1XO\x1b\xcf\"\x83\xe7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb3<\x03#\xfb\xf9\xc2l\x1d\x8a\xc4N\xf7C\x91\u0400F\x96\u0689\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xb3O\x04\xb8\xdbe\xbb\xa9\xc2n\xfcL\xe6\xef\xc5\x04\x81\xf3\xd6]\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xb3U}9\xb5A\x1b\x84D__T\xf3\x8fb\xd2qM\x00\x87\x89 \x86\xac5\x10R`\x00\x00\xe0\x94\xb3X\xe9|p\xb6\x05\xb1\xd7\xd7)\u07f6@\xb4<^\xaf\xd1\xe7\x8a\x04<3\xc1\x93ud\x80\x00\x00\u0794\xb3^\x8a\x1c\r\xac~\x0ef\u06ecsjY*\xbdD\x01%a\x88\xcf\xceU\xaa\x12\xb3\x00\x00\xe0\x94\xb3fx\x94\xb7\x86<\x06\x8a\xd3D\x87?\xcf\xf4\xb5g\x1e\x06\x89\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xb3qw1\xda\xd6Q2\xday-\x87`0\xe4j\xc2'\xbb\x8a\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb3s\x1b\x04l\x8a\u0195\xa1'\xfdy\u0425\xd5\xfaj\xe6\xd1.\x89lO\xd1\xee$nx\x00\x00\u07d4\xb3|+\x9fPc{\xec\xe0\u0295\x92\b\xae\xfe\xe6F;\xa7 
\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb3\x88\xb5\xdf\xec\xd2\xc5\u4d56W|d%V\xdb\xfe'xU\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb3\x8cNS{]\xf90\xd6Zt\xd0C\x83\x1dkH[\xbd\xe4\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\xb3\x919Wa\x94\xa0\x86a\x95\x15\x1f3\xf2\x14\n\xd1\u0306\u03ca\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xb3\x9fL\x00\xb2c\f\xab}\xb7)^\xf4=G\xd5\x01\xe1\u007f\u05c9\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb3\xa6K\x11vrOT\t\xe1AJ5#f\x1b\xae\xe7KJ\x89\x01ch\xffO\xf9\xc1\x00\x00\u07d4\xb3\xa6\xbdA\xf9\xd9\xc3 \x1e\x05\v\x87\x19\x8f\xbd\xa3\x994\"\x10\x89\xc4a\xe1\xdd\x10)\xb5\x80\x00\u07d4\xb3\xa8\xc2\xcb}5\x8eW9\x94\x1d\x94[\xa9\x04Z\x02:\x8b\xbb\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb3\xaeT\xfb\xa0\x9d>\xe1\u05bd\xd1\xe9W\x929\x19\x02L5\xfa\x89\x03\x8d,\xeee\xb2*\x80\x00\u07d4\xb3\xb7\xf4\x93\xb4J,\x8d\x80\xecx\xb1\xcd\xc7Ze+s\xb0l\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb3\xc2(s\x1d\x18m-\xed[_\xbe\x00Lfl\x8eF\x9b\x86\x89\x01\x92t\xb2Y\xf6T\x00\x00\u07d4\xb3\xc2``\x9b\x9d\xf4\t^l]\xff9\x8e\xeb^-\xf4\x99\x85\x89\r\xc5_\xdb\x17d{\x00\x00\u07d4\xb3\xc6[\x84Z\xbal\xd8\x16\xfb\xaa\xe9\x83\xe0\xe4l\x82\xaa\x86\"\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb3\xc9H\x11\xe7\x17[\x14\x8b(\x1c\x1a\x84[\xfc\x9b\xb6\xfb\xc1\x15\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb3\xe2\x0e\xb4\xde\x18\xbd\x06\x02!h\x98\x94\xbe\u5bb2SQ\xee\x89\x03\xfc\x80\xcc\xe5\x16Y\x80\x00\u07d4\xb3\xe3\xc49\x06\x98\x80\x15f\x00\u0089.D\x8dA6\xc9-\x9b\x89.\x14\x1e\xa0\x81\xca\b\x00\x00\xe0\x94\xb3\xf8*\x87\xe5\x9a9\xd0\u0480\x8f\aQ\xebr\xc22\x9c\xdc\u014a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xb3\xfc\x1dh\x81\xab\xfc\xb8\xbe\xcc\v\xb0!\xb8\xb7;r3\u0751\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\xb4\x05\x94\xc4\xf3fN\xf8I\u0326\"{\x8a%\xaai\t%\xee\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb4\x1e\xaf]Q\xa5\xba\x1b\xa3\x9b\xb4\x18\u06f5O\xabu\x0e\xfb\x1f\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb4$\u058d\x9d\r\x00\xce\xc1\x93\x8c\x85N\x15\xff\xb8\x80\xba\x01p\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb4%bs\x96+\xf61\xd0\x14U\\\xc1\xda\r\xcc1akI\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb40g\xfep\u0675Ys\xbaX\xdcd\xdd\u007f1\x1eUBY\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xb46W\xa5\x0e\xec\xbc0w\xe0\x05\xd8\xf8\xd9O7xv\xba\u0509\x01\xec\x1b:\x1f\xf7Z\x00\x00\u07d4\xb4<'\xf7\xa0\xa1\"\bK\x98\xf4\x83\x92%A\u0203l\xee,\x89&\u009eG\u0104L\x00\x00\xe0\x94\xb4A5v\x86\x9c\b\xf9Q*\xd3\x11\xfe\x92Y\x88\xa5-4\x14\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xb4F\x05U$q\xa6\xee\xe4\u06abq\xff;\xb4\x13&\xd4s\xe0\x89-~=Q\xbaS\xd0\x00\x00\u07d4\xb4GW\x1d\xac\xbb>\u02f6\xd1\xcf\v\f\x8f88\xe5#$\xe2\x89\x01\xa3\x18f\u007f\xb4\x05\x80\x00\u07d4\xb4G\x83\xc8\xe5{H\a\x93\xcb\u059aE\xd9\f{O\fH\xac\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb4H\x15\xa0\xf2\x8eV\x9d\x0e\x92\x1aJ\u078f\xb2d%&Iz\x89\x03\x027\x9b\xf2\xca.\x00\x00\u07d4\xb4Im\xdb'y\x9a\"$W\xd79y\x11g(\u8844[\x89\x8d\x81\x9e\xa6_\xa6/\x80\x00\xe0\x94\xb4RL\x95\xa7\x86\x0e!\x84\x02\x96\xa6\x16$@\x19B\x1cJ\xba\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xb4\\\xca\r6\x82fbh<\xf7\u0432\xfd\xach\u007f\x02\xd0\u010965\u026d\xc5\u07a0\x00\x00\u0794\xb4d@\u01d7\xa5V\xe0L}\x91\x04f\x04\x91\xf9k\xb0v\xbf\x88\xce\xc7o\x0eqR\x00\x00\u07d4\xb4j\u0386^,P\xeaF\x98\xd2\x16\xabE]\xffZ\x11\xcdr\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb4m\x11\x82\xe5\xaa\xca\xff\r&\xb2\xfc\xf7/<\x9f\xfb\xcd\xd9}\x89\xaa*`<\xdd\u007f,\x00\x00\u07d4\xb4\x89!\xc9h}U\x10tE\x84\x93n\x88\x86\xbd\xbf-\xf6\x9b\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb4\x98\xbb\x0fR\x00\x05\xb6!jD%\xb7Z\xa9\xad\xc5-b+\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xb4\xb1\x1d\x10\x9f`\x8f\xa8\xed\xd3\xfe\xa9\xf8\xc3\x15d\x9a\xeb=\x11\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xb4\xb1K\xf4TU\u042b\b\x035\x8bu$\xa7+\xe1\xa2\x04[\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xb4\xb1\x85\xd9C\xee+Xc\x1e3\xdf\xf5\xafhT\xc1y\x93\xac\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb4\xbf$\u02c3hk\xc4i\x86\x9f\xef\xb0D\xb9\tqi\x93\xe2\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb4\xc2\x00@\xcc\u0661\xa3(=\xa4\u0522\xf3e\x82\bC\xd7\xe2\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb4\xc8\x17\x0f{*\xb56\xd1\u0662[\xdd :\xe1(\x8d\xc3\u0549\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb4\xd8/.i\x94?}\xe0\xf5\xf7t8y@o\xac.\x9c\xec\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94\xb4\xddF\f\xd0\x16rZd\xb2.\xa4\xf8\xe0n\x06gN\x03>\x8a\x01#\x1b\xb8t\x85G\xa8\x00\x00\u07d4\xb4\xddT\x99\xda\xeb%\a\xfb-\xe1\"\x97s\x1dLr\xb1k\xb0\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xb5\x04l\xb3\xdc\x1d\xed\xbd6E\x14\xa2\x84\x8eD\xc1\xdeN\xd1G\x8a\x03{}\x9b\xb8 @^\x00\x00\xe0\x94\xb5\b\xf9\x87\xb2\xde4\xaeL\xf1\x93\u0785\xbf\xf6\x13\x89b\x1f\x88\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xb5\tU\xaan4\x15q\x98f\b\xbd\u0211\xc2\x13\x9fT\f\u07c9j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xb5\f\x14\x9a\x19\x06\xfa\xd2xo\xfb\x13Z\xabP\x177\xe9\xe5o\x89\x15\b\x94\xe8I\xb3\x90\x00\x00\u07d4\xb5\f\x9fW\x89\xaeD\xe2\xdc\xe0\x17\xc7\x14\xca\xf0\f\x83\x00\x84\u0089\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4\xb5\x14\x88,\x97\x9b\xb6B\xa8\r\u04c7T\u0578\xc8)m\x9a\a\x893\xc5I\x901r\f\x00\x00\u07d4\xb5\x1d\u0734\xddN\x8a\xe6\xbe3m\xd9eIq\xd9\xfe\xc8kA\x89\x16\xd4d\xf8=\u2500\x00\u07d4\xb5\x1eU\x8e\xb5Q/\xbc\xfa\x81\xf8\u043d\x93\x8cy\xeb\xb5$+\x89&\u009eG\u0104L\x00\x00\u07d4\xb5#\xff\xf9t\x98q\xb3S\x88C\x887\xf7\xe6\xe0\u07a9\xcbk\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xb5-\xfbE\xde]t\xe3\xdf \x832\xbcW\x1c\x80\x9b\x8d\xcf2\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xb55\xf8\u06c7\x9f\xc6\u007f\xecX\x82J\\\xbenT\x98\xab\xa6\x92\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4\xb57\xd3jp\xee\xb8\xd3\xe5\xc8\r\xe8\x15\"\\\x11X\u02d2\u0109QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4\xb5;\xcb\x17L%\x184\x8b\x81\x8a\xec\xe0 
6E\x96Fk\xa3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb5I>\xf1srDE\xcf4\\\x03]'\x9b\xa7Y\xf2\x8dQ\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb5S\xd2]kT!\xe8\x1c*\xd0^\v\x8b\xa7Q\xf8\xf0\x10\xe3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb5Tt\xbaX\xf0\xf2\xf4\x0el\xba\xbe\xd4\xea\x17n\x01\x1f\xca\u0589j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xb5U\xd0\x0f\x91\x90\xcc6w\xae\xf3\x14\xac\xd7?\xdc99\x92Y\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb5W\xab\x949\xefP\xd27\xb5S\xf0%\b6JFj\\\x03\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb5jx\x00(\x03\x9c\x81\xca\xf3{gu\xc6 \u7195Gd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb5j\u04ae\xc6\xc8\xc3\xf1\x9e\x15\x15\xbb\xb7\u0751(RV\xb69\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb5t\x13\x06\n\xf3\xf1N\xb4y\x06_\x1e\x9d\x19\xb3uz\xe8\u0309\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xb5uI\xbf\xbc\x9b\xdd\x18\xf76\xb2&P\xe4\x8as`\x1f\xa6\\\x89\x18-~L\xfd\xa08\x00\x00\xe0\x94\xb5w\xb6\xbe\xfa\x05N\x9c\x04\x04a\x85P\x94\xb0\x02\xd7\xf5{\u05ca\x18#\xf3\xcfb\x1d#@\x00\x00\u07d4\xb5{\x04\xfa#\xd1 ?\xae\x06\x1e\xacEB\xcb`\xf3\xa5v7\x89\nZ\xa8P\t\xe3\x9c\x00\x00\u07d4\xb5\x87\f\xe3B\xd43C36s\x03\x8bGd\xa4n\x92_>\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb5\x87\xb4J,\xa7\x9eK\xc1\u074b\xfd\xd4: qP\xf2\xe7\xe0\x89\",\x8e\xb3\xfff@\x00\x00\u07d4\xb5\x89gm\x15\xa0DH4B0\xd4\xff'\xc9^\xdf\x12,I\x8965\u026d\xc5\u07a0\x00\x00\u0794\xb5\x8bR\x86^\xa5]\x806\xf2\xfa\xb2`\x98\xb3R\u0283~\x18\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xb5\x90k\n\u9881X\xe8\xacU\x0e9\xda\bn\xe3\x15v#\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb5\xa4g\x96\x85\xfa\x14\x19l.\x920\xc8\xc4\xe3;\xff\xbc\x10\xe2\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xb5\xa5\x89\u075f@q\u06f6\xfb\xa8\x9b?]]\xae}\x96\xc1c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb5\xa6\x06\xf4\xdd\u02f9G\x1e\xc6\u007fe\x8c\xaf+\x00\xees\x02^\x89\xeaun\xa9*\xfct\x00\x00\u07d4\xb5\xadQW\u0769!\xe6\xba\xfa\u0350\x86\xaes\xae\x1fa\x1d?\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb5\xad\xd1\u701f}\x03\x06\x9b\xfe\x88;\n\x93\"\x10\xbe\x87\x12\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb5\xba)\x91|x\xa1\xd9\xe5\xc5\xc7\x13fl\x1eA\x1d\u007fi:\x89\xa8\r$g~\xfe\xf0\x00\x00\u07d4\xb5\xc8\x16\xa8(<\xa4\xdfh\xa1\xa7=c\xbd\x80&\x04\x88\xdf\b\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb5\xca\xc5\xed\x03G}9\v\xb2g\xd4\xeb\xd4a\x01\xfb\xc2\xc3\u0689\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xb5\u037cA\x15@oR\u5a85\xd0\xfe\xa1p\u0497\x9c\u01fa\x89Hz\x9a0E9D\x00\x00\u0794\xb5\u0653M{)+\xcf`;(\x80t\x1e\xb7`(\x83\x83\xa0\x88\xe7\xc2Q\x85\x05\x06\x00\x00\u07d4\xb5\xddP\xa1]\xa3Ih\x89\nS\xb4\xf1?\xe1\xaf\b\x1b\xaa\xaa\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb5\xfa\x81\x84\xe4>\xd3\u0e2b\x91!da\xb3R\x8d\x84\xfd\t\x89\x91Hx\xa8\xc0^\xe0\x00\x00\u07d4\xb5\xfb~\xa2\xdd\xc1Y\x8bfz\x9dW\xdd9\xe8Z8\xf3]V\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xb6\x00B\x97R\xf3\x99\xc8\r\a4tK\xae\n\x02.\xcag\u0189\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb6\x00\xfe\xabJ\xa9lSu\x04\xd9`W\"1Ai,\x19:\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xb6\x04|\u07d3-\xb3\xe4\x04_Iv\x12#AS~\u0556\x1e\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb6\x15\xe9@\x14>\xb5\u007f\x87X\x93\xbc\x98\xa6\x1b=a\x8c\x1e\x8c\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xb6\x1c4\xfc\xac\xdap\x1aZ\xa8p$Y\u07b0\u4b83\x8d\xf8\x8a\aiZ\x92\xc2\ro\xe0\x00\x00\xe0\x94\xb60d\xbd3U\xe6\xe0~-7p$\x12Z3wlJ\xfa\x8a\b7Z*\xbc\xca$@\x00\x00\u07d4\xb65\xa4\xbcq\xfb(\xfd\xd5\xd2\xc3\"\x98:V\u0084Bni\x89\t79SM(h\x00\x00\u07d4\xb6F\u07d8\xb4\x94BtkaR\\\x81\xa3\xb0K\xa3\x10bP\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xb6YA\xd4LP\xd2Ffg\r6Gf\xe9\x91\xc0.\x11\u0089 \x86\xac5\x10R`\x00\x00\xe0\x94\xb6[\u05c0\xc7CA\x15\x16 
'VR#\xf4NT\x98\xff\x8c\x8a\x04<0\xfb\b\x84\xa9l\x00\x00\u07d4\xb6d\x11\xe3\xa0-\xed\xb7&\xfay\x10}\xc9\v\xc1\xca\xe6MH\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb6fu\x14.1\x11\xa1\xc2\xea\x1e\xb2A\x9c\xfaB\xaa\xf7\xa24\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xb6o\x92\x12K^c\x03XY\xe3\x90b\x88i\xdb\u07a9H^\x8a\x02\x15\xf85\xbcv\x9d\xa8\x00\x00\u07d4\xb6rsJ\xfc\xc2$\xe2\xe6\t\xfcQ\xd4\xf0Ys'D\xc9H\x89\x10\x04\xe2\xe4_\xb7\xee\x00\x00\xe0\x94\xb6w\x1b\v\xf3B\u007f\x9a\xe7\xa9>|.a\xeec\x94\x1f\xdb\b\x8a\x03\xfb&i)T\xbf\xc0\x00\x00\u07d4\xb6z\x80\xf1p\x19}\x96\xcd\xccJ\xb6\u02e6'\xb4\xaf\xa6\xe1,\x89\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4\xb6\x88\x99\xe7a\rL\x93\xa255\xbc\xc4H\x94[\xa1fo\x1c\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb6\xa8)3\xc9\xea\u06bd\x98\x1e]m`\xa6\x81\x8f\xf8\x06\xe3k\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\xb6\xaa\u02cc\xb3\v\xab*\xe4\xa2BF&\xe6\xe1+\x02\xd0F\x05\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xb6\xb3J&?\x10\xc3\xd2\xec\xeb\n\xccU\x9a{*\xb8\\\xe5e\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb6\xbf\xe1\xc3\xef\x94\xe1\x84o\xb9\xe3\xac\xfe\x9bP\xc3\xe9\x06\x923\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xb6\xcdt2\xd5\x16\x1b\xe7\x97h\xadE\xde>Dz\a\x98 c\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb6\xceM\xc5`\xfcs\xdci\xfbzb\xe3\x88\xdb~r\xeavO\x894]\xf1i\xe9\xa3X\x00\x00\u07d4\xb6\xde\u03c2\x96\x98\x19\xba\x02\xde)\xb9\xb5\x93\xf2\x1bd\xee\xda\x0f\x89(\x1d\x90\x1fO\xdd\x10\x00\x00\xe0\x94\xb6\xe6\xc3\"+ko\x9b\xe2\x87]*\x89\xf1'\xfbd\x10\x0f\xe2\x8a\x01\xb2\x1dS#\xcc0 \x00\x00\u07d4\xb6\xe8\xaf\xd9=\xfa\x9a\xf2\u007f9\xb4\xdf\x06\ag\x10\xbe\xe3\u07eb\x89\x01Z\xf1\u05cbX\xc4\x00\x00\xe0\x94\xb6\xf7\x8d\xa4\xf4\xd0A\xb3\xbc\x14\xbc[\xa5\x19\xa5\xba\f2\xf1(\x8a$}\xd3,?\xe1\x95\x04\x80\x00\xe0\x94\xb6\xfb9xbP\b\x14&\xa3B\xc7\rG\xeeR\x1e[\xc5c\x8a\x03-&\xd1.\x98\v`\x00\x00\u07d4\xb7\r\xba\x93\x91h+J6Nw\xfe\x99%c\x01\xa6\xc0\xbf\x1f\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb7\x16#\xf3Q\a\xcft1\xa8?\xb3\xd2\x04\xb2\x9e\u0c67\xf4\x89\x01\x11du\x9f\xfb2\x00\x00\u07d4\xb7\x1a\x13\xba\x8e\x95\x16{\x803\x1bR\u059e7\x05O\xe7\xa8&\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb7\x1bb\xf4\xb4H\xc0+\x12\x01\xcb^9J\xe6'\xb0\xa5`\xee\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xb7\" 
\xad\xe3d\xd06\x9f--\xa7\x83\xcaGM{\x9b4\u0389\x1b\x1a\xb3\x19\xf5\xecu\x00\x00\xe0\x94\xb7#\r\x1d\x1f\xf2\xac\xa3f\x969\x14\xa7\x9d\xf9\xf7\xc5\xea,\x98\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94\xb7$\n\U000af433<\b\xae\x97d\x10>5\xdc\xe3c\x84(\x8a\x01\xca\xdd/\xe9hnc\x80\x00\u07d4\xb7'\xa9\xfc\x82\xe1\xcf\xfc\\\x17_\xa1HZ\x9b\xef\xa2\u037d\u04496'\xe8\xf7\x127<\x00\x00\u07d4\xb7,*\x01\x1c\r\xf5\x0f\xbbn(\xb2\n\xe1\xaa\xd2\x17\x88g\x90\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb78-7\xdb\x03\x98\xacrA\f\xf9\x81=\xe9\xf8\xe1\uc36d\x8966\xc2^f\xec\xe7\x00\x00\u07d4\xb7;O\xf9\x9e\xb8\x8f\u061b\vmW\xa9\xbc3\x8e\x88o\xa0j\x89\x01\xbc\x16\xd6t\xec\x80\x00\x00\u07d4\xb7=jwU\x9c\x86\xcfet$)\x039K\xac\xf9n5p\x89\x04\xf1\xa7|\xcd;\xa0\x00\x00\u07d4\xb7Cr\xdb\xfa\x18\x1d\xc9$/9\xbf\x1d71\xdf\xfe+\xda\u03c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xb7G\x9d\xabP\"\xc4\xd5\u06ea\xf8\xde\x17\x1bN\x95\x1d\u0464W\x89\x04V9\x18$O@\x00\x00\u07d4\xb7I\xb5N\x04\u0571\x9b\xdc\xed\xfb\x84\xdaw\x01\xabG\x8c'\xae\x89\x91Hx\xa8\xc0^\xe0\x00\x00\u07d4\xb7N\xd2f`\x01\xc1c3\xcfz\xf5\x9eJ=H`6;\x9c\x89\n~\xbd^Cc\xa0\x00\x00\u07d4\xb7QI\xe1\x85\xf6\xe3\x92pWs\x90s\xa1\x82*\xe1\xcf\r\xf2\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4\xb7S\xa7_\x9e\xd1\v!d:\n=\xc0Qz\xc9k\x1a@h\x89\x15\xc8\x18[,\x1f\xf4\x00\x00\xe0\x94\xb7V\xadR\xf3\xbft\xa7\xd2LgG\x1e\b\x87Ci6PL\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xb7Wn\x9d1M\xf4\x1e\xc5Pd\x94):\xfb\x1b\xd5\xd3\xf6]\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb7X\x89o\x1b\xaa\x86O\x17\xeb\xed\x16\xd9S\x88o\xeeh\xaa\xe6\x8965\u026d\xc5\u07a0\x00\x00\u0794\xb7h\xb5#N\xba:\x99h\xb3Mm\xdbH\x1c\x84\x19\xb3e]\x88\xcf\xceU\xaa\x12\xb3\x00\x00\u07d4\xb7\x82\xbf\xd1\xe2\xdep\xf4gdo\x9b\xc0\x9e\xa5\xb1\xfc\xf4P\xaf\x89\x0e~\xeb\xa3A\vt\x00\x00\xe0\x94\xb7\xa2\xc1\x03r\x8bs\x05\xb5\xaen\x96\x1c\x94\xee\x99\xc9\xfe\x8e+\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\xb7\xa3\x1a|8\xf3\xdb\t2.\xae\x11\xd2'!A\xea\"\x99\x02\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb7\xa6y\x1c\x16\xebN!b\xf1Ke7\xa0+=c\xbf\xc6\x02\x89*Rc\x91\xac\x93v\x00\x00\u07d4\xb7\xa7\xf7|4\x8f\x92\xa9\xf1\x10\fk\xd8)\xa8\xacm\u007f\u03d1\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xb7\xc0w\x94ft\xba\x93A\xfbLtz]P\xf5\xd2\xdad\x15\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xb7\xc0\xd0\xcc\vM4-@b\xba\xc6$\xcc\xc3\xc7\f\xc6\xda?\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xb7\xc9\xf1+\x03\x8esCm\x17\xe1\xc1/\xfe\x1a\xec\u0373\xf5\x8c\x89\x1dF\x01b\xf5\x16\xf0\x00\x00\u07d4\xb7\xcck\x1a\xcc2\u0632\x95\xdfh\xed\x9d^`\xb8\xf6L\xb6{\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xb7\xcehK\t\xab\xdaS8\x9a\x87Si\xf7\x19X\xae\xac;\u0749lk\x93[\x8b\xbd@\x00\x00\u07d4\xb7\xd1.\x84\xa2\xe4\u01264Z\xf1\xdd\x1d\xa9\xf2PJ*\x99n\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xb7\xd2R\xee\x94\x02\xb0\xee\xf1D)_\x0ei\xf0\xdbXl\bq\x89#\xc7W\a+\x8d\xd0\x00\x00\xe0\x94\xb7\u0541\xfe\n\xf1\xec8?;\xce\x00\xaf\x91\x99\xf3\xcf_\xe0\xcc\xe2\x8c\xd1J\x89\xcf\x15&@\xc5\xc80\x00\x00\u07d4\xb8R\x18\xf3B\xf8\x01.\u069f'Nc\xce!R\xb2\xdc\xfd\xab\x89\xa8\r$g~\xfe\xf0\x00\x00\u07d4\xb8UP\x10wn<\\\xb3\x11\xa5\xad\xee\xfe\x9e\x92\xbb\x9ad\xb9\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xb8_&\xdd\x0er\xd9\u009e\xba\xf6\x97\xa8\xafwG,+X\xb5\x8a\x02\x85\x19\xac\xc7\x19\fp\x00\x00\u07d4\xb8_\xf0>{_\xc4\"\x98\x1f\xae^\x99A\xda\xcb\u06bau\x84\x89Hz\x9a0E9D\x00\x00\xe0\x94\xb8f\a\x02\x1bb\xd3@\xcf&R\xf3\xf9_\xd2\xdcgi\x8b\u07ca\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4\xb8}\xe1\xbc\u0492i\xd5!\xb8v\x1c\u00dc\xfbC\x19\xd2\xea\u054965\u026d\xc5\u07a0\x00\x00\u07d4\xb8\u007fSv\xc2\xde\vl\xc3\xc1y\xc0`\x87\xaaG=kFt\x89Hz\x9a0E9D\x00\x00\u07d4\xb8\x84\xad\u060d\x83\xdcVJ\xb8\xe0\xe0,\xbd\xb69\x19\xae\xa8D\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb8\x8a7\xc2\u007fx\xa6\x17\xd5\xc0\x91\xb7\u0577:7a\xe6_*\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb8\x94x\"\u056c\u79ad\x83&\xe9T\x96\"\x1e\v\xe6\xb7=\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb8\x9c\x03n\xd7\u0112\x87\x99!\xbeA\xe1\f\xa1i\x81\x98\xa7L\x89b\xa9\x92\xe5:\n\xf0\x00\x00\xe0\x94\xb8\x9fF2\xdfY\t\xe5\x8b*\x99d\xf7O\xeb\x9a;\x01\xe0\u014a\x04\x88u\xbc\xc6\xe7\xcb\xeb\x80\x00\u07d4\xb8\xa7\x9c\x84\x94^G\xa9\xc3C\x86\x83\u05b5\x84,\xffv\x84\xb1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xb8\xa9y5'Y\xba\t\xe3Z\xa5\x93]\xf1u\xbf\xf6x\xa1\b\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xb8\xab9\x80[\xd8!\x18Ol\xbd=$s4{\x12\xbf\x17\\\x89\x06hZ\xc1\xbf\xe3,\x00\x00\xe0\x94\xb8\xac\x11}\x9f\r\xba\x80\x90\x14E\x82:\x92\x11\x03\xa51o\x85Zew\x9d\x1b\x8a\x05\x15\n\xe8J\x8c\xdf\x00\x00\x00\u07d4\xb9\xe9\f\x11\x92\xb3\xd5\xd3\xe3\xab\a\x00\xf1\xbfe_]\xd44z\x89\x1b\x19\xe5\vD\x97|\x00\x00\u07d4\xb9\xfd83\xe8\x8e|\xf1\xfa\x98y\xbd\xf5Z\xf4\xb9\x9c\xd5\xce?\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xba\x02I\xe0\x1d\x94[\xef\x93\xee^\xc6\x19%\xe0<\\\xa5\t\xfd\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xba\x0f9\x02;\xdb)\xeb\x18b\xa9\xf9\x05\x9c\xab]0nf/\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xba\x10\xf2vB\x90\xf8uCCr\xf7\x9d\xbfq8\x01\u02ac\x01\x893\xc5I\x901r\f\x00\x00\u07d4\xba\x151\xfb\x9ey\x18\x96\xbc\xf3\xa8\x05X\xa3Y\xf6\xe7\xc1D\xbd\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\xba\x17m\xbe2I\xe3E\xcdO\xa9g\xc0\xed\x13\xb2LG\u5189\x15\xae\xf9\xf1\xc3\x1c\u007f\x00\x00\xe0\x94\xba\x1f\x0e\x03\u02da\xa0!\xf4\xdc\xeb\xfa\x94\xe5\u0209\xc9\u01fc\x9e\x8a\x06\u0450\xc4u\x16\x9a \x00\x00\u07d4\xba\x1f\xca\xf2#\x93~\xf8\x9e\x85gU\x03\xbd\xb7\xcaj\x92\x8bx\x89\"\xb1\xc8\xc1\"z\x00\x00\x00\xe0\x94\xba$\xfcCgS\xa79\xdb,\x8d@\xe6\xd4\xd0LR\x8e\x86\xfa\x8a\x02\xc0\xbb=\xd3\fN \x00\x00\u07d4\xbaB\xf9\xaa\xceL\x18E\x04\xab\xf5BWb\xac\xa2oq\xfb\u0709\x02\a\a}\u0627\x9c\x00\x00\u07d4\xbaF\x9a\xa5\u00c6\xb1\x92\x95\u0521\xb5G;T\x03S9\f\x85\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbad@\xae\xb3s{\x8e\xf0\xf1\xaf\x9b\f\x15\xf4\xc2\x14\xff\xc7\u03c965\u026d\xc5\u07a0\x00\x00\xe0\x94\xbam1\xb9\xa2a\xd6@\xb5\u07a5\x1e\xf2\x16,1\t\xf1\uba0a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xbap\xe8\xb4u\x9c\f<\x82\xcc\x00\xacN\x9a\x94\xdd[\xaf\xb2\xb8\x890C\xfa3\xc4\x12\xd7\x00\x00\u07d4\xba\x8ac\xf3\xf4\r\u4a03\x88\xbcP!/\xea\x8e\x06O\xbb\x86\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xba\x8eF\u059d.#C\xd8l`\xd8,\xf4, A\xa0\xc1\u0089\x05k\xc7^-c\x10\x00\x00\u07d4\xba\xa4\xb6L+\x15\xb7\x9f_ BF\xfdp\xbc\xbd\x86\xe4\xa9*\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xba\u0212,J\xcc},\xb6\xfdY\xa1N\xb4\\\xf3\xe7\x02!K\x89+^:\xf1k\x18\x80\x00\x00\u07d4\xba\xd25\xd5\b]\u01f0h\xa6|A&w\xb0>\x186\x88L\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xba\xd4B^\x17\x1c>r\x97^\xb4j\xc0\xa0\x15\xdb1Z]\x8f\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xba\xdc*\xef\x9fYQ\xa8\u05cak5\xc3\u0433\xa4\xe6\xe2\xe79\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xba\xdeCY\x9e\x02\xf8OL0\x14W\x1c\x97k\x13\xa3le\xab\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xba\xe9\xb8/r\x99c\x14\be\x9d\xd7N\x89\x1c\xb8\xf3\x86\x0f\xe5\x89j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94\xbb\x03f\xa7\u03fd4E\xa7\r\xb7\xfeZ\xe3H\x85uO\xd4h\x8a\x01M\xef,B\xeb\xd6@\x00\x00\u07d4\xbb\aj\xac\x92 
\x80i\xea1\x8a1\xff\x8e\xeb\x14\xb7\xe9\x96\xe3\x89\b\x13\xcaV\x90m4\x00\x00\u07d4\xbb\bW\xf1\xc9\x11\xb2K\x86\u0227\x06\x81G?\u6aa1\xcc\xe2\x89\x05k\xc7^-c\x10\x00\x00\u0794\xbb\x19\xbf\x91\u02edt\xcc\xeb_\x81\x1d\xb2~A\x1b\xc2\xea\x06V\x88\xf4?\xc2\xc0N\xe0\x00\x00\xe0\x94\xbb'\u01a7\xf9\x10uGZ\xb2)a\x90@\xf8\x04\xc8\xeczj\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xbb7\x1cr\xc9\xf01l\xea+\xd9\xc6\xfb\xb4\a\x9ewT)\xef\x89_h\xe8\x13\x1e\u03c0\x00\x00\xe0\x94\xbb;\x01\v\x18\xe6\xe2\xbe\x115\x87\x10&\xb7\xba\x15\xea\x0f\xde$\x8a\x02 |\x800\x9bwp\x00\x00\xe0\x94\xbb;\x90\x05\xf4o\xd2\xca;0\x16%\x99\x92\x8cw\xd9\xf6\xb6\x01\x8a\x01\xb1\xae\u007f+\x1b\xf7\xdb\x00\x00\u07d4\xbb?\xc0\xa2\x9c\x03Mq\b\x12\xdc\xc7u\xc8\u02b9\u048diu\x899\xd4\xe8D\xd1\xcf_\x00\x00\u07d4\xbbH\xea\xf5\x16\xce-\xec>A\xfe\xb4\xc6y\xe4\x95vA\x16O\x89\xcf\x15&@\xc5\xc80\x00\x00\u07d4\xbbKJKT\x80p\xffAC,\x9e\b\xa0\xcao\xa7\xbc\x9fv\x89.\x14\x1e\xa0\x81\xca\b\x00\x00\u07d4\xbbV\xa4\x04r<\xff \xd0hT\x88\xb0Z\x02\xcd\xc3Z\xac\xaa\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xbba\x8e%\"\x1a\u0667@\xb2\x99\xed\x14\x06\xbc94\xb0\xb1m\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbba\xa0K\xff\xd5|\x10G\rE\u00d1\x03\xf6FP4v\x16\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbbh#\xa1\xbd\x81\x9f\x13QU8&J-\xe0R\xb4D\"\b\x89\x01ch\xffO\xf9\xc1\x00\x00\u07d4\xbbl(J\xac\x8ai\xb7\\\u0770\x0f(\xe1EX;V\xbe\u0389lk\x93[\x8b\xbd@\x00\x00\u07d4\xbbu\xcbPQ\xa0\xb0\x94KFs\xcau*\x97\x03\u007f|\x8c\x15\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xbb\x99;\x96\xee\x92Z\xda}\x99\u05c6W=?\x89\x18\f\u3a89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbb\xa3\u0180\x04$\x8eH\x95s\xab\xb2t6w\x06k$\u0227\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbb\xa4\xfa\xc3\xc4 9\xd8(\xe7B\xcd\xe0\xef\xff\xe7t\x94\x1b9\x89lj\u04c2\xd4\xfba\x00\x00\u07d4\xbb\xa8\xab\"\xd2\xfe\xdb\xcf\xc6?hL\b\xaf\xdf\x1c\x17P\x90\xb5\x89\x05_)\xf3~N;\x80\x00\u07d4\xbb\xa9v\xf1\xa1!_u\x12\x87\x18\x92\xd4_pH\xac\xd3V\u0209lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xbb\xab\x00\v\x04\b\xed\x01Z7\xc0GG\xbcF\x1a\xb1N\x15\x1b\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xbb\xab\xf6d;\xebK\xd0\x1c\x12\v\xd0Y\x8a\t\x87\xd8)g\u0449\xb52\x81x\xad\x0f*\x00\x00\u07d4\xbb\xb4\xee\x1d\x82\xf2\xe1VD,\xc938\xa2\xfc(o\xa2\x88d\x89JD\x91\xbdm\xcd(\x00\x00\u07d4\xbb\xb5\xa0\xf4\x80,\x86H\x00\x9e\x8ai\x98\xaf5,\u0787TO\x89\x05-T(\x04\xf1\xce\x00\x00\u07d4\xbb\xb6C\xd2\x18{6J\xfc\x10\xa6\xfd6\x8d}U\xf5\r\x1a<\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbb\xb8\xff\xe4?\x98\u078e\xae\x18F#\xaeRd\xe4$\u0438\u05c9\x05\xd5?\xfd\xe9(\b\x00\x00\u07d4\xbb\xbdn\u02f5u(\x91\xb4\u03b3\xcc\xe7:\x8fGpY7o\x89\x01\xf3\x99\xb1C\x8a\x10\x00\x00\u07d4\xbb\xbf9\xb1\xb6y\x95\xa4\"APO\x97\x03\u04a1JQV\x96\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xbb\xc8\xea\xffc~\x94\xfc\u014d\x91\xdb\\\x89\x12\x1d\x06\xe1/\xff\x98\x80\x00\u07d4\xbc\u065e\xdc!`\xf2\x10\xa0^:\x1f\xa0\xb0CL\xed\x00C\x9b\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbc\u07ec\xb9\xd9\x02<4\x17\x18.\x91\x00\xe8\xea\x1d73\x93\xa3\x89\x034-`\xdf\xf1\x96\x00\x00\u07d4\xbc\xe1>\"2*\u03f3U\xcd!\xfd\r\xf6\f\xf9:\xdd&\u0189\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xbc\xe4\x04u\xd3E\xb0q-\xeep=\x87\xcdvW\xfc\u007f;b\x8a\x01\xa4 \xdb\x02\xbd}X\x00\x00\u07d4\xbc\xed\xc4&|\u02c9\xb3\x1b\xb7d\xd7!\x11q\x00\x8d\x94\xd4M\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xbc\xfc\x98\xe5\xc8+j\xdb\x18\n?\xcb\x12\v\x9av\x90\xc8j?\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xbd\x04;g\xc6>`\xf8A\xcc\xca\x15\xb1)\xcd\xfee\x90\xc8\xe3\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xbd\x04\u007f\xf1\xe6\x9c\u01b2\x9a\xd2d\x97\xa9\xa6\xf2z\x90?\xc4\u0749.\xe4IU\b\x98\xe4\x00\x00\u07d4\xbd\b\xe0\xcd\xde\xc0\x97\xdby\x01\ua05a=\x1f\xd9\u0789Q\xa2\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xbd\t\x12l\x89\x1cJ\x83\x06\x80Y\xfe\x0e\x15ylFa\xa9\xf4\x89+^:\xf1k\x18\x80\x00\x00\u07d4\xbd\f\\\u05d9\xeb\u0106B\xef\x97\xd7N\x8eB\x90d\xfe\u4489\x11\xac(\xa8\xc7)X\x00\x00\u07d4\xbd\x17\xee\xd8+\x9a%\x92\x01\x9a\x1b\x1b<\x0f\xba\xd4\\@\x8d\"\x89\r\x8drkqw\xa8\x00\x00\u07d4\xbd\x18\x037\v\u0771)\xd29\xfd\x16\xea\x85&\xa6\x18\x8a\u5389\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xbd+p\xfe\xcc7d\x0fiQO\xc7\xf3@IF\xaa\xd8k\x11\x89A\rXj \xa4\xc0\x00\x00\u07d4\xbd0\x97\xa7\x9b<\r.\xbf\xf0\xe6\xe8j\xb0\xed\xad\xbe\xd4p\x96\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4\xbd2]@)\xe0\xd8r\x9fm9\x9cG\x82$\xae\x9ez\xe4\x1e\x89\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4\xbdC*9\x16$\x9bG$):\xf9\x14nI\xb8(\n\u007f*\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xbdG\xf5\xf7n;\x93\x0f\xd9HR\t\xef\xa0\xd4v=\xa0uh\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbdK`\xfa\xect\n!\xe3\a\x13\x91\xf9j\xa54\xf7\xc1\xf4N\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4\xbdK\u0571\"\xd8\xef{|\x8f\x06gE\x03 \xdb!\x16\x14.\x89 \x86\xac5\x10R`\x00\x00\u07d4\xbdQ\xee.\xa1C\u05f1\u05b7~~D\xbd\xd7\xda\x12\U00105b09G~\x06\u0332\xb9(\x00\x00\u07d4\xbdY\tN\aO\x8dy\x14*\xb1H\x9f\x14\x8e2\x15\x1f \x89\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xbdZ\x8c\x94\xbd\x8b\xe6G\x06D\xf7\f\x8f\x8a3\xa8\xa5\\cA\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xbd^G:\xbc\xe8\xf9zi2\xf7|/\xac\xaf\x9c\xc0\xa0\x05\x14\x89<\x92X\xa1\x06\xa6\xb7\x00\x00\u07d4\xbd_F\u02ab,=K(\x93\x96\xbb\xb0\u007f *\x06\x11>\xd4\xc3\xfb\xa1\xa8\x91;\x19@~\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbd\x9eV\xe9\x02\xf4\xbe\x1f\xc8v\x8d\x808\xba\xc6>*\u02ff\x8e\x8965f3\xeb\xd8\xea\x00\x00\u07d4\xbd\xa4\xbe1~~K\xed\x84\xc0I^\xee2\xd6\a\xec8\xcaR\x89}2'yx\xefN\x80\x00\u07d4\xbd\xb6\v\x82:\x11s\xd4Z\a\x92$_\xb4\x96\xf1\xfd3\x01\u03c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xbd\xba\xf6CM@\xd65[\x1e\x80\xe4\f\u012b\x9ch\xd9a\x16\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xbd\xc0,\xd43\f\x93\xd6\xfb\xdaOm\xb2\xa8]\xf2/C\xc23\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbd\xc4aF+c\"\xb4b\xbd\xb3?\"y\x9e\x81\b\xe2A}\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\xbd\xc79\xa6\x99p\v.\x8e,JL{\x05\x8a\x0eQ=\u07be\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbd\xc7Hs\xaf\x92+\x9d\xf4t\x85;\x0f\xa7\xff\v\xf8\xc8&\x95\x89\xd8\xc9F\x00c\xd3\x1c\x00\x00\u07d4\xbd\xca*\x0f\xf3E\x88\xafb_\xa8\xe2\x8f\xc3\x01Z\xb5\xa3\xaa\x00\x89~\xd7?w5R\xfc\x00\x00\u07d4\xbd\xd3%N\x1b:m\xc6\xcc,i}Eq\x1a\xca!\xd5\x16\xb2\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xbd\u07e3M\x0e\xbf\x1b\x04\xafS\xb9\x9b\x82IJ\x9e=\x8a\xa1\x00\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xbd\xe4\xc7?\x96\x9b\x89\xe9\u03aef\xa2\xb5\x18DH\x0e\x03\x8e\x9a\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xbd\xe9xj\x84\xe7[H\xf1\x8erm\u05cdp\xe4\xaf>\xd8\x02\x8a\x016\x9f\xb9a(\xacH\x00\x00\u07d4\xbd\xed\x11a/\xb5\xc6\u0699\xd1\xe3\x0e2\v\xc0\x99Tf\x14\x1e\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xbd\xed~\a\xd0q\x1ehM\xe6Z\u0232\xabW\xc5\\\x1a\x86E\x89 
\t\xc5\u023fo\xdc\x00\x00\u07d4\xbd\xf6\x93\xf83\xc3\xfeG\x17S\x18G\x88\xebK\xfeJ\xdc?\x96\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xbd\xf6\xe6\x8c\f\xd7X@\x80\xe8G\xd7,\xbb#\xaa\xd4j\xeb\x1d\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xbe\n/8_\t\xdb\xfc\xe9g2\xe1+\xb4\n\xc3I\x87\x1b\xa8\x89WL\x11^\x02\xb8\xbe\x00\x00\u07d4\xbe\f*\x80\xb9\xde\bK\x17(\x94\xa7l\xf4szOR\x9e\x1a\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xbe\x1c\xd7\xf4\xc4r\a\th\xf3\xbd\xe2h6k!\xee\xea\x83!\x89\xe9\x1a|\u045f\xa3\xb0\x00\x00\u07d4\xbe#F\xa2\u007f\xf9\xb7\x02\x04OP\r\xef\xf2\xe7\xff\xe6\x82EA\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xbe$q\xa6\u007f`G\x91\x87r\xd0\xe3h9%^\xd9\u0591\xae\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xbe+\"\x80R7h\xea\x8a\xc3\\\xd9\xe8\x88\xd6\nq\x93\x00\u0509lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xbe+2nx\xed\x10\xe5P\xfe\xe8\xef\xa8\xf8\a\x03\x96R/Z\x8a\bW\xe0\xd6\xf1\xdav\xa0\x00\x00\xe0\x94\xbe0Zyn3\xbb\xf7\xf9\xae\xaee\x12\x95\x90f\xef\xda\x10\x10\x8a\x02M\xceT\xd3J\x1a\x00\x00\x00\u07d4\xbeG\x8e\x8e=\xdek\xd4\x03\xbb-\x1ce|C\x10\xee\x19'#\x89\x1a\xb2\xcf|\x9f\x87\xe2\x00\x00\u07d4\xbeN}\x98?.*ck\x11\x02\xecp9\xef\xeb\xc8B\u9349\x03\x93\xef\x1aQ'\xc8\x00\x00\u07d4\xbeO\xd0sap\"\xb6\u007f\\\x13I\x9b\x82\u007fv69\xe4\xe3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbeRZ3\xea\x91aw\xf1r\x83\xfc\xa2\x9e\x8b5\v\u007fS\v\x89\x8f\x01\x9a\xafF\xe8x\x00\x00\u07d4\xbeS2/C\xfb\xb5\x84\x94\xd7\xcc\xe1\x9d\xda'+$P\xe8'\x89\n\xd7\u03afB\\\x15\x00\x00\u07d4\xbeS\x82F\xddNo\f \xbfZ\xd17<;F:\x13\x1e\x86\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xbeZ`h\x99\x98c\x9a\xd7[\xc1\x05\xa3qt>\xef\x0fy@\x89\x1b2|s\xe1%z\x00\x00\u07d4\xbe\\\xba\x8d7By\x86\xe8\xca&\x00\xe8X\xbb\x03\xc3YR\x0f\x89\xa00\xdc\xeb\xbd/L\x00\x00\u07d4\xbe`\x03~\x90qJK\x91~a\xf1\x93\xd84\x90g\x03\xb1:\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xbec:77\xf6\x849\xba\xc7\xc9\nR\x14 
X\ue38ao\x894\n\xad!\xb3\xb7\x00\x00\x00\xe0\x94\xbee\x9d\x85\xe7\xc3O\x883\xea\u007fH\x8d\xe1\xfb\xb5\xd4\x14\x9b\xef\x8a\x01\xeb\xd2:\xd9\u057br\x00\x00\u07d4\xbes'M\x8cZ\xa4J<\xbe\xfc\x82c\xc3{\xa1!\xb2\n\u04c9\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xbe\x86\u0430C\x84\x19\u03b1\xa081\x927\xbaR\x06\xd7.F\x8964\xfb\x9f\x14\x89\xa7\x00\x00\u07d4\xbe\x8d\u007f\x18\xad\xfe]l\xc7u9I\x89\xe1\x93\f\x97\x9d\x00}\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbe\x91\x86\xc3JRQJ\xbb\x91\a\x86\x0fgO\x97\xb8!\xbd[\x89\x1b\xa0\x1e\xe4\x06\x03\x10\x00\x00\u07d4\xbe\x93W\x93\xf4[p\xd8\x04]&T\xd8\xdd:\xd2K[a7\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4\xbe\x98\xa7\u007f\xd4\x10\x97\xb3OY\xd7X\x9b\xaa\xd0!e\x9f\xf7\x12\x890\xca\x02O\x98{\x90\x00\x00\u07d4\xbe\x9b\x8c4\xb7\x8e\xe9G\xff\x81G.\xdaz\xf9\xd2\x04\xbc\x84f\x89\b!\xab\rD\x14\x98\x00\x00\u07d4\xbe\xa0\r\xf1pg\xa4:\x82\xbc\x1d\xae\xca\xfbl\x140\x0e\x89\xe6\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xbe\xa0\xaf\xc9:\xae!\b\xa3\xfa\xc0Yb;\xf8o\xa5\x82\xa7^\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xbe\xb35\x8cP\u03dfu\xff\xc7mD<,\u007fU\aZ\x05\x89\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4\xbe\xb4\xfd1UYC`E\u0739\x9dI\xdc\xec\x03\xf4\fB\u0709lk\x93[\x8b\xbd@\x00\x00\u07d4\xbe\xc2\xe6\xde9\xc0|+\xaeUj\u03fe\xe2\xc4r\x8b\x99\x82\xe3\x89\x1f\x0f\xf8\xf0\x1d\xaa\xd4\x00\x00\u07d4\xbe\xc6d\x0fI\t\xb5\x8c\xbf\x1e\x80cB\x96\x1d`u\x95\tl\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xbe\xc8\xca\xf7\xeeIF\x8f\xeeU.\xff:\xc5#N\xb9\xb1}B\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbe\xce\xf6\x1c\x1cD+\xef|\xe0Ks\xad\xb2I\xa8\xba\x04~\x00\x896;V\u00e7T\xc8\x00\x00\u0794\xbe\xd4d\x9d\xf6F\u2052)\x03-\x88hUo\xe1\xe0S\u04c8\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x94\xbe\xd4\xc8\xf0\x06\xa2|\x1e_|\xe2\x05\xdeu\xf5\x16\xbf\xb9\xf7d\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4\xbe\xe8\u0430\bB\x19T\xf9-\x00\r9\x0f\xb8\xf8\xe6X\xea\xee\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbe\xec\u05af\x90\f\x8b\x06J\xfc\xc6\a?-\x85\u055a\xf1\x19V\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xbe\xef\x94!8y\xe0&\"\x14+\xeaa)\tx\x93\x9a`\u05ca\x016\x85{2\xad\x86\x04\x80\x00\xe0\x94\xbe\xf0}\x97\xc3H\x1f\x9dj\xee\x1c\x98\xf9\xd9\x1a\x18\n2D+\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xbe\xfbD\x8c\f_h?\xb6~\xe5p\xba\xf0\xdbV\x86Y\x97Q\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xbf\x05\a\f,4!\x93\x11\xc4T\x8b&\x14\xa48\x81\r\xedm\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbf\x05\xff^\xcf\r\xf2\u07c8wY\xfb\x82t\xd928\xac&}\x89+^:\xf1k\x18\x80\x00\x00\xe0\x94\xbf\t\xd7pH\xe2p\xb6b3\x0e\x94\x86\xb3\x8bC\xcdx\x14\x95\x8a\\S\x9b{\xf4\xff(\x80\x00\x00\u07d4\xbf\x17\xf3\x97\xf8\xf4o\x1b\xaeE\u0447\x14\x8c\x06\xee\xb9Y\xfaM\x896I\u0156$\xbb0\x00\x00\u07d4\xbf\x186A\xed\xb8\x86\xce`\xb8\x19\x02a\xe1OB\xd9<\xce\x01\x89\x01[5W\xf1\x93\u007f\x80\x00\u07d4\xbf*\xeaZ\x1d\xcfn\u04f5\xe829D\xe9\x83\xfe\xdf\u046c\xfb\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xbf@\x96\xbcT}\xbf\xc4\xe7H\t\xa3\x1c\x03\x9e{8\x9d^\x17\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\xbfI\xc1H\x981eg\u0637\t\xc2\xe5\x05\x94\xb3f\xc6\u04cc\x89'\xbf8\xc6TM\xf5\x00\x00\u07d4\xbfLs\xa7\xed\xe7\xb1d\xfe\a!\x14\x846T\xe4\xd8x\x1d\u0789lk\x93[\x8b\xbd@\x00\x00\u07d4\xbfP\xce.&K\x9f\xe2\xb0h0az\xed\xf5\x02\xb25\x1bE\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbfY\xae\xe2\x81\xfaC\xfe\x97\x19CQ\xa9\x85~\x01\xa3\xb8\x97\xb2\x89 
\x86\xac5\x10R`\x00\x00\u07d4\xbfh\u048a\xaf\x1e\xee\xfe\xf6F\xb6^\x8c\xc8\u0450\xf6\xc6\u069c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xbfi%\xc0\aQ\x00\x84@\xa6s\x9a\x02\xbf+l\u06ab^:\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xbfw\x01\xfcb%\u0561x\x15C\x8a\x89A\xd2\x1e\xbc]\x05\x9d\x89e\xea=\xb7UF`\x00\x00\u07d4\xbf\x8b\x80\x05\xd66\xa4\x96d\xf7Bu\xefBC\x8a\xcde\xac\x91\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xbf\x92A\x8a\fl1$M\"\x02`\xcb>\x86}\u05f4\xefI\x89\x05i\x00\xd3<\xa7\xfc\x00\x00\u07d4\xbf\x9a\xcdDE\xd9\xc9UF\x89\u02bb\xba\xb1\x88\x00\xff\x17A\u008965\u026d\xc5\u07a0\x00\x00\u07d4\xbf\x9f'\x1fz~\x12\xe3m\xd2\xfe\x9f\xac\xeb\xf3\x85\xfeaB\xbd\x89\x03f\xf8O{\xb7\x84\x00\x00\u07d4\xbf\xa8\xc8X\xdf\x10,\xb1$!\x00\x8b\n1\xc4\xc7\x19\n\xd5`\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xbf\xae\xb9\x10ga}\u03cbD\x17+\x02\xafaVt\x83]\xba\x89\b\xb5\x9e\x88H\x13\b\x80\x00\xe0\x94\xbf\xb0\xea\x02\xfe\xb6\x1d\xec\x9e\"\xa5\a\tY3\x02\x99\xc40r\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xbf\xbc\xa4\x18\xd3R\x9c\xb3\x93\b\x10b\x03*n\x11\x83\u01b2\u070a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xbf\xbe\x05\u831c\xbb\xcc\x0e\x92\xa4\x05\xfa\xc1\xd8]\xe2H\xee$\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xbf\xbf\xbc\xb6V\u0099+\xe8\xfc\u0782\x19\xfb\xc5J\xad\u055f)\x8a\x02\x1e\x18\xd2\xc8!\xc7R\x00\x00\u07d4\xbf\xc5z\xa6f\xfa\u239f\x10zI\xcbP\x89\xa4\xe2!Q\u074965\u026d\xc5\u07a0\x00\x00\u07d4\xbf\u02d70$c\x04p\r\xa9\vAS\xe7\x11Ab.\x1cA\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xbf\xd9<\x90\u009c\a\xbc_\xb5\xfcI\xae\xeaU\xa4\x0e\x13O5\x8a\x05\xed\xe2\x0f\x01\xa4Y\x80\x00\x00\xe0\x94\xbf\xe3\xa1\xfcn$\xc8\xf7\xb3%\x05`\x99\x1f\x93\u02e2\u03c0G\x8a\x10\xf0\xcf\x06M\u0552\x00\x00\x00\u07d4\xbf\u6f30\xf0\xc0xRd3$\xaa]\xf5\xfdb%\xab\xc3\u0289\x04\t\xe5+H6\x9a\x00\x00\u07d4\xbf\xf5\xdfv\x994\xb8\x94<\xa9\x13}\x0e\xfe\xf2\xfen\xbb\xb3N\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xbf\xfbi)$\x1fx\x86\x93'>p\"\xe6\x0e>\xab\x1f\xe8O\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc0\x06O\x1d\x94t\xab\x91]V\x90l\x9f\xb3 \xa2\xc7\t\x8c\x9b\x89\x13h?\u007f<\x15\xd8\x00\x00\u07d4\xc0\a\xf0\xbd\xb6\xe7\x00\x92\x02\xb7\xaf>\xa9\t\x02i|r\x14\x13\x89\xa2\xa0\xe4>\u007f\xb9\x83\x00\x00\u07d4\xc0\n\xb0\x80\xb6C\xe1\u00ba\xe3c\xe0\u0455\xde.\xff\xfc\x1cD\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u0794\xc0 wD\x9a\x13Jz\xd1\xef~M\x92z\xff\xec\ueb75\xae\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xc0$q\xe3\xfc.\xa0S&\x15\xa7W\x1dI2\x89\xc1<6\xef\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc0-n\xad\xea\xcf\x1bx\xb3\u0285\x03\\c{\xb1\xce\x01\xf4\x90\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc03\xb12Z\n\xf4Tr\xc2U'\x85;\x1f\x1c!\xfa5\u0789lk\x93[\x8b\xbd@\x00\x00\u07d4\xc03\xbe\x10\xcbHa;\xd5\xeb\xcb3\xedI\x02\xf3\x8bX0\x03\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xc04[3\xf4\x9c\xe2\u007f\xe8,\xf7\xc8M\x14\x1ch\xf5\x90\xcev\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc0=\xe4*\x10\x9bezd\xe9\"$\xc0\x8d\xc1'^\x80\u0672\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc0@i\u07f1\x8b\tlxg\xf8\xbe\xe7zm\xc7Gz\xd0b\x89\x90\xf54`\x8ar\x88\x00\x00\xe0\x94\xc0A?Z|-\x9aK\x81\b(\x9e\xf6\xec\xd2qx\x15$\xf4\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\xc0C\xf2E-\u02d6\x02\xefb\xbd6\x0e\x03=\xd29q\xfe\x84\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xc0OK\xd4\x04\x9f\x04F\x85\xb8\x83\xb6)Y\xaec\x1df~5\x8a\x01;\x80\xb9\x9cQ\x85p\x00\x00\u07d4\xc0V\u053dk\xf3\u02ec\xace\xf8\xf5\xa0\xe3\x98\v\x85'@\xae\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xc0[t\x06 
\xf1s\xf1nRG\x1d\u00cb\x9cQJ\v\x15&\x89\a\x96\xe3\xea?\x8a\xb0\x00\x00\u07d4\xc0i\xef\x0e\xb3B\x99\xab\xd2\xe3-\xab\xc4yD\xb2r3H$\x89\x06\x81U\xa46v\xe0\x00\x00\u07d4\xc0l\xeb\xbb\xf7\xf5\x14\x9af\xf7\xeb\x97k>G\xd5e\x16\xda/\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xc0r^\u00bd\xc3:\x1d\x82`q\u07a2\x9db\xd48Z\x8c%\x8a\b\xa0\x85\x13F:\xa6\x10\x00\x00\u07d4\xc0~8g\xad\xa0\x96\x80z\x05\x1al\x9c4\xcc;?J\xd3J\x89`\xf0f \xa8IE\x00\x00\u07d4\xc0\x89^\xfd\x05m\x9a:\x81\xc3\xdaW\x8a\xda1\x1b\xfb\x93V\u03c9\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xc0\x90\xfe#\xdc\xd8k5\x8c2\xe4\x8d*\xf9\x10$%\x9fef\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xc0\x9af\x17*\xea7\r\x9ac\xda\x04\xffq\xff\xbb\xfc\xff\u007f\x94\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc0\x9e<\xfc\x19\xf6\x05\xff>\xc9\xc9\xc7\x0e%@\xd7\xee\x97Cf\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xc0\xa0*\xb9N\xbeV\xd0E\xb4\x1bb\x9b\x98F.:\x02J\x93\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xc0\xa3\x93\b\xa8\x0e\x9e\x84\xaa\xaf\x16\xac\x01\xe3\xb0\x1dt\xbdk-\x89\afM\xddL\x1c\v\x80\x00\u07d4\xc0\xa6\u02edwi*=\x88\xd1A\xefv\x9a\x99\xbb\x9e<\x99Q\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xc0\xa7\xe8C]\xff\x14\xc2Uws\x9d\xb5\\$\u057fW\xa3\u064a\nm\xd9\f\xaeQ\x14H\x00\x00\u07d4\xc0\xae\x14\xd7$\x83./\xce'x\xde\u007f{\x8d\xaf{\x12\xa9>\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc0\xaf\xb7\u0637\x93p\xcf\xd6c\u018c\u01b9p*7\u035e\xff\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc0\xb0\xb7\xa8\xa6\xe1\xac\xdd\x05\xe4\u007f\x94\xc0\x96\x88\xaa\x16\u01ed\x8d\x89\x03{m\x02\xacvq\x00\x00\xe0\x94\xc0\xb3\xf2D\xbc\xa7\xb7\xde[H\xa5>\u06dc\xbe\xab\vm\x88\xc0\x8a\x01;\x80\xb9\x9cQ\x85p\x00\x00\u07d4\xc0\xc0M\x01\x06\x81\x0e>\xc0\xe5J\x19\U000ab157\xe6\x9aW=\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\xc0\xca2w\x94.tE\x87K\xe3\x1c\xeb\x90)rqO\x18#\x89\r\x8drkqw\xa8\x00\x00\u07d4\xc0\u02ed<\xcd\xf6T\xda\"\xcb\xcf\\xe\x97\xca\x19U\xc1\x15\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc0\xcb\xf6\x03/\xa3\x9e|F\xffw\x8a\x94\xf7\xd4E\xfe\"\xcf0\x89\x10\xce\x1d=\x8c\xb3\x18\x00\x00\u07d4\xc0\xe0\xb9\x03\b\x8e\fc\xf5=\xd0iWTR\xaf\xf5$\x10\u00c9\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xc0\xe4W\xbdV\xec6\xa1$k\xfa20\xff\xf3\x8eY&\xef\"\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xc0\xed\rJ\xd1\r\xe045\xb1S\xa0\xfc%\xde;\x93\xf4R\x04\x89\xabM\xcf9\x9a:`\x00\x00\u07d4\xc0\xf2\x9e\xd0\af\x11\xb5\xe5^\x13\x05G\xe6\x8aH\xe2m\xf5\u4262\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xc1\x13(x#\\]\u06e5\xd9\xf3\"\x8bR6\xe4p \xdco\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc1\x17\r\xba\xad\xb3\xde\xe6\x19\x8e\xa5D\xba\xec\x93%\x18`\xfd\xa5\x89A\rXj \xa4\xc0\x00\x00\xe0\x94\xc1&W=\x87\xb0\x17ZR\x95\xf1\xdd\a\xc5u\u03cc\xfa\x15\xf2\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xc1'\xaa\xb5\x90e\xa2\x86D\xa5k\xa3\xf1^.\xac\x13\xda)\x95\x89 \x86\xac5\x10R`\x00\x00\xe0\x94\xc1+\u007f@\u07da/{\xf9\x83f\x14\"\xab\x84\xc9\xc1\xf5\bX\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xc1,\xfb{=\xf7\x0f\xce\xca\x0e\xde&5\x00\xe2xs\xf8\xed\x16\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc1/\x88\x1f\xa1\x12\xb8\x19\x9e\xcb\xc7>\xc4\x18W\x90\xe6\x14\xa2\x0f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc18Lnq~\xbeK#\x01NQ\xf3\x1c\x9d\xf7\xe4\xe2[1\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xc1C\x8c\x99\xddQ\xef\x1c\xa88j\xf0\xa3\x17\xe9\xb0AEx\x88\x89\f\x1d\xaf\x81\u0623\xce\x00\x00\u07d4\xc1c\x12(\xef\xbf*.:@\x92\xee\x89\x00\xc69\xed4\xfb\u02093\xc5I\x901r\f\x00\x00\u07d4\xc1u\xbe1\x94\xe6iB-\x15\xfe\xe8\x1e\xb9\xf2\xc5lg\xd9\u0249\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xc1\x82v\x86\xc0\x16\x94\x85\xec\x15\xb3\xa7\xc8\xc0\x15\x17\xa2\x87M\xe1\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xc1\x8a\xb4g\xfe\xb5\xa0\xaa\xdf\xff\x91#\x0f\xf0VFMx\xd8\x00\x89lk\x93[\x8b\xbd@\x00\x00\u0794\xc1\x95\x05CUM\x8aq0\x03\xf6b\xbba,\x10\xadL\xdf!\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xc1\xa4\x1aZ'\x19\x92&\xe4\xc7\xeb\x19\x8b\x03\x1bY\x19o\x98B\x89\nZ\xa8P\t\xe3\x9c\x00\x00\u07d4\xc1\xb2\xa0\xfb\x9c\xadE\xcdi\x91\x92\xcd'T\v\x88\xd38By\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xc1\xb2\xaa\x8c\xb2\xbfb\xcd\xc1:G\xec\xc4e\u007f\xac\xaa\x99_\x98\x8967\x93\xfa\x96\u6980\x00\u07d4\xc1\xb5\x00\x01\x1c\xfb\xa9]|\xd66\xe9^l\xbfagFK%\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xc1\xb9\xa5pM5\x1c\xfe\x98?y\xab\xee\xc3\u06fb\xae;\xb6)\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc1\xcb\xd2\xe23*RL\xf2\x19\xb1\r\x87\x1c\xcc \xaf\x1f\xb0\xfa\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xc1\xcd\xc6\x01\xf8\x9c\x04(\xb3\x13\x02\u0447\xe0\xdc\b\xad}\x1cW\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xc1\u052f8\xe9\xbay\x90@\x89HI\xb8\xa8!\x93u\xf1\xacx\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xc1\xe1@\x9c\xa5,%CQ4\xd0\x06\u00a6\xa8T-\xfbrs\x89\x01\xdd\x1eK\xd8\xd1\xee\x00\x00\u07d4\xc1\xeb\xa5hJ\xa1\xb2L\xbac\x15\x02c\xb7\xa9\x13\x1a\xee\u008d\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc1\xec\x81\xdd\x12=K|-\u0674\xd48\xa7\a,\x11\u0707L\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc1\xf3\x9b\xd3]\xd9\xce\xc37\xb9oG\xc6w\x81\x81`\xdf7\xb7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u0794\xc1\xff\xad\a\u06d6\x13\x8cK*S\x0e\xc1\xc7\xde)\xb8\xa0Y,\x88\xf4?\xc2\xc0N\xe0\x00\x00\xe0\x94\xc2\x1f\xa6d:\x1f\x14\xc0)\x96\xadqD\xb7Y&\xe8~\xcbK\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xc24\nL\xa9L\x96x\xb7IL<\x85%(\xed\xe5\xeeR\x9f\x89\x02\xa3k\x05\xa3\xfd|\x80\x00\u07d4\xc29\xab\u07ee>\x9a\xf5E\u007fR\xed+\x91\xfd\n\xb4\xd9\xc7\x00\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc2;/\x92\x1c\xe4\xa3z%\x9e\u4b4b!X\xd1]fOY\x89\x01`\x89\x95\xe8\xbd?\x80\x00\u07d4\xc2C\x99\xb4\xbf\x86\xf73\x8f\xbfd^;\"\xb0\u0dd79\x12\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc2L\u03bc#D\xcc\xe5d\x17\xfbhL\xf8\x16\x13\xf0\xf4\xb9\xbd\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\xc2Rf\xc7gf2\xf1>\xf2\x9b\xe4U\ud50a\xddVw\x92\x89Hz\x9a0E9D\x00\x00\u07d4\xc2\\\xf8&U\f\x8e\xaf\x10\xaf\"4\xfe\xf9\x04\u0779R\x13\xbe\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc2f?\x81E\xdb\xfe\xc6\xc6F\xfc\\I\x96\x13E\xde\x1c\x9f\x11\x89%g\xacp9+\x88\x00\x00\u07d4\xc2pEh\x854+d\vL\xfc\x1bR\x0e\x1aTN\xe0\xd5q\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xc2sv\xf4]!\xe1^\xde;&\xf2e_\xce\xe0,\xcc\x0f*\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xc2w\x97q\xf0Smy\xa8p\x8fi1\xab\xc4K05\u964a\x047\u04ca\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xc2\xc1>r\xd2h\xe7\x15\r\u01d9\xe7\xc6\xcf\x03\u0209T\xce\u05c9%\xf2s\x93=\xb5p\x00\x00\u07d4\xc2\xcb\x1a\xda]\xa9\xa0B8s\x81G\x93\xf1aD\xef6\xb2\xf3\x89HU~;p\x17\xdf\x00\x00\u07d4\xc2\xd1w\x8e\xf6\xee_\xe4\x88\xc1E\xf3Xkn\xbb\xe3\xfb\xb4E\x89>\x1f\xf1\xe0;U\xa8\x00\x00\xe0\x94\xc2\xd9\xee\xdb\xc9\x01\x92c\xd9\xd1l\u016e\a-\x1d=\xd9\xdb\x03\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xc2\xe0XJq4\x8c\xc3\x14\xb7; )\xb6#\v\x92\u06f1\x16\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc2\xe2\u0518\xf7\r\xcd\bY\xe5\v\x02:q\nmK!3\xbd\x8989\x11\xf0\f\xbc\xe1\x00\x00\u07d4\xc2\xed_\xfd\u046d\xd8U\xa2i/\xe0b\xb5\xd6\x18t#`\u0509A\rXj 
\xa4\xc0\x00\x00\u07d4\xc2\xee\x91\xd3\xefX\xc9\u0465\x89\x84N\xa1\xae1%\xd6\u017ai\x894\x95tD\xb8@\xe8\x00\x00\u07d4\xc2\xfa\xfd\xd3\n\xcbmg\x06\xe9)<\xb0&A\xf9\xed\xbe\a\xb5\x89Q\x00\x86\vC\x0fH\x00\x00\u07d4\xc2\xfd\v\xf7\xc7%\xef>\x04~Z\xe1\u009f\xe1\x8f\x12\xa7)\x9c\x89Hz\x9a0E9D\x00\x00\u07d4\xc2\xfe}us\x1fcm\xcd\t\xdb\xda\x06q9;\xa0\xc8*}\x89wC\"\x17\xe6\x83`\x00\x00\u07d4\xc3\x10z\x9a\xf32-R8\xdf\x012A\x911b\x959W}\x89\x1a\xb4\xe4d\xd4\x141\x00\x00\xe0\x94\xc3\x11\v\xe0\x1d\xc9sL\xfcn\x1c\xe0\u007f\x87\xd7}\x13E\xb7\xe1\x8a\x01\x0f\f\xe9I\xe0\x0f\x93\x00\x00\u07d4\xc3 8\xcaR\xae\xe1\x97E\xbe\\1\xfc\xdcT\x14\x8b\xb2\xc4\u0409\x02\xb5\xaa\xd7,e \x00\x00\u07d4\xc3%\xc3R\x80\x1b\xa8\x83\xb3\"l_\xeb\r\xf9\xea\xe2\xd6\xe6S\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\xc3.\xc7\xe4*\xd1l\xe3\xe2UZ\xd4\xc5C\x06\xed\xa0\xb2gX\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc32\xdfP\xb1<\x014\x90\xa5\xd7\xc7]\xbf\xa3f\u0687\xb6\u0589\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc3:\u0373\xba\x1a\xab'P{\x86\xb1]g\xfa\xf9\x1e\xcfb\x93\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xc3>\u0393Z\x8fN\xf98\xea~\x1b\xac\x87\u02d2]\x84\x90\u028a\a\x03\x8c\x16x\x1fxH\x00\x00\u07d4\xc3@\xf9\xb9\x1c&r\x8c1\xd1!\xd5\xd6\xfc;\xb5m=\x86$\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc3F\xcb\x1f\xbc\xe2\xab(]\x8eT\x01\xf4-\xd7#M7\xe8m\x89\x04\x86\u02d7\x99\x19\x1e\x00\x00\xe0\x94\xc3H=n\x88\xac\x1fJ\xe7<\xc4@\x8dl\x03\xab\xe0\xe4\x9d\u028a\x03\x99\x92d\x8a#\u0220\x00\x00\xe0\x94\xc3H\xfcZF\x13#\xb5{\xe3\x03\u02c96\x1b\x99\x19\x13\xdf(\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xc3N;\xa12.\xd0W\x11\x83\xa2O\x94 N\xe4\x9c\x18fA\x89\x03'\xaf\uf927\xbc\x00\x00\xe0\x94\xc3[\x95\xa2\xa3s|\xb8\xf0\xf5\x96\xb3E$\x87+\xd3\r\xa24\x8a\x01\x98\xbe\x85#^-P\x00\x00\xe0\x94\xc3c\x1cv\x98\xb6\xc5\x11\x19\x89\xbfE''\xb3\xf99Zm\xea\x8a\x02C'X\x96d\x1d\xbe\x00\x00\u07d4\xc3l\vc\xbf\xd7\\/\x8e\xfb\x06\b\x83\xd8h\xcc\xcdl\xbd\xb4\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94\xc3uk\xcd\xcc~\xect\xed\x89j\xdf\xc35'Y0&n\b\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\u00c4\xacn\xe2|9\xe2\xf2x\xc2 \xbd\xfa[\xae\xd6&\xd9\u04c9 \x86\xac5\x10R`\x00\x00\u07d4\u00e0F\xe3\u04b2\xbfh\x14\x88\x82n2\xd9\xc0aQ\x8c\xfe\x8c\x89\x8c\xf2?\x90\x9c\x0f\xa0\x00\x00\u07d4\u00e9\"j\xe2u\xdf,\xab1+\x91\x10@cJ\x9c\x9c\x9e\xf6\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u00f9(\xa7o\xadex\xf0O\x05U\xe69R\xcd!\xd1R\n\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xc3\xc2)s)\xa6\xfd\x99\x11~T\xfcj\xf3y\xb4\xd5VT~\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xc3\xc3\xc2Q\rg\x80 HZcs]\x13\a\xecL\xa60+\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc3\xcbk6\xafD?,n%\x8bJ9U:\x81\x87G\x81\x1f\x89WG=\x05\u06ba\xe8\x00\x00\xe0\x94\xc3\xdbVW\xbbr\xf1\rX\xf21\xfd\xdf\x11\x98\n\xffg\x86\x93\x8a\x01@a\xb9\xd7z^\x98\x00\x00\xe0\x94\xc3\u06df\xb6\xf4lH\n\xf3De\u05d7S\xb4\xe2\xb7Jg\u038a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xc3\xddX\x908\x860;\x92\x86%%z\xe1\xa0\x13\xd7\x1a\xe2\x16\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc3\xe0G\x1cd\xff5\xfaR2\xcc1!\xd1\u04cd\x1a\x0f\xb7\u0789lk\x93[\x8b\xbd@\x00\x00\u07d4\xc3\xe2\f\x96\u07cdN8\xf5\v&Z\x98\xa9\x06\xd6\x1b\xc5\x1aq\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc3\u31f0<\xe9\\\xcf\xd7\xfaQ\u0744\x01\x83\xbcCS(\t\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xc3\xf8\xf6r\x95\xa5\xcd\x04\x93d\xd0]#P&#\xa3\xe5.\x84\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xc4\x01\xc4'\xcc\xcf\xf1\r\xec\xb8d /6\xf5\x80\x83\"\xa0\xa8\x89\xb4{Q\xa6\x9c\xd4\x02\x00\x00\u07d4\xc4\b\x8c\x02_>\x85\x01?T9\xfb4@\xa1s\x01\xe5D\xfe\x89~\t\xdbM\x9f?4\x00\x00\u07d4\xc4\x14a\xa3\u03fd2\u0246UU\xa4\x8117\xc0v1#`\x8965\xc6 G9\u0640\x00\u07d4\xc4 
8\x8f\xbe\xe8J\xd6V\xddh\xcd\xc1\xfb\xaa\x93\x92x\v4\x89\n-\xcac\xaa\xf4\u0140\x00\u07d4\xc4\"P\xb0\xfeB\xe6\xb7\xdc\xd5\u0210\xa6\xf0\u020f__\xb5t\x89\b\x1e\xe4\x82SY\x84\x00\x00\u07d4\xc4-j\xebq\x0e:P\xbf\xb4Ml1\t)i\xa1\x1a\xa7\xf3\x89\b\"c\xca\xfd\x8c\xea\x00\x00\xe0\x94\xc4@\xc7\xca/\x96Kir\xeffJ\"a\xdd\xe8\x92a\x9d\x9c\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xc4K\xde\xc8\xc3l\\h\xba\xa2\xdd\xf1\xd41i2)rlC\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xc4OJ\xb5\xbc`9|s~\xb0h3\x91\xb63\xf8\xa2G\x1b\x12\x1c\xa4\x89 .h\xf2\u00ae\xe4\x00\x00\u07d4\xc4h\x1es\xbb\x0e2\xf6\xb7& H1\xffi\xba\xa4\x87~2\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xc4k\xbd\xefv\xd4\xca`\xd3\x16\xc0\u007f]\x1ax\x0e;\x16_~\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc4}a\v9\x92P\xf7\x0e\xcf\x13\x89\xba\xb6),\x91&O#\x89\x0f\xa7\xe7\xb5\xdf<\xd0\x00\x00\u07d4\u0100;\xb4\a\xc7b\xf9\vu\x96\xe6\xfd\u1513\x1ev\x95\x90\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u0106Q\xc1\xd9\xc1k\xffL\x95T\x88l??&C\x1foh\x89#\xab\x95\x99\xc4?\b\x00\x00\u07d4\u0109\xc8?\xfb\xb0%*\xc0\xdb\xe3R\x12\x17c\x0e\x0fI\x1f\x14\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u010bi<\xac\xef\xdb\xd6\xcb]x\x95\xa4.1\x962~&\x1c\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u0113H\x9eV\u00fd\xd8)\x00}\xc2\xf9VA)\x06\xf7k\xfa\x89\x02\xa7\x91H\x8eqT\x00\x00\u07d4\u0116\u02f0E\x9aj\x01`\x0f\u0149\xa5Z2\xb4T!\u007f\x9d\x89\x0e\u0683\x8cI)\b\x00\x00\u07d4\u011c\xfa\xa9g\xf3\xaf\xbfU\x03\x10a\xfcL\xef\x88\xf8]\xa5\x84\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\u0136\xe5\xf0\x9c\xc1\xb9\r\xf0x\x03\xce=M\x13vj\x9cF\xf4\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\u013e\xc9c\b\xa2\x0f\x90\u02b1\x83\x99\u0113\xfd=\x06Z\xbfE\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\xe0\x94\xc4\xc0\x1a\xfc>\x0f\x04R!\xda\x12\x84\u05c7\x85tD/\xb9\xac\x8a\x01\x92\xb5\u0249\x02J\x19\xc1\xbdo\x12\x80\x00\xe0\x94\xc5\x00\xb7 sN\xd2)8\u05cc^H\xb2\xba\x93g\xa5u\xba\x8a\a\x12\x9e\x1c\xdf7>\xe0\x00\x00\u07d4\xc5\x0f\xe4\x15\xa6A\xb0\x85lNu\xbf\x96\x05\x15D\x1a\xfa5\x8d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc5\x13L\xfb\xb1\xdfz 
\xb0\xedpWb.\xee\u0480\x94}\xad\x89\xcd\xff\x97\xfa\xbc\xb4`\x00\x00\xe0\x94\xc5\x17\xd01\\\x87\x88\x13\xc7\x17\u132f\xa1\xea\xb2eN\x01\u068a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xc5\x18y\x9aY%Wb\x13\xe2\x18\x96\xe0S\x9a\xbb\x85\xb0Z\xe3\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc5\"\xe2\x0f\xbf\x04\xed\u007fk\x05\xa3{G\x18\xd6\xfc\xe0\x14.\x1a\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc5$\bmF\xc8\x11+\x12\x8b/\xafo|}\x81`\xa88l\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xc5-\x1a\fs\u00a1\xbe\x84\x91Q\x85\xf8\xb3O\xaa\n\xdf\x1d\xe3\x89K\xe4\xea\xb3\xfa\x0f\xa6\x80\x00\xe0\x94\xc55\x94\xc7\u03f2\xa0\x8f(L\xc9\u05e6;\xbd\xfc\v1\x972\x8a\nk#(\xff:b\xc0\x00\x00\u07d4\xc57I(\xcd\xf1\x93pTC\xb1L\xc2\r\xa4#G<\xd9\u03c9\a}\x10P\x9b\xb3\xaf\x80\x00\u07d4\xc58\xa0\xff(*\xaa_Ku\u03f6,p\x03~\xe6}O\xb5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc5;P\xfd;+r\xbclC\v\xaf\x19JQU\x85\u04d8m\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xc5=y\xf7\u02dbp\x95/\xd3\x0f\xceX\xd5K\x9f\vY\xf6G\x8a\x01\x13\xe2\xd6tCE\xf8\x00\x00\u07d4\xc5I\u07c3\xc6\xf6^\xec\x0f\x1d\u0260\x93J\\_:P\xfd\x88\x89\x9d\xc0\\\xce(\u00b8\x00\x00\u07d4\xc5P\x05\xa6\xc3~\x8c\xa7\xe5C\xce%\x99s\xa3\xca\u0396\x1aJ\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc5U\xb91V\xf0\x91\x01#\x80\x00\xe0\x94\u0166)\xa3\x96%R\u02ce\xde\u0609cj\xaf\xbd\f\x18\xcee\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u016e\x86\xb0\xc6\xc7\xe3\x90\x0f\x13h\x10\\VS\u007f\xaf\x8dt>\x89\n1\x06+\xee\xedp\x00\x00\u07d4\u0170\t\xba\xea\xf7\x88\xa2v\xbd5\x81:\xd6[@\v\x84\x9f;\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u0175l\xd24&|(\xe8\x9cok\"f\xb0\x86\xa1/\x97\f\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xc5\u01a4\x99\x8a3\xfe\xb7dCz\x8b\xe9)\xa7;\xa3J\ad\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\xe0\x94\xc5\xc7=a\xcc\xe7\xc8\xfeL\x8f\xce)\xf3\x90\x92\xcd\x19>\x0f\xff\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xc5\xc7Y\vV!\xec\xf85\x85\x88\u079bh\x90\xf2baC\U000498a1]\tQ\x9b\xe0\x00\x00\u07d4\xc5\xcd\xce\xe0\xe8]\x11}\xab\xbfSj?@i\xbfD?T\xe7\x89j\xc5\xc6-\x94\x86\a\x00\x00\u07d4\xc5\u050c\xa2\xdb/\x85\xd8\xc5U\xcb\x0e\x9c\xfe\x82i6x?\x9e\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xc5\xde\x12\x03\xd3\xcc,\xea1\xc8.\xe2\xdeY\x16\x88\a\x99\xea\xfd\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xc5\xe4\x88\xcf+Vw\x939q\xf6L\xb8 -\xd0WR\xa2\xc0\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc5\xe8\x12\xf7o\x15\xf2\xe1\xf2\xf9\xbcH#H<\x88\x04cog\x89\x03\xf5\x14\x19:\xbb\x84\x00\x00\u07d4\xc5\u94d34\xf1%.\u04ba&\x81D\x87\xdf\u0498+1(\x89\x03\xcbq\xf5\x1f\xc5X\x00\x00\u07d4\xc5\xebB)^\x9c\xad\xea\xf2\xaf\x12\xde\u078a\x8dS\xc5y\xc4i\x89\xcf\x15&@\xc5\xc80\x00\x00\xe0\x94\xc5\xed\xbb\xd2\xca\x03WeJ\xd0\xeaG\x93\xf8\xc5\xce\xcd0\xe2T\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xc5\xf6K\xab\xb7\x031B\xf2\x0eF\u05eab\x01\xed\x86\xf6q\x03\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc5\xf6\x87qrF\u068a \r \xe5\u9f2c`\xb6\u007f8a\x89\x01\x8d\x99?4\xae\xf1\x00\x00\u07d4\xc6\x04[<5\vL\xe9\xca\fkuO\xb4\x1ai\xb9~\x99\x00\x892$\xf4'#\xd4T\x00\x00\u07d4\xc6\v\x04eN\x00;F\x83\x04\x1f\x1c\xbdk\u00cf\xda|\xdb\u0589lk\x93[\x8b\xbd@\x00\x00\u07d4\xc6\x14F\xb7T\xc2N;\x16B\xd9\xe5\x17e\xb4\xd3\xe4k4\xb6\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc6\x18R\x13!\xab\xaf[&Q:J\x95(\bo\"\n\xdco\x89\x01v\xb3D\xf2\xa7\x8c\x00\x00\u07d4\xc6#FW\xa8\a8A&\xf8\x96\x8c\xa1p\x8b\xb0{\xaaI<\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc6%\xf8\u024d'\xa0\x9a\x1b\u02bdQ(\xb1\u00a9HV\xaf0\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94\xc65^\xc4v\x8cp\xa4\x9a\xf6\x95\x13\u0343\xa5\xbc\xa7\xe3\xb9\u034a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xc6:\xc4\x17\x99.\x9f\x9b`8n\xd9S\xe6\xd7\xdf\xf2\xb0\x90\xe8\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4\xc6<\u05c8!\x18\xb8\xa9\x1e\aML\x8fK\xa9\x18Q0;\x9a\x89\x0e\x189\x8ev\x01\x90\x00\x00\u07d4\xc6R\x87\x1d\x19$\"\u01bc#_\xa0c\xb4J~\x1dC\u3149\bg\x0e\x9e\xc6Y\x8c\x00\x00\xe0\x94\xc6gD\x1e\u007f)y\x9a\xbaadQ\xd5;?H\x9f\x9e\x0fH\x8a\x02\xf2\x9a\xceh\xad\u0740\x00\x00\u07d4\xc6j\xe4\xce\xe8\u007f\xb352\x19\xf7\u007f\x1dd\x86\u0140(\x032\x89\x01\x9a\x16\xb0o\xf8\xcb\x00\x00\u07d4\xc6t\xf2\x8c\x8a\xfd\a?\x8by\x96\x91\xb2\xf0XM\xf9B\xe8D\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\u0197\xb7\x04w\u02b4.+\x8b&f\x81\xf4\xaesu\xbb%A\x8a\x01.W2\xba\xba\\\x98\x00\x00\u07d4\u019b\x85U9\xce\x1b\x04qG(\xee\xc2Z7\xf3g\x95\x1d\xe7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u019b\xe4@\x13Mb\x80\x98\x01D\xa9\xf6M\x84t\x8a7\xf3I\x89&\u009eG\u0104L\x00\x00\u07d4\u019df<\x8d`\x90\x83\x91\xc8\xd26\x19\x153\xfd\xf7wV\x13\x89\x1aJ\xba\"\\ t\x00\x00\u0794\u01a2\x86\xe0e\xc8_:\xf7H\x12\xed\x8b\u04e8\xce]%\xe2\x1d\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\u01a3\x0e\xf5\xbb3 \xf4\r\xc5\xe9\x81#\rR\xae:\xc1\x93\"\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4\u01ae(}\xdb\xe1\x14\x9b\xa1m\xdc\xcaO\xe0j\xa2\uaa48\xa9\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xc6\xc7\xc1\x917\x98\x97\u075c\x9d\x9a3\x83\x9cJ_b\xc0\x89\r\x89\xd8\xd8T\xb2$0h\x80\x00\xe0\x94\xc6\xcdh\xec56,Z\xd8L\x82\xadN\xdc#!%\x91-\x99\x8a\x05\xe0T\x9c\x962\xe1\xd8\x00\x00\u07d4\xc6\u0615N\x8f?\xc53\xd2\xd20\xff\x02\\\xb4\xdc\xe1O4&\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xc6\xdb\u06de\xfd^\xc1\xb3xn\x06q\xeb\"y\xb2S\xf2\x15\xed\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc6\xdf u\xeb\xd2@\xd4Hi\u00bek\u07c2\xe6=N\xf1\xf5\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc6\xe2\xf5\xaf\x97\x9a\x03\xfdr:\x1bn\xfar\x83\x18\u03dc\x18\x00\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\xc6\xe3$\xbe\xeb[6v^\xcdFB`\xf7\xf2`\x06\xc5\xc6.\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc6\xe4\xcc\fr\x83\xfc\x1c\x85\xbcH\x13\xef\xfa\xafr\xb4\x98#\xc0\x89\x0f\x03\x1e\xc9\xc8}\xd3\x00\x00\xe0\x94\xc6\xee5\x93B)i5)\xdcA\u067bq\xa2IfX\xb8\x8e\x8a\x04+\xf0kx\xed;P\x00\x00\u07d4\xc6\xfb\x1e\xe3t\x17\u0400\xa0\xd0H\x92;\u06ba\xb0\x95\xd0w\u0189\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xc7\x05'\xd4D\u0110\xe9\xfc?\\\xc4Nf\xebO0k8\x0f\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc7\r\x85mb\x1e\xc1E0<\nd\x00\xcd\x17\xbb\xd6\xf5\xea\xf7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xc7\x0f\xa4Uv\xbf\x9c\x86_\x988\x93\x00,AI&\xf6\x10)\x89\x15\xb4\xaa\x8e\x97\x02h\x00\x00\u07d4\xc7\x11E\xe5)\u01e7\x14\xe6y\x03\xeeb\x06\xe4\xc3\x04+g'\x89M\x85<\x8f\x89\b\x98\x00\x00\u07d4\xc7\x1b*=q5\u04a8_\xb5\xa5q\u073ei^\x13\xfcC\u034965\u026d\xc5\u07a0\x00\x00\u07d4\xc7\x1f\x1du\x87?3\u0732\xddK9\x87\xa1-\a\x91\xa5\xce'\x897\b\xba\xed=h\x90\x00\x00\u07d4\xc7\x1f\x92\xa3\xa5J{\x8c/^\xa4C\x05\xfc\u02c4\xee\xe21H\x89\x02\xb5\x9c\xa11\xd2\x06\x00\x00\u07d4\xc7!\xb2\xa7\xaaD\xc2\x12\x98\xe8P9\xd0\x0e.F\x0eg\v\x9c\x89\a\xa1\xfe\x16\x02w\x00\x00\x00\u07d4\xc7,\xb3\x01%\x8e\x91\xbc\b\x99\x8a\x80]\u0452\xf2\\/\x9a5\x89 \t\xc5\u023fo\xdc\x00\x00\xe0\x94\xc76\x8b\x97\t\xa5\xc1\xb5\x1c\n\xdf\x18ze\xdf\x14\xe1+}\xba\x8a\x02\x02o\xc7\u007f\x03\u5b80\x00\u07d4\xc79%\x9e\u007f\x85\xf2e\x9b\xef_`\x9e\xd8k=Yl \x1e\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94\xc7>!\x12(\"\x15\xdc\ab\xf3+~\x80}\xcd\x1az\xae>\x8a\x01v\f\xbcb;\xb3P\x00\x00\xe0\x94\xc7If\x80B\xe7\x11#\xa6H\x97^\b\xedc\x82\xf8>\x05\xe2\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4\xc7J9\x95\xf8\a\xde\x1d\xb0\x1a.\xb9\xc6.\x97\xd0T\x8fio\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc7Pl\x10\x19\x12\x1f\xf0\x8a,\x8c\x15\x91\xa6^\xb4\xbd\xfbJ?\x89 \x86\xac5\x10R`\x00\x00\u07d4\xc7\\7\xce-\xa0k\xbc@\b\x11Y\u01ba\x0f\x97n9\x93\xb1\x89:y#\x15\x1e\xcfX\x00\x00\u07d4\xc7]\"Y0j\xec}\xf0\"v\x8ci\x89\x9ae!\x85\xdb\u0109\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc7`\x97\x1b\xbc\x18\x1cj|\xf7tA\xf2BG\u045c\xe9\xb4\u03c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xc7a0\xc7<\xb9!\x028\x02\\\x9d\xf9]\v\xe5J\xc6\u007f\xbe\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4\xc7e\xe0\x04v\x81\tG\x81j\xf1B\xd4m.\u7f28\xccO\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xc7g^VG\xb9\xd8\xda\xf4\xd3\xdf\xf1\xe5R\xf6\xb0qT\xac8\x89\t\xc2\x00vQ\xb2P\x00\x00\u07d4\xc7{\x01\xa6\xe9\x11\xfa\x98\x8d\x01\xa3\xab3dk\xee\xf9\xc18\xf3\x89'\x1bo\xa5\xdb\xe6\xcc\x00\x00\u07d4\u01c3z\u0420\xbf\x14\x18i7\xac\xe0lUF\xa3j\xa5OF\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u01d8\x06\x03+\xc7\xd8(\xf1\x9a\u01a6@\u018e=\x82\x0f\xa4B\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u01d9\xe3N\x88\xff\x88\xbe}\xe2\x8e\x15\xe4\xf2\xa6=\v3\xc4\u02c9\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\u01ddPb\u01d6\xddwa\xf1\xf1>U\x8ds\xa5\x9f\x82\xf3\x8b\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\u01e0\x18\xf0\x96\x8aQ\xd1\xf6`<\\I\xdcT[\xcb\x0f\xf2\x93\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u01ef\xf9\x19)yt\x89UZ/\xf1\xd1M\\iZ\x10\x83U\x8965\u026d\xc5\u07a0\x00\x00\u0794\u01f1\xc8>c ?\x95G&>\xf6(.}\xa3;n\xd6Y\x88\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x94\u01f3\x9b\x06\x04Q\x00\f\xa1\x04\x9b\xa1T\xbc\xfa\x00\xff\x8a\xf2b\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\u01ff\x17\xc4\xc1\x1f\x98\x94\x1fP~w\bO\xff\xbd-\xbd=\xb5\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\u01ff.\xd1\xed1)@\xeej\xde\xd1Qn&\x8eJ`HV\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xc7\xd4O\xe3,\u007f\x8c\xd5\xf1\xa9t'\xb6\xcd:\xfc\x9eE\x02>\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xc7\xd5\xc7\x05@\x81\xe9\x18\xech{Z\xb3n\x97=\x18\x13)5\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4\xc7\xde^\x8e\xaf\xb5\xf6+\x1a\n\xf2\x19\\\xf7\x93\u01c9L\x92h\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xc7\xe30\xcd\f\x89\n\u025f\xe7q\xfc\xc7\xe7\xb0\t\xb7A=\x8a\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc7\xea\xc3\x1a\xbc\xe6\xd5\xf1\u07a4\"\x02\xb6\xa6t\x15=\xb4z)\x89 \t\xc5\u023fo\xdc\x00\x00\xe0\x94\xc7\xecb\xb8\x04\xb1\xf6\x9b\x1e0p\xb5\xd3b\xc6/\xb3\t\xb0p\x8a\x02\xc4k\xf5A`f\x11\x00\x00\u07d4\xc7\xf7+\xb7X\x01k7G\x14\u0509\x9b\xce\"\xb4\xae\xc7\n1\x89:&\xc9G\x8f^-\x00\x00\u0794\xc8\v6\u047e\xaf\xba_\xccdM`\xacnF\xed)'\xe7\u0708\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4\xc8\x11\xc2\xe9\xaa\x1a\xc3F.\xba^\x88\xfc\xb5\x12\x0e\x9fn,\xa2\x89K\xe6\u0607\xbd\x87n\x00\x00\u07d4\xc8\x17\xdf\x1b\x91\xfa\xf3\x0f\xe3%\x15qr|\x97\x11\xb4]\x8f\x06\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xc8\x1f\xb7\xd2\x0f\u0480\x01\x92\xf0\xaa\xc1\x98\xd6\u05a3}?\xcb}\x89\x0e\x11I3\x1c-\xde\x00\x00\u07d4\xc8 
\xc7\x11\xf0w\x05'8\a\xaa\xaam\xe4M\x0eKH\xbe.\x89\bg\x0e\x9e\xc6Y\x8c\x00\x00\u07d4\xc8#\x1b\xa5\xa4\x11\xa1>\"+)\xbf\xc1\b?v1X\xf2&\x8967\tlK\xcci\x00\x00\u07d4\xc86\xe2Jo\xcf)\x94;6\b\xe6b)\n!_e)\xea\x89\x0f\xd4Pd\xea\xee\x10\x00\x00\xe0\x94\xc8;\xa6\u0755I\xbe\x1d2\x87\xa5\xa6T\xd1\x06\xc3Lk]\xa2\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4\xc8>\x9djX%;\uefb7\x93\xe6\xf2\x8b\x05JXI\x1bt\x89\x0fF\u00b6\xf5\xa9\x14\x00\x00\u07d4\xc8A\x88O\xa4x_\xb7s\xb2\x8e\x97\x15\xfa\xe9\x9aQ40]\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc8M\x9b\xea\n{\x9f\x14\x02 \xfd\x8b\x90\x97\u03ff\xd5\xed\xf5d\x89\x06\xab\x9e\u0091\xad}\x80\x00\u07d4\xc8RB\x8d+Xd\x97\xac\xd3\fV\xaa\x13\xfbU\x82\xf8D\x02\x893B\xd6\r\xff\x19`\x00\x00\u07d4\xc8S![\x9b\x9f-,\xd0t\x1eX^\x98{_\xb8\f!.\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\xc8S%\uaca5\x9b>\xd8c\xc8j_)\x06\xa0B)\xff\xa9\x89\x19=\u007f}%=\xe0\x00\x00\u07d4\xc8^\xf2}\x82\x04\x03\x80_\xc9\xed%\x9f\xffd\xac\xb8\xd64j\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc8akN\xc0\x91(\xcd\xff9\xd6\u4e6c\x86\xee\xc4q\xd5\xf2\x89\x01\r:\xa56\xe2\x94\x00\x00\xe0\x94\xc8a\x90\x90K\x8d\a\x9e\xc0\x10\xe4b\xcb\xff\xc9\b4\xff\xaa\\\x8a\x02#\x85\xa8'\xe8\x15P\x00\x00\u07d4\xc8q\r~\x8bZ;\u059aB\xfe\x0f\xa8\xb8|5\u007f\xdd\xcd\u0209\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc8sR\u06e5\x82\xee f\xb9\xc0\x02\xa9b\xe0\x03\x13Ox\xb1\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\xc8|w\xe3\xc2J\xde\xcd\xcd\x108\xa3\x8bV\xe1\x8d\xea\u04f7\x02\x8a\x01\xdd\f\x88_\x9a\r\x80\x00\x00\u07d4\xc8}:\xe3\u0607\x04\u066b\x00\t\xdc\xc1\xa0\x06q1\xf8\xba<\x89j\xc5\xc6-\x94\x86\a\x00\x00\xe0\x94\u0201N4R>8\xe1\xf9'\xa7\xdc\xe8FjDz\t6\x03\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u0202U\xed\xdc\xf5!\xc6\xf8\x1d\x97\xf5\xa4!\x81\xc9\a=N\xf1\x89\x0f\u00d0D\xd0\n*\x80\x00\u07d4\u0205\xa1\x8a\xab\xf4T\x1b{{~\xcd0\xf6\xfa\u619d\x95i\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u020c\xa1\xe6\xe5\xf4\xd5X\xd17\x80\xf4\x88\xf1\rJ\xd3\x13\r4\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\u020e\xecT\xd3\x05\xc9(\xcc(H\xc2\xfe\xe251\xac\xb9mI\x89lj\u04c2\xd4\xfba\x00\x00\xe0\x94\u021c\xf5\x04\xb9\xf3\xf85\x18\x1f\xd8BO\\\xcb\xc8\xe1\xbd\xdf}\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u0222\xc4\xe5\x9e\x1c\u007f\xc5H\x05X\x048\xae\xd3\xe4J\xfd\xf0\x0e\x89\x02b\x9ff\xe0\xc50\x00\x00\u07d4\u022aI\u301f\b\x99\xf2\x8a\xb5~gCp\x9dXA\x903\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4\u022b\x1a<\xf4l\xb8\xb0d\xdf.\"-9`s\x94 2w\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0231\x85\x05%\xd9F\xf2\xae\x84\xf3\x17\xb1Q\x88\xc56\xa5\u0706\x89\x91\x8d\xdc:B\xa3\xd4\x00\x00\u07d4\xc8\xd4\xe1Y\x9d\x03\xb7\x98\t\xe0\x13\n\x8d\u00c4\b\xf0^\x8c\u04c9\x9f\xad\x06$\x12y\x16\x00\x00\u07d4\xc8\xdd'\xf1k\xf2$P\xf5w\x1b\x9f\xe4\xedO\xfc\xb3\t6\xf4\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xc8\xdezVL\u007f@\x12\xa6\xf6\xd1\x0f\u040fG\x89\x0f\xbf\a\u0509\x10CV\x1a\x88)0\x00\x00\u07d4\xc8\xe2\xad\xebT^I\x9d\x98,\f\x11sc\u03b4\x89\u0171\x1f\x895e\x9e\xf9?\x0f\xc4\x00\x00\xe0\x94\xc8\xe5X\xa3\xc5i~o\xb2:%\x94\u0200\xb7\xa1\xb6\x8f\x98`\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xc8\xf2\xb3 \xe6\xdf\xd7\t\x06\u0157\xba\xd2\xf9P\x13\x12\u01c2Y\x89Q\x93K\x8b:W\xd0\x00\x00\u07d4\xc9\x03\x00\xcb\x1d@w\xe6\xa6\xd7\xe1i\xa4`F\x8c\xf4\xa4\x92\u05c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xc9\f7e\x15k\u028eH\x97\xab\x80$\x19\x15<\xbeR%\xa9\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xc9\x10\xa9pUl\x97\x16\xeaS\xaff\xdd\xef\x93\x141$\x91=\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\xe0\x94\xc9\x12{\u007ff)\xee\x13\xfc?`\xbc/Dg\xa2\aE\xa7b\x8a\x03|\x9a\xa4\xe7\xceB\x1d\x80\x00\u07d4\xc9\x1b\xb5b\xe4+\xd4a0\xe2\u04eeFR\xb6\xa4\ub1bc\x0f\x89\x1dF\x01b\xf5\x16\xf0\x00\x00\xe0\x94\xc90\x88y\x05m\xfe\x13\x8e\xf8 \x8fy\xa9\x15\u01bc~p\xa8\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\xc94\xbe\xca\xf7\x1f\"_\x8bJK\xf7\xb1\x97\xf4\xac\x9604\\\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xc9?\xbd\xe8\xd4m+\xcc\x0f\xa9\xb3;\u063a\u007f\x80B\x12Ue\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xc9@\x89U:\xe4\xc2,\xa0\x9f\xbc\x98\xf5pu\xcf.\u0155\x04\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xc9A\x10\xe7\x1a\xfeW\x8a\xa2\x18\xe4\xfc(d\x03\xb03\n\u038d\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xc9F\u056c\xc14n\xba\nry\xa0\xac\x1dF\\\x99m\x82~\x8a\x03x=T_\xdf\n\xa4\x00\x00\u07d4\xc9J(\xfb20\xa9\xdd\xfa\x96Nw\x0f,\xe3\xc2S\xa7\xbeO\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xc9JXR\x03\xda{\xba\xfd\x93\xe1X\x84\xe6`\u0531\xea\xd8T\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4\xc9O|5\xc0'\xd4}\xf8\xefO\x9d\xf8Z\x92H\xa1}\xd2;\x89\x01\x9f\x8euY\x92L\x00\x00\u07d4\xc9Q\x90\f4\x1a\xbb\xb3\xba\xfb\xf7\xee )7pq\xdb\xc3j\x89\x11\xc2]\x00M\x01\xf8\x00\x00\u07d4\xc9S\xf94\xc0\xeb-\x0f\x14K\u06b0\x04\x83\xfd\x81\x94\x86\\\xe7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc9f&r\x8a\xaaLO\xb3\xd3\x1c&\xdf:\xf3\x10\b\x17\x10\u0449\xb5\x0f\u03ef\xeb\xec\xb0\x00\x00\u07d4\xc9gQel\n\x8e\xf45{sD2!4\xb9\x83PJ\u0289lk\x93[\x8b\xbd@\x00\x00\u07d4\u0240Hh\u007f+\xfc\u027d\x90\xed\x18slW\xed\xd3R\xb6]\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u0241\xd3\x12\u0487\xd5X\x87\x1e\u0757:\xbbv\xb9y\xe5\xc3^\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\u0242Xmc\xb0\xd7L \x1b\x1a\xf8A\x83r\xe3\fv\x16\xbe\x89\x05k\xc7^-c\x10\x00\x00\u07d4\u0249CO\x82Z\xaf\x9cU/h^\xba|\x11\xdbJ_\xc7:\x89\x1b(\u014d\x96\x96\xb4\x00\x00\u07d4\u0249\xee\xc3\a\u80db\x9dr7\xcf\xda\b\x82)b\xab\u41c9\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\u0252\xbeY\xc6r\x1c\xafN\x02\x8f\x9e\x8f\x05\xc2\\UQ[\u0509\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u0255{\xa9L\x1b)\xe5'~\xc3f\"pI\x04\xc6=\xc0#\x89h>\xfcg\x82d,\x00\x00\xe0\x94\u025a\x9c\xd6\xc9\xc1\xbe54\xee\u0352\xec\xc2/\\8\xe9Q[\x8a\x01\x05Y;:\x16\x9dw\x00\x00\xe0\x94\u026c\x01\xc3\xfb\t)\x03?\f\xcc~\x1a\xcf\uaae7\x94]G\x8a\x02\xa3j\x9e\x9c\xa4\xd2\x03\x80\x00\u07d4\u0276\x98\xe8\x98\xd2\rMO@\x8eNM\x06\x19\"\xaa\x85c\a\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\u0276\xb6\x86\x11\x16\x91\xeej\xa1\x97\xc7#\x1a\x88\xdc`\xbd)]\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xc9\u01ec\v\u0753B\xb5\xea\xd46\t#\xf6\x8cr\xa6\xbac:\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xc9\xc8\r\xc1.{\xab\x86\xe9I\xd0\x1eL>\xd3_+\x9b\xba_\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xc9\xd7dF\u056a\xdf\xf8\vh\xb9\x1b\b\u035b\xc8\xf5U\x1a\xc1\x89&\xb4\xbd\x91\x10\xdc\xe8\x00\x00\xe0\x94\xc9\u073b\x05oM\xb7\xd9\xda9\x93b\x02\u017d\x820\xb3\xb4w\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xc9\xe0&\b\x06h(\x84\x8a\xeb(\xc76r\xa1)%\x18\x1fM\x89\x1b\x1bk\u05efd\xc7\x00\x00\u07d4\xca\x042\xcb\x15{Qy\xf0.\xbb\xa5\xc9\u0475O\xecM\x88\u028965\u026d\xc5\u07a0\x00\x00\u07d4\xca\x12,\xf0\U00094216\xb7HC\xf4\x9a\xfe\u043a\x16\x18\xee\u05c9\x1e[\x8f\xa8\xfe*\xc0\x00\x00\xe0\x94\xca\"\u0363`m\xa5\xca\xd0\x13\xb8\aG\x06\xd7\xe9\xe7!\xa5\f\x8a\x01q\x81\xc6\xfa9\x81\x94\x00\x00\u07d4\xca#\xf6-\xff\rd`\x03lb\xe8@\xae\xc5W~\v\xef\u0489\a\xa1\xfe\x16\x02w\x00\x00\x00\u07d4\xca%\xff4\x93L\x19B\xe2*N{\xd5o\x14\x02\x1a\x1a\xf0\x88\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xca7?\xe3\xc9\x06\xb8\xc6U\x9e\xe4\x9c\xcd\a\xf3|\xd4\xfbRf\x89a\t
=|,m8\x00\x00\u07d4\xcaA\u032c0\x17 R\xd5\"\xcd//\x95}$\x81S@\x9f\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xcaB\x88\x01N\xdd\xc5c/_\xac\xb5\xe3\x85\x17\xa8\xf8\xbc]\x98\x89\x12nr\xa6\x9aP\xd0\x00\x00\u07d4\xcaB\x88c\xa5\xca06\x98\x92\xd6\x12\x18>\xf9\xfb\x1a\x04\xbc\xea\x89Rf<\u02b1\xe1\xc0\x00\x00\u07d4\xcaI\xa5\xf5\x8a\xdb\xef\xae#\xeeY\xee\xa2A\xcf\x04\x82b.\xaa\x89M\x85<\x8f\x89\b\x98\x00\x00\u07d4\xcaL\xa9\xe4w\x9dS\x0e\u02ec\xd4~j\x80X\xcf\xdee\u064f\x89+^:\xf1k\x18\x80\x00\x00\u07d4\xcae~\xc0o\xe5\xbc\t\xcf#\xe5*\xf7\xf8\f\xc3h\x9en\u07890\xca\x02O\x98{\x90\x00\x00\u07d4\xcaf\xb2(\x0f\xa2\x82\u0176v1\xceU+b\xeeU\xad\x84t\x89j\xc4\"\xf54\x92\x88\x00\x00\xe0\x94\xcal\x81\x8b\xef\xd2Q6\x1e\x02t@h\xbe\x99\u062a`\xb8J\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xcap\xf4\u077f\x06\x9d!C\xbdk\xbc\u007fikRx\x9b2\u7262\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94\xcatuvDjL\x8f0\xb0\x83@\xfe\xe1\x98\xdec\xec\x92\u03ca\x01|\x8e\x12\x06r*0\x00\x00\u07d4\xca{\xa3\xffSl~_\x0e\x158\x00\xbd8=\xb81)\x98\xe0\x89\t1\xac=k\xb2@\x00\x00\xe0\x94\u0282v\xc4w\xb4\xa0{\x80\x10{\x845\x94\x18\x96\a\xb5;\xec\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\u0284\t\b>\x01\xb3\x97\xcf\x12\x92\x8a\x05\xb6\x84U\xceb\x01\u07c9V\xbcu\xe2\xd61\x00\x00\x00\u07d4\u0298\u01d8\x8e\xfa\b\xe9%\uf719ER\x03&\xe9\xf4;\x99\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u029a\x04*j\x80o\xfc\x92\x17\x95\x00\xd2D)\xe8\xabR\x81\x17\x89;\xa1\x91\v\xf3A\xb0\x00\x00\u07d4\u029d\xec\x02\x84\x1a\xdf\\\xc9 WjQ\x87\xed\u04bdCJ\x18\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\u029f\xaa\x17T/\xaf\xbb8\x8e\xab!\xbcL\x94\u89f3G\x88\x89lk\x8f\xce\r\x18y\x80\x00\xe0\x94\u02aah\xeel\xdf\r4EJv\x9b\r\xa1H\xa1\xfa\xaa\x18e\x8a\x01\x87.\x1d\xe7\xfeR\xc0\x00\x00\u07d4\u02ad\x9d\xc2\rX\x9c\xe4(\xd8\xfd\xa3\xa9\xd5:`{y\x88\xb5\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\u02b0\xd3,\xf3v\u007f\xa6\xb3S|\x842\x8b\xaa\x9fPE\x816\x8a\x01\xe5\xb8\xfa\x8f\xe2\xac\x00\x00\x00\u07d4\u02b9\xa3\x01\xe6\xbdF\xe9@5P(\xec\xcd@\xceMZ\x1a\u00c9\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\u02b9\xa9z\xda\x06\\\x87\x81nh`\xa8\xf1Bo\xe6\xb3\xd7u\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\u02ba\xb6'N\xd1P\x89s~({\xe8x\xb7W\x93Hd\xe2\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\u02bd\xaf5OG \xa4f\xa7d\xa5(\xd6\x0e:H*9<\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xca\xcbg^\t\x96#T\x04\ufbfb.\u02c1R'\x1bU\xe0\x89%\xf2s\x93=\xb5p\x00\x00\u07d4\xca\xd1O\x9e\xbb\xa7f\x80\xeb\x83k\a\x9c\u007f{\xaa\xf4\x81\xedm\x89\f\xef={\xd7\xd04\x00\x00\xe0\x94\xca\xe3\xa2S\xbc\xb2\xcfN\x13\xba\x80\u0098\xab\x04\x02\xda|*\xa0\x8a\x01$\xbc\r\u0752\xe5`\x00\x00\u07d4\xca\xef\x02{\x1a\xb5\x04\xc7?A\xf2\xa1\ty\xb4t\xf9~0\x9f\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xca\xf4H\x1d\x9d\xb7\x8d\xc4\xf2_{J\u023d;\x1c\xa0\x10k1\x8a\x01\x0f\f\xf0d\xddY \x00\x00\xe0\x94\xca\xfd\xe8U\x86L%\x98\xda<\xaf\xc0Z\u064d\U00089380H\x8a\x03\x00\xa8\xed\x96\xffJ\x94\x00\x00\xe0\x94\xcb\r\xd7\xcfN]\x86a\xf6\x02\x89C\xa4\xb9\xb7\\\x91D6\xa7\x8a\x19i6\x89t\xc0[\x00\x00\x00\u07d4\xcb\x1b\xb6\xf1\xda^\xb1\rH\x99\xf7\xe6\x1d\x06\xc1\xb0\x0f\u07f5-\x898E$\xccp\xb7x\x00\x00\u07d4\xcb=vl\x98?\x19+\xce\xca\xc7\x0fN\xe0=\xd9\xffqMQ\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xcbB\xb4N\xb5\xfd`\xb5\x83~O\x9e\xb4rgR=\x1a\"\x9c\x89.\xe4IU\b\x98\xe4\x00\x00\u07d4\xcbG\xbd0\u03e8\xecTh\xaa\xa6\xa9FB\xce\xd9\xc8\x19\xc8\u0509\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xcbH\xfe\x82e\u066fU\xebp\x06\xbc3VE\xb0\xa3\xa1\x83\xbe\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xcbJ\x91M+\xb0)\xf3._\xef\\#LO\xec--\xd5w\x89a\x94\x04\x9f0\xf7 
\x00\x00\xe0\x94\xcbJ\xbf\u0082\xae\xd7n]W\xaf\xfd\xa5B\xc1\xf3\x82\xfc\xac\xf4\x8a\x01\xb9\x0f\x11\xc3\x18?\xaa\x00\x00\u07d4\xcbJ\xd0\xc7#\xdaF\xabV\xd5&\xda\f\x1d%\xc7=\xaf\xf1\n\x89\x1b\xa5\xab\xf9\xe7y8\x00\x00\u07d4\xcbK\xb1\xc6#\xba(\xdcB\xbd\xaa\xa6\xe7N\x1d*\xa1%l*\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xcbPXt\x12\x82#\x04\xeb\u02e0}\xab:\x0f\t\xff\xfe\u4189JD\x91\xbdm\xcd(\x00\x00\u07d4\xcbX\x99\v\u0350\u03ffm\x8f\t\x86\xf6\xfa`\x02v\xb9N-\x8964\xbf9\xab\x98x\x80\x00\u07d4\xcbh\xaeZ\xbe\x02\xdc\xf8\xcb\u016aq\x9c%\x81FQ\xaf\x8b\x85\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xcbty\x10\x9bC\xb2fW\xf4F_M\x18\xc6\xf9t\xbe_B\x89b\xa9\x92\xe5:\n\xf0\x00\x00\xe0\x94\xcb}+\x80\x89\xe91,\u026e\xaa's\xf3S\b\xecl*{\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u02c6\xed\xbc\x8b\xbb\x1f\x911\x02+\xe6IV^\xbd\xb0\x9e2\xa1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u02d3\x19\x9b\x9c\x90\xbcI\x15\xbd\x85\x9e=B\x86m\xc8\xc1\x87I\x89\f\x90\xdf\a\xde\xf7\x8c\x00\x00\u07d4\u02d4\xe7o\xeb\xe2\b\x11g3\xe7n\x80]H\xd1\x12\xec\x9f\u028965\u026d\xc5\u07a0\x00\x00\u07d4\u02dbQ\x03\xe4\u0389\xafOd\x91aP\xbf\xf9\xee\u02df\xaa\\\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\u02e2\\zP<\xc8\xe0\xd0Iq\xca\x05\xc7b\xf9\xb7b\xb4\x8b\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\u02e2\x88\xcd<\x1e\xb4\u055d\xdb\x06\xa6B\x1c\x14\xc3E\xa4{$\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u02f3\x18\x9eK\xd7\xf4_\x17\x8b\x1c0\xc7n&1MJK\n\x89\x0f\xfe\vg|e\xa9\x80\x00\xe0\x94\u02f7\xbe\x17\x95?,\u0313\u1f19\x80[\xf4U\x11CNL\x8a\n\xae[\x9d\xf5m/ \x00\x00\xe0\x94\xcb\xc0KM\x8b\x82\xca\xf6p\x99o\x16\f6)@\xd6o\xcf\x1a\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xcb\u07974\xb8\xe6\xaaS\x8c)\x1dm\u007f\xac\xed\xb0\xf38\xf8W\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xcb\xe1\xb9H\x86M\x84t\xe7e\x14XX\xfc\xa4U\x0fxK\x92\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xcb\xe5/\xc53\xd7\xdd`\x8c\x92\xa2`\xb3|?E\u07b4\xeb3\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xcb\xe8\x10\xfe\x0f\xec\xc9dGJ\x1d\xb9w(\xbc\x87\xe9s\xfc\xbd\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xcb\xf1j\x0f\xe2tRX\xcdR\xdb+\xf2\x19T\xc9u\xfcj\x15\x89\x10CV\x1a\x88)0\x00\x00\xe0\x94\xcb\xf3\u007f\xf8T\xa2\xf1\xceS\x93D\x94wx\x92\xd3\xeceW\x82\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xcb\xfaj\xf6\u0083\xb0F\xe2w,`c\xb0\xb2\x15S\xc4\x01\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcb\xfav\xdb\x04\xce8\xfb ]7\xb8\xd3w\xcf\x13\x80\xda\x03\x17\x89M\x85<\x8f\x89\b\x98\x00\x00\u07d4\xcc\x03I\x85\xd3\xf2\x8c-9\xb1\xa3K\xce\xd4\u04f2\xb6\xca#N\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4\xcc\x04\x8d\u01f9]\xca%\xdf&\xee\xfac\x9d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcc+_D\x8f5(\xd3\xfeA\xcc}\x1f\xa9\xc0\xdcv\xf1\xb7v\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\xcc-\x04\xf0\xa4\x01q\x89\xb3@\xcaw\x19\x86A\xdc\xf6Ek\x91\x89\u0556{\xe4\xfc?\x10\x00\x00\xe0\x94\xccA\x9f\u0651+\x85\x13VY\xe7z\x93\xbc=\xf1\x82\xd4Q\x15\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xccE\xfb:U[\xad\x80{8\x8a\x03W\xc8U _|u\xe8\x89.\xe4IU\b\x98\xe4\x00\x00\u07d4\xccHAM*\xc4\xd4*Yb\xf2\x9e\xeeD\x97\t/C\x13R\x89\b\xbaR\xe6\xfcE\xe4\x00\x00\u07d4\xccJ/,\xf8l\xf3\xe43u\xf3`\xa4sF\x91\x19_\x14\x90\x89I\x15\x05;\xd1)\t\x80\x00\u07d4\xccO\x0f\xf2\xae\xb6}T\xce;\xc8\xc6Q\v\x9a\xe8>\x9d2\x8b\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xccO\xaa\xc0\v\xe6b\x8f\x92\xefk\x8c\xb1\xb1\xe7j\xac\x81\xfa\x18\x89\v\"\xa2\xea\xb0\xf0\xfd\x00\x00\xe0\x94\xccO\xebr\u07d8\xff5\xa18\xe0\x17a\xd1 
?\x9b~\xdf\n\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4\xcc`oQ\x13\x97\xa3\x8f\u01c7+\u04f0\xbd\x03\xc7\x1b\xbdv\x8b\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xcc`\xf86\xac\xde\xf3T\x8a\x1f\xef\u0321>\u01a97\xdbD\xa0\x89\x04\xb0m\xbb\xb4\x0fJ\x00\x00\u07d4\xccl\x03\xbd`>\t\xdeT\xe9\xc4\u056cmA\xcb\xceqW$\x89\x05V\xf6L\x1f\xe7\xfa\x00\x00\u07d4\xccl-\xf0\x0e\x86\xec\xa4\x0f!\xff\xda\x1ag\xa1i\x0fG|e\x89\xabM\xcf9\x9a:`\x00\x00\xe0\x94\xccm{\x12\x06\x1b\xc9m\x10M`me\xff\xa3+\x006\xeb\a\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xccs\xdd5kIy\xb5y\xb4\x01\xd4\xccz1\xa2h\xdd\xceZ\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\xccu\x8d\a\x1d%\xa62\n\xf6\x8c]\xc9\xc4\xf6\x95[\xa9E \x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xcc{\x04\x81\xcc2\xe6\xfa\xef#\x86\xa0p\"\xbc\xb6\xd2\u00f4\xfc\x89\xabM\xcf9\x9a:`\x00\x00\xe0\x94\u0314;\xe1\",\xd1@\n#\x99\xdd\x1bE\x94E\xcfmT\xa9\x8a\x02\xa7@\xaee6\xfc\x88\x00\x00\u07d4\u0315\x19\xd1\xf3\x98_k%^\xad\xed\x12\xd5bJ\x97'!\xe1\x8965\u026d\xc5\u07a0\x00\x00\u0794\u031a\xc7\x15\xcdo&\x10\xc5+XgdV\x88B\x97\x01\x8b)\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4\u0320{\xb7\x94W\x1dJ\xcf\x04\x1d\xad\x87\xf0\xd1\xef1\x85\xb3\x19\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u032b\xc6\x04\x8aSFD$\xfc\xf7n\xeb\x9en\x18\x01\xfa#\u0509\x02\xab{&\x0f\xf3\xfd\x00\x00\u07d4\u032e\r=\x85*}\xa3\x86\x0f\x066\x15L\nl\xa3\x16(\u0509\x05\xc6\xd1+k\xc1\xa0\x00\x00\u07d4\xcc\xca$\xd8\xc5mn,\a\xdb\bn\xc0~X[\xe2g\xac\x8d\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xcc\xd5!\x13-\x98l\xb9hi\x84&\"\xa7\u0762l>\xd0W\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcc\xf49u\xb7k\xfes_\xec<\xb7\xd4\xdd$\xf8\x05\xba\tb\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\xcc\xf6*f?\x13S\xba.\xf8\xe6R\x1d\xc1\xec\xb6s\xec\x8e\xf7\x89\b=lz\xabc`\x00\x00\u07d4\xcc\xf7\x11\r\x1b\u0667K\xfd\x1d}}-\x9dU`~{\x83}\x890\xca\x02O\x98{\x90\x00\x00\u07d4\xcc\xfdrW`\xa6\x88#\xff\x1e\x06/L\xc9~\x13`\xe8\u0657\x89\x15\xacV\xed\xc4\xd1,\x00\x00\u07d4\xcd\x02\x0f\x8e\xdf\xcfRG\x98\xa9\xb7:d\x034\xbb\xf7/\x80\xa5\x89\a?u\u0460\x85\xba\x00\x00\u07d4\xcd\x06\xf8\xc1\xb5\u037d(\xe2\xd9kcF\xc3\xe8Z\x04\x83\xba$\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xcd\a.n\x183\x13y\x95\x19m{\xb1r_\xef\x87a\xf6U\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xcd\n\x16\x1b\xc3g\xae\t'\xa9*\xac\x9c\xf6\xe5\bg\x14\xef\u0289lk\x93[\x8b\xbd@\x00\x00\u07d4\xcd\n\xf3GN\"\xf0i\xec4\a\x87\r\xd7pD=[\x12\xb0\x89\x8e^\xb4\xeew\xb2\xef\x00\x00\u07d4\xcd\v\x02W\u70e3\xd2\xc2\u3e9dny\xb7^\xf9\x80$\u0509\x9f\xad\x06$\x12y\x16\x00\x00\u07d4\xcd\x10,\xd6\xdb=\xf1J\u05af\x0f\x87\xc7$y\x86\x1b\xfc=$\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcd\x1ef\xedS\x9d\xd9/\xc4\v\xba\xa1\xfa\x16\u078c\x02\xc1ME\x89\fw\xe4%hc\xd8\x00\x00\u07d4\xcd\x1e\xd2c\xfb\xf6\xf6\xf7\xb4\x8a\xef\x8fs=2\x9dC\x82\xc7\u01c9\x01\x00\xbd3\xfb\x98\xba\x00\x00\u07d4\xcd*6\xd7S\xe9\xe0\xed\x01*XMqh\aX{A\xd5j\x89\x0e+\xa7[\v\x1f\x1c\x00\x00\u07d4\xcd2\xa4\xa8\xa2\u007f\x1c\xc69T\xaacOxW\x05s4\u01e3\x89:\xd1fWlr\xd4\x00\x00\u07d4\xcd5\xff\x01\x0e\xc5\x01\xa7!\xa1\xb2\xf0z\x9c\xa5\x87}\xfc\xf9Z\x89\xd9o\u0390\u03eb\xcc\x00\x00\u07d4\xcdC\x06\xd7\xf6\x94z\xc1tMN\x13\xb8\xef2\xcbe~\x1c\x00\x89\x1b\x1a\xb3\x19\xf5\xecu\x00\x00\u07d4\xcdC%\x8bs\x92\xa90\x83\x9aQ\xb2\xef\x8a\xd24\x12\xf7Z\x9f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcdI\xbf\x18^p\xd0E\a\x99\x9f\x92\xa4\xdeDU1('\u040965\u026d\xc5\u07a0\x00\x00\u07d4\xcdU\x10\xa2B\u07f0\x18=\xe9%\xfb\xa8f\xe3\x12\xfa\xbc\x16W\x89\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4\xcdVj\u05f8\x83\xf0\x1f\u04d9\x8a\x9aX\xa9\xde\xe4rM\u0725\x89\x030\xae\x185\xbe0\x00\x00\xe0\x94\xcdY\xf3\xdd\xe7~\t\x94\v\xef\xb6\xeeX\x03\x19e\xca\xe7\xa36\x8a\x02
\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xcdr]p\xbe\x97\xe6w\xe3\xc8\xe8\\\v&\xef1\xe9\x95PE\x89Hz\x9a0E9D\x00\x00\xe0\x94\xcd~G\x90\x94d\xd8q\xb9\xa6\xdcv\xa8\xe9\x19]\xb3H^z\x8a\x02\x15\xf85\xbcv\x9d\xa8\x00\x00\u07d4\xcd~\xce\bkKa\x9b;6\x93R\xee8\xb7\x1d\xdb\x06C\x9a\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xcd\u007f\t\xd7\xedf\xd0\u00cb\u016dN2\xb7\xf2\xb0\x8d\xc1\xb3\r\x89>;\xb3M\xa2\xa4p\x00\x00\u07d4\u0355)I+\\)\xe4u\xac\xb9A@+=;\xa5\x06\x86\xb0\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\u0355\xfaB=o\xc1 'J\xac\xde\x19\xf4\xee\xb7f\xf1\x04 \x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\u035bL\xefs9\f\x83\xa8\xfdq\u05f5@\xa7\xf9\u03cb\x8c\x92\x89\x04\xe1\x00;(\xd9(\x00\x00\u07d4\u0361t\x11\t\xc0&[?\xb2\xbf\x8d^\xc9\u00b8\xa34kc\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u0361\xb8\x86\u39d5\u027aw\x91N\n/\xe5go\x0f\\\u03c9\x05\xbf`\xeaB\xc2\x04\x00\x00\u07d4\u0364S\x0fK\x9b\xc5\t\x05\xb7\x9d\x17\u008f\xc4o\x954\x9b\u07c93\x10\xe0I\x11\xf1\xf8\x00\x00\u07d4\u036bF\xa5\x90 \x80do\xbf\x95B\x04 J\xe8\x84\x04\x82+\x89\x1d\x8a\x96\xe5\xc6\x06\xeb\x00\x00\u07d4\u0375\x97)\x900\x18?n-#\x853\xf4d*\xa5\x87T\xb6\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xcd\xd5\u0601\xa76,\x90p\a;\u07fcu\xe7$S\xacQ\x0e\x89-\xa5\x18\xea\xe4\x8e\xe8\x00\x00\u07d4\xcd\xd6\rs\xef\xaa\xd8s\u027b\xfb\x17\x8c\xa1\xb7\x10Z\x81\xa6\x81\x89\x01\xbc\x16\xd6t\xec\x80\x00\x00\u07d4\xcd\xd9\xef\xacMm`\xbdq\xd9U\x85\xdc\xe5\u0557\x05\xc15d\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xcd\xe3m\x81\xd1(\u015d\xa1Ee!\x93\xee\u00bf\xd9e\x86\xef\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xcd\xea8o\x9d\x0f\xd8\x04\xd0(\x18\xf27\xb7\xd9\xfavF\xd3^\x89\xa3I\xd3m\x80\xecW\x80\x00\u07d4\xcd\xec\xf5gT3\u0370\xc2\xe5Zh\xdb]\x8b\xbexA\x9d\u0489\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xcd\xfd\x82\x173\x97%\xd7\xeb\xac\x11\xa66U\xf2e\xef\xf1\xcc=\x8a\x01\x0f\fid\x10\xe3\xa9\x00\x00\u07d4\xce\a\x9fQ\x88wt\xd8\x02\x1c\xb3\xb5u\xf5\x8f\x18\xe9\xac\xf9\x84\x89\t\xc2\x00vQ\xb2P\x00\x00\u07d4\xce\x18\x84\u077b\xb8\xe1\x0eM\xbanD\xfe\xee\u00a7\xe5\xf9/\x05\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xce\x1b\f\xb4j\xae\xcf\u05db\x88\f\xad\x0f-\u068a\x8d\xed\u0431\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xce&\xf9\xa50_\x83\x81\tCT\xdb\xfc\x92fN\x84\xf9\x02\xb5\x89\fz\xaa\xb0Y\x1e\xec\x00\x00\u07d4\xce-\xea\xb5\x1c\n\x9a\xe0\x9c\xd2\x12\xc4\xfaL\xc5+S\xcc\r\xec\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xce.\r\xa8\x93F\x99\xbb\x1aU>U\xa0\xb8\\\x16\x945\xbe\xa3\x8a\x01\x0f\fid\x10\xe3\xa9\x00\x00\u07d4\xce:a\xf0F\x1b\x00\x93^\x85\xfa\x1e\xad\x82\xc4^Zd\u0508\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xceK\x06]\xbc\xb20G 2b\xfbH\xc1\x18\x83d\x97tp\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xceS\xc8\xcd\xd7B\x96\xac\xa9\x87\xb2\xbc\x19\u00b8u\xa4\x87I\u0409\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xce^\x04\xf0\x18Ci\xbc\xfa\x06\xac\xa6o\xfa\x91\xbfY\xfa\x0f\xb9\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xce^\xb6:{\xf4\xfb\xc2\xf6\u4ea0\u018a\xb1\xcbL\xf9\x8f\xb4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xceb\x12Z\xde\xc37\n\xc5!\x10\x95:Nv\v\xe9E\x1e;\x89\b=lz\xabc`\x00\x00\xe0\x94\xceq\bmL`%T\xb8-\xcb\xfc\xe8\x8d 
cMS\xccM\x8a\t(\x96R\x9b\xad\u0708\x00\x00\u07d4\u038akmP3\xb1I\x8b\x1f\xfe\xb4\x1aAU\x04\x05\xfa\x03\xa2\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u0397\x86\xd3q/\xa2\x00\xe9\xf6\x857\xee\xaa\x1a\x06\xa6\xf4ZK\x89a\t=|,m8\x00\x00\u07d4\u039d!\u0192\xcd<\x01\xf2\x01\x1fP_\x87\x006\xfa\x8fl\u0489\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\u03a2\x89f#\xf4\x91\x02\x87\xa2\xbd\u017e\x83\xae\xa3\xf2\xe6\xde\b\x8a\x01\xfbZ7Q\xe4\x90\xdc\x00\x00\u07d4\u03a3JM\xd9=\u066e\xfd9\x90\x02\xa9}\x99z\x1bK\x89\u0349QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4\u03a4?pu\x81k`\xbb\xfc\u62d9:\xf0\x88\x12p\xf6\u0109lk\x93[\x8b\xbd@\x00\x00\u07d4\u03a8t3AS<\xb2\xf0\xb9\xc6\xef\xb8\xfd\xa8\rw\x16(%\x89\x05k\xc7^-c\x10\x00\x00\u07d4\u03b0\x89\xec\x8ax3~\x8e\xf8\x8d\xe1\x1bI\xe3\u0751\x0ft\x8f\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u03b3=x\xe7Tz\x9d\xa2\xe8}Q\xae\xc5\xf3D\x1c\x87\x92:\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u03b3\x898\x1dH\xa8\xaeO\xfcH:\u043b^ L\xfd\xb1\xec\x89('\xe6\xe4\xddb\xba\x80\x00\u07d4\xce\xc6\xfce\x85?\x9c\xce_\x8e\x84Fv6.\x15y\x01_\x02\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xce\xd3\u01fe\x8d\xe7XQ@\x95*\xebP\x1d\xc1\xf8v\ucbf0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xce\xd8\x1e\xc3S?\xf1\xbf\xeb\xf3\xe3\x84>\xe7@\xad\x11u\x8d>\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xce\u0733\xa1\u0584?\xb6\xbe\xf6Ca}\xea\U000cf398\xdd_\x89\x19\xe2\xa4\xc8\x18\xb9\x06\x00\x00\u07d4\xce\xe6\x99\xc0pzx6%+)/\x04|\xe8\xad(\x9b/U\x89\x11\x9a\x1e!\xaaiV\x00\x00\u07d4\xce\xedG\xca[\x89\x9f\xd1b?!\xe9\xbdM\xb6Z\x10\u5c1d\x89\a8w@L\x1e\xee\x00\x00\u07d4\xce\xf7tQ\u07e2\xc6C\xe0\v\x15mlo\xf8N#s\xebf\x89\n1\x06+\xee\xedp\x00\x00\u07d4\xcf\x11i\x04\x1c\x17E\xe4[\x17$5\xa2\xfc\x99\xb4\x9a\xce+\x00\x89\x01\xbb\x88\xba\xab-|\x00\x00\xe0\x94\xcf\x15v\x12vN\x0f\u0596\xc8\xcb_\xba\x85\xdfL\r\xdc<\xb0\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u0794\xcf\x1b\xdby\x9b.\xa6<\xe14f\x8b\xdc\x19\x8bT\x84\x0f\x18\v\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xcf\"\x88\xefN\xbf\x88\xe8m\xb1=\x8a\x0e\v\xf5*\x05e\x82\u00c9\x89Po\xbf\x97@t\x00\x00\u07d4\xcf&Ni%\x13\t\x06\xc4\xd7\xc1\x85\x91\xaaA\xb2\xa6\u007foX\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcf&\xb4{\xd04\xbcP\x8elK\xcf\xd6\xc7\xd3\x004\x92Wa\x89a\x94\x04\x9f0\xf7 \x00\x00\xe0\x94\xcf.*\xd65\xe9\x86\x1a\xe9\\\xb9\xba\xfc\xca\x03kR\x81\xf5\u038a\at2!~h6\x00\x00\x00\u07d4\xcf.s@B\xa3U\xd0_\xfb.9\x15\xb1h\x11\xf4Zi^\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcf4\x8f/\xe4{~A<\az{\xaf:u\xfb\xf8B\x86\x92\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xcf?\x91(\xb0r\x03\xa3\xe1\r}WU\xc0\u012b\xc6\xe2\xca\u008a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xcf?\xbf\xa1\xfd2\u05e6\xe0\xe6\xf8\xefN\xabW\xbe4\x02\\L\x899\xa1\xc0\xf7YMH\x00\x00\u07d4\xcfAftn\x1d;\xc1\xf8\xd0qK\x01\xf1~\x8ab\xdf\x14d\x896w\x03n\xdf\n\xf6\x00\x00\u07d4\xcfO\x118\xf1\xbdk\xf5\xb6\u0505\xcc\xe4\xc1\x01\u007f\u02c5\xf0}\x89/\u043cw\xc3+\xff\x00\x00\u07d4\xcfZo\x9d\xf7Uy\xc6D\xf7\x94q\x12\x15\xb3\rw\xa0\xce@\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcf^\x0e\xac\u0473\x9d\x06U\xf2\xf7u5\xeff\b\xeb\x95\v\xa0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xcfhM\xfb\x83\x04r\x93U\xb5\x83\x15\xe8\x01\x9b\x1a\xa2\xad\x1b\xac\x89\x17r$\xaa\x84Lr\x00\x00\u07d4\xcfi@\x81\xc7m\x18\xc6L\xa7\x13\x82\xbe\\\xd6;<\xb4v\xf8\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xcfnR\xe6\xb7t\x80\xb1\x86~\xfe\xc6Dm\x9f\xc3\xcc5w\xe8\x89\f\t\x01\xf6\xbd\x98y\x00\x00\u07d4\u03c8: 
2\x96g\xea\"j\x1e\x9a\x92*\x12\xf2\x1f\xaa\x03\x81V\x91\x8cO\u02dc\x89\x04E\x91\xd6\u007f\xec\xc8\x00\x00\u07d4\xcf\xf7\xf8\x9aMB\x19\xa3\x82\x95%\x131V\x82\x10\xff\xc1\xc14\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xcf\xf8\xd0k\x00\xe3\xf5\f\x19\x10\x99\xadV\xbaj\xe2eq\u0348\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xcf\xfcI\xc1x~\ubcb5l\xab\xe9$\x04\xb66\x14}EX\x8a\x013\xe00\x8f@\xa3\u0680\x00\u07d4\xd0\bQ;'`J\x89\xba\x17c\xb6\xf8L\u6233F\x94[\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd0\x0f\x06r\x86\xc0\xfb\u0402\xf9\xf4\xa6\x10\x83\xecv\u07b3\xce\xe6\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd0\x15\xf6\xfc\xb8M\xf7\xbbA\x0e\x8c\x8f\x04\x89J\x88\x1d\xca\xc27\x898E$\xccp\xb7x\x00\x00\u07d4\xd0\x1a\xf9\x13O\xafRW\x17N\x8by\x18oB\xee5Nd-\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd0!\b\u04ae<\xab\x10\xcb\xcf\x16W\xaf\">\x02|\x82\x10\xf6\x89lm\x84\xbc\xcd\xd9\xce\x00\x00\u07d4\xd0*\xfe\u03ce.\u00b6*\u022d Aa\xfd\x1f\xaew\x1d\x0e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd01\x919\xfb\xab.\x8e*\xcc\xc1\xd9$\u0531\x1d\xf6ilZ\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd07\xd2\x15\xd1\x1d\x1d\xf3\xd5O\xbd2\x1c\u0495\xc5F^';\x89K\xe4\xe7&{j\xe0\x00\x00\u07d4\xd0:-\xa4\x1e\x86\x8e\xd3\xfe\xf5t[\x96\xf5\xec\xa4b\xffo\u0689\xa2\xa1]\tQ\x9b\xe0\x00\x00\xe0\x94\xd0?\xc1eWj\xae\xd5%\xe5P,\x8e\x14\x0f\x8b.\x86\x969\x8a\x01sV\u0633%\x01\xc8\x00\x00\u07d4\xd0C\xa0\x11\xecBp\xee~\u0239hsu\x15\xe5\x03\xf80(\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd0K\x86\x1b=\x9a\xccV:\x90\x16\x89\x94\x1a\xb1\xe1\x86\x11a\xa2\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd0ZD|\x91\x1d\xbb'[\xfb.Z7\xe5\xa7\x03\xa5o\x99\x97\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd0_\xfb+t\xf8g O\xe51e;\x02H\xe2\x1c\x13TN\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd0bX\x81q\u03d9\xbb\xebX\xf1&\xb8p\xf9\xa3r\x8da\xec\x89\xf3\xf2\v\x8d\xfai\xd0\x00\x00\u07d4\xd0c\x8e\xa5q\x89\xa6\xa6\x99\x02J\u05ccq\xd99\xc1\xc2\xff\x8c\x89\x8e\xaeVg\x10\xfc \x00\x00\xe0\x94\xd0d\x8aX\x1b5\b\xe15\xa2\x93]\x12\xc9epE\xd8q\u028a\x01\xb2\u07dd!\x9fW\x98\x00\x00\u07d4\xd0q\x19)f\xebi\xc3R\x0f\xca:\xa4\xdd\x04)~\xa0KN\x89\x05\xf6\x8e\x811\xec\xf8\x00\x00\u07d4\xd0q\x85 \xea\xe0\xa4\xd6-p\xde\x1b\xe0\xcaC\x1c^\xea$\x82\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd0w]\xba*\xf4\xc3\n:x6Y9\xcdq\xc2\xf9\u0795\u0489i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xd0{\xe0\xf9\t\x97\xca\xf9\x03\u022c\x1dS\xcd\xe9\x04\xfb\x19\aA\x8968\x908\xb6\x99\xb4\x00\x00\u07d4\xd0~Q\x18d\xb1\u03d9i\xe3V\x06\x02\x82\x9e2\xfcNq\xf5\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\u0400\x94\x98\xc5H\x04z\x1e**\xa6\xa2\x9c\xd6\x1a\x0e\xe2h\xbd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0402'_tZ,\xac\x02v\xfb\xdb\x02\u0532\xa3\xab\x17\x11\xfe\x89\x01\xa0Ui\r\x9d\xb8\x00\x00\u07d4\u040f\xc0\x9a\x000\xfd\t(\xcd2\x11\x98X\x01\x82\xa7j\xae\x9f\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u0413\xe8)\x81\x9f\xd2\xe2[\x978\x00\xbb=XA\xdd\x15-\x05\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u0414J\xa1\x85\xa13pa\xae \u071d\xd9l\x83\xb2\xbaF\x02\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\u0416V[|t\a\xd0e6X\x03U\xfd\xd6\xd29\x14J\xa1\x89\r\x8drkqw\xa8\x00\x00\u07d4\u041c\xb2\xe6\b-i:\x13\xe8\xd2\xf6\x8d\xd1\u0744a\xf5X@\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u0426\xc6\xf9\xe9\u0133\x83\xd7\x16\xb3\x1d\xe7\x8dVAM\xe8\xfa\x91\x89\x10CV\x1a\x88)0\x00\x00\u07d4\u0427 \x9b\x80\xcf`\xdbb\xf5}\n]}R\x1ai`fU\x89\b\xacr0H\x9e\x80\x00\x00\xe0\x94\u0428\xab\xd8\n\x19\x9bT\xb0\x8be\xf0\x1d 
\x9c'\xfe\xf0\x11[\x8a\x01a\xc6&\xdca\xa2\xef\x80\x00\xe0\x94\u042b\xccp\xc0B\x0e\x0e\x17/\x97\xd4;\x87\xd5\xe8\f3n\xa9\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u042es]\x91^\x94hf\xe1\xfe\xa7~^\xa4f\xb5\xca\xdd\x16\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0431\x1do+\u0394^\fjP \u00f5'S\xf8\x03\xf9\u0449\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xd0\xc1\x01\xfd\x1f\x01\xc6?k\x1d\x19\xbc\x92\r\x9f\x93#\x14\xb16\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xd0\xc5Z\xbf\x97o\xdc=\xb2\xaf\u9f99\u0519HMWl\x02\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd0\u0422\xadE\xf5\x9a\x9d\xcc\u0195\xd8_%\xcaF\xed1\xa5\xa3\x89-\x89W}}@ \x00\x00\u07d4\xd0\xd6,G\xea`\xfb\x90\xa3c\x92\t\xbb\xfd\xd4\xd93\x99\x1c\u0189\n\x84Jt$\xd9\xc8\x00\x00\u07d4\xd0\xdbEax o\\D0\xfe\x00Pc\x90<=zI\xa7\x89&I\x1eE\xa7S\xc0\x80\x00\u07d4\xd0\xe1\x94\xf3K\x1d\xb6\t(\x85\t\xcc\xd2\xe7;a1\xa2S\x8b\x8965f3\xeb\xd8\xea\x00\x00\u07d4\xd0\xe3^\x04vF\xe7Y\xf4Qp\x93\xd6@\x86BQ\u007f\bM\x89\u054f\xa4h\x18\xec\u02c0\x00\u07d4\xd0\xeeM\x02\xcf$8,0\x90\xd3\xe9\x95`\xde6xs\\\u07c9\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4\xd0\xf0OR\x10\x9a\xeb\xec\x9a{\x1e\x932v\x1e\x9f\xe2\xb9{\xb5\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd0\xf9Yx\x11\xb0\xb9\x92\xbb}7W\xaa%\xb4\xc2V\x1d2\xe2\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd1\x03\x02\xfa\xa1\x92\x9a2i\x04\xd3v\xbf\v\x8d\xc9:\xd0LL\x89a\t=|,m8\x00\x00\xe0\x94\xd1\x10\r\xd0\x0f\xe2\xdd\xf1\x81c\xad\x96M\vi\xf1\xf2\xe9e\x8a\x8a\x01C\x12\tU\xb2Pk\x00\x00\u07d4\xd1\x16\xf3\xdc\xd5\xdbtK\xd0\b\x88v\x87\xaa\x0e\xc9\xfdr\x92\xaa\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd1\x19A|Fs,\xf3M\x1a\x1a\xfby\xc3\xe7\xe2\u034e\xec\xe4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd1-w\xae\x01\xa9-5\x11{\xacpZ\xac\u0642\xd0.t\xc1\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xd15yK\x14\x9a\x18\xe1G\xd1nb\x1ai1\xf0\xa4\n\x96\x9a\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xd1C%8\xe3[vd\x95j\u4563*\xbd\xf0A\xa7\xa2\x1c\x8a\x04+\xf0kx\xed;P\x00\x00\u07d4\xd1C\x82g#\x17\x04\xfcr\x80\xd5c\xad\xf4v8D\xa8\a\"\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd1S\x8e\x9a\x87\u5729\xec\x8eX&\xa5\xb7\x93\xf9\x9f\x96\xc4\u00c965\u026d\xc5\u07a0\x00\x00\xe0\x94\xd1d\x85\x03\xb1\xcc\u0178\xbe\x03\xfa\x1e\xc4\xf3\xee&~j\xdf{\x8a\x01;\xef\xbfQ\xee\xc0\x90\x00\x00\xe0\x94\xd1h,!Y\x01\x8d\xc3\xd0\u007f\b$\n\x8c`m\xafe\xf8\xe1\x8a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\xd1q\xc3\xf2%\x8a\xef5\xe5\x99\xc7\xda\x1a\xa0s\x00#M\xa9\xa6\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd1w\x8c\x13\xfb\xd9h\xbc\b<\xb7\xd1\x02O\xfe\x1fI\xd0,\xaa\x89\xd9\xec\xb4\xfd \x8eP\x00\x00\u07d4\xd1\u007f\xbe\"\xd9\x04b\xed7(\x06p\xa2\xea\v0\x86\xa0\xd6\u0589\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4\u0441\x1cU\x97i\x80\xf0\x83\x90\x1d\x8a\r\xb2i\"-\xfb\\\xfe\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\u044e\xb9\xe1\u0485\u06be\x93\xe5\u053a\xe7k\xee\xfeC\xb5!\xe8\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\u0453\xe5\x83\xd6\a\x05c\xe7\xb8b\xb9aJG\u9509\xf3\xe5\x8965f3\xeb\xd8\xea\x00\x00\u07d4\u0457\x8f.4@\u007f\xab\x1d\xc2\x18=\x95\xcf\xdab`\xb3Y\x82\x89*\xb7\xb2`\xff?\xd0\x00\x00\u07d4\u045c\xaf9\xbb7\u007f\xdf,\xf1\x9b\xd4\xfbRY\x1c&1\xa6<\x8965\u026d\xc5\u07a0\x00\x00\u0794\u0463\x96\xdc\u06b2\xc7IA0\xb3\xfd0x 4\r\xfd\x8c\x1f\x88\xf9\"P\xe2\xdf\xd0\x00\x00\xe0\x94\u0467\x1b-\bX\xe82p\b]\x95\xa3\xb1T\x96P\x03^#\x8a\x03'\xbb\t\xd0j\xa8P\x00\x00\u07d4\u046c\xb5\xad\xc1\x189s%\x8dk\x85$\xff\xa2\x8f\xfe\xb2=\xe3\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u0473\u007f\x03\xcb\x10t$\xe9\xc4\xddW\\\xcdOL\xeeW\xe6\u0349lk\x93[\x8b\xbd@\x00\x00\u07d4\u0475\xa4T\xac4\x05\xbbAy 
\x8cl\x84\xde\x00k\u02db\xe9\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd1\xc4YT\xa6+\x91\x1a\xd7\x01\xff.\x90\x13\x1e\x8c\xeb\x89\xc9\\\x89K\x91\xa2\xdeE~\x88\x00\x00\u07d4\xd1\xc9np\xf0Z\xe0\xe6\xcd`!\xb2\b7P\xa7q|\xdeV\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd1\u0571\u007f\xfe-{\xbby\xcc}y0\xbc\xb2\xe5\x18\xfb\x1b\xbf\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xd1\xda\f\x8f\xb7\xc2\x10\xe0\xf2\xeca\x8f\x85\xbd\xae}>sK\x1c\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xd1\xddy\xfb\x15\x81`\xe5\xb4\xe8\xe2?1.j\x90\u007f\xbcMN\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd1\xdeZ\xad:_\xd8\x03\U00071bb6\x10<\xb8\xe1O\xe7#\xb7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd1\xe1\xf2\xb9\xc1l0\x98t\xde\xe7\xfa\xc3&u\xaf\xf1)\u00d8\x89\x03\xf2M\x8eJ\x00p\x00\x00\xe0\x94\xd1\xe5\xe24\xa9\xf4Bf\xa4\xa6$\x1a\x84\u05e1\xa5Z\u0567\xfe\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xd1\xeaMr\xa6{[>\x0f1UY\xf5+\xd0aMq0i\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd1\xee\x90YW\xfe|\xc7\x0e\xc8\xf2\x86\x8bC\xfeG\xb1?\xeb\xff\x89\x02b\x9ff\xe0\xc50\x00\x00\u07d4\xd1\xf1iM\"g\x1bZ\xadj\x94\x99\\6\x9f\xbea3go\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd1\xf4\xdc\x1d\u06ca\xbb\x88H\xa8\xb1N%\xf3\xb5Z\x85\x91\xc2f\x89\r\x8drkqw\xa8\x00\x00\u07d4\xd1\xfe\u042e\xe6\xf5\xdf\xd7\xe2Wi%L<\xfa\xd1Z\xde\u032a\x89'\x92\xc8\xfcKS(\x00\x00\u07d4\xd2\x05\x1c\xb3\xcbg\x04\xf0T\x8c\u0210\xab\n\x19\xdb4\x15\xb4*\x89\x12\x1b.^ddx\x00\x00\u07d4\xd2\x06\xaa\u07736\xd4^yr\xe9<\xb0uG\x1d\x15\x89{]\x89 \x86\xac5\x10R`\x00\x00\u07d4\xd2\tH+\xb5I\xab\xc4w{\xeam\u007fe\x00b\xc9\xc5z\x1c\x89\x11e\x1a\xc3\xe7\xa7X\x00\x00\u07d4\xd2\r\xcb\vxh+\x94\xbc0\x00(\x14H\xd5W\xa2\v\xfc\x83\x890\x84\x9e\xbe\x166\x9c\x00\x00\u07d4\xd2\x10{57&\u00e2\xb4ef\xea\xa7\xd9\xf8\v]!\xdb\xe3\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd2\x11\xb2\x1f\x1b\x12\xb5\ta\x81Y\r\xe0~\xf8\x1a\x89S~\xad\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd2\x18\xef\xb4\u06d8\x1c\xddjy\u007fK\u050c|&)<\xeb@\x89\xa1Fk1\xc6C\x1c\x00\x00\xe0\x94\xd2\x1asA\xeb\x84\xfd\x15\x10T\xe5\u31fb%\xd3nI\x9c\t\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\xe0\x94\xd2$\xf8\x80\xf9G\x9a\x89\xd3/\t\xe5+\u9432\x88\x13\\\xef\x8a\x03\xa9\u057a\xa4\xab\xf1\xd0\x00\x00\u07d4\xd2/\f\xa4\xcdG\x9ef\x17u\x05;\xccI\xe3\x90\xf6p\u074a\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd21\x92\x975\x13!\x02G\x1b\xa5\x90\a\xb6dL\xc0\xc1\xde>\x8967\tlK\xcci\x00\x00\u07d4\xd25\xd1\\\xb5\xec\xee\xbba)\x9e\x0e\x82\u007f\xa8'H\x91\x1d\x89\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd2:$\xd7\xf9F\x83C\xc1C\xa4\x1ds\xb8\x8f|\xbec\xbe^\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd2=z\xff\xac\xdc>\x9f=\xaez\xfc\xb4\x00oX\xf8\xa4F\x00\x89\xc3(\t>a\xee@\x00\x00\u07d4\xd2C\x18L\x80\x1e]y\xd2\x06?5x\u06ee\x81\u7ce9\u02c9k\u0722h\x1e\x1a\xba\x00\x00\u07d4\xd2KfD\xf49\xc8\x05\x1d\xfcd\u04c1\xb8\xc8lu\xc1u8\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xd2K\xf1--\xdfE}\xec\xb1xt\xef\xde 
R\xb6\\\xbbI\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\xe0\x94\xd2Q\xf9\x03\xae\x18rrY\xee\xe8A\xa1\x89\xa1\xf5i\xa5\xfdv\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xd2R\x96\v\v\xf6\xb2\x84\x8f\u07ad\x80\x13m\xb5\xf5\a\xf8\xbe\x02\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xd2X\x1aU\xce#\xab\x10\u062d\x8cD7\x8fY\a\x9b\xd6\xf6X\x8a\x01\xdd\f\x88_\x9a\r\x80\x00\x00\u07d4\xd2Z\xec\xd7\xeb\x8b\xd64[\x06;]\xbd'\x1cw\xd3QD\x94\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xd2|#O\xf7\xac\xca\xce=\x99g\b\xf8\xf9\xb0Ip\xf9}6\x89Hz\x9a0E9D\x00\x00\u07d4\u0482\x98RM\xf5\xecK$\xb0\xff\xb9\u07c5\x17\n\x14Z\x9e\xb5\x89\x0f\x98\xa3\xb9\xb37\xe2\x00\x00\xe0\x94\u0483\xb8\xed\xb1\n%R\x8aD\x04\xde\x1ce\xe7A\r\xbc\xaag\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\u0484\xa5\x03\x82\xf8:am9\xb8\xa9\xc0\xf3\x96\xe0\ubfe9]\x8966\xc2^f\xec\xe7\x00\x00\u07d4\u0488\xe7\xcb{\xa9\xf6 \xab\x0ftR\xe5\bc=\x1cZ\xa2v\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\u049d\xc0\x8e\xfb\xb3\xd7.&?x\xabv\x10\xd0\"m\xe7k\x00\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\u04a00\xac\x89R2_\x9e\x1d\xb3x\xa7\x14\x85\xa2N\x1b\a\xb2\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u04a4y@CG\xc5T:\xab)*\xe1\xbbJo\x15\x83W\xfa\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\u04a5\xa0$#\nW\xcc\xc6fv\v\x89\xb0\xe2l\xaf\u0449\u01ca\n\x96YZ\\n\x8a?\x80\x00\u07d4\u04a8\x03'\xcb\xe5\\L{\xd5\x1f\xf9\xdd\xe4\xcad\x8f\x9e\xb3\xf8\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\u04a8Oug\\b\xd8\f\x88ulB\x8e\xee+\xcb\x18T!\x89A\rXj \xa4\xc0\x00\x00\u07d4\u04ab\xd8J\x18\x10\x93\xe5\xe2)\x13oB\xd85\xe8#]\xe1\t\x89\x05k\xe0<\xa3\xe4}\x80\x00\u07d4\u04ac\r:X`^\x1d\x0f\x0e\xb3\xde%\xb2\xca\xd1)\xed`X\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u04bfg\xa7\xf3\xc6\xceV\xb7\xbeAg]\xbb\xad\xfe~\xa9:3\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xd2\xdb\xeb\xe8\x9b\x03W\xae\xa9\x8b\xbe\x8eIc8\u07bb(\xe8\x05\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd2\xe2\x1e\xd5hh\xfa\xb2\x8e\tG\x92z\xda\xf2\x9f#\xeb\xadl\x89l\x18O\x13U\xd0\xe8\x00\x00\u07d4\xd2\xe8\x17s\x8a\xbf\x1f\xb4\x86X?\x80\xc3P1\x8b\xed\x86\f\x80\x89\r\x02\xce\xcf_]\x81\x00\x00\u07d4\xd2\xed\xd1\xdd\xd6\xd8m\xc0\x05\xba\xebT\x1d\"\xb6@\xd5\xc7\xca\xe5\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd2\xf1\x99\x8e\x1c\xb1X\f\xecOl\x04}\xcd=\xce\xc5L\xf7<\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd2\xf2A%]\xd7\xc3\xf7<\a\x040q\xec\b\xdd\xd9\xc5\xcd\xe5\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd2\xffg \x16\xf6;/\x859\x8fJo\xed\xbb`\xa5\r<\u0389\x12\x91$o[sJ\x00\x00\u07d4\xd3\rLC\xad\xcfU\xb2\xcbS\u0583#&A4I\x8d\x89\u038965\u026d\xc5\u07a0\x00\x00\u07d4\xd3\x0e\xe9\xa1+Mh\xab\xac\xe6\xba\u029a\u05ff\\\xd1\xfa\xf9\x1c\x89QO\xcb$\xff\x9cP\x00\x00\u07d4\xd3\x11\x8e\xa3\xc85\x05\xa9\u0613\xbbg\xe2\xde\x14-Sz>\xe7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd3\x11\xbc\u05eaN\x9bO8?\xf3\xd0\u05b6\xe0~!\xe3p]\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xd3\x15\xde\xea\x1d\x8c\x12q\xf9\xd11\x12c\xabG\xc0\a\xaf\xb6\xf5\x89\x03\xc8\x1dNeK@\x00\x00\u07d4\xd3+,y\xc3dx\xc5C\x19\x01\xf6\xd7\x00\xb0M\xbe\x9b\x88\x10\x89\x15w\x9a\x9d\xe6\xee\xb0\x00\x00\u07d4\xd3+EVF\x14Ql\x91\xb0\u007f\xa9\xf7-\xcfx|\xceN\x1c\x89\x0f\xc6o\xae7F\xac\x00\x00\u07d4\xd30r\x811\xfe\x8e:\x15Hz4W<\x93E~*\xfe\x95\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd31\xc8#\x82Z\x9eRc\xd0R\u0611]M\xcd\xe0z\\7\x89\x1e\x93\x12\x83\xcc\xc8P\x00\x00\u07d4\xd33btE\xf2\u05c7\x90\x1e\xf3;\xb2\xa8\xa3g^'\xff\xec\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xd3<\xf8+\xf1LY&@\xa0\x86\b\x91L#py\u057e4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd3Mp\x8ds\x98\x02E3\xa5\xa2\xb20\x9b\x19\xd3\xc5Qq\xbb\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xd3N\x03\xd3j+\xd4\u045a_\xa1b\x18\xd1\xd6\x1e?\xfa\v\x15\x89\x11X\xe4`\x91=\x00\x00\x00\u07d4\xd3Pu\xcaa\xfeY\xd1#\x96\x9c6\xa8-\x1a\xb2\xd9\x18\xaa8\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4\xd3g\x00\x9a\xb6X&;b\xc23:\x1c\x9eA@I\x8e\x13\x89\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd3g\x9aG\xdf-\x99\xa4\x9b\x01\u024d\x1c>\f\x98|\xe1\xe1X\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\xe0\x94\u04cf\xa2\xc4\xcc\x14z\xd0j\u0562\xf7Uy(\x1f\"\xa7\xcc\x1f\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\u04da]\xa4`9+\x94\v\u01ee8\xf1e\u007f\x8a\x01f\xc5H\b\x89\xdbw\x00\x00\xe0\x94\xd3\xd6\xe9\xfb\x82T/\u049e\xd9\xea6\t\x89\x1e\x15\x13\x96\xb6\xf7\x8a\voX\x8a\xa7\xbc\xf5\xc0\x00\x00\xe0\x94\xd3\xda\u0476\u040dE\x81\u032ee\xa8s-\xb6\xaci\xf0\u019e\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xd3\xdf;S\xcb;GU\xdeT\xe1\x80E\x1c\xc4L\x9e\x8a\u0a89#\u0114\t\xb9w\x82\x80\x00\u07d4\xd3\xf8s\xbd\x99V\x13W\x89\xab\x00\xeb\xc1\x95\xb9\"\xe9K%\x9d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd4\x02\xb4\xf6\xa0\x99\xeb\xe7\x16\xcb\x14\xdfOy\xc0\xcd\x01\xc6\a\x1b\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xd4\r\x00U\xfd\x9a8H\x8a\xff\x92?\xd0=5\xecF\xd7\x11\xb3\x8a\x01\x0f\b\xed\xa8\xe5U\t\x80\x00\u07d4\xd4\x0e\xd6j\xb3\xce\xff$\xca\x05\xec\xd4q\ufd12\xc1__\xfa\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd4\x18\x87\v\xc2\xe4\xfa{\x8aa!\xae\br\xd5RG\xb6%\x01\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xd4\x1d\u007f\xb4\x9f\xe7\x01\xba\xac%qpBl\u0273\x8c\xa3\xa9\xb2\x89\t\x8a}\x9b\x83\x14\xc0\x00\x00\u07d4\xd4 U\x92\x84@U\xb3\u01e1\xf8\f\xef\xe3\xb8\xebP\x9b\xcd\xe7\x89\t\xb3\xbf\xd3B\xa9\xfc\x80\x00\u07d4\xd4+ \xbd\x03\x11`\x8bf\xf8\xa6\xd1[*\x95\xe6\xde'\u017f\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd44O}\\\xade\xd1~\\-\x0es#\x94=ob\xfe\x92\x89\x0e~\xeb\xa3A\vt\x00\x00\u07d4\xd4>\xe48\xd8=\xe9\xa3ub\xbbN(l\xb1\xbd\x19\xf4\x96M\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd4C4\xb4\xe2:\x16\x9a\f\x16\xbd!\xe8f\xbb\xa5-\x97\x05\x87\x89\x8c\xf2?\x90\x9c\x0f\xa0\x00\x00\xe0\x94\xd4M\x81\xe1\x8fF\xe2\u03f5\xc1\xfc\xf5\x04\x1b\xc8V\x97g\xd1\x00\x8a\a\xb4B\xe6\x84\xf6Z\xa4\x00\x00\u07d4\xd4OJ\xc5\xfa\xd7k\xdc\x157\xa3\xb3\xafdr1\x9bA\r\x9d\x89V\xbcu\xe2\xd61\x00\x00\x00\u07d4\xd4O^\xdf+\xcf$3\xf2\x11\xda\xdd\f\xc4P\xdb\x1b\x00\x8e\x14\x89\x0e~\xeb\xa3A\vt\x00\x00\xe0\x94\xd4Oj\u00d2;_\xd71\xa4\xc4YD\xecO~\xc5*j\xe4\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xd4[3A\xe8\xf1\\\x802\x93 \u00d7~;\x90\xe7\x82j~\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xd4]]\xaa\x13\x8d\xd1\xd3t\xc7\x1b\x90\x19\x91h\x11\xf4\xb2\nN\x89\x1f9\x9b\x148\xa1\x00\x00\x00\u07d4\xd4`\xa4\xb9\b\xdd+\x05gY\xb4\x88\x85\vf\xa88\xfcw\xa8\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xd4g\xcf\x06L\bq\x98\x9b\x90\u0632\xeb\x14\xcc\xc6;6\b#\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xd4k\xaea\xb0'\xe5\xbbB.\x83\xa3\xf9\xc9?<\x8f\xc7}'\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd4o\x82#E)\x82\xa1\xee\xa0\x19\xa8\x81n\xfc-o\xc0\ah\x89\amA\xc6$\x94\x84\x00\x00\u07d4\xd4uG\u007f\xa5c\x90\xd30\x17Q\x8dg\x11\x02\u007f\x05\U0008dfc9k\x11\x133\xd4\xfdL\x00\x00\u07d4\xd4|$.\xdf\xfe\xa0\x91\xbcT\xd5}\xf5\xd1\xfd\xb91\x01Gl\x89\x9d\xf7\u07e8\xf7`H\x00\x00\u07d4\xd4}\x86\x85\xfa\xee\x14|R\x0f\u0646p\x91u\xbf/\x88k\xef\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd4\u007fP\u07c9\xa1\xcf\xf9e\x13\xbe\xf1\xb2\xae:)q\xac\xcf,\x89-\x89W}}@ \x00\x00\u07d4\u0502\xe7\xf6\x8eA\xf28\xfeQx)\xde\x15G\u007f\xe0\xf6\xdd\x1d\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\u0507\x9f\xd1+\x1f:'\xf7\xe1\tv\x1b#\xca4\xfa#\x06K\x1c\xaf\x00Qn(pJ\x82\xa4\xf8\x89Hz\x9a0E9D\x00\x00\u07d4\xd5\x00\xe4\xd1\u0242K\xa9\xf5\xb65\u03e3\xa8\xc2\u00cb\xbdL\xed\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xd5\b\u04dcp\x91oj\xbcL\xc7\xf9\x99\xf0\x11\xf0w\x10X\x02\x89\x05rM$\xaf\xe7\u007f\x00\x00\u07d4\xd5\x0f\u007f\xa0>8\x98v\u04d0\x8b`\xa57\xa6pc\x04\xfbV\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xd5\x13\xa4P\x80\xff/\xeb\xe6,\u0545J\xbe)\xeeDg\xf9\x96\x89\bN\x13\xbcO\xc5\xd8\x00\x00\u07d4\xd5'o\f\xd5\xff\xd5\xff\xb6?\x98\xb5p=U\x94\xed\xe0\x83\x8b\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xd5)KfbB0;m\xf0\xb1\u020d7B\x9b\xc8\xc9e\xaa\x89\x10M\r\x00\u04b7\xf6\x00\x00\u07d4\xd5*\xec\xc6I98\xa2\x8c\xa1\xc3g\xb7\x01\xc2\x15\x98\xb6\xa0.\x89;\xa1\x91\v\xf3A\xb0\x00\x00\u07d4\xd5\x99x\xee \xa3\x8c?I\x8dc\xd5\u007f1\xa3\x9fj\x06\x8a\x022\xb3o\xfcg*\xb0\x00\x00\u07d4\u05568\xd3\xc5\xfa\xa7q\x1b\xf0\x85t_\x9d[\xdc#\u0518\u0609lk\x93[\x8b\xbd@\x00\x00\xe0\x94\u055d\x92\xd2\xc8p\x19\x80\xcc\a<7]r\n\xf0dt<\f\x8a\x04\x05\xfd\xf7\u5bc5\xe0\x00\x00\u07d4\u0567\xbe\xc32\xad\xde\x18\xb3\x10KW\x92Tj\xa5\x9b\x87\x9bR\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0571\x17\xec\x11n\xb8FA\x89a\xeb~\xdbb\x9c\xd0\xddi\u007f\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\u0572\x84\x04\x010\xab\xf7\xc1\xd1cq#q\xcc~(\xadf\u0689j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\u0579\xd2w\u062a\xd2\x06\x97\xa5\x1fv\xe2\tx\x99k\xff\xe0U\x89\a\xc3\xfe<\aj\xb5\x00\x00\u07d4\u057d^\x84U\xc10\x16\x93W\xc4q\xe3\u6077\x99jrv\x89-\x9e(\x8f\x8a\xbb6\x00\x00\u07d4\xd5\u02e5\xb2k\xea]s\xfa\xbb\x1a\xba\xfa\xcd\xef\x85\xde\xf3h\u0309\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd5\xceU\u0476/YC\xc0?\x89\b\xe3\x1f\xe1h\x9d\x8a\x00\x00\u07d4\xd6\x06Q\xe3\x93x4#\xe5\xcc\x1b\xc5\xf8\x89\xe4N\xf7\xea$>\x89\x15\x9ev7\x11)\xc8\x00\x00\u07d4\xd6\t\xbfO\x14n\xeak\r\xc8\xe0m\xdc\xf4D\x8a\x1f\xcc\xc9\xfa\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd6\t\xec\v\xe7\r\n\xd2ong\xc9\xd4v+R\xeeQ\x12,\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd6\nRX\a(R\r\xf7Tk\xc1\xe2\x83)\x17\x88\u06ee\f\x8964\x89\xef?\xf0\xd7\x00\x00\u07d4\xd6\v$s!\xa3*Z\xff\xb9k\x1e'\x99'\xccXM\xe9C\x89z\xd0 
\xd6\xdd\xd7v\x00\x00\u07d4\xd6\x11\x02v\xcf\xe3\x1eB\x82ZW\u007fkC]\xbc\xc1\f\xf7d\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xd6\x12Y{\xc3\x17C\u01c63\xf63\xf29\xb1\xe9Bk\xd9%\x8a\x10\x17\xf7\u07d6\xbe\x17\x80\x00\x00\u07d4\xd6#J\xafE\xc6\xf2.f\xa2%\xff\xb9:\xddb\x9bN\xf8\x0f\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd6.\u06d6\xfc\u259a\xaflT^\x96|\xf1\xc0\xbc\x80R\x05\x89\x04\xa5eSjZ\u0680\x00\u07d4\xd60\v2\x15\xb1\x1d\xe7b\xec\xdeKp\xb7\x92}\x01)\x15\x82\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd69]\xb5\xa4\xbbf\xe6\x0fL\xfb\xcd\xf0\x05{\xb4\xd9xb\xe2\x891T\xc9r\x9d\x05x\x00\x00\xe0\x94\xd6J-P\xf8\x85\x857\x18\x8a$\xe0\xf5\r\xf1h\x1a\xb0~\u05ca\b7Z*\xbc\xca$@\x00\x00\u07d4\xd6X\n\xb5\xedL}\xfaPo\xa6\xfed\xad\\\xe1)pw2\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd6Y\x8b\x13\x86\xe9<\\\u02d6\x02\xffK\xbb\xec\xdb\xd3p\x1d\u0109\f%\xf4\xec\xb0A\xf0\x00\x00\u07d4\xd6dM@\xe9\v\xc9\u007f\xe7\xdf\xe7\u02bd2i\xfdW\x9b\xa4\xb3\x89\b\x9e\x91y\x94\xf7\x1c\x00\x00\xe0\x94\xd6g\f\x03m\xf7T\xbeC\xda\u074fP\xfe\xea(\x9d\x06\x1f\u058a\x01D\xa2\x904H\xce\xf7\x80\x00\u07d4\xd6hR:\x90\xf0)=e\xc58\xd2\xddlWg7\x10\x19n\x89\x02$,0\xb8S\xee\x00\x00\u07d4\xd6j\xb7\x92\x94\aL\x8bb}\x84-\xabA\xe1}\xd7\f]\xe5\x8965\u026d\xc5\u07a0\x00\x00\u0794\xd6j\xcc\r\x11\xb6\x89\u03a6\xd9\xea_\xf4\x01L\"J]\xc7\u0108\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xd6m\xdf\x11Y\xcf\"\xfd\x8czK\xc8\u0540wV\xd43\xc4>\x89wC\"\x17\xe6\x83`\x00\x00\u07d4\u0587\xce\xc0\x05\x90\x87\xfd\xc7\x13\xd4\xd2\xd6^w\xda\xef\xed\xc1_\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\u0588\xe7\x85\u024f\x00\xf8K:\xa1S3U\u01e2X\xe8yH\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\u05a2.Y\x8d\xab\u04ce\xa6\xe9X\xbdy\u050d\u0756\x04\xf4\u07c965\u026d\xc5\u07a0\x00\x00\u07d4\u05a7\xacM\xe7\xb5\x10\xf0\xe8\xdeQ\x9d\x97?\xa4\xc0\x1b\xa84\x00\x89e\xea=\xb7UF`\x00\x00\u07d4\u05ac\xc2 \xba.Q\xdf\xcf!\xd4C6\x1e\xeav\\\xbd5\u0609\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u05ac\xff\u043f\u065c8.{\xd5o\xf0\xe6\x14J\x9eR\xb0\x8e\x89\b\xacr0H\x9e\x80\x00\x00\u07d4\xd6\xc0\u043c\x93\xa6.%qtp\x0e\x10\xf0$\u0232?\x1f\x87\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xd6\xcf\\\x1b\u03dd\xa6b\xbc\xea\"U\x90P\x99\xf9\xd6\xe8M\u030a\x01\u011eB\x01W\xd9\xc2\x00\x00\u07d4\xd6\xd05r\xa4RE\xdb\xd46\x8cO\x82\xc9W\x14\xbd!g\xe2\x89?\x00\xc3\xd6f\x86\xfc\x00\x00\u07d4\xd6\xd6wiX\xee#\x14:\x81\xad\xad\xeb\b8 \t\xe9\x96\u0089\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xd6\xd9\xe3\x0f\bB\x01*qv\xa9\x17\xd9\xd2\x04\x8c\xa0s\x87Y\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd6\xe0\x9e\x98\xfe\x13\x003!\x04\xc1\xca4\xfb\xfa\xc5T6N\u0649lk\x93[\x8b\xbd@\x00\x00\u07d4\xd6\xe8\xe9z\u90db\x9e\xe5\a\xee\xdb(\xed\xfbtw\x03\x149\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd6\uea18\u052e+q\x80'\xa1\x9c\xe9\xa5\xebs\x00\xab\xe3\u0289\x01}J\xce\xeec\u06c0\x00\xe0\x94\xd6\xf1\xe5[\x16\x94\b\x9e\xbc\xb4\xfe}x\x82\xaaf\u0217av\x8a\x04<#\xbd\xbe\x92\x9d\xb3\x00\x00\u07d4\xd6\xf4\xa7\xd0N\x8f\xaf \xe8\xc6\ub15c\xf7\xf7\x8d\xd2=z\x15\x89\a$\xde\xd1\xc7H\x14\x00\x00\u07d4\xd6\xfc\x04F\u01a8\xd4\n\xe3U\x1d\xb7\xe7\x01\xd1\xfa\x87nJI\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd7\x03\u01a4\xf1\x1d`\x19Ey\u054c'f\xa7\xef\x16\xc3\n)\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd7\x05%\x19uj\xf4%\x90\xf1S\x91\xb7#\xa0?\xa5d\xa9Q\x89\xfa61H\r\x01\xfd\x80\x00\u07d4\xd7\na+\xd6\u0769\xea\xb0\xdd\xdc\xffJ\xafA\"\u04cf\xea\xe4\x89\x1dF\x01b\xf5\x16\xf0\x00\x00\u07d4\xd7\n\xd2\xc4\xe9\uefe67\xefV\xbdHj\u04a1\xe5\xbc\xe0\x93\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xd7\x14\f\x8eZC\a\xfa\xb0\xcc'\xba\u0752\x95\x01\x8b\xf8yp\x89\x05\xf1\x01kPv\xd0\x00\x00\u07d4\xd7\x16J\xa2a\xc0\x9a\u0672\xb5\x06\x8dE>\xd8\xebj\xa10\x83\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xd7\x1eC\xa4Qw\xadQ\xcb\xe0\xf7!\x84\xa5\xcbP9\x17(Z\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd7\x1f\xb10\xf0\x15\fVRi\xe0\x0e\xfbC\x90+R\xa4U\xa6\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd7\"W8\xdc\xf3W\x848\xf8\xe7\u0233\x83~B\xe0J&/\x89\x18+\x8c\ubec3\xaa\x00\x00\u07d4\xd7'MP\x80M\x9cw\u0693\xfaH\x01V\xef\xe5{\xa5\x01\u0789i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xd71\xbbk_<79^\t\u03ac\xcd\x14\xa9\x18\xa6\x06\a\x89\x89\u0556{\xe4\xfc?\x10\x00\x00\xe0\x94\xd7>\xd2\u0645\xb5\xf2\x1bU\xb2td;\xc6\xda\x03\x1d\x8e\u074d\x8a\nm\xd9\f\xaeQ\x14H\x00\x00\u07d4\xd7D\xac~S\x10\xbeijc\xb0\x03\xc4\v\xd097\x05a\u0189Z\x87\xe7\xd7\xf5\xf6X\x00\x00\xe0\x94\xd7Jn\x8dj\xab4\u0385\x97h\x14\xc12{\xd6\xea\a\x84\u048a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xd7ZP*[gr\x87G\x0fe\u016aQ\xb8|\x10\x15\x05r\x8910\xb4dc\x85t\x00\x00\u07d4\xd7m\xba\xeb\xc3\rN\xf6{\x03\xe6\xe6\xec\xc6\xd8N\x00MP-\x89mv\xb9\x18\x8e\x13\x85\x00\x00\u07d4\xd7q\xd9\xe0\u028a\b\xa1\x13wW1CN\xb3'\x05\x99\xc4\r\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xd7x\x8e\xf2\x86X\xaa\x06\xccS\xe1\xf3\xf0\xdeX\xe5\xc3q\xbex\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4\xd7x\x92\xe2';#]v\x89\xe40\xe7\xae\ud73c\xe8\xa1\xf3\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u05c1\xf7\xfc\t\x18F\x11V\x85p\xb4\x98n,r\x87+~\u0409\x01\x15\x95a\x06]]\x00\x00\u07d4\u05c5\xa8\xf1\x8c8\xb9\xbcO\xfb\x9b\x8f\xa8\xc7r{\xd6B\xee\x1c\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u05ce\xcd%\xad\xc8k\xc2\x05\x1d\x96\xf6Sd\x86kB\xa4&\xb7\x89\xd20X\xbf/&\x12\x00\x00\xe0\x94\u05cf\x84\xe3\x89D\xa0\xe0%_\xae\xceH\xbaIP\u053d9\u048a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\u05d4\x83\xf6\xa8DO%I\xd6\x11\xaf\xe0,C-\x15\xe1\x10Q\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u05d85\xe4\x04\xfb\x86\xbf\x84_\xba\t\rk\xa2^\f\x88f\xa6\x89\x82\x1a\xb0\xd4AI\x80\x00\x00\u07d4\u05da\xff\x13\xba-\xa7]F$\f\xac\n$g\xc6V\x94\x98#\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\u05dd\xb5\xabCb\x1az=\xa7\x95\xe5\x89)\xf3\xdd%\xafg\u0649lj\xccg\u05f1\xd4\x00\x00\u07d4\u05e1C\x1e\xe4S\xd1\xe4\x9a\x05P\xd1%hy\xb4\xf5\xd1\x02\x01\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4\u05ed\t\xc6\xd3&WhSU\xb5\xc6\uc39fW\xb4\ube42\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\u05f7@\xdf\xf8\xc4Wf\x8f\xdft\xf6\xa2f\xbf\xc1\u0737#\xf9\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xd7\u0080>\u05f0\xe0\x83sQA\x1a\x8ef7\xd1h\xbc[\x05\x8a\x06A\xda\xf5\xc9\x1b\xd95\x80\x00\u07d4\xd7\xc6&]\xea\x11\x87l\x90;q\x8eL\u062b$\xfe&[\u0789lk\x93[\x8b\xbd@\x00\x00\u07d4\xd7\xca\u007f\xdc\xfe\xbeE\x88\xef\xf5B\x1d\x15\"\xb6\x13(\xdf{\xf3\x89\xd8\xe6\x00\x1el0+\x00\x00\u07d4\xd7\u037dA\xff\xf2\r\xf7'\xc7\vbU\xc1\xbav\x06\x05Th\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd7\xd1W\xe4\xc0\xa9d7\xa6\u0485t\x1d\xd2>\xc46\x1f\xa3k\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd7\xd2\xc6\xfc\xa8\xad\x1fu9R\x10\xb5}\xe5\xdf\xd6s\x939\t\x89\x12nr\xa6\x9aP\xd0\x00\x00\xe0\x94\xd7\xd3\xc7Y 
Y\x048\xb8,>\x95\x15\xbe.\xb6\xedz\x8b\x1a\x8a\f\xb4\x9bD\xba`-\x80\x00\x00\u07d4\xd7\xd7\xf2\u02a4b\xa4\x1b;0\xa3J\xeb;\xa6\x10\x10\xe2bo\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd7\xe7J\xfd\xba\xd5^\x96\u03bcZ7O,\x8bv\x86\x80\xf2\xb0\x89\x05]\xe6\xa7y\xbb\xac\x00\x00\xe0\x94\xd7\xeb\x901b'\x1c\x1a\xfa5\xfei\xe3s\"\u0224\u049b\x11\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xd7\xeb\u0779\xf99\x87w\x9bh\x01U7T8\xdbe\xaf\xcbj\x89\x05t\x1a\xfe\xff\x94L\x00\x00\u07d4\xd7\xef4\x0ef\xb0\u05ef\xcc\xe2\n\x19\xcb{\xfc\x81\xda3\xd9N\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xd7\xf3p\u053e\xd9\xd5|oI\u0259\xder\x9e\xe5i\xd3\xf4\xe4\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd7\xfa_\xfb`H\xf9o\xb1\xab\xa0\x9e\xf8{\x1c\x11\xddp\x05\xe4\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd8\x06\x9f\x84\xb5!I?G\x15\x03\u007f2&\xb2_3\xb6\x05\x86\x89g\x8a\x93 b\xe4\x18\x00\x00\u0794\xd8\x15\xe1\xd9\xf4\xe2\xb5\xe5~4\x82k|\xfd\x88\x81\xb8Th\x90\x88\xf0\x15\xf2W6B\x00\x00\u07d4\xd8\x1b\xd5K\xa2\xc4Jok\xeb\x15a\u058b\x80\xb5DNm\u0189?\x17\r~\xe4\"\xf8\x9c\x80-1({\x96q\xe8\x1c\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4\xd8K\x92/xA\xfcWt\xf0\x0e\x14`J\xe0\xdfB\xc8U\x1e\x89\xd9o\u0390\u03eb\xcc\x00\x00\u07d4\xd8U\xb0<\xcb\x02\x9awG\xb1\xf0s\x03\xe0\xa6dy59\u0209lk\x93[\x8b\xbd@\x00\x00\u07d4\xd8_\u07af*a\xf9]\xb9\x02\xf9\xb5\xa5<\x9b\x8f\x92f\u00ec\x89l\xf6Z~\x90G(\x00\x00\u07d4\xd8q^\xf9\x17o\x85\v.0\xeb\x8e8'\a\xf7w\xa6\xfb\xe9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd8t\xb9\u07eeEj\x92\x9b\xa3\xb1\xa2~W,\x9b,\xec\u07f3\x89\t79SM(h\x00\x00\u07d4\u0613\n9\xc7sW\xc3\n\u04e0`\xf0\v\x06\x04c1\xfdb\x89,s\xc97t,P\x00\x00\u07d4\u061b\xc2q\xb2{\xa3\xabib\xc9JU\x90\x06\xae8\xd5\xf5j\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0637}\xb9\xb8\x1b\xbe\x90B{b\xf7\x02\xb2\x01\xff\u009f\xf6\x18\x892m\x1eC\x96\xd4\\\x00\x00\u07d4\xd8\xcdd\xe0(N\xecS\xaaF9\xaf\xc4u\b\x10\xb9\u007f\xabV\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd8\xd6C\x84$\x9bwg\x94\x06;V\x98x\xd5\xe3\xb50\xa4\xb2\x89\t\xa0C\u0432\xf9V\x80\x00\u07d4\xd8\xd6T \xc1\x8c#'\xccZ\xf9t%\xf8W\xe4\xa9\xfdQ\xb3\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xd8\xe5\xc9g^\xf4\xde\xed&k\x86\x95o\xc4Y\x0e\xa7\u0522}\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xd8\xe8GB\x92\xe7\xa0Q`L\xa1d\xc0pw\x83\xbb(\x85\xe8\x8a\x02\xd4\xca\x05\xe2\xb4<\xa8\x00\x00\u07d4\xd8\xebxP>\xc3\x1aT\xa9\x016x\x1a\xe1\t\x00Lt2W\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd8\xee\xf4\xcfK\xeb\x01\xee \xd1\x11t\x8ba\xcbM?d\x1a\x01\x89\x94\x89#z\u06daP\x00\x00\u07d4\xd8\xf4\xba\xe6\xf8M\x91\rm}Z\xc9\x14\xb1\xe6\x83r\xf9A5\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xd8\xf6 
6\xf0;v5\xb8X\xf1\x10?\x8a\x1d\x90\x19\xa8\x92\xb6\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\xd8\xf6e\xfd\x8c\xd5\u00bc\xc6\xdd\xc0\xa8\xaeR\x1eM\u01aa``\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xd8\xf9$\fU\xcf\xf05RB\x80\xc0\x9e\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xd8\xfe\b\x8f\xff\u0394\x8fQ7\xee#\xb0\x1d\x95\x9e\x84\xacB#\x89\f[T\xa9O\xc0\x17\x00\x00\u07d4\xd9\x0f0\t\xdbC~N\x11\u01c0\xbe\u0209os\x8de\xef\r\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd9\x10;\xb6\xb6zU\xa7\xfe\xce-\x1a\xf6-E|!x\x94m\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd9\x13\xf0w\x19Iu\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xd9D\u0226\x9f\xf2\xca\x12Ii\f\x12)\xc7\x19/6%\x10b\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xd9JW\x88*Rs\x9b\xbe*\x06G\xc8\f$\xf5\x8a+O\x1c\x89H\xb5N*\xdb\xe1+\x00\x00\xe0\x94\xd9SB\x95<\x8a!\xe8\xb65\xee\xfa\u01c1\x9b\xea0\xf1pG\x8a\x13\xf0l\u007f\xfe\xf0]@\x00\x00\u07d4\xd9\\\x90\xff\xbeT\x84\x86G\x80\xb8gIJ\x83\u0212V\xd6\xe4\x89X\xe7\x92n\xe8X\xa0\x00\x00\u07d4\xd9g\x11T\x0e.\x99\x83C\xd4\xf5\x90\xb6\xfc\x8f\xac;\xb8\xb3\x1d\x89_Z@h\xb7\x1c\xb0\x00\x00\u07d4\xd9j\xc2Pt\t\u01e3\x83\xab.\xee\x18\"\xa5\xd78\xb3kV\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xd9m\xb3;{Z\x95\f>\xfa-\xc3\x1b\x10\xba\x10\xa52\uf1c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xd9wYe\xb7\x16Gfu\xa8\xd5\x13\xeb\x14\xbb\xf7\xb0|\xd1J\x8a\x01\x13.m-#\xc5\xe4\x00\x00\u07d4\xd9{\xc8J\xbdG\xc0[\xbfE{.\xf6Y\xd6\x1c\xa5\xe5\u43c9\x06\x9d\x17\x11\x9d\u0168\x00\x00\u07d4\xd9\u007fE&\u07a9\xb1c\xf8\xe8\xe3:k\u03d2\xfb\x90}\xe6\xec\x89\x0feJ\xafM\xb2\xf0\x00\x00\u07d4\xd9\u007f\xe6\xf5?*X\xf6\xd7mu*\xdft\xa8\xa2\xc1\x8e\x90t\x89\x10\xcd\xf9\xb6\x9aCW\x00\x00\u07d4\u0659\x99\xa2I\r\x94\x94\xa50\xca\xe4\xda\xf3\x85T\xf4\xddc>\x89\x06\x81U\xa46v\xe0\x00\x00\u07d4\u065d\xf7B\x1b\x93\x82\xe4,\x89\xb0\x06\xc7\xf0\x87p*\aW\xc0\x89\x1a\x05V\x90\xd9\u06c0\x00\x00\xe0\x94\u0677\x83\xd3\x1d2\xad\xc5\x0f\xa3\xea\u02a1]\x92\xb5h\xea\xebG\x8a\a3\xaf\x907L\x1b(\x00\x00\u07d4\xd9\xd3p\xfe\xc65v\xab\x15\xb3\x18\xbf\x9eX6M\u00a3U*\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xd9\xd4/\xd1>\xbdK\xf6\x9c\xac^\x9c~\x82H:\xb4m\xd7\xe9\x8a\x01!\xeah\xc1\x14\xe5\x10\x00\x00\u07d4\xd9\xe2~\xb0}\xfcq\xa7\x06\x06\f\u007f\a\x928\u0293\xe8\x859\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xd9\xe3\x85~\xfd\x1e *D\x17p\xa7w\xa4\x9d\xccE\xe2\xe0\u04c9\f\x1d\xaf\x81\u0623\xce\x00\x00\u07d4\xd9\xec.\xfe\x99\xff\\\xf0\r\x03\xa81{\x92\xa2J\xefD\x1f~\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xd9\xec\x8f\xe6\x9bw\x16\xc0\x86Z\xf8\x88\xa1\x1b+\x12\xf7 \xed3\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xd9\xf1\xb2d\b\xf0\xecg\xad\x1d\ro\xe2.\x85\x15\xe1t\x06$\x89\x01M\x11 
\u05f1`\x00\x00\u07d4\xd9\xf5G\xf2\xc1\xde\x0e\u064aS\xd1a\xdfWc]\xd2\x1a\x00\xbd\x89\x05V\xf6L\x1f\xe7\xfa\x00\x00\u07d4\xd9\xff\x11]\x01&l\x9fs\xb0c\xc1\xc28\xef5e\xe6;6\x89$\xdc\xe5M4\xa1\xa0\x00\x00\u07d4\xda\x06\x04N)#&\xffil\x0091h\xceF\xff\xac9\xec\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xda*\x14\xf9r@\x15\u05d0\x14\xed\x8eY\th\x1dYaH\xf1\x89\x02\xa1\x0f\x0f\x8a\x91\xab\x80\x00\u07d4\xda*\u054ew\xde\xdd\xed\xe2\x18vF\xc4e\x94Z\x8d\xc3\xf6A\x89#\xc7W\a+\x8d\xd0\x00\x00\u07d4\xda0\x17\xc1P\xdd\r\xce\u007f\u03c8\x1b\nH\xd0\xd1\xc7V\xc4\u01c9\x05k\xf9\x1b\x1ae\xeb\x00\x00\u07d4\xda4\xb2\xea\xe3\v\xaf\xe8\xda\xec\xcd\xe8\x19\xa7\x94\u0349\xe0\x95I\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xdaJ_U\u007f;\xab9\n\x92\xf4\x9b\x9b\x90\n\xf3\fF\xae\x80\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xdaPU7S\u007f\xfb3\xc4\x15\xfe\xc6Ni\xba\xe0\x90\xc5\xf6\x0f\x89\b\xacr0H\x9e\x80\x00\x00\u07d4\xdai\x8dd\xc6\\\u007f+,rS\x05\x9c\xd3\u0441\u0619\xb6\xb7\x89\x10\x04\xe2\xe4_\xb7\xee\x00\x00\u07d4\xdaw2\xf0/.'.\xaf(\u07d7.\xcc\r\xde\xed\x9c\xf4\x98\x89\v \xbf\xbfig\x89\x00\x00\u07d4\xdaz\xd0%\xeb\xde%\xd2\"C\u02c3\x0e\xa1\xd3\xf6JVc#\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\u0685]SG\u007fP^\xc4\xc8\xd5\u8ed1\x80\u04c6\x81\x11\x9c\x8a\x01/\x93\x9c\x99\xed\xab\x80\x00\x00\u07d4\u0687^N/<\xab\xe4\xf3~\x0e\xae\xd7\xd1\xf6\xdc\xc6\xff\xefC\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u068b\xbe\xe1\x82\xe4U\xd2\t\x8a\xcb3\x8amE\xb4\xb1~\u0636\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0698.\x96C\xff\xec\xe7#\aZ@\xfewnZ\xce\x04\xb2\x9b\x89\b\xb8\xb6\u0259\x9b\xf2\x00\x00\u07d4\u069fUF\tF\u05ff\xb5p\xdd\xecu|\xa5w;XB\x9a\x89\x1b\x84]v\x9e\xb4H\x00\x00\u07d4\u06a1\xbdz\x91H\xfb\x86\\\xd6\x12\xdd5\xf1b\x86\x1d\x0f;\u0709\xa68\xabr\xd9,\x13\x80\x00\xe0\x94\u06a6<\xbd\xa4]\u0507\xa3\xf1\xcdJtj\x01\xbb^\x06\v\x90\x8a\x01\x04\x16\u0670*\x89$\x00\x00\u07d4\u06a7v\xa6uDi\u05f9&z\x89\xb8g%\xe7@\xda\x0f\xa0\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\u06ac\x91\xc1\xe8Y\xd5\xe5~\xd3\bKP \x0f\x97f\xe2\xc5+\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\u06ac\xda\xf4\"&\xd1\\\xb1\u03d8\xfa\x15\x04\x8c\u007fL\xee\xfei\x89\x10CV\x1a\x88)0\x00\x00\xe0\x94\u06b6\xbc\u06c3\xcf$\xa0\xae\x1c\xb2\x1b;[\x83\xc2\xf3\x82I'\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\u06bb\b\x89\xfc\x04)&\xb0^\xf5{% \x91\n\xbcKAI\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u06bc\"PB\xa6Y,\xfa\x13\xeb\xe5N\xfaA\x04\bx\xa5\xa2\x89\x0e\x11\xfa\xd5\xd8\\\xa3\x00\x00\u07d4\xda\xc0\xc1w\xf1\x1c\\>>x\xf2\xef\xd6c\xd12!H\x85t\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xda\xd16\xb8\x81x\xb4\x83zlx\x0f\xeb\xa2&\xb9\x85i\xa9L\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xda\xdb\xfa\xfd\x8bb\xb9*$\xef\xd7RV\u0743\xab\xdb\u05fb\u06c9\x01\x11du\x9f\xfb2\x00\x00\u07d4\xda\xdc\x00\xaby'`\xaa4\x15i\xfa\x9f\xf5\x98&\x84\x85JJ2\x8an\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xda\xe7 \x1e\xab\x8c\x063\x02\x93\ri9)\xd0\u007f\x95\xe7\x19b\x89\x91\xae\xc0(\xb4\x19\x81\x00\x00\u07d4\xda\xed\u052d\x10{'\x1e\x89Hl\xbf\x80\xeb\xd6!\u0757Ex\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdb\x04\xfa\xd9\u011f\x9e\x88\v\xeb\x8f\xcf\x1d:8\x90\u4cc4o\x89CZ\xe6\xcc\fX\xe5\x00\x00\u07d4\xdb\f\u01cft\u0642{\u070ads'n\xb8O\u0717b\x12\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdb\x12\x93\xa5\x06\xe9\f\xad*Y\xe1\xb8V\x1f^f\x96\x1ag\x88\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdb\x19\xa3\x98\"06\x8f\x01w!\x9c\xb1\f\xb2Y\u0372%|\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdb#\xa6\xfe\xf1\xaf{X\x1ew,\xf9\x18\x82\u07b2Qo\xc0\xa7\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xdb$O\x97\xd9\xc4K\x15\x8a@\xed\x96\x06\xd9\xf7\xbd8\x9131\x89\x05\x87\x88\u02d4\xb1\xd8\x00\x00\u07d4\xdb(\x8f\x80\xff\xe22\u00baG\u0314\xc7c\xcfo\u0278+\r\x89\x04\x9b\x9c\xa9\xa6\x944\x00\x00\u07d4\xdb*\f\x9a\xb6M\xf5\x8d\u07f1\u06ec\xf8\xba\r\x89\xc8[1\xb4\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xdb4t^\u0785v\xb4\x99\xdb\x01\xbe\xb7\xc1\xec\u0685\xcfJ\xbe\x89\x04V9\x18$O@\x00\x00\u07d4\xdb?%\x8a\xb2\xa3\xc2\xcf3\x9cD\x99\xf7ZK\xd1\xd3G.\x9e\x89QP\xae\x84\xa8\xcd\xf0\x00\x00\u07d4\xdbK\xc8;\x0ek\xaa\xdb\x11V\xc5\xcf\x06\xe0\xf7!\x80\x8cR\u01c9/\xb4t\t\x8fg\xc0\x00\x00\u07d4\xdbc\x12-\xe7\x03}\xa4\x97\x151\xfa\u9bc5\x86x\x86\u0192\x89\x0f\x04%\xb0d\x1f4\x00\x00\u07d4\xdbl*s\xda\xc7BJ\xb0\xd01\xb6ga\x12%f\xc0\x10C\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xdbnV\f\x9b\xc6 \u053e\xa3\xa9MG\xf7\x88\v\xf4\u007f-_\x89\x04\xda\x0f\xdf\xcf\x05v\x00\x00\u07d4\xdbo\xf7\x1b=\xb0\x92\x8f\x83\x9e\x05\xa72;\xfbW\u049c\x87\xaa\x891T\xc9r\x9d\x05x\x00\x00\u07d4\xdbsF\vY\xd8\xe8PE\xd5\xe7R\xe6%Y\x87^BP.\x8963\x03\"\xd5#\x8c\x00\x00\u07d4\xdbw\xb8\x8d\xcbq/\xd1~\xe9\x1a[\x94t\x8dr\f\x90\xa9\x94\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdb}@7\b\x1fle\xf9Gk\x06\x87\xd9\u007f\x1e\x04M\n\x1d\x89#\xc7W\a+\x8d\xd0\x00\x00\xe0\x94\u06c8.\xac\xed\xd0\xef\xf2cQ\x1b1*\u06fcY\u01b8\xb2[\x8a\x01\xedO\xdez\"6\xb0\x00\x00\u07d4\u06d3q\xb3\fL\x84NY\xe0>\x92K\xe6\x06\xa98\xd1\xd3\x10\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u06e4ym\f\xebM:\x83k\x84\xc9o\x91\n\xfc\x10?[\xa0\x89\t\b\xf4\x93\xf77A\x00\x00\u07d4\u06ed\xc6\x1e\xd5\xf0F\n\u007f\x18\xe5\x1b/\xb2aM\x92d\xa0\xe0\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\u06f6\xacH@'\x04\x16B\xbb\xfd\x8d\x80\xf9\xd0\xc1\xcf3\xc1\xeb\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u06fc\xbby\xbfG\x9aB\xadq\xdb\u02b7{Z\u07ea\x87,X\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\xdb\xc1\xce\x0eI\xb1\xa7\x05\xd2. 
7\xae\xc8x\xee\ru\xc7\x03\x89\r\x8drkqw\xa8\x00\x00\u07d4\xdb\xc1\xd0\xee+\xabS\x11@\xde\x13w\"\xcd6\xbd\xb4\xe4q\x94\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xdb\u015e\u0609s\u07ad1\b\x84\":\xf4\x97c\xc0P0\xf1\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u0794\xdb\xc6ie\xe4&\xff\x1a\xc8z\xd6\xebx\xc1\xd9Rq\x15\x8f\x9f\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xdb\xcb\xcdzW\ua7724\x9b\x87\x8a\xf3K\x1a\xd6B\xa7\xf1\u0449\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xdb\xd5\x1c\xdf,;\xfa\xcd\xff\x10b!\xde.\x19\xadmB\x04\x14\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xdb\xd7\x1e\xfaK\x93\u0209\xe7e\x93\xde`\x9c;\x04\u02ef\xbe\b\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xdb\xf5\xf0a\xa0\xf4\x8e^ia\x879\xa7}.\xc1\x97h\xd2\x01\x89\b=lz\xabc`\x00\x00\u07d4\xdb\xf8\xb19g\xf5Q%'-\xe0V%6\xc4P\xbaVU\xa0\x89n\xf5x\xf0n\f\xcb\x00\x00\u07d4\xdb\xfb\x1b\xb4d\xb8\xa5\x8eP\r.\xd8\u0797,E\xf5\xf1\xc0\xfb\x89V\xbcu\xe2\xd61\x00\x00\x00\xe0\x94\xdc\x06~\xd3\xe1-q\x1e\xd4u\xf5\x15n\xf7\xe7\x1a\x80\xd94\xb9\x8a\x02\x05\xb4\u07e1\xeetx\x00\x00\u07d4\xdc\b\u007f\x93\x90\xfb\x9e\x97j\xc2:\xb6\x89TJ\tB\xec !\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xdc\x1e\xb9\xb6\xe6CQ\xf5d$P\x96E\xf8>y\xee\xe7l\xf4\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xdc\x1f\x19ya_\b!@\xb8\xbbx\xc6{'\xa1\x94'\x13\xb1\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\xdc#\xb2`\xfc\xc2n}\x10\xf4\xbd\x04J\xf7\x94W\x94`\xd9\u0689\x1b\x1bk\u05efd\xc7\x00\x00\u07d4\xdc)\x11\x97E\xd23s \xdaQ\xe1\x91\x00\xc9H\u0640\xb9\x15\x89\b\xacr0H\x9e\x80\x00\x00\u07d4\xdc-\x15\xa6\x9fk\xb3;$j\xef@E\aQ\xc2\xf6uj\u0489l4\x10\x80\xbd\x1f\xb0\x00\x00\u07d4\xdc=\xaeY\xed\x0f\xe1\x8bXQ\x1eo\xe2\xfbi\xb2\x19h\x94#\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xdc?\x0evr\xf7\x1f\xe7R[\xa3\v\x97U\x18: \xb9\x16j\x8a\x02\b\x9c\xf5{[>\x96\x80\x00\xe0\x94\xdcCE\u0581.\x87\n\xe9\fV\x8cg\xd2\xc5g\u03f4\xf0<\x8a\x01k5-\xa5\xe0\xed0\x00\x00\u07d4\xdcD'[\x17\x15\xba\xea\x1b\x03EsZ)\xacB\xc9\xf5\x1bO\x89?\x19\xbe\xb8\xdd\x1a\xb0\x00\x00\u07d4\xdcF\xc13%\u034e\xdf\x020\xd0h\x89d\x86\xf0\a\xbfN\xf1\x89Hz\x9a0E9D\x00\x00\u07d4\xdcQ\xb2\u071d$z\x1d\x0e[\xc3l\xa3\x15oz\xf2\x1f\xf9\xf6\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xdcS\x05\xb4\x02\n\x06\xb4\x9de||\xa3L5\xc9\x1c_,V\x8a\x01}\xf6\xc1\r\xbe\xba\x97\x00\x00\u07d4\xdcW4[8\xe0\xf0g\u0263\x1d\x9d\xea\xc5'Z\x10\x94\x93!\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xdcWG}\xaf\xa4/p\\\u007f\xe4\x0e\xae\x9c\x81un\x02%\xf1\x89\x1b\x1b\x81(\xa7An\x00\x00\u07d4\xdc_Z\xd6c\xa6\xf2c2}d\xca\xc9\xcb\x13=,\x96\x05\x97\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdcp:_7\x94\xc8Ml\xb3TI\x18\xca\xe1J5\u00fdO\x89dI\xe8NG\xa8\xa8\x00\x00\xe0\x94\xdcs\x8f\xb2\x17\u03ad/iYL\b\x17\r\xe1\xaf\x10\xc4\x19\xe3\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xdcv\xe8[\xa5\v\x9b1\xec\x1e& \xbc\xe6\xe7\xc8\x05\x8c\x0e\xaf\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\u0703\xb6\xfd\rQ!1 G\a\xea\xf7.\xa0\xc8\u027e\xf9v\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u070c)\x12\xf0\x84\xa6\u0444\xaasc\x85\x13\u033c2n\x01\x02\x89F3\xbc6\xcb\xc2\xdc\x00\x00\u07d4\u0711\x1c\xf7\xdc]\u04016Vg\x05(\xe93\x8eg\x03G\x86\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0730;\xfal\x111#NV\xb7\xea|Or\x14\x87Tkz\x89Hz\x9a0E9D\x00\x00\xe0\x94\u0736M\xf47X\xc7\u03d7O\xa6`HO\xbbq\x8f\x8cg\xc1\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xdc\xc5-\x8f\x8d\x9f\xc7B\xa8\xb8'g\xf0US\x87\xc5c\xef\xff\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xdc\xcb7\x0e\u058a\xa9\"(0C\xef|\xad\x1b\x9d@?\xc3J\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xdc\u0324 
E\xec>\x16P\x8b`?\xd96\xe7\xfd}\xe5\xf3j\x89\x01\x11du\x9f\xfb2\x00\x00\u07d4\xdc\xd1\fU\xbb\x85OuD4\xf1!\x9c,\x9a\x98\xac\xe7\x9f\x03\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4\xdc\u057c\xa2\x00S\x95\xb6u\xfd\xe5\x03VY\xb2k\xfe\xfcI\xee\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xdc\u06fdN&\x04\xe4\x0e\x17\x10\xccg0(\x9d\xcc\xfa\u04c9-\x89\xf9]\xd2\xec'\xcc\xe0\x00\x00\u07d4\xdc\xe3\f1\xf3\xcafr\x1e\xcb!<\x80\x9a\xabV\x1d\x9bR\xe4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdc\xf39eS\x13\x80\x161h\xfc\x11\xf6~\x89\xc6\xf1\xbc\x17\x8a\x89\x12'v\x854\x06\xb0\x80\x00\u07d4\xdc\xf6\xb6W&n\x91\xa4\xda\xe6\x03=\xda\xc1S2\u074d+4\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xdc\xf9q\x9b\xe8|oFum\xb4\x89\x1d\xb9\xb6\x11\xd2F\x9cP\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xdc\xff\xf3\xe8\xd2<*4\xb5k\u0473\xbdE\u01d3tC\"9\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xdd\x04\xee\xe7N\v\xf3\f?\x8dl,\u007fR\xe0Q\x92\x10\u07d3\x89\x04V9\x18$O@\x00\x00\xe0\x94\xdd&\xb4)\xfdC\xd8N\xc1y\x82S$\xba\u057f\xb9\x16\xb3`\x8a\x01\x16\xbf\x95\xbc\x842\x98\x00\x00\u07d4\xdd*#:\xde\xdef\xfe\x11&\xd6\xc1h#\xb6*\x02\x1f\xed\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xdd+\u07e9\x17\xc1\xf3\x10\xe6\xfa5\xaa\x8a\xf1i9\xc23\xcd}\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xdd5\xcf\xdb\u02d93\x95Sz\xec\xc9\xf5\x90\x85\xa8\xd5\u0776\xf5\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xddG\x18\x9a>d9qg\xf0b\x0eHEe\xb7b\xbf\xbb\xf4\x89dI\xe8NG\xa8\xa8\x00\x00\u07d4\xddM\xd6\xd3`3\xb0co\u030d\t8`\x9fM\xd6OJ\x86\x89\x03@\xaa\xd2\x1b;p\x00\x00\u07d4\xddO_\xa2\x11\x1d\xb6\x8fk\xde5\x89\xb60)9[i\xa9-\x89\b\x96=\xd8\xc2\xc5\xe0\x00\x00\xe0\x94\xddc\x04/%\xed2\x88J\xd2n:\xd9Y\xeb\x94\xea6\xbfg\x8a\x04\x84\xd7\xfd\xe7\u0553\xf0\x00\x00\u07d4\xdde\xf6\xe1qc\xb5\xd2\x03d\x1fQ\xcc{$\xb0\x0f\x02\xc8\xfb\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xddl\x06!\x93\xea\xc2=/\xdb\xf9\x97\xd5\x06:4k\xb3\xb4p\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xdd{\u0366Y$\xaa\xa4\x9b\x80\x98J\xe1su\x02X\xb9(G\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xdd\u007f\xf4A\xbao\xfe6q\xf3\xc0\u06bb\xff\x18#\xa5\x043p\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u0742T\x12\x1an\x94/\xc9\b(\xf2C\x1fQ\x1d\xad\u007f2\u6263\x9b)\xe1\xf3`\xe8\x00\x00\xe0\x94\u074a\xf9\xe7vR#\xf4DoD\xd3\xd5\t\x81\x9a==\xb4\x11\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\u0755\xdb\xe3\x0f\x1f\x18w\xc5\xddv\x84\xae\xef0*\xb6\x88Q\x92\x8a\x01\xc5\xd8\xd6\xeb>2P\x00\x00\xe0\x94\u0756|L_\x8a\xe4~&o\xb4\x16\xaa\u0456N\xe3\xe7\xe8\u00ca\x01\xa4 \xdb\x02\xbd}X\x00\x00\u07d4\u075bHZ;\x1c\xd3:j\x9cb\xf1\xe5\xbe\xe9'\x01\x85m%\x89\f3\x83\xed\x03\x1b~\x80\x00\xe0\x94\u0763q\xe6\x00\xd3\x06\x88\xd4q\x0e\b\x8e\x02\xfd\xf2\xb9RM_\x8a\x01w\"J\xa8D\xc7 \x00\x00\u07d4\u0764\xed*X\xa8\xdd \xa72u4{X\rq\xb9[\xf9\x9a\x89\x15\xa1<\xc2\x01\xe4\xdc\x00\x00\xe0\x94\u0764\xff}\xe4\x91\u0187\xdfEt\xdd\x1b\x17\xff\x8f$k\xa3\u044a\x04&\x84\xa4\x1a\xbf\xd8@\x00\x00\u07d4\u076bkQ\xa9\x03\v@\xfb\x95\xcf\vt\x8a\x05\x9c$\x17\xbe\u01c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94\u076bu\xfb/\xf9\xfe\u02c8\xf8\x94vh\x8e+\x00\xe3g\xeb\xf9\x8a\x04\x1b\xad\x15^e\x12 \x00\x00\xe0\x94\u076b\xf1<<\x8e\xa4\xe3\xd7=x\xecqz\xfa\xfaC\x0eTy\x8a\b\xcf#\xf9\t\xc0\xfa\x00\x00\x00\u07d4\u076c1*\x96UBj\x9c\f\x9e\xfa?\xd8%Y\xefE\x05\xbf\x89\x15\xbeat\xe1\x91.\x00\x00\u07d4\u076ck\xf4\xbb\xdd}Y}\x9chm\x06\x95Y;\xed\xcc\xc7\xfa\x89.\xe4IU\b\x98\xe4\x00\x00\xe0\x94\u077d+\x93,v;\xa5\xb1\xb7\xae;6.\xac>\x8d@\x12\x1a\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\u077d\xdd\x1b\xbd8\xff\xad\xe00]0\xf0 (\xd9.\x9f:\xa8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\u077e\xe6\xf0\x94\xea\xe64 
\xb0\x03\xfbGW\x14*\xeal\xd0\xfd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdd\u059c[\x9b\xf5\xebZ9\xce\xe7\xc34\x1a\x12\r\x97?\xdb4\x89k\xc1K\x8f\x8e\x1b5\x00\x00\xe0\x94\xdd\xdd{\x9en\xab@\x9b\x92&:\xc2r\u0680\x1bfO\x8aW\x8ai\xe1\r\xe7fv\u0400\x00\x00\u07d4\xdd\xe6p\xd0\x169fuv\xa2-\xd0]2F\xd6\x1f\x06\xe0\x83\x89\x01s\x17\x90SM\xf2\x00\x00\xe0\x94\xdd\xe7zG@\xba\b\xe7\xf7?\xbe:\x16t\x91)1t.\xeb\x8a\x044\xfeMC\x82\xf1\u0500\x00\u07d4\xdd\xe8\xf0\xc3\x1bt\x15Q\x1d\xce\xd1\xcd}F2>K\xd1\"2\x89WG=\x05\u06ba\xe8\x00\x00\u07d4\xdd\xe9i\xae\xf3N\xa8z\u0099\xb7Y~)+J\x01U\u030a\x89\x102\xf2YJ\x01s\x80\x00\u07d4\xdd\xf0\xcc\xe1\xfe\x99m\x91v5\xf0\a\x12\xf4\x05 \x91\xdf\xf9\xea\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xdd\xf3\xadv58\x10\xbej\x89\xd71\xb7\x87\xf6\xf1q\x88a+\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xdd\xf5\x81\n\x0e\xb2\xfb.22;\xb2\u0255\t\xab2\x0f$\xac\x8a\x03\xca\\f\u067cD0\x00\x00\xe0\x94\xdd\xf9\\\x1e\x99\xce/\x9fV\x98\x05|\x19\xd5\xc9@'\xeeJn\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u0794\xdd\xfa\xfd\xbc|\x90\xf12\x0eT\xb9\x8f7F\x17\xfb\xd0\x1d\x10\x9f\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4\xdd\xfc\xca\x13\xf94\xf0\u03fe#\x1d\xa109\xd7\x04u\xe6\xa1\u040968\"\x16`\xa5\xaa\x80\x00\u07d4\xde\x02~\xfb\xb3\x85\x03\"n\xd8q\t\x9c\xb3\v\xdb\x02\xaf\x135\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xde\x06\xd5\xeawzN\xb1G^`]\xbc\xbfCDN\x807\xea\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\xde\a\xfb[zFN;\xa7\xfb\xe0\x9e\x9a\xcb'\x1a\xf53\x8cX\x89\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\xde\x11!\x82\x9c\x9a\b(@\x87\xa4?\xbd/\xc1\x14*23\xb4\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xde\x17kR\x84\xbc\xee:\x83\x8b\xa2Og\xfc|\xbfg\u05ce\xf6\x89\x02\t\xce\b\xc9b\xb0\x00\x00\u07d4\xde!\"\x93\xf8\xf1\xd21\xfa\x10\xe6\tG\rQ,\xb8\xff\xc5\x12\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xde0\xe4\x9eZ\xb3\x13!M/\x01\u072b\u0389@\xb8\x1b\x1cv\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xde3\xd7\b\xa3\xb8\x9e\x90\x9e\xafe;0\xfd\u00e5\xd5\u0334\xb3\x89\t\x9c\x88\"\x9f\xd4\xc2\x00\x00\u07d4\xde7B\x99\xc1\xd0}ySs\x85\x19\x0fD.\xf9\xca$\x06\x1f\x89\a?u\u0460\x85\xba\x00\x00\u07d4\xdeB\xfc\xd2L\xe4#\x93\x830CgY_\x06\x8f\fa\a@\x89\x02r*p\xf1\xa9\xa0\x00\x00\u07d4\xdeP\x86\x8e\xb7\xe3\xc7\x197\xecs\xfa\x89\u074b\x9e\xe1\rE\xaa\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xdeU\xde\x04X\xf8P\xb3~Mx\xa6A\xdd.\xb2\u074f8\u0389\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xde[\x00_\xe8\u06ae\x8d\x1f\x05\xde>\xda\x04 f\xc6\xc4i\x1c\x89;\xa1\x91\v\xf3A\xb0\x00\x00\u07d4\xdea-\a$\xe8N\xa4\xa7\xfe\xaa=!B\xbd^\xe8-2\x01\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xdem61\x06\xccb8\xd2\xf0\x92\xf0\xf07!6\xd1\xcdP\u018a\x01!\xeah\xc1\x14\xe5\x10\x00\x00\u07d4\xde}\xee\"\x0f\x04W\xa7\x18}V\xc1\xc4\x1f.\xb0\n\xc5`!\x89\"%\xf3\x9c\x85\x05*\x00\x00\u07d4\u0782\u030dJ\x1b\xb1\xd9CC\x92\x96[>\x80\xba\xd3\xc0=O\x89P\x18nu\u0797\xa6\x00\x00\u07d4\u0797\xf43\a\x00\xb4\x8cImC|\x91\xca\x1d\xe9\u0130\x1b\xa4\x89\x9d\xcc\x05\x15\xb5n\f\x00\x00\u07d4\u079e\xffLy\x88\x11\xd9h\xdc\xcbF\r\x9b\x06\x9c\xf3\x02x\xe0\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\u07b1\xbc4\xd8mJM\xde%\x80\u063e\xaf\aN\xb0\xe1\xa2D\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\u07b2I]j\xca{*j-\x13\x8bn\x1aB\xe2\xdc1\x1f\u0749lk\x93[\x8b\xbd@\x00\x00\u07d4\u07b9rTGL\r/Zyp\xdc\xdb/R\xfb\x10\x98\xb8\x96\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u07b9\xa4\x9aC\x870 \xf0u\x91\x85\xe2\v\xbbL\U000c1ecf\x89\vx\xed\xb0\xbf.^\x00\x00\u07d4\u07bb\u0743\x1e\x0f 
\xaen7\x82R\xde\xcd\xf9/|\xf0\xc6X\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xde\xc3\xee\xc2d\nu,Fn+~~\u616f\xe9\xacA\xf4\x89G\u0257SYk(\x80\x00\u07d4\xde\xc8#s\xad\xe8\xeb\xcf*\xcbo\x8b\xc2AM\u05eb\xb7\rw\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xde\u0221\xa8\x98\xf1\xb8\x95\xd80\x1f\xe6J\xb3\xad]\xe9A\xf6\x89\x89*\xb4\xf6~\x8as\x0f\x80\x00\u07d4\xde\u025e\x97/\xcaqwP\x8c\x8e\x1aG\xac\"\xd7h\xac\xab|\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xde\xd8w7\x84\a\xb9Nx\x1cN\xf4\xaf|\xfc[\xc2 \xb5\x16\x89\x141y\xd8i\x11\x02\x00\x00\u07d4\xde\xe9B\xd5\xca\xf5\xfa\xc1\x14!\xd8k\x01\vE\x8e\\9)\x90\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xde\xee&\x89\xfa\x90\x06\xb5\x9c\xf2\x85#}\xe5;:\u007f\xd0\x148\x89\x18ey\xf2\x9e %\x00\x00\u07d4\xde\xfd\xdf\u055b\x8d,\x15N\xec\xf5\xc7\xc1g\xbf\v\xa2\x90]>\x89\x05\x12\xcb^&GB\x00\x00\u07d4\xde\xfe\x91A\xf4pE\x99\x15\x9d{\"=\xe4+\xff\xd8\x04\x96\xb3\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xdf\t\x8f^N=\xff\xa5\x1a\xf27\xbd\xa8e,Os\ud726\x89\x1b6\xa6DJ>\x18\x00\x00\xe0\x94\xdf\r\ba{\xd2R\xa9\x11\u07cb\xd4\x1a9\xb8=\u07c0\x96s\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xdf\x0f\xf1\xf3\xd2z\x8e\xc9\xfb\x8fk\f\xb2T\xa6;\xba\x82$\xa5\x89\xec\xc5 )E\xd0\x02\x00\x00\u07d4\xdf\x1f\xa2\xe2\x0e1\x98^\xbe,\x0f\f\x93\xb5L\x0f\xb6z&K\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xdf!\x1c\xd2\x12\x88\xd6\xc5o\xaef\xc3\xffTb]\u0531T'\x89\x87\x86\xcdvN\x1f,\x00\x00\u07d4\xdf#k\xf6\xab\xf4\xf3)7\x95\xbf\f(q\x8f\x93\u3c73k\x89Hz\x9a0E9D\x00\x00\u07d4\xdf1\x02_VI\xd2\xc6\xee\xa4\x1e\u04fd\xd3G\x1ay\x0fu\x9a\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xdf7\xc2.`:\xed\xb6\nbrS\xc4}\x8b\xa8f\xf6\xd9r\x8a\x05\x15\n\xe8J\x8c\xdf\x00\x00\x00\u07d4\xdf;r\u017dq\u0501N\x88\xa6#!\xa9=@\x11\xe3W\x8b\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xdf?W\xb8\xeed4\xd0G\"=\xeft\xb2\x0fc\xf9\xe4\xf9U\x89\r\x94b\xc6\xcbKZ\x00\x00\u07d4\xdfD\xc4\u007f\xc3\x03\xacv\xe7O\x97\x19L\xcag\xb5\xbb<\x02?\x89 \t\xc5\u023fo\xdc\x00\x00\u07d4\xdfG\xa6\x1brSQ\x93\xc5a\xcc\xccu\xc3\xf3\xce\b\x04\xa2\x0e\x89\x15\x93\\\vN=x\x00\x00\u07d4\xdfG\xa8\xef\x95\xf2\xf4\x9f\x8eoX\x18AT\x14]\x11\xf7'\x97\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4\xdfS\x003F\xd6\\^zdk\xc04\xf2\xb7\xd3/\xcb\xe5j\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xdfW5:\xaf\xf2\xaa\xdb\n\x04\xf9\x01N\x8d\xa7\x88N\x86X\x9c\x89\bH\x86\xa6nO\xb0\x00\x00\u07d4\xdf`\xf1\x8c\x81*\x11\xedN'v\xe7\xa8\x0e\xcf^S\x05\xb3\u05890\xca\x02O\x98{\x90\x00\x00\u07d4\xdfd\x85\xc4)z\xc1R\xb2\x89\xb1\x9d\xde2\xc7~\xc4\x17\xf4}\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xdff\n\x91\u06b9\xf70\xf6\x19\rP\xc89\x05aP\aV\u0289lk\x93[\x8b\xbd@\x00\x00\u07d4\xdfn\xd6\x00jj\xbe\x88n\xd3=\x95\xa4\xde(\xfc\x12\x189'\x891T\xc9r\x9d\x05x\x00\x00\u07d4\u07c5\x10y>\xee\x81\x1c-\xab\x1c\x93\xc6\xf4G?0\xfb\xef[\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u07cdH\xb1\xeb\a\xb3\xc2\x17y\x0el-\xf0M\xc3\x19\xe7\xe8H\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\u07e6\xb8\xb8\xad1\x84\xe3W\xda()Q\u05d1a\u03f0\x89\xbc\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\u07ef1\xe6\"\xc0=\x9e\x18\xa0\u0778\xbe`\xfb\xe3\xe6a\xbe\n\x8a\x02\x1e\x17\x1a>\xc9\xf7,\x00\x00\u07d4\u07f1bn\xf4\x8a\x1d}uR\xa5\xe0)\x8f\x1f\xc2:;H-\x89\\\xe8\x95\u0754\x9e\xfa\x00\x00\xe0\x94\u07f4\u052d\xe5/\u0301\x8a\xccz,k\xb2\xb0\x02$e\x8fx\x8a\x01\xa4 
\xdb\x02\xbd}X\x00\x00\u07d4\u07fdB2\xc1|@z\x98\r\xb8\u007f\xfb\u036060\xe5\xc4Y\x89\x1d\xfc\u007f\x92I#S\x00\x00\u07d4\xdf\xcb\xdf\tEN\x1a^J@\xd3\xee\xf7\xc5\xcf\x1c\xd3\u0794\x86\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xdf\xdb\xce\xc1\x01K\x96\xda!X\xcaQ>\x9c\x8d;\x9a\xf1\xc3\u0409lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xdf\xde\xd2WK'\xd1a:}\x98\xb7\x15\x15\x9b\r\x00\xba\xab(\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xdf\xdfC9P\x8b\x0fnZ\xb1\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe0\x06\x04b\xc4\u007f\xf9g\x9b\xae\xf0qY\xca\xe0\x8c)\xf2t\xa9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\r\x15;\x106\x91C\xf9\u007fT\xb8\xd4\xca\"\x9e\xb3\xe8\xf3$\x89\b=lz\xabc`\x00\x00\u07d4\xe0\x12\xdbE8'\xa5\x8e\x16\xc16V\b\xd3n\xd6Xr\x05\a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\x15G\xbaB\xfc\xaf\xaf\x93\x93\x8b\xec\xf7i\x9ft)\n\xf7O\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\x16\xdc\x13\x8e%\x81[\x90\xbe?\xe9\xee\xe8\xff\xb2\xe1\x05bO\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe0\x18Y\xf2B\xf1\xa0\xec`/\xa8\xa3\xb0\xb5v@\xec\x89\a^\x89\x1e\x16,\x17{\xe5\xcc\x00\x00\xe0\x94\xe0 \xe8cb\xb4\x87u(6\xa6\xde\v\xc0,\xd8\u061a\x8bj\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xe0#\xf0\x9b(\x87a,|\x9c\xf1\x98\x8e::`+3\x94\u0249lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0'\"\x13\xe8\xd2\xfd>\x96\xbdb\x17\xb2KK\xa0\x1bapy\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xe0+t\xa4v(\xbe1[\x1fv\xb3\x15\x05J\xd4J\xe9qo\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xe02 \u0197\xbc\u048f&\xef\vt@J\x8b\xeb\x06\xb2\xba{\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xe05/\u07c1\x9b\xa2e\xf1L\x06\xa61\\J\xc1\xfe\x13\x1b.\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe08\x8a\xed\xdd?\xe2\xadV\xf8WH\xe8\x0eq\n4\xb7\xc9.\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe0<\x00\xd0\x03\x88\xec\xbfO&=\n\xc7x\xbbA\xa5z@\u064966\xc9yd6t\x00\x00\u07d4\xe0I \xdcn\xcc\x1dn\xcc\bO\x88\xaa\n\xf5\u06d7\xbf\x89:\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4\xe0Ir\xa8<\xa4\x11+\xc8q\xc7-J\xe1al/\a(\u06c9\x0e\x81\xc7\u007f)\xa3/\x00\x00\u07d4\xe0O\xf5\xe5\xa7\u2bd9]\x88W\xce\x02\x90\xb5:+\x0e\xda]\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xe0P)\xac\xeb\axg[\xef\x17A\xab,\u0493\x1e\xf7\xc8K\x8a\x01\x0f\r\xba\xe6\x10\tR\x80\x00\u07d4\xe0V\xbf?\xf4\x1c&%o\xefQqf\x12\xb9\u04da\u0799\x9c\x89\x05k\xe7W\xa1.\n\x80\x00\u07d4\xe0a\xa4\xf2\xfcw\xb2\x96\u045a\xda#\x8eI\xa5\u02ce\xcb\xfap\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xe0f>\x8c\xd6g\x92\xa6A\xf5nP\x03f\x01G\x88\x0f\x01\x8e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0f\x8f\xa8,\x14\xd6\xe8\xd9:S\x11>\xf2\x86/\xa8\x15\x81\xbc\x89//9\xfclT\x00\x00\x00\u07d4\xe0i\xc0\x173R\xb1\v\xf6\x83G\x19\xdb[\xed\x01\xad\xf9{\xbc\x89\x01\x064\xf8\xe52;\x00\x00\u07d4\xe0l)\xa8\x15\x17\xe0\u0507\xb6\u007f\xb0\xb6\xaa\xbcOW6\x83\x88\x89\x15\xbeat\xe1\x91.\x00\x00\u07d4\xe0l\xb6)G\x04\xee\xa7C|/\xc3\xd3\as\xb7\xbf8\x88\x9a\x89\x01\x16\xdc:\x89\x94\xb3\x00\x00\u07d4\xe0q7\xae\r\x11m\x0353\xc4\uad16\xf8\xa9\xfb\tV\x9c\x89K\xe4\xe7&{j\xe0\x00\x00\xe0\x94\xe0v\xdb0\xabHoy\x19N\xbb\xc4]\x8f\xab\x9a\x92B\xf6T\x8a\x01\x06`~4\x94\xba\xa0\x00\x00\u07d4\xe0~\xbb\xc7\xf4\xdaAnB\xc8\xd4\xf8B\xab\xa1b3\xc1%\x80\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\x81\xca\x1fH\x82\xdb`C\u0569\x19\a\x03\xfd\xe0\xab;\xf5m\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe0\x83\xd3Hc\xe0\xe1\u007f\x92ky(\xed\xff1~\x99\x8e\x9cK\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe0\x8b\x9a\xbak\xd9\u048b\xc2\x05gy\xd2\xfb\xf0\xf2\x85Z=\x9d\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\x8b\u009c+H\xb1i\xff+\xdc\x16qLXnl\xb8\\\u03c9\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xe0\x8c`11\x06\xe3\xf93O\xe6\xf7\xe7bM!\x110\xc0w\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07
d4\xe0\x9ch\xe6\x19\x98\xd9\xc8\x1b\x14\xe4\xee\x80+\xa7\xad\xf6\xd7L\u06c9\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xe0\x9f\xeauZ\xee\x1aD\xc0\xa8\x9f\x03\xb5\u07b7b\xba3\x00o\x89;\xa2\x89\xbc\x94O\xf7\x00\x00\xe0\x94\xe0\xa2T\xac\t\xb9r[\xeb\xc8\xe4`C\x1d\xd0s.\xbc\xab\xbf\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xe0\xaai6UU\xb7?(#3\xd1\xe3\f\x1b\xbd\a(T\xe8\x8a\x01{x\x83\xc0i\x16`\x00\x00\u07d4\xe0\xba\u064e\ue598\xdb\xf6\xd7`\x85\xb7\x92=\xe5uN\x90m\x89\t\r\x97/22<\x00\x00\u07d4\xe0\u012b\x90r\xb4\xe6\xe3eJI\xf8\xa8\xdb\x02jK3\x86\xa9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\u0380\xa4a\xb6H\xa5\x01\xfd\v\x82F\x90\u0206\x8b\x0eM\xe8\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe0\xcfi\x8a\x053'\xeb\xd1k}w\x00\t/\xe2\xe8T$F\x89\x05*4\u02f6\x1fW\x80\x00\xe0\x94\xe0\xd21\xe1D\xec\x91\a8l|\x9b\x02\xf1p,\xea\xa4\xf7\x00\x8a\x01\x0f\r\xba\xe6\x10\tR\x80\x00\u07d4\xe0\xd7kqf\xb1\xf3\xa1+@\x91\xee+)\u078c\xaa}\a\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xe0\xe0\xb2\xe2\x9d\xdes\xafu\x98~\xe4Dl\x82\x9a\x18\x9c\x95\xbc\x89\b\x13\xcaV\x90m4\x00\x00\xe0\x94\xe0\xe9xu=\x98/\u007f\x9d\x1d#\x8a\x18\xbdH\x89\xae\xfeE\x1b\x8a\x02\r\u058a\xaf2\x89\x10\x00\x00\u07d4\xe0\xf3r4|\x96\xb5_}C\x06\x03K\xeb\x83&o\xd9\tf\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe0\xf9\x03\xc1\xe4\x8a\xc4!\xabHR\x8f=J&H\b\x0f\xe0C\x897\b\xba\xed=h\x90\x00\x00\u07d4\xe0\xff\v\xd9\x15D9\u0125\xb7#>)\x1d}\x86\x8a\xf5?3\x89\x15y!jQ\xbb\xfb\x00\x00\xe0\x94\xe1\n\xc1\x9cTo\xc2T|a\xc19\xf5\xd1\xf4Zff\u0570\x8a\x01\x02\xdao\xd0\xf7:<\x00\x00\xe0\x94\xe1\fT\x00\x88\x11?\xa6\xec\x00\xb4\xb2\u0202O\x87\x96\xe9n\u010a2\x0fE\t\xab\x1e\xc7\xc0\x00\x00\xe0\x94\xe1\x17:$})\xd8#\x8d\xf0\x92/M\xf2Z\x05\xf2\xafw\u00ca\bx\xc9]V\x0f0G\x80\x00\xe0\x94\xe1 >\xb3\xa7#\xe9\x9c\" \x11|\xa6\xaf\xebf\xfaBOa\x8a\x02\x00\uf49e2V\xfe\x00\x00\xe0\x94\xe11\xf8~\xfc^\xf0~C\xf0\xf2\xf4\xa7G\xb5Q\xd7P\xd9\xe6\x8a\x04<%\xe0\xdc\xc1\xbd\x1c\x00\x00\u07d4\xe13N\x99\x83y\xdf\xe9\x83\x17pby\x1b\x90\xf8\x0e\xe2-\x8d\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe15@\xec\xee\x11\xb2\x12\xe8\xb7u\u070eq\xf3t\xaa\xe9\xb3\xf8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe1;=+\xbf\u073c\x87r\xa23\x15rL\x14%\x16|V\x88\x897\xf3y\x14\x1e\xd0K\x80\x00\u07d4\xe1D=\xbd\x95\xccA#\u007fa:HEi\x88\xa0Oh2\x82\x89\xd8\xd8X?\xa2\xd5/\x00\x00\u07d4\xe1F\x17\xf6\x02%\x01\xe9~{>-\x886\xaaa\xf0\xff-\xba\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe1I\xb5rl\xafm^\xb5\xbf*\xccA\xd4\xe2\xdc2\x8d\u1089i*\xe8\x89p\x81\xd0\x00\x00\xe0\x94\xe1T\xda\xea\xdbTX8\xcb\u01aa\fUu\x19\x02\xf5(h*\x8a\x01\n\xfc\x1a\xde;N\xd4\x00\x00\u07d4\xe1l\xe3Ya\xcdt\xbdY\r\x04\u012dJ\x19\x89\xe0V\x91\u0189\a\xea(2uw\b\x00\x00\u07d4\xe1r\xdf\xc8\xf8\f\xd1\xf8\u03459\xdc&\b \x14\xf5\xa8\xe3\u8262\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xe1w\xe0\xc2\x01\xd35\xba9V\x92\x9cW\x15\x88\xb5\x1cR#\xae\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe1x\x12\xf6l^e\x94\x1e\x18lF\x92+n{/\x0e\xebF\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xe1\x80\u079e\x86\xf5{\xaf\xac\u05d0O\x98&\xb6\xb4\xb2c7\xa3\x89-\x04\x1dpZ,`\x00\x00\xe0\x94\xe1\x92H\x9b\x85\xa9\x82\xc1\x882F\xd9\x15\xb2)\xcb\x13 \u007f8\x8a\x01\x0f\f\xf0d\xddY 
\x00\x00\u07d4\xe1\x95\xbb\xc6,{tD\x04\x0e\xb9\x96#\x96Ovg\xb3v\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xe2\x06\xfbs$\xe9\u07b7\x9e\x19\x904\x96\u0596\x1b\x9b\xe5f\x03\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xe2\aW\x8e\x1fM\u06cf\xf6\u0546{9X-q\xb9\x81*\u0149\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4\xe2\b\x81*h@\x98\xf3\xdaN\xfej\xba%bV\xad\xfe?\xe6\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe2\tT\xd0\xf4\x10\x8c\x82\xd4\u0732\x14\x8d&\xbb\xd9$\xf6\xdd$\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xe2\v\xb9\xf3\x96d\x19\xe1K\xbb\xaa\xaag\x89\xe9$\x96\u03e4y\x89\xbb\xd8%\x03\aRv\x00\x00\u07d4\xe2\r\x1b\xcbq(m\xc7\x12\x8a\x9f\xc7\xc6\xed\u007fs8\x92\xee\xf5\x896d\xf8\xe7\xc2J\xf4\x00\x00\u0794\xe2\x19\x12\x15\x98?3\xfd3\xe2,\u0522I\x00T\xdaS\xfd\u0708\xdbD\xe0I\xbb,\x00\x00\u07d4\xe2\x19\x8c\x8c\xa1\xb3\x99\xf7R\x15a\xfdS\x84\xa7\x13/\xbaHk\x897\b\xba\xed=h\x90\x00\x00\xe0\x94\xe2\x1cw\x8e\xf2\xa0\xd7\xf7Q\xea\x8c\aM\x1f\x81\"C\x86>N\x8a\x01\x1f\xc7\x0e,\x8c\x8a\xe1\x80\x00\xe0\x94\xe2)\xe7F\xa8?,\xe2S\xb0\xb0>\xb1G$\x11\xb5~W\x00\x8a\x016\x9f\xb9a(\xacH\x00\x00\u07d4\xe2+ \xc7x\x94F;\xafwL\xc2V\u057d\u06ff}\xdd\t\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe20\xfe\x1b\xff\x03\x18m\x02\x19\xf1]LH\x1b}Y\xbe(j\x89\x01\xfdt\x1e\x80\x88\x97\x00\x00\u07d4\xe27\xba\xa4\xdb\u0252n2\xa3\xd8]\x12d@-T\xdb\x01/\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe2A\t\xbe/Q=\x87I\x8e\x92j(d\x99uO\x9e\u051e\x890\x0e\xa8\xad\x1f'\xca\x00\x00\u07d4\xe2Fh<\u025d\xb7\u0125+\u02ec\xaa\xb0\xb3/k\xfc\x93\u05c9lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe2Z\x16{\x03\x1e\x84am\x0f\x01?1\xbd\xa9]\xcccP\xb9\x8a\x02\x8c*\xaa\u0243\xd0]\u0187st\xa8\xf4F\xee\xe9\x89\n\xb6@9\x12\x010\x00\x00\u07d4\xe2\x8b\x06\"Y\xe9n\xeb<\x8dA\x04\x94?\x9e\xb3%\x89<\xf5\x89Hz\x9a0E9D\x00\x00\xe0\x94\u237c\x8e\xfd^Ajv.\xc0\xe0\x18\x86K\xb9\xaa\x83({\x8a\x051\xf2\x00\xab>\x03\n\x80\x00\u07d4\xe2\x90K\x1a\xef\xa0V9\x8bb4\xcb5\x81\x12\x88\xd76\xdbg\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\u274a\xe4R\xdc\xf3\xb6\xacd^c\x04\t8UQ\xfa\xae\n\x89\x04Z\r\xa4\xad\xf5B\x00\x00\u07d4\xe2\xbb\xf8FA\xe3T\x1fl3\xe6\xedh:cZp\xbd\xe2\xec\x89\x1bA<\xfc\xbfY\xb7\x80\x00\u07d4\xe2\xcf6\n\xa22\x9e\xb7\x9d+\xf7\xca\x04\xa2z\x17\xc52\xe4\u0609\x05\x87\x88\u02d4\xb1\xd8\x00\x00\u07d4\xe2\xdf#\xf6\xea\x04\xbe\xcfJ\xb7\x01t\x8d\xc0\x961\x84U\\\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xe2\xe1\\`\xdd8\x1e:K\xe2Pq\xab$\x9aL\\Rd\u0689\u007fk\u011b\x81\xb57\x00\x00\u07d4\xe2\xe2nN\x1d\xcf0\xd0H\xccn\u03ddQ\xec\x12\x05\xa4\xe9&\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xe2\xeei\x1f#~\xe6R\x9beW\xf2\xfc\xdd=\xcf\fY\xecc\x8a\x01'r\x9c\x14h| \x00\x00\u07d4\xe2\xef\xa5\xfc\xa7\x958\xce`h\xbf1\xd2\xc5\x16\xd4\xd5<\b\xe5\x89\a\x1c\xc4\b\xdfc@\x00\x00\xe0\x94\xe2\xef\u0429\xbc@~\xce\x03\xd6~\x8e\xc8\xe9\u0483\xf4\x8d*I\x8a\x02\x99\xb3;\xf9\u0144\xe0\x00\x00\u07d4\xe2\xf4\r5\x8f^?\xe7F>\xc7\x04\x80\xbd.\u04d8\xa7\x06;\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xe2\xf98=X\x10\xea{C\x18+\x87\x04\xb6+'\xf5\x92]9\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe2\xff\x9e\xe4\xb6\xec\xc1AA\xcct\xcaR\xa9\xe7\xa2\xee\x14\xd9\b\x89K\xe4\xe7&{j\xe0\x00\x00\xe0\x94\xe3\x02\x12\xb2\x01\x1b\xb5k\xdb\xf1\xbc5i\x0f:N\x0f\xd9\x05\xea\x8a\x01\xb2\u07dd!\x9fW\x98\x00\x00\u07d4\xe3\x03\x16\u007f=I`\xfe\x88\x1b2\x80\n+J\xef\xf1\xb0\x88\u0509lk\x93[\x8b\xbd@\x00\x00\u07d4\xe3\x04\xa3/\x05\xa87btJ\x95B\x97o\xf9\xb7#\xfa1\xea\x89Ur\xf2@\xa3F \x00\x00\u07d4\xe3\bCR\x04y7d\xf5\xfc\xbee\xebQ\x0fZtJeZ\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xe3\t\x97L\xe3\x9d`\xaa\xdf.ig2Q\xbf\x0e\x04v\n\x10\x89\r\xc5_\xdb\x17d{\x00\x00\u07d4\xe3\x1bN\xef\x18L$\xab\t\x8e6\xc8\x02qK\xd4t=\xd0\u0509\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe3!\xbbJ\x94j\xda\xfd\xad\xe4W\x1f\xb1\\\x00C\u04de\xe3_\x89Udu8+L\x9e\x00\x00\u07d4\xe3&<\xe8\xafm\xb3\xe4gXE\x02\xedq\t\x12^\xae\"\xa5\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe3+\x1cG%\xa1\x87TI\u93d7\x0e\xb3\xe5@b\xd1X\x00\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe3/\x95vmW\xb5\xcdK\x172\x89\u0587o\x9edU\x81\x94\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xe38@\u063c\xa7\u0698\xa6\xf3\u0416\xd8=\xe7\x8bp\xb7\x1e\xf8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe38\xe8Y\xfe.\x8c\x15UHH\xb7\\\xae\u0368w\xa0\xe82\x89a\xac\xff\x81\xa7\x8a\xd4\x00\x00\u07d4\xe3=\x98\x02 \xfa\xb2Y\xafj\x1fK8\xcf\x0e\xf3\xc6\xe2\xea\x1a\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe3=\xf4\u0380\u0336*v\xb1+\xcd\xfc\xec\xc4b\x89\x97:\xa9\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xe3?\xf9\x87T\x1d\xde\\\xde\u0a29m\xcc?3\xc3\xf2L\u008a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\xe3A\v\xb7U|\xf9\x1dy\xfai\xd0\xdf\xea\n\xa0u@&Q\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe3Ad-@\u04af\xce.\x91\a\xc6py\xacz&`\bl\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe3TS\xee\xf2\xcc2\x89\x10CR\x8d\t\x84i\x80\x00\xe0\x94\xe5\x10\xd6y\u007f\xba=f\x93\x83Z\x84N\xa2\xadT\x06\x91\x97\x1b\x8a\x03\xae9\xd4s\x83\xe8t\x00\x00\u07d4\xe5\x14!\xf8\xee\"\x10\xc7\x1e\xd8p\xfea\x82v\u0215J\xfb\xe9\x89Hz\x9a0E9D\x00\x00\u07d4\xe5\x1e\xb8~\u007f\xb71\x1fR(\xc4y\xb4\x8e\u0247\x881\xacL\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5!V1\xb1BH\xd4Z%R\x96\xbe\xd1\xfb\xfa\x030\xff5\x89G\x03\xe6\xebR\x91\xb8\x00\x00\xe0\x94\xe5(\xa0\xe5\xa2g\xd6g\xe99:e\x84\xe1\x9b4\u071b\xe9s\x8a\x01/\x93\x9c\x99\xed\xab\x80\x00\x00\u07d4\xe54%\xd8\xdf\x1f\x11\xc3A\xffX\xae_\x148\xab\xf1\xcaS\u03c9\x11t\xa5\xcd\xf8\x8b\xc8\x00\x00\u07d4\xe5No\x9c\xffV\xe1\x9cF\x1e\xb4T\xf9\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe5A\x02SM\xe8\xf2>\xff\xb0\x93\xb3\x12B\xad;#?\xac\xfd\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xe5E\xee\x84\xeaH\xe5d\x16\x1e\x94\x82\u055b\xcf@j`,\xa2\x89dI\xe8NG\xa8\xa8\x00\x00\u07d4\xe5H\x1a\u007f\xedB\xb9\x01\xbb\xed x\x9b\u052d\xe5\r_\x83\xb9\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5Y\xb5\xfd3{\x9cUr\xa9\xbf\x9e\x0f%!\xf7\xd4F\xdb\xe4\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe5\\\x80R\n\x1b\x0fu[\x9a,\xd3\xce!Ov%e>\x8a\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe5mC\x13$\xc9)\x11\xa1t\x9d\xf2\x92p\x9c\x14\xb7ze\u034a\x01\xbc\x85\xdc*\x89\xbb \x00\x00\u07d4\xe5})\x95\xb0\xeb\xdf?<\xa6\xc0\x15\xeb\x04&\r\xbb\x98\xb7\u0189lk\x93[\x8b\xbd@\x00\x00\u07d4\u51f1j\xbc\x8at\b\x1e6\x13\xe1CB\xc03u\xbf\bG\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5\x89\xfav\x98M\xb5\xec@\x04\xb4n\u8954\x92\xc3\aD\u0389\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\xe5\x8d\xd228\xeen\xa7\xc2\x13\x8d8]\xf5\x00\xc3%\xf3v\xbe\x89b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xe5\x95?\xeaIq\x04\xef\x9a\xd2\xd4\xe5\x84\x1c'\x1f\a5\x19\u0089&)\xf6n\fS\x00\x00\x00\xe0\x94\u5587\x97F\x8e\xf7g\x10\x1bv\x1dC\x1f\xce\x14\xab\xff\u06f4\x8a\x01\xb3\xd9i\xfaA\x1c\xa0\x00\x00\u07d4\xe5\x97\xf0\x83\xa4i\xc4Y\x1c=+\x1d,w'\x87\xbe\xfe'\xb2\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\u07d4\xe5\x9b;\xd3\x00\x89?\x97#>\xf9G\xc4or\x17\xe3\x92\xf7\xe9\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe5\xa3e4<\xc4\xeb\x1ew\x03h\xe1\xf1\x14Jw\xb82\xd7\xe0\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xe5\xa3\xd7\xeb\x13\xb1\\\x10\x01w#m\x1b\xeb0\xd1~\xe1T 
\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5\xaa\v\x83;\xb9\x16\xdc\x19\xa8\xddh?\x0e\xde$\x1d\x98\x8e\xba\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\u5def\x14i\x86\xc0\xff\x8f\x85\xd2.l\xc34\a}\x84\xe8$\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5\xb8&\x19l\x0e\x1b\xc1\x11\x9b\x02\x1c\xf6\xd2Y\xa6\x10\u0256p\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe5\xb9o\u026c\x03\xd4H\xc1a:\xc9\x1d\x15\x97\x81E\xdb\xdf\u0449\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\u5e40\u048e\xec\xe2\xc0o\xcal\x94s\x06\x8b7\u0526\xd6\xe9\x89%\xaf\u058c\xac+\x90\x00\x00\u07d4\u5eb4\xf0\xaf\u0629\u0463\x81\xb4Wa\xaa\x18\xf3\xd3\xcc\xe1\x05\x89Q\xbf\xd7\xc18x\xd1\x00\x00\u07d4\xe5\xbc\u020c;%on\xd5\xfeU\x0eJ\x18\x19\x8b\x943V\xad\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5\xbd\xf3OL\xccH>L\xa50\xcc|\xf2\xbb\x18\xfe\xbe\x92\xb3\x89\x06\xd85\xa1\v\xbc\xd2\x00\x00\u07d4\xe5\u0713I\xcbR\xe1a\x19a\"\u03c7\xa3\x896\xe2\xc5\u007f4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5\xe38\x00\xa1\xb2\xe9k\xde\x101c\n\x95\x9a\xa0\a\xf2nQ\x89Hz\x9a0E9D\x00\x00\u07d4\xe5\xe3~\x19@\x8f,\xfb\xec\x834\x9d\u0501S\xa4\xa7\x95\xa0\x8f\x89\u3bb5sr@\xa0\x00\x00\u07d4\xe5\xed\xc7>bo]4A\xa4U9\xb5\xf7\xa3\x98\u0153\xed\xf6\x89.\xe4IU\b\x98\xe4\x00\x00\u07d4\xe5\xed\xf8\x12?$\x03\xce\x1a\x02\x99\xbe\xcfz\xactM\a_#\x89\n\xdaUGK\x814\x00\x00\u07d4\xe5\xf8\xefm\x97\x066\xb0\u072aO \x0f\xfd\xc9\xe7Z\xf1t\x1c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe5\xfb1\xa5\xca\xeej\x96\xde9;\xdb\xf8\x9f\xbee\xfe\x12[\xb3\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe5\xfb\xe3I\x84\xb67\x19o3\x1cg\x9d\f\fG\xd84\x10\xe1\x89llD\xfeG\xec\x05\x00\x00\u07d4\xe6\tU\xdc\v\xc1V\xf6\xc4\x18I\xf6\xbdwk\xa4K\x0e\xf0\xa1\x89\x10C\x16'\xa0\x93;\x00\x00\u07d4\xe6\nU\xf2\u07d9m\u00ee\xdbil\b\xdd\xe09\xb2d\x1d\xe8\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe6\x11[\x13\xf9y_~\x95e\x02\xd5\aEg\u06b9E\xcek\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xe6\x1f(\t\x15\xc7t\xa3\x1d\"<\xf8\f\x06\x92f\xe5\xad\xf1\x9b\x89/\xb4t\t\x8fg\xc0\x00\x00\u07d4\xe6/\x98e\a\x12\xeb\x15\x87S\xd8)r\xb8\u9723\xf6\x18w\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe6/\x9d|d\xe8\xe2cZ\xeb\x88=\xd7;\xa6\x84\xee|\x10y\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xe6>xt\x14\xb9\x04\x84x\xa5\a35\x9e\xcd\xd7\xe3dz\xa6\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xe6FfXr\xe4\v\rz\xa2\xff\x82r\x9c\xaa\xba[\xc3\u8789\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe6N\xf0\x12e\x8dT\xf8\xe8`\x9cN\x90#\xc0\x9f\xe8e\xc8;\x89\x01\x84\x93\xfb\xa6N\xf0\x00\x00\u07d4\xe6On\x1dd\x01\xb5l\akd\xa1\xb0\x86}\v/1\rN\x89\x02\u02edq\xc5:\xe5\x00\x00\u07d4\xe6g\xf6R\xf9W\u008c\x0ef\u04364\x17\xc8\f\x8c\x9d\xb8x\x89 \x9d\x92/RY\xc5\x00\x00\xe0\x94\xe6w\xc3\x1f\xd9\xcbr\x00u\u0724\x9f\x1a\xbc\xcdY\xec3\xf74\x8a\x01\xa6\u05be\xb1\xd4.\xe0\x00\x00\u07d4\xe6|,\x16e\u02038h\x81\x87b\x9fI\xe9\x9b`\xb2\u04fa\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xe6\x9al\xdb:\x8a}\xb8\xe1\xf3\f\x8b\x84\xcds\xba\xe0+\xc0\xf8\x8a\x03\x94\xfd\xc2\xe4R\xf6q\x80\x00\u07d4\xe6\x9d\x1c7\x8bw\x1e\x0f\xef\xf0Q\xdbi\xd9f\xacgy\xf4\xed\x89\x1d\xfaj\xaa\x14\x97\x04\x00\x00\u07d4\xe6\x9f\xcc&\xed\"_{.7\x984\xc5$\xd7\f\x175\u5f09lk\x93[\x8b\xbd@\x00\x00\u07d4\xe6\xa3\x01\x0f\x02\x01\xbc\x94\xffg\xa2\xf6\x99\xdf\xc2\x06\xf9\xe7gB\x89/\xa7\xcb\xf6dd\x98\x00\x00\u07d4\xe6\xa6\xf6\xddop\xa4V\xf4\xec\x15\xefz\xd5\xe5\u06f6\x8b\xd7\u0709\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xe6\xb2\x0f\x98\n\xd8S\xad\x04\xcb\xfc\x88|\xe6`\x1ck\xe0\xb2L\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\u6cec?]M\xa5\xa8\x85}\v?0\xfcK+i+w\u05c9O%\x91\xf8\x96\xa6P\x00\x00\u07d4\xe6\xb9T_~\u0406\xe5R\x92F9\xf9\xa9\xed\xbb\xd5T\v>\x89\xcb\xd4{n\xaa\x8c\xc0\x00\x00\xe0\x94\xe6\xbc\xd3\n\x8f\xa18\xc5\xd9\xe5\xf6\xc7\xd2\u0680i\x92\x81-\u034a7\x0e\xa0\xd4|\xf6\x1a\x80\x00\x00\u07d4\xe6\xc8\x1f\xfc\xec\xb4~\xcd\xc5\\\vq\xe4\x85_>^\x97\xfc\x1e\x89\x12\x1e\xa6\x8c\x11NQ\x00\x00\u07d4\xe6\xcb&\vqmL\n\xb7&\xee\xeb\a\xc8pr\x04\xe2v\xae\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe6\xcb?1$\xc9\xc9\xcc84\xb1'K\xc33dV\xa3\x8b\xac\x89\x17+\x1d\xe0\xa2\x13\xff\x00\x00\xe0\x94\xe6\xd2\"\t\xff\u0438u\t\xad\xe3\xa8\xe2\xefB\x98y\u02c9\xb5\x8a\x03\xa7\xaa\x9e\x18\x99\xca0\x00\x00\u07d4\xe6\u051f\x86\xc2(\xf4sg\xa3^\x88l\xaa\xcb'\x1eS\x94)\x89\x16^\xc0\x9d\xa7\xa1\x98\x00\x00\u07d4\xe6\xe6!\xea\xab\x01\xf2\x0e\xf0\x83k|\xadGFL\xb5\xfd<\x96\x89\x11!\x93B\xaf\xa2K\x00\x00\u07d4\xe6\xe8\x861{jf\xa5\xb4\xf8\x1b\xf1d\xc58\xc2d5\x17e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe6\u98ddu\x0f\xe9\x949N\xb6\x82\x86\xe5\xeab\xa6\x99x\x82\x89 \x86\xac5\x10R`\x00\x00\xe0\x94\xe6\xec\\\xf0\u011b\x9c1~\x1epc\x15\uf7b7\xc0\xbf\x11\xa7\x8a\x03\xa4i\xf3F~\x8e\xc0\x00\x00\u07d4\xe6\xf5\xebd\x9a\xfb\x99Y\x9cAK'\xa9\xc9\xc8U5\u007f\xa8x\x89\x90\xf54`\x8ar\x88\x00\x00\xe0\x94\xe6\xfe\n\xfb\x9d\xce\xdd7\xb2\xe2,E\x1b\xa6\xfe\xabg4\x803\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xe7\x10\xdc\u041b\x81\x01\xf9C{\xd9}\xb9\ns\xef\x99=\v\xf4\x89\x14\xee6\xc0Z\xc2R\x00\x00\u07d4\xe7'\xe6~\xf9\x11\xb8\x1fl\xf9\xc7?\xcb\xfe\xbc+\x02\xb5\xbf\u0189lk\x93[\x8b\xbd@\x00\x00\u07d4\xe7.\x1d3\\\u009a\x96\xb9\xb1\xc0/\x00:\x16\xd9q\xe9\v\x9d\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xe71\x1c\x953\xf0\t,rH\xc9s\x9b[,\x86J4\xb1\u0389\x97\xf9}l\xc2m\xfe\x00\x00\u07d4\xe7;\xfe\xad\xa6\xf0\xfd\x01o\xbc\x84>\xbc\xf6\xe3p\xa6[\xe7\f\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xe7<\xcfCg%\xc1Q\xe2U\xcc\xf5!\f\xfc\xe5\xa4?\x13\xe3\x89\x01\x15NS!}\xdb\x00\x00\u07d4\xe7B\xb1\xe6\x06\x9a\x8f\xfc'\f\xc6\x1f\xa1d\xac\x15SE\\\x10]\x04\x88~\x14\x89\x06\x96\xd8Y\x00 \xbb\x00\x00\u07d4\xe7\\\x1f\xb1w\b\x9f>X\xb1\x06y5\xa6Yn\xf1s\u007f\xb5\x89\x05j\x87\x9f\xa7uG\x00\x00\u07d4\xe7\\;8\xa5\x8a?3\xd5V\x90\xa5\xa5\x97f\xbe\x18^\x02\x84\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe7a\xd2\u007f\xa3P,\xc7k\xb1\xa6\bt\x0e\x14\x03\u03dd\xfci\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\u07d4\xe7f\xf3O\xf1o<\xfc\xc9s!r\x1fC\xdd\xf5\xa3\x8b\f\xf4\x89T\x06\x923\xbf\u007fx\x00\x00\u07d4\xe7m\x94Z\xa8\x9d\xf1\xe4W\xaa4+1\x02\x8a^\x910\xb2\u03897\b\xba\xed=h\x90\x00\x00\u07d4\xe7s^\xc7e\x18\xfcj\xa9-\xa8qZ\x9e\xe3\xf6%x\x8f\x13\x89lM\x16\v\xaf\xa1\xb7\x80\x00\xe0\x94\xe7z\x89\xbdE\xdc\x04\xee\xb4\xe4\x1d{Ykp~nQ\xe7L\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xe7}}\uac96\u0234\xfa\a\xca;\xe1\x84\x16=Zm`l\x89\x05\x049\x04\xb6q\x19\x00\x00\u07d4\xe7\u007f\xeb\xab\xdf\b\x0f\x0f]\xca\x1d?Wf\xf2\xa7\x9c\x0f\xfa|\x89K\"\x9d(\xa8Ch\x00\x00\xe0\x94\u7025c\x06\xba\x1ek\xb31\x95,\"S\x9b\x85\x8a\xf9\xf7}\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\xe7\x81\xecs-@\x12\x02\xbb\x9b\xd18`\x91\r\xd6\u009a\xc0\xb6\x89C8t\xf62\xcc`\x00\x00\u07d4\xe7\x84\xdc\xc8s\xaa\x8c\x15\x13\xec&\xff6\xbc\x92\xea\xc6\xd4\xc9h\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xe7\x91-L\xf4V,W=\xdc[q\xe3s\x10\xe3x\xef\x86\u0249\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4\xe7\x91\u0545\xb8\x996\xb2])\x8f\x9d5\xf9\xf9\xed\xc2Z)2\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe7\x924\x9c\xe9\xf6\xf1O\x81\xd0g@\x96\xbe\xfa\x1f\x92!\xcd\xea\x89[]#J\r\xb48\x80\x00\u07d4\xe7\x96\xfdN\x83\x9bL\x95\xd7Q\x0f\xb7\xc5\xc7+\x83\xc6\xc3\xe3\u01c9\x1b\xc43\xf2?\x83\x14\x00\x00\xe0\x94\xe7\xa4/Y\xfe\xe0t\xe4\xfb\x13\xea\x9eW\xec\xf1\xccH(\"I\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xe7\xa4V\f\x84\xb2\x0e\x0f\xb5LIg\f)\x03\xb0\xa9lB\xa4\x89 j\xea\u01e9\x03\x98\x00\x00\u07d4\xe7\xa8\xe4q\xea\xfby\x8fET\xccnRg0\xfdV\xe6,}\x8965\u026d\xc5\u07a0\x00\x00\u07d4\u7f82\xc6Y<\x1e\xed\xdd*\xe0\xb1P\x01\xff \x1a\xb5{/\x89\x01\t\x10\xd4\xcd\xc9\xf6\x00\x00\u07d4\xe7\u01b5\xfc\x05\xfct\x8e[C\x81rdI\xa1\xc0\xad\x0f\xb0\xf1\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe7\xd1u$\xd0\v\xad\x82I|\x0f'\x15jd\u007f\xf5\x1d'\x92\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xe7\xd2\x13\x94\u007f\u02d0J\xd78H\v\x1e\xed/\\2\x9f'\xe8\x89\x01\x03\u00f1\xd3\xe9\xc3\x00\x00\u07d4\xe7\xd6$\x06 \xf4,^\u06f2\xed\xe6\xae\xc4=\xa4\xed\x9bWW\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe7\xda`\x9d@\xcd\xe8\x0f\x00\xce[O\xfbj\xa9\u04304\x94\xfc\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe7\xf0oi\x9b\xe3\x1cD\vC\xb4\xdb\x05\x01\xec\x0e%&\x16D\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe7\xf4\xd7\xfeoV\x1f\u007f\xa1\xda0\x05\xfd6TQ\xad\x89\u07c9\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe7\xfd\x8f\xd9Y\xae\xd2v~\xa7\xfa\x96\f\xe1\xdbS\xaf\x80%s\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe8\x0e\u007f\xef\x18\xa5\xdb\x15\xb0\x14s\xf3\xadkx\xb2\xa2\xf8\xac\u0649\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xe8\x13\u007f\xc1\xb2\xec|\xc7\x10:\xf9!\x89\x9bJ9\xe1\xd9Y\xa1\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4\xe8\x1c-4l\n\xdfL\xc5g\b\xf69K\xa6\xc8\u0226J\x1e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe8,X\xc5yC\x1bg5F\xb5:\x86E\x9a\xca\xf1\u079b\x93\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xe84\xc6C\x18 \\\xa7\xddJ!\xab\xcb\b&l\xb2\x1f\xf0,\x8965\xc6 G9\u0640\x00\u07d4\xe86\x04\xe4\xffk\xe7\xf9o`\x18\xd3\xec0r\xecR]\xffk\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\xe0\x94\xe8E\xe3\x87\xc4\xcb\u07d8\"\x80\xf6\xaa\x01\xc4\x0eK\xe9X\u0772\x8a\x05K@\xb1\xf8R\xbd\xa0\x00\x00\u07d4\xe8H\xca~\xbf\xf5\xc2O\x9b\x9c1g\x97\xa4;\xf7\xc3V)-\x89\x06.\x11\\\x00\x8a\x88\x00\x00\u07d4\xe8KU\xb5%\xf1\x03\x9etK\x91\x8c\xb33$\x92\xe4^\xcaz\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xe8O\x80v\xa0\xf2\x96\x9e\xcd3>\xef\x8d\xe4\x10B\x98b\x91\xf2\x89\x17k4O*x\xc0\x00\x00\u07d4\xe8d\xfe\xc0~\xd1!Je1\x1e\x11\xe3)\xde\x04\r\x04\xf0\xfd\x89Y\u0283\xf5\xc4\x04\x96\x80\x00\u07d4\xe8}\xba\xc66\xa3w!\xdfT\xb0\x8a2\xefIY\xb5\xe4\xff\x82\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xe8~\x9b\xbf\xbb\xb7\x1c\x1at\ft\xc7#Bm\xf5]\x06=\u064a\x01\xb1\x92\x8c\x00\u01e68\x00\x00\u07d4\xe8~\xacm`+A\t\xc9g\x1b\xf5{\x95\f,\xfd\xb9\x9dU\x89\x02\xb4\xf2\x19r\xec\xce\x00\x00\xe0\x94\u807b\xbeir-\x81\xef\xec\xaaH\u0455*\x10\xa2\xbf\xac\x8f\x8a\x03c\\\x9a\xdc]\xea\x00\x00\x00\u07d4\xe8\x92Is\x8b~\xce\xd7\xcbfjf\xe4s\xbcv\x82/U\t\x8d\x89\xb9\x1c\u0149lk\x93[\x8b\xbd@\x00\x00\u07d4\xe8\xc3\u04f0\xe1\u007f\x97\xd1\xe7V\xe6\x84\xf9N\x14p\xf9\x9c\x95\xa1\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\xe8\xc3\xf0E\xbb}8\xc9\xd2\U000d5c3a\x84\x92\xb2S#\t\x01\x8a\x01\xe7\xe4\x17\x1b\xf4\u04e0\x00\x00\u07d4\xe8\xccC\xbcO\x8a\xcf9\xbf\xf0N\xbf\xbfB\xaa\xc0j2\x84p\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xe8\xd9B\xd8/\x17^\xcb\x1c\x16\xa4\x05\xb1\x01C\xb3\xf4k\x96:\x89\x1e\xd2\xe8\xffm\x97\x1c\x00\x00\u07d4\xe8\u077e\xd72\xeb\xfeu@\x96\xfd\xe9\bk\x8e\xa4\xa4\xcd\xc6\x16\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe8\xder^\xca]\xef\x80_\xf7\x94\x1d1\xac\x1c.4-\xfe\x95\x89\x85~\ro\x1d\xa7j\x00\x00\u07d4\xe8\xe9\x85\x05\x86\xe9OR\x99\xabIK\xb8!\xa5\xf4\f\x00\xbd\x04\x89\xcf\x15&@\xc5\xc80\x00\x00\xe0\x94\xe8\xea\u047b\x90\xcc\u00ee\xa2\xb0\xdc\u0175\x80VUFU\xd1\u054a\x01\xa4\xab\xa2%\xc2\a@\x00\x00\u07d4\xe8\xea\xf1)D\t-\xc3Y\x9b9S\xfa|\xb1\xc9v\x1c\xc2F\x89a\x94\x04\x9f0\xf7 \x00\x00\xe0\x94\xe8\xedQ\xbb\xb3\xac\xe6\x9e\x06\x02K3\xf8hD\xc4sH\u06de\x8a\"\xf9\xea\x89\xf4\xa7\xd6\xc4\x00\x00\u07d4\xe8\xef\x10\r|\xe0\x89X2\xf2g\x8d\xf7-J\u03cc(\xb8\xe3\x89\x1b\x1bk\u05efd\xc7\x00\x00\u07d4\xe8\xf2\x99i\xe7\\e\xe0\x1c\xe3\xd8aT }\n\x9e|v\xf2\x89\xa2/\xa9\xa7:'\x19\x80\x00\u07d4\xe8\xfc6\xb0\x13\x1e\xc1 \xac\x9e\x85\xaf\xc1\f\xe7\vV\u0636\xba\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xe9\n5L\xec\x04\u059e]\x96\xdd\xc0\xc5\x13\x8d=3\x15\n\xa0\x89\x1b\x1a}\u03caD\u04c0\x00\xe0\x94\xe9\x13>}1\x84]_+f\xa2a\x87\x92\xe8i1\x1a\xcff\x8a\x05\x17\xc0\xcb\xf9\xa3\x90\x88\x00\x00\u07d4\xe9\x1d\xac\x01\x95\xb1\x9e7\xb5\x9bS\xf7\xc0\x17\xc0\xb29[\xa4L\x89e\xea=\xb7UF`\x00\x00\u07d4\xe9\x1f\xa0\xba\xda\u0779\xa9~\x88\xd3\xf4\xdb|U\u05bbt0\xfe\x89\x14b\fW\xdd\xda\xe0\x00\x00\u07d4\xe9#\xc0aw\xb3B~\xa4H\xc0\xa6\xff\x01\x9bT\xccT\x8d\x95\x89\x01\xf7\x80\x01Fg\xf2\x80\x00\xe0\x94\xe9=G\xa8\u0288]T\fNRo%\xd5\xc6\xf2\xc1\b\u0138\x8a\x17\xda:\x04\u01f3\xe0\x00\x00\x00\u07d4\xe9E\x8fh\xbb',\xb5g:\x04\xf7\x81\xb4\x03Uo\u04e3\x87\x89\x03N\x8b\x88\xce\xe2\xd4\x00\x00\u07d4\xe9IA\xb6\x03`\x19\xb4\x01j0\xc1\x03}Zi\x03\xba\xba\xad\x89*H\xac\xabb\x04\xb0\x00\x00\u07d4\xe9I[\xa5\x84'(\xc0\ud5fe7\xd0\xe4\"\xb9\x8di ,\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xe9M\xed\x99\u0735r\xb9\xbb\x1d\u02e3/m\xee\x91\xe0W\x98N\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\xe0\x94\xe9QyR}\uc951l\xa9\xa3\x8f!\\\x1e\x9c\xe77\xb4\u024a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\xe9U\x91\x85\xf1f\xfc\x95\x13\xccq\x11aD\xce-\xeb\x0f\x1dK\x8a\x04<3\xc1\x93ud\x80\x00\x00\u0794\xe9^\x92\xbb\xc6\xde\a\xbf:f\x0e\xbf_\xeb\x1c\x8a5'\xe1\u0148\xfc\x93c\x92\x80\x1c\x00\x00\xe0\x94\xe9e\u06a3@9\xf7\xf0\xdfb7Z7\u5acar\xb3\x01\xe7\x8a\x01\x03\xfd\xde\u0373\xf5p\x00\x00\u07d4\xe9i\xea\x15\x95\xed\xc5\u0127\a\xcf\xde8\t)c2Q\xa2\xb0\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xe9k\x18N\x1f\x0fT\x92J\xc8t\xf6\v\xbfDptF\xb7+\x89\x9d\xcc\x05\x15\xb5n\f\x00\x00\xe0\x94\xe9m}L\xdd\x15U:NM1mmd\x80\xca<\xea\x1e8\x8a\x02\x95]\x02\xe1\xa15\xa0\x00\x00\u07d4\xe9n-8\x13\xef\xd1\x16_\x12\xf6\x02\xf9\u007fJb\x90\x9d\x1b;\xc0\xe9\xaa\"\u007f\x90\x89'\xcaK\xd7\x19\xf0\xb8\x00\x00\u07d4\xea,\x19}&\xe9\x8b\r\xa8>\x1br\u01c7a\x8c\x97\x9d=\xb0\x89\x01\x11du\x9f\xfb2\x00\x00\xe0\x94\xea7y\xd1J\x13\xf6\u01c5f\xbc\xde@5\x91A:b9\u06ca)\xb7d2\xb9DQ \x00\x00\u07d4\xeaN\x80\x9e&j\xe5\xf1<\xdb\u33dd\x04V\xe68m\x12t\x89\xf3\xf2\v\x8d\xfai\xd0\x00\x00\xe0\x94\xeaS\xc9T\xf4\xed\x97\xfdH\x10\x11\x1b\u06b6\x9e\xf9\x81\xef%\xb9\x8a\x03\xa9\u057a\xa4\xab\xf1\xd0\x00\x00\u07d4\xeaS\xd2ed\x85\x9d\x9e\x90\xbb\x0eS\xb7\xab\xf5`\xe0\x16,8\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xea`Ci\x12\xdek\xf1\x87\u04e4r\xff\x8fS3\xa0\xf7\xed\x06\x89\x01\x11du\x9f\xfb2\x00\x00\u07d4\xea`T\x9e\xc7U?Q\x1d!I\xf2\xd4fl\xbd\x92C\xd9<\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xeaf\xe7\xb8M\u037f6\xee\xa3\xe7[\x858*u\xf1\xa1]\x96\x89]\xbc\x91\x91&o\x11\x80\x00\u07d4\xeahlPW\t<\x17\x1cf\u06d9\xe0\x1b\x0e\xce\xcb0\x86\x83\x89\x14\u0768],\xe1G\x80\x00\u07d4\xeaj\xfe,\xc9(\xac\x83\x91\xeb\x1e\x16_\xc4\x00@\xe3t!\u7262\u007f\xa0c\xb2\xe2\xe6\x80\x00\u07d4\xeay\x05}\xab\xef^d\xe7\xb4O\u007f\x18d\x8e~S7\x18\u0489\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xea|Mm\xc7)\xcdk\x15|\x03\xad#|\xa1\x9a \x93F\u00c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xea\x81h\xfb\xf2%\xe7\x86E\x9c\xa6\xbb\x18\xd9c\xd2kPS\t\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xea\x81\u02868T\f\xd9\xd4\xd7=\x06\x0f,\xeb\xf2$\x1f\xfc>\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xea\x83\x17\x19yYB@A\xd9\xd7\xc6z>\xce\x1d\xbbx\xbbU\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4\xea\x85'\xfe\xbf\xa1\xad\xe2\x9e&A\x93)\u04d3\xb9@\xbb\xb7\u0709lj\xccg\u05f1\xd4\x00\x00\u07d4\xea\x8f0\xb6\xe4\xc5\xe6R\x90\xfb\x98d%\x9b\u0159\x0f\xa8\ue289\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xea\x94\xf3(\b\xa2\uf29b\xf0\x86\x1d\x1d$\x04\xf7\xb7\xbe%\x8a\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xea\xa4\\\xea\x02\xd8},\xc8\xfd\xa9CN-\x98[\xd4\x03\x15\x84\x89h\x1f\xc2\xccn+\x8b\x00\x00\xe0\x94\uac3d\x14\x83\t\x18l\xf8\xcb\xd1;r2\xd8\tZ\u02c3:\x8a\x02C\x9a\x88\x1cjq|\x00\x00\u07d4\uaed0\xd3y\x89\xaa\xb3\x1f\xea\xe5G\xe0\xe6\xf3\x99\x9c\xe6\xa3]\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xea\xc0\x82~\xff\fn?\xf2\x8a}JT\xf6\\\xb7h\x9d{\x99\x89\x9a\xd9\u67ddGR\x00\x00\u07d4\xea\xc1H(&\xac\xb6\x11\x1e\x19\xd3@\xa4_\xb8QWk\xed`\x89\x01\xbe\x8b\xab\x04\u067e\x80\x00\xe0\x94\xea\xc1{\x81\xedQ\x91\xfb\b\x02\xaaT3s\x13\x83A\a\xaa\xa4\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xea\u00efW\x84\x92\u007f\u9958\xfcN\xec8\xb8\x10/7\xbcX\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xea\u01b9\x88BT.\xa1\v\xb7O&\xd7\xc7H\x8fi\x8bdR\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xea\xc7h\xbf\x14\xb8\xf9C.i\xea\xa8*\x99\xfb\xeb\x94\xcd\f\x9c\x8a\x14\u06f2\x19\\\xa2(\x90\x00\x00\u07d4\xea\xd2\x1c\x1d\xec\u03ff\x1c\\\xd9f\x88\xa2Gki\xba\a\xceJ\x89\x03\xf2M\x8eJ\x00p\x00\x00\u07d4\xea\xd4\xd2\xee\xfbv\xab\xaeU3\x96\x1e\xdd\x11@\x04\x06\xb2\x98\xfc\x89\xd2U\xd1\x12\xe1\x03\xa0\x00\x00\u07d4\xea\xd6Rb\xed]\x12-\xf2\xb2u\x14\x10\xf9\x8c2\xd1#\x8fQ\x89\x05\x83\x17\xedF\xb9\xb8\x00\x00\u07d4\xea\xd7P\x16\u3801Pr\xb6\xb1\b\xbc\xc1\xb7\x99\xac\xf08>\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xea\xea#\xaa\x05r\x00\xe7\xc9\xc1^\x8f\xf1\x90\xd0\xe6l\f\x0e\x83\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xea\xed\x16\xea\xf5\u06ab[\xf0)^^\a\u007fY\xfb\x82U\x90\v\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xea\xed\xcck\x8bib\xd5\xd9(\x8c\x15lW\x9dG\xc0\xa9\xfc\xff\x89\x04\x9b\x9c\xa9\xa6\x944\x00
\x00\u07d4\xea\xf5#\x88Tn\xc3Z\xcaolc\x93\xd8\xd6\t\xde:K\xf3\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xeb\x10E\x8d\xac\xa7\x9eJk$\xb2\x9a\x8a\x8a\xdaq\x1b\u007f.\xb6\x89\u063beI\xb0+\xb8\x00\x00\u07d4\xeb\x1c\xea{E\u047dM\x0e*\x00{\u04ff\xb3Tu\x9e,\x16\x89\n\xbb\xcdN\xf3wX\x00\x00\u07d4\xeb%H\x1f\u035c\"\x1f\x1a\xc7\xe5\xfd\x1e\u0353\a\xa1b\x15\xb8\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\xe0\x94\xeb.\xf3\u04cf\xe6R@<\xd4\xc9\xd8^\xd7\xf0h,\xd7\xc2\u078a\t\x0fSF\b\xa7(\x80\x00\x00\xe0\x94\xeb;\xddY\xdc\u0765\xa9\xbb*\xc1d\x1f\xd0!\x80\xf5\xf3e`\x8a\x01e\xc9fG\xb3\x8a \x00\x00\u07d4\xeb<\xe7\xfc8\x1cQ\xdb}_\xbdi/\x8f\x9e\x05\x8aLp=\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xebE?Z:\xdd\u074a\xb5gP\xfa\xdb\x0f\xe7\xf9M\x9c\x89\xe7\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xebO\x00\xe2\x836\xea\t\x94%\x88\ueb12\x18\x11\xc5\"\x14<\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xebR\xab\x10U4\x922\x9c\x1cT\x83:\xe6\x10\xf3\x98\xa6[\x9d\x89\b=lz\xabc`\x00\x00\u07d4\xebW\r\xba\x97R'\xb1\xc4-n\x8d\xea,V\u026d\x96\x06p\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xebc\x94\xa7\xbf\xa4\u0489\x11\u0565\xb2>\x93\xf3^4\f\"\x94\x89\x04:w\xaa\xbd\x00x\x00\x00\u07d4\xebh\x10i\x1d\x1a\xe0\u045eG\xbd\"\u03be\u0cfa'\xf8\x8a\x89\x87\x85c\x15\xd8x\x15\x00\x00\u07d4\xebvBL\x0f\u0557\xd3\xe3A\xa9d*\xd1\xee\x11\x8b+W\x9d\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xeb| +F+|\u0145]t\x84u_n&\xefC\xa1\x15\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xeb\x83\\\x1a\x91\x18\x17\x87\x8a3\xd1gV\x9e\xa3\xcd\u04c7\xf3(\x8965\u026d\xc5\u07a0\x00\x00\u07d4\ub268\x82g\t\t\xcf7~\x9ex(n\xe9{\xa7\x8dF\u0089+|\xc2\xe9\xc3\"\\\x00\x00\xe0\x94\xeb\x90\u01d3\xb3S\x97a\xe1\xc8\x14\xa2\x96q\x14\x86\x92\x19>\xb4\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xeb\x9c\xc9\xfe\bi\xd2\u06b5,\u01ea\xe8\xfdW\xad\xb3_\x9f\xeb\x89j\x93\xbb\x17\xaf\x81\xf8\x00\x00\xe0\x94\ub8c8\xb0\xda'\xc8{\x1c\xc0\xea\xc6\xc5{,Z\vE\x9c\x1a\x8a\x01p\xa0\xf5\x04\x0eP@\x00\x00\u07d4\xeb\xaa!m\xe9\xccZC\x03\x17\a\xd3o\xe6\u057e\xdc\x05\xbd\xf0\x89j\xc5\xc6-\x94\x86\a\x00\x00\u07d4\xeb\xac+D\b\xefT1\xa1;\x85\b\xe8bP\x98!\x14\xe1E\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xeb\xb6,\xf8\xe2,\x88K\x1b(\xc6\xfa\x88\xfb\xbc\x17\x93\x8a\xa7\x87\x89+By\x84\x03\u0278\x00\x00\u07d4\xeb\xb7\xd2\xe1\x1b\u01b5\x8f\n\x8dE\xc2\xf6\xde0\x10W\n\u0211\x89\x01s\x17\x90SM\xf2\x00\x00\u07d4\xeb\xbbO,=\xa8\xbe>\xb6-\x1f\xfb\x1f\x95\x02a\u03d8\xec\u0689lk\x93[\x8b\xbd@\x00\x00\u07d4\xeb\xbdM\xb9\x01\x99R\u058b\x1b\x0fm\x8c\xf0h<\x008{\xb5\x89\x12\x04\x01V=}\x91\x00\x00\u07d4\xeb\xbe\xeb%\x91\x84\xa6\xe0\x1c\xcc\xfc\"\a\xbb\u0603xZ\xc9\n\x89!\x9b\xc1\xb0G\x83\xd3\x00\x00\u07d4\xeb\xd3V\x15j81#4=H\x84;\xff\xeda\x03\xe8f\xb3\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xeb\xd3{%ec\xe3\fo\x92\x89\xa8\xe2p/\bR\x88\b3\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xeb\xe4l\xc3\xc3L2\xf5\xad\xd6\xc3\x19[\xb4\x86\xc4q>\xb9\x18\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xeb\xff\x84\xbb\xefB0q\xe6\x04\xc3a\xbb\xa6w\xf5Y=\xefN\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xec\t'\xba\xc7\xdc6f\x9c(5J\xb1\xbe\x83\xd7\xee\xc3\t4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xec\x0e\x18\xa0\x1d\xc4\xdc]\xaa\xe5g\xc3\xfaL\u007f\x8f\x9bY\x02\x05\x89\x11\x1f\xfe@JA\xe6\x00\x00\xe0\x94\xec\x116,\xec\x81\t\x85\xd0\xeb\xbd{sE\x14D\x98[6\x9f\x8a\x06ZNIWpW1\x80\x00\u07d4\xec,\xb8\xb97\x8d\xff1\xae\xc3\xc2.\x0em\xad\xff1J\xb5\u0749lk\x93[\x8b\xbd@\x00\x00\u07d4\xec0\xad\u0749[\x82\xee1\x9eT\xfb\x04\xcb+\xb09q\xf3k\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xec;\x8bX\xa1'\x03\xe5\x81\xce_\xfd~!\xc5}\x1e\\f?\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xecHg\xd2\x17Z\xb5\xb9F\x93aYUFUF\x84\u0364`\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07
d4\xecM\b\xaa.GIm\u0287\"]\xe3?+@\xa8\xa5\xb3o\x89\b\x90\xb0\xc2\xe1O\xb8\x00\x00\u07d4\xecX\xbc\r\f \xd8\xf4\x94efAS\xc5\xc1\x96\xfeY\u6f89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xec[\x19\x8a\x00\u03f5Z\x97\xb5\xd56D\xcf\xfa\x8a\x04\u04abE\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xec]\xf2'\xbf\xa8]z\xd7kBn\x1c\xee\x96;\xc7\xf5\x19\u074965\u026d\xc5\u07a0\x00\x00\xe0\x94\xec_\xea\xfe!\f\x12\xbf\u0265\xd0Y%\xa1#\xf1\xe7?\xbe\xf8\x8a`\x8f\xcf=\x88t\x8d\x00\x00\x00\u07d4\xeci\x04\xba\xe1\xf6\x97\x90Y\x17\t\xb0`\x97\x83s?%s\xe3\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\xe0\x94\xecs\x11L^@o\u06fe\t\xb4\xfab\x1b\xd7\x0e\xd5N\xa1\xef\x8a\x050%\xcd!o\xceP\x00\x00\u07d4\xecs\x83=\xe4\xb8\x10\xbb\x02x\x10\xfc\x8fi\xf5D\xe8<\x12\u044965\u026d\xc5\u07a0\x00\x00\u07d4\xecu\xb4\xa4u\x13\x12\v\xa5\xf8`9\x81O\x19\x98\xe3\x81z\u00c9\t\xb0\xbc\xe2\xe8\xfd\xba\x00\x00\u07d4\xecv\xf1.W\xa6U\x04\x03?,\v\xceo\xc0;\xd7\xfa\n\u0109\xc2\x12z\xf8X\xdap\x00\x00\u0794\xec\x80\x14\xef\xc7\xcb\xe5\xb0\xceP\xf3V,\xf4\xe6\u007f\x85\x93\xcd2\x88\xf0\x15\xf2W6B\x00\x00\u07d4\xec\x82\xf5\r\x06G_hM\xf1\xb3\x92\xe0\r\xa3A\xaa\x14TD\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xec\x83\xe7\x98\u00d6\xb7\xa5^*\"$\xab\u0343K'\xeaE\x9c\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xec\x89\xf2\xb6x\xa1\xa1[\x914\xec^\xb7\fjb\a\x1f\xba\xf9\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xec\x8c\x1d{j\xac\xcdB\x9d\xb3\xa9\x1e\xe4\xc9\xeb\x1c\xa4\xf6\xf7<\x89\xe6d\x99\"\x88\xf2(\x00\x00\xe0\x94\xec\x98Q\xbd\x91rpa\x02g\xd6\x05\x18\xb5M<\xa2\xb3[\x17\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xec\x99\xe9]\xec\xe4o\xff\xfb\x17^\xb6@\x0f\xbe\xbb\b\ue6d5\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xec\xa5\xf5\x87\x92\xb8\xc6-*\xf5Vq~\xe3\xee0(\xbeM\u0389lk\x93[\x8b\xbd@\x00\x00\u07d4\xec\xabZ\xba[\x82\x8d\xe1pS\x81\xf3\x8b\xc7D\xb3+\xa1\xb47\x892\xf5\x1e\u06ea\xa30\x00\x00\u07d4\xec\xaf3P\xb7\xce\x14M\x06\x8b\x18`\x10\x85,\x84\xdd\f\xe0\xf0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xec\xb9LV\x8b\xfeY\xad\xe6Pd_O&0lsl\xac\xe4\x89\x0e~\xeb\xa3A\vt\x00\x00\xe0\x94\xec\xbeB^g\r9\tN \xfbVC\xa9\xd8\x18\xee\xd26\u078a\x01\x0f\f\xf0d\xddY \x00\x00\xe0\x94\xec\xbe^\x1c\x9a\u04b1\xdc\xcf\n0_\xc9R/Fi\xdd:\xe7\x8a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xec\xcfz\x04W\xb5f\xb3F\xcag:\x18\x0fDA0!j\u00c9\x05k\xc7^-c\x10\x00\x00\u07d4\xec\u0466(\x025\x1aAV\x8d#\x030\x04\xac\xc6\xc0\x05\xa5\u04c9\x02\xb5\xe3\xaf\x16\xb1\x88\x00\x00\u07d4\xec\xd2v\xafd\u01dd\x1b\u0669+\x86\xb5\u835a\x95\xeb\x88\xf8\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xec\u0506\xfc\x19g\x91\xb9,\xf6\x12\xd3HaO\x91VH\x8b~\x8a\x02\x8a\x85t%Fo\x80\x00\x00\u07d4\xec\xda\xf92)\xb4^\xe6r\xf6]\xb5\x06\xfb^\xca\x00\xf7\xfc\xe6\x89W\x01\xf9m\xcc@\xee\x80\x00\u07d4\xec\xe1\x11g\vV<\u037e\xbc\xa5#\x84)\x0e\xcdh\xfe\\\x92\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xec\xe1\x15&\x82\xb7Y\x8f\xe2\xd1\xe2\x1e\xc1U3\x88T5\xac\x85\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xec\xe1)\bw\xb5\x83\xe3a\xa2\xd4\x1b\x00\x93F\xe6'N%8\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xec\xf0]\a\xea\x02n~\xbfIA\x00#5\xba\xf2\xfe\xd0\xf0\x02\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xec\xf2L\xdd|\"\x92\x8cD\x1eiM\xe4\xaa1\xb0\xfa\xb5\x97x\x89 
\x86\xac5\x10R`\x00\x00\xe0\x94\xec\xfd\x00M\x02\xf3l\xd4\u0634\xa8\xc1\xa9S;j\xf8\\\xd7\x16\x8a\x01\x0fA\xac\xb4\xbb;\x9c\x00\x00\xe0\x94\xed\x02\x06\xcb#1Q(\xf8\xca\xff&\xf6\xa3\v\x98Tg\xd0\"\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xed\x10e\xdb\u03dds\xc0O\xfcy\b\x87\r\x88\x14h\xc1\xe12\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xed\x12vQ;o\u0186(\xa7A\x85\xc2\xe2\f\xbb\xcax\x17\xbf\x89\nZ\xa8P\t\xe3\x9c\x00\x00\xe0\x94\xed\x12\xa1\xba\x1f\xb8\xad\xfc\xb2\r\xfa\x19X.RZ\xa3\xb7E$\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4\xed\x16\xce9\xfe\xef;\xd7\xf5\xd1b\x04^\x0fg\xc0\xf0\x00F\xbb\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xed\x1a\\C\xc5t\xd4\xe94)\x9b$\xf1G,\u071f\xd6\xf0\x10\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xed\x1b$\xb6\x91-Q\xb34\xac\r\xe6\xe7q\xc7\xc0EF\x95\xea\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xed\x1f\x1e\x11Z\r`\xce\x02\xfb%\xdf\x01M(\x9e:\f\xbe}\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xed10\\1\x9f\x92s\u04d3m\x8f[/q\u9c72)c\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xed2z\x14\xd5\u03ed\u0641\x03\xfc\t\x99q\x8d~\xd7\x05(\xea\x89N\x10\x03\xb2\x8d\x92\x80\x00\x00\u07d4\xed<\xbc7\x82\u03bdg\x98\x9b0\\A3\xb2\xcd\xe3\"\x11\xeb\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xed@\x14S\x8c\xeefJ/\xbc\xb6\xdcf\x9fz\xb1m\v\xa5|\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xedA\u188f\\\xaa\x848\x80\xefN\x8b\b\xbdl3\x14\x1e\u07c9*\xd5\xdd\xfaz\x8d\x83\x00\x00\xe0\x94\xedK\xe0J\x05-z\u0333\xdc\u03901\x9d\xba@ \xab,h\x8a\a\xf3zp\xea\xf3b\x17\x80\x00\xe0\x94\xedR\xa2\xcc\bi\u071e\x9f\x84+\u0415|G\xa8\xe9\xb0\xc9\xff\x8a\x02\x05\xb4\u07e1\xeetx\x00\x00\u07d4\xed[LA\xe7b\xd9B@Cs\xca\xf2\x1e\xd4a]%\xe6\xc1\x89m-O=\x95%\xb4\x00\x00\u07d4\xed`\u012bnT\x02\x061~5\x94zc\xa9\xcak\x03\xe2\u02c9\x03\x1a\u066d\vF\u007f\x80\x00\u07d4\xedd\x1e\x066\x8f\xb0\xef\xaa\x17\x03\xe0\x1f\xe4\x8fJhS\t\xeb\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xedfC\xc0\xe8\x88K-2\x11\x857\x85\xa0\x8b\xf8\xf3>\u049f\x89Hz\x9a0E9D\x00\x00\xe0\x94\xedp\xa3|\xdd\x1c\xbd\xa9tm\x93\x96X\xae*a\x81(\x85x\x8a\x02\bj\xc3Q\x05&\x00\x00\x00\u07d4\xedsFvn\x1agm\r\x06\xec\x82\x18g\xa2v\xa0\x83\xbf1\x89\u064a\t1\xcc-I\x00\x00\u07d4\xed\x86&\x16\xfc\xbf\xb3\xbe\xcbt\x06\xf7<\\\xbf\xf0\f\x94\aU\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xed\x9e\x03\f\xa7\\\xb1\u049e\xa0\x1d\rL\xdf\xdc\xcd8D\xb6\xe4\x89\x01\xac\xc1\x16\u03ef\xb1\x80\x00\xe0\x94\ud7bc\u02e4/\x98\x15\xe7\x823&m\xd6\xe85\xb6\xaf\xc3\x1b\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\ud7f1\xf5\xaf/\xbf\u007f\xfcP)\xce\xe4+p\xff\\'[\xf5\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\u07d4\xed\xa4\xb2\xfaY\u0584\xb2z\x81\r\xf8\x97\x8as\xdf0\x8ac\u0089\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xed\xb4s59y\xa2\x06\x87\x9d\xe1D\xc1\n:\xcf\x12\xa7'OV9a\xf57R\x9d\x89\xc7\u0789lk\x93[\x8b\xbd@\x00\x00\u07d4\xeer\x88\xd9\x10\x86\xd9\xe2\xeb\x91\x00\x14\u066b\x90\xa0-x\u00a0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xee|=\xed|(\xf4Y\xc9/\xe1;M\x95\xba\xfb\xab\x026}\x89%\xf2s\x93=\xb5p\x00\x00\xe0\x94\xee\x86} \x91k\xd2\xe9\xc9\xec\xe0\x8a\xa0C\x85\xdbf|\x91.\x8a\n\x96\x81c\xf0\xa5{@\x00\x00\u07d4\ue25b\x02\xcb\xcb99\xcda\xde\x13B\xd5\x04\x82\xab\xb6\x852\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xee\x90m}_\x17H%\x81t\xbeL\xbc8\x93\x03\x02\xab{B\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\ue5ea\x8a\u019e\xdfz\x98}mp\x97\x9f\x8e\xc1\xfb\xcaz\x94\x89\x14b\fW\xdd\xda\xe0\x00\x00\u07d4\xee\xa1\xe9y\x88\xdeu\xd8!\xcd(\xadh\"\xb2,\u0398\x8b1\x89\x1c0s\x1c\xec\x03 
\x00\x00\xe0\x94\xee\u048c?\x06\x8e\tJ0K\x85<\x95\nh\t\xeb\xcb\x03\xe0\x8a\x03\xa9\u057a\xa4\xab\xf1\xd0\x00\x00\u07d4\xee\u04c4\xef-A\xd9\xd2\x03\x97NW\xc1#(\xeav\x0e\b\xea\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xee\xdflB\x80\xe6\xeb\x05\xb94\xac\xe4(\xe1\x1dB1\xb5\x90[\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xee\xe7a\x84~3\xfda\u0653\x87\xee\x14b\x86\x94\u047f\xd5%\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xee\xe9\xd0Rn\xda\x01\xe41\x16\xa3\x952-\u0689pW\x8f9\x8a\x02\x1e\x19\x99\xbb\xd5\u04be\x00\x00\u07d4\xee\xf1\xbb\xb1\xe5\xa8?\u0782H\xf8\x8e\xe3\x01\x8a\xfa-\x132\xeb\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xee\xfb\xa1-\xfc\x99gB\xdby\x04d\xca}';\xe6\xe8\x1b>\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xee\xfd\x05\xb0\xe3\xc4\x17\xd5[3C\x06\x04\x86\xcd\xd5\xe9*\xa7\xa6\x89M\x85<\x8f\x89\b\x98\x00\x00\u07d4\xef\r\xc7\xddzS\xd6\x12r\x8b\xcb\u04b2|\x19\xddM}fo\x89&A\x1c[5\xf0Z\x00\x00\u07d4\xef\x11RR\xb1\xb8E\u0345\u007f\x00-c\x0f\x1bo\xa3zNP\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xef\x1c\x04w\xf1\x18M`\xac\u02b3t\xd3tUz\n>\x10\xf3\x89\b=lz\xabc`\x00\x00\u07d4\xef,4\xbbH}7b\xc3\u0327\x82\xcc\xddz\x8f\xbb\n\x991\x89\t\xc2\x00vQ\xb2P\x00\x00\u07d4\xef5\xf6\u0531\a^j\xa19\x15\x1c\x97K/FX\xf7\x058\x89<;\xc3?\x94\xe5\r\x80\x00\u07d4\xef9\u0291s\xdf\x15S\x1ds\xe6\xb7*hKQ\xba\x0f+\xb4\x89V\xa0\xb4un\xe28\x00\x00\u07d4\xefF<&y\xfb'\x91d\xe2\f=&\x915\x87s\xa0\xad\x95\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xefG\xcf\a>6\xf2q\xd5\"\xd7\xfaNq \xadP\a\xa0\xbc\x89\x87\x86x2n\xac\x90\x00\x00\u07d4\xefa\x15[\xa0\t\xdc\u07be\xf1\v(\xd9\xda=\x1b\xc6\xc9\xce\u0509\x034-`\xdf\xf1\x96\x00\x00\u0794\xefix\x1f2\xff\xce34o,\x9a\xe3\xf0\x84\x93\xf3\xe8/\x89\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xefv\xa4\u034f\xeb\xcb\u0278\x18\xf1x(\xf8\xd94s\xf3\xf3\u02c9\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\uf4c1\x8fhM\xb0\xc3g^\xc8\x132\xb3\x18>\xcc(\xa4\x95\x89T\x06\x923\xbf\u007fx\x00\x00\xe0\x94\xef\x9fY\xae\xdaA\x8c\x14\x94h-\x94\x1a\xabI$\xb5\xf4\x92\x9a\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\uf9b1\xf0\xdb`57\x82h\x91\xb8\xb4\xbc\x169\x84\xbb@\u03495e\x9e\xf9?\x0f\xc4\x00\x00\u07d4\xef\xbdR\xf9}\xa5\xfd:g:F\xcb\xf30D{~\x8a\xad\\\x89\x05l<\x9b\x80\xa0\xa6\x80\x00\xe0\x94\xef\xc8\xcf\x19c\u0269Rg\xb2(\xc0\x86#\x98\x89\xf4\xdf\xd4g\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xef\u02ae\x9f\xf6M,\xd9[RI\xdc\xff\xe7\xfa\xa0\xa0\xc0\xe4M\x89\x15\xbeat\xe1\x91.\x00\x00\u07d4\xef\xcc\xe0k\xd6\b\x9d\x0eE\x8e\xf5a\xf5\xa6\x89H\n\xfep\x00\x89 
\x86\xac5\x10R`\x00\x00\u07d4\xef\xe0g]\xa9\x8a]\xdap\u0356\x19k\x87\xf4\xe7&\xb43H\x89?\x19\xbe\xb8\xdd\x1a\xb0\x00\x00\u07d4\xef\xe8\xff\x87\xfc&\x0e\agc\x8d\xd5\xd0/\xc4g.\x0e\xc0m\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xef\xeb\x19\x97\xaa\xd2w\xcc3C\x0ea\x11\xed\tCY@H\xb8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xef\xee\xa0\x10uo\x81\xdaK\xa2[r\x17\x87\xf0X\x17\v\uff49\x01\u009c\x9c\xf7p\xef\x00\x00\u07d4\xef\xf5\x1dr\xad\xfa\xe1C\xed\xf3\xa4+\x1a\xecU\xa2\xcc\xdd\v\x90\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xef\xf8kQ#\xbc\xdc\x17\xedL\xe8\xe0[~\x12\xe5\x13\x93\xa1\xf7\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xef\xfc\x15\u41f1\xbe\xda\n\x8d\x13%\xbd\xb4\x17\"@\xdcT\n\x89\x03\x8599\xee\xe1\xde\x00\x00\xe0\x94\xf0\x11\x95\xd6W\xef<\x94.l\xb89I\xe5\xa2\v\\\xfa\x8b\x1e\x8a\x05ts\xd0]\xab\xae\x80\x00\x00\u07d4\xf0'\x96)Q\x01gB\x88\xc1\xd94g\x05=\x04\"\x19\xb7\x94\x89(\x1d\x90\x1fO\xdd\x10\x00\x00\u07d4\xf09h={=\"[\xc7\xd8\u07ed\xefc\x164A\xbeA\xe2\x89\x01\xdd\x1eK\xd8\xd1\xee\x00\x00\u07d4\xf0Jj7\x97\b\xb9B\x8dr*\xa2\xb0kw\xe8\x895\u03c9\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xf0M,\x91\xef\xb6\xe9\xc4_\xfb\xe7KCL\x8c_+\x02\x8f\x1f\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf0W\xaaf\xcav~\xde\x12J\x1c[\x9c\xc5\xfc\x94\xef\v\x017\x89p\xa2K\u02b6\xf4]\x00\x00\u07d4\xf0[\xa8\u05f6\x859\xd930\v\xc9(\x9c=\x94t\xd0A\x9e\x89\x06\xda'\x02M\xd9`\x00\x00\u07d4\xf0\\\xee\xabeA\x05dp\x99Qw<\x84E\xad\x9fN\u01d7\x89\x10C\x16'\xa0\x93;\x00\x00\xe0\x94\xf0_\xcdL\rs\xaa\x16~US\xc8\xc0\xd6\xd4\xf2\xfa\xa3\x97W\x8a\x02\xd2\xd6l1p\xb2\x98\x00\x00\u07d4\xf0g\xe1\xf1\u0583UjL\xc4\xfd\f\x03\x13#\x9f2\xc4\xcf\u060965\u026d\xc5\u07a0\x00\x00\u07d4\xf0g\xfb\x10\u07f2\x93\u962b\xe5d\xc0U\xe34\x8f\x9f\xbf\x1e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf0h\xdf\xe9]\x15\xcd:\u007f\x98\xff\xa6\x88\xb44hB\xbe&\x90\x89D\n\xd8\x19\xe0\x97L\x00\x00\xe0\x94\xf0j\x85J<]\xc3m\x1cI\xf4\xc8}m\xb33\xb5~J\u074a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xf0y\xe1\xb1&_P\xe8\u0229\x8e\xc0\u01c1^\xb3\xae\xac\x9e\xb4\x89\x01\x16\xdc:\x89\x94\xb3\x00\x00\xe0\x94\xf0{\xd0\xe5\xc2\xcei\xc7\u0127$\xbd&\xbb\xfa\x9d*\x17\xca\x03\x8a\x01@a\xb9\xd7z^\x98\x00\x00\xe0\x94\xf0\x83*k\xb2U\x03\xee\xcaC[\xe3\x1b\v\xf9\x05\xca\x1f\xcfW\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4\xf0\x9b>\x87\xf9\x13\xdd\xfdW\xae\x80I\xc71\u06e9\xb66\xdf\u00c9 \xf5\xb1\uab4d\x80\x00\x00\u07d4\xf0\xb14\v\x99oo\v\xf0\xd9V\x1c\x84\x9c\xaf\u007fD0\xbe\xfa\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xf0\xb1\xf9\xe2x2\xc6\xdei\x14\xd7\n\xfc#\x8ct\x99\x95\xac\xe4\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xf0\xb4i\xea\xe8\x9d@\f\xe7\xd5\xd6j\x96\x95\x03p6\xb8\x89\x03\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xf0\xb9\u0583\u03a1+\xa6\x00\xba\xac\xe2\x19\xb0\xb3\xc9~\x8c\x00\xe4\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xf0\xbe\x0f\xafMy#\xfcDF\"\u0458\f\xf2\u0650\xaa\xb3\a\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf0\xc0\x81\xdaR\xa9\xae6d*\xdf^\b _\x05\xc5Ah\xa6\x89\x06\x04o7\xe5\x94\\\x00\x00\u07d4\xf0\xc7\r\rm\xabvc\xaa\x9e\xd9\xce\xeaV~\xe2\u01b0'e\x89qC\x8a\u0167\x91\xa0\x80\x00\u07d4\xf0\xcb\xef\x84\xe1ic\x00\x98\xd4\xe3\x01\xb2\x02\b\xef\x05\x84j\u0249\x0e\v\x83EPkN\x00\x00\u07d4\xf0\xd2\x16c\u0630\x17n\x05\xfd\xe1\xb9\x0e\xf3\x1f\x850\xfd\xa9_\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94\xf0\xd5\xc3\x1c\xcbl\xbe0\xc7\xc9\xea\x19\xf2h\xd1Y\x85\x1f\x8c\x9c\x8a\x03\x89O\x0eo\x9b\x9fp\x00\x00\u07d4\xf0\xd6L\xf9\xdf\tt\x113\xd1pH_\xd2K\x00P\x11\xd5 \x89\x1b\b\x93A\xe1O\xcc\x00\x00\u07d4\xf0\xd8X\x10^\x1bd\x81\x01\xac?\x85\xa0\xf8\"+\xf4\xf8\x1dj\x89 \x86\xac5\x10R`\x00\x00\u07d4\xf0\xdcC\xf2\x05a\x91'P{+\x1c\x1c\xfd\xf3-(1\t 
\x89\x10^\xb7\x9b\x94\x17\b\x80\x00\u07d4\xf0\xe1\u07e4*\u07ac/\x17\xf6\xfd\xf5\x84\xc9Hb\xfdV3\x93\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xf0\xe2d\x9c~j?,]\xfe3\xbb\xfb\xd9'\xca<5\nX\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf0\xe7\xfb\x9eB\nS@\xd56\xf4\x04\b4O\xea\xef\xc0j\xef\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf1\x04b\xe5\x8f\xcc\a\U000d5121\x87c\x94Q\x16~\x85\x92\x01\x89\t4\xdd]3\xbc\x97\x00\x00\xe0\x94\xf1\x06a\xff\x94\x14\x0f >zH%rCy8\xbe\xc9\xc3\xf7\x8a\x04<3\xc1\x93ud\x80\x00\x00\u0794\xf1\x14\xff\r\x0f$\xef\xf8\x96\xed\xdeTq\u07a4\x84\x82J\x99\xb3\x88\xbe -j\x0e\xda\x00\x00\u07d4\xf1\x16\xb0\xb4h\x0fS\xabr\xc9h\xba\x80.\x10\xaa\x1b\xe1\x1d\u0209\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xf1\x1c\xf5\xd3cto\xeehd\xd3\xca3m\xd8\x06y\xbb\x87\xae\x8a\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xf1\x1e\x01\u01e9\xd1$\x99\x00_M\xaew\x16\tZ4\x17bw\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xf1;\b0\x93\xbaVN-\xc61V\x8c\xf7T\r\x9a\x0e\xc7\x19\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xf1O\x0e\xb8m\xb0\xebhu?\x16\x91\x8e]K\x80t7\xbd>\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf1Qx\xff\xc4:\xa8\a\x0e\xce2~\x93\x0f\x80\x9a\xb1\xa5O\x9d\x89\n\xb6@9\x12\x010\x00\x00\u07d4\xf1V\xdc\v*\x98\x1e[U\xd3\xf2\xf0;\x814\xe31\u06ed\xb7\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xf1]\x9dZ!\xb1\x92\x9ey\x03q\xa1\u007f\x16\xd9_\fie\\\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf1^\x18,O\xbb\xady\xbd\x934\"B\xd4\xdc\xcf+\xe5\x89%\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xf1bM\x98\ve3o\xea\u0166\xd5A%\x00\\\xfc\xf2\xaa\u02c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xf1g\xf5\x86\x8d\xcfB3\xa7\x83\x06\th,\xaf-\xf4\xb1\xb8\a\x89\x81\xe5B\xe1\xa78?\x00\x00\u07d4\xf1m\xe1\x89\x1d\x81\x96F\x13\x95\xf9\xb16&[;\x95F\xf6\xef\x89\x01\xb2\x8e\x1f\x98\xbb\u0380\x00\u07d4\xf1z\x92\xe06\x1d\xba\xce\xcd\xc5\xde\r\x18\x94\x95Z\xf6\xa9\xb6\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf1z\xdbt\x0fE\u02fd\xe3\tN~\x13qo\x81\x03\xf5c\xbd\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf1\x8b\x14\xcb\xf6iC6\xd0\xfe\x12\xac\x1f%\xdf-\xa0\xc0]\xbb\x89\xd8\xd4`,&\xbfl\x00\x00\u07d4\xf1\x9b98\x9dG\xb1\x1b\x8a,?\x1d\xa9\x12M\xec\xff\xbe\xfa\xf7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf1\x9f\x195\b9>M*\x12{ \xb2\x03\x1f9\xc8%\x81\u0189\xbd\xbdz\x83\xbd/l\x00\x00\u07d4\xf1\xa1\xf3 @yd\xfd<\x8f.,\u0224X\r\xa9O\x01\xea\x89ll!wU|D\x00\x00\u07d4\xf1\xb4\xec\xc65%\xf7C,=\x83O\xfe+\x97\x0f\xbe\xb8r\x12\x89\xa2\xa2@h\xfa\u0340\x00\x00\u07d4\U000753ef\xfa\x87\x94\xf5\n\xf8\xe8\x83\t\u01e6&TU\xd5\x1a\x8963\x03\"\xd5#\x8c\x00\x00\u07d4\xf1\xc8\u0129A\xb4b\x8c\rl0\xfd\xa5dR\u065c~\x1bd\x89N\x8c\xea\x1e\xdeu\x04\x00\x00\u07d4\xf1\xda@so\x99\xd5\xdf;\x06\x8a]t_\xaf\xc6F?\u0271\x89\x06\x96\xca#\x05\x8d\xa1\x00\x00\u07d4\xf1\u070a\xc8\x10B\xc6z\x9c\\c2!\xa8\xf76>e\x87\f\x9f(t$\u04a9`\x89J\xcfX\xe0rW\x10\x00\x00\u07d4\xf2B\u0684]B\u053fw\x9a\x00\xf2\x95\xb4\aP\xfeI\xea\x13\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf2RY\xa5\xc99\xcd%\x96l\x9bc\x03\xd3s\x1cS\u077cL\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf2^Lp\xbcFV2\u021eV%\xa82\xa7r/k\xff\xab\x89\xf3K\x82\xfd\x8e\x91 \x00\x00\u07d4\xf2k\xce\xdc\xe3\xfe\xad\u03a3\xbc>\x96\xeb\x10@\xdf\xd8\xff\u1809*\x03I\x19\u07ff\xbc\x00\x00\u07d4\xf2py%v\xf0]QD\x93\xff\xd1\xf5\xe8K\xecK-\xf8\x10\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf2s,\xf2\xc1;\x8b\xb8\xe7I*\x98\x8f_\x89\xe3\x82s\xdd\u0209 
\x86\xac5\x10R`\x00\x00\xe0\x94\xf2t.hY\xc5i\xd5\xf2\x10\x83Q\xe0\xbfM\xca5*H\xa8\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xf2\x81:d\xc5&]\x02\x025\u02dc1\x9bl\x96\xf9\x06\xc4\x1e\x89\x12\xf99\u025e\u06b8\x00\x00\u07d4\xf2\x87\xffR\xf4a\x11z\xdb>\x1d\xaaq\x93-\x14\x93\xc6_.\x89\xc5S%\xcat\x15\xe0\x00\x00\u07d4\xf2\xab\x11au\x02D\xd0\xec\xd0H\xee\r>Q\xab\xb1C\xa2\xfd\x89B\xfe+\x90ss\xbc\x00\x00\u07d4\xf2\xb4\xab,\x94'\xa9\x01^\xf6\xee\xff\xf5\xed\xb6\x019\xb7\x19\u0449&\u06d9*;\x18\x00\x00\x00\u07d4\xf2\xc0>*8\x99\x8c!d\x87`\xf1\xe5\xae~\xa3\a}\x85\"\x89\x8f?q\x93\xab\a\x9c\x00\x00\u0794\xf2\u0090N\x9f\xa6d\xa1\x1e\xe2VV\xd8\xfd,\xc0\u0665\"\xa0\x88\xb9\x8b\xc8)\xa6\xf9\x00\x00\u07d4\xf2\xc3b\xb0\xef\x99\x1b\xc8/\xb3nf\xffu\x93*\xe8\u0742%\x89\x04\x02\xf4\xcf\xeeb\xe8\x00\x00\u07d4\xf2\xd0\xe9\x86\xd8\x14\xea\x13\xc8\xf4f\xa0S\x8cS\u0712&Q\xf0\x89J\xcfX\xe0rW\x10\x00\x00\xe0\x94\xf2\u04775w$\xecL\x03\x18[\x87\x9bc\xf5~&X\x91S\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xf2\xd5v<\xe0s\x12~,\xed\xdeo\xab\xa7\x86\xc7<\xa9AA\x8a\x01\xacB\x86\x10\x01\x91\xf0\x00\x00\xe0\x94\xf2\u055c\x89#u\x90s\xd6\xf4\x15\xaa\xf8\xeb\x06_\xf2\U000f614a\x01\xab,\xf7\xc9\xf8~ \x00\x00\u07d4\xf2\xe9\x9f\\\xbb\x83kz\xd3bGW\x1a0,\xbeKH\x1ci\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf2\xed>w%J\u02c3#\x1d\xc0\x86\x0e\x1a\x11$+\xa6'\u06c9kV\x05\x15\x82\xa9p\x00\x00\xe0\x94\xf2\xed\xde7\xf9\xa8\u00dd\u07a2My\xf4\x01WW\xd0k\xf7\x86\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xf2\xef\xe9e`\xc9\xd9{r\xbd6DxC\x88\\\x1d\x90\xc21\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf2\xfb\xb6\u0607\xf8\xb8\xcc:\x86\x9a\xba\x84\u007f=\x1fd\xfcc\x97\xaae\xfbS\xa8\xf0z\x0f\x89:\xae0\xe8\xbc\xee\x89|\xf28\x1fa\x9f\x15\x00\x00\u07d4\xf3@\x83\xec\xea8P\x17\xaa@\xbd\xd3^\xf7\xef\xfbL\xe7v-\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xf3F\xd7\u0792t\x1c\b\xfcX\xa6M\xb5[\x06-\xde\x01-\x14\x89\x0f\xffk\x1fv\x1em\x00\x00\xe0\x94\xf3U\xd3\xec\f\xfb\x90}\x8d\xbb\x1b\xf3FNE\x81(\x19\v\xac\x8a\x01\v\x04n\u007f\r\x80\x10\x00\x00\u07d4\xf3m\xf0/\xbd\x89`sG\xaf\xce)i\xb9\xc4#jX\xa5\x06\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf3s\xe9\u06ac\f\x86u\xf5;yz\x16\x0fo\xc04\xaek#\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xf3{BeG\xa1d-\x8032H\x14\xf0\xed\xe3\x11O\xc2\x12\x89\x15\xbeat\xe1\x91.\x00\x00\u07d4\xf3{\xf7\x8cXu\x15G\x11\xcbd\r7\xeam(\xcf\xcb\x12Y\x89\n\u05ce\xbcZ\xc6 
\x00\x00\xe0\x94\xf3\x82\xdfX1U\xd8T\x8f?\x93D\f\xd5\xf6\x8c\xb7\x9d`&\x8a8u}\x02\u007f\xc1\xfd\\\x00\x00\xe0\x94\xf3\x82\xe4\xc2\x04\x10\xb9Q\b\x9e\x19\xba\x96\xa2\xfe\xe3\xd9\x1c\xce~\x8a\x01\x11\xfaV\xee\u00a88\x00\x00\xe0\x94\xf3\x8al\xa8\x01hS~\x97M\x14\xe1\xc3\xd19\x90\xa4L,\x1b\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\u07d4\xf3\x9a\x9dz\xa3X\x1d\xf0~\xe4'\x9a\xe6\xc3\x12\xef!\x036X\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf3\xb6h\xb3\xf1M\x92\x0e\xbc7\x90\x92\u06d8\x03\x1bg\xb2\x19\xb3\x89\n\xd6\xee\xdd\x17\xcf;\x80\x00\u07d4\U000fe679\x10<\xe7U\n\xa7O\xf1\xdb\x18\xe0\x9d\xfe2\xe0\x05\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf3\xc1\xab\u049d\xc5{A\xdc\x19-\x0e8M\x02\x1d\xf0\xb4\xf6\u0509\x97\xae\f\u07cf\x86\xf8\x00\x00\u07d4\xf3\xc4qm\x1e\xe5'\x9a\x86\xd0\x16:\x14a\x81\x81\xe1a6\u01c965\u026d\xc5\u07a0\x00\x00\xe0\x94\xf3\u030b\xcbU\x94e\xf8\x1b\xfeX;\u05eb\n#\x06E;\x9e\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xf3\u0588\xf0k\xbd\xbfP\xf9\x93,AE\xcb\xe4\x8e\xcd\xf6\x89\x04\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf3\xdb\xcf\x13Z\u02dd\xee\x1aH\x9cY<\x02O\x03\u00bb\xae\u0389lk\x93[\x8b\xbd@\x00\x00\u07d4\xf3\xde_&\xefj\xde\xd6\xf0m;\x91\x13F\xeep@\x1d\xa4\xa0\x89\x13:\xb3}\x9f\x9d\x03\x00\x00\u07d4\xf3\xdfc\xa9q\x99\x93308;>\xd7W\v\x96\u0101#4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf3\xe7OG\f}:?\x003x\x0fv\xa8\x9f>\xf6\x91\xe6\u02c9\xa3\xcf\xe61\xd1Cd\x00\x00\u07d4\xf3\xeb\x19H\xb9Q\xe2-\xf1ax)\xbf;\x8d\x86\x80\xeckh\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf3\xf1\xfa9\x18\xca4\xe2\xcf~\x84g\v\x1fM\x8e\xca\x16\r\xb3\x89$\xdc\xe5M4\xa1\xa0\x00\x00\u07d4\xf3\xf2O\u009e @?\xc0\xe8\xf5\xeb\xbbU4&\xf7\x82p\xa2\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xf3\xfar5R\xa5\xd0Q.+b\xf4\x8d\xca{+\x81\x050[\x89\amA\xc6$\x94\x84\x00\x00\u07d4\xf3\xfeQ\xfd\xe3D\x13\xc73\x18\xb9\xc8T7\xfe~\x82\x0fV\x1a\x896b2\\\u044f\xe0\x00\x00\u07d4\xf4\x00\xf9=_\\~?\xc3\x03\x12\x9a\xc8\xfb\f/xd\a\xfa\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf4\v\x13O\xea\"\u01b2\x9c\x84W\xf4\x9f\x00\x0f\x9c\xdax\x9a\u06c9 \x86\xac5\x10R`\x00\x00\u07d4\xf4\x15W\xdf\u07f1\xa1\xbd\xce\xfe\xfe.\xba\x1e!\xfe\nJ\x99B\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf4\x17z\r\x85\u050b\x0e&B\x11\xce*\xa2\xef\xd3\xf1\xb4\u007f\b\x89\xc2\xcc\xca&\xb7\xe8\x0e\x80\x00\u07d4\xf4/\x90R1\xc7p\xf0\xa4\x06\xf2\xb7h\x87\u007f\xb4\x9e\xee\x0f!\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xf42\xb9\u06ef\x11\xbd\xbds\xb6Q\x9f\xc0\xa9\x04\x19\x87q\xaa\u0189\b=lz\xabc`\x00\x00\u07d4\xf4=\xa3\xa4\xe3\xf5\xfa\xb1\x04\u029b\xc1\xa0\xf7\xf3\xbbJV\xf3Q\x89lj\xccg\u05f1\xd4\x00\x00\xe0\x94\xf4G\x10\x8b\x98\xdfd\xb5~\x87\x103\x88\\\x1a\xd7\x1d\xb1\xa3\xf9\x8a\x01v\xf4\x9e\xad4\x83P\x80\x00\u07d4\xf4O\x85Q\xac\xe93r\a\x12\xc5\u0111\u0376\xf2\xf9Qsl\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u0794\xf4V\x05Z\x11\xab\x91\xfff\x8e.\xc9\"\x96\x1f*#\xe3\xdb%\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xf4V\xa7[\xb9\x96U\xa7A,\xe9}\xa0\x81\x81m\xfd\xb2\xb1\xf2\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xf4[\x1d\xcb.A\xdc'\xff\xa0$\u06ad\xf6\x19\xc1\x11u\xc0\x87\x89\x01\x11du\x9f\xfb2\x00\x00\u07d4\xf4c\xa9\f\xb3\xf1>\x1f\x06CB66\xbe\xab\x84\xc1#\xb0m\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xf4h\x90n~\xdffJ\xb0\u063e=\x83\xebz\xb3\xf7\xff\xdcx\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xf4i\x80\u3929\u049ajn\x90`E7\xa3\x11K\xcb(\x97\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xf4kk\x9c|\xb5R\x82\x9c\x1d=\xfd\x8f\xfb\x11\xaa\xba\xe7\x82\xf6\x89\x01#n\xfc\xbc\xbb4\x00\x00\u07d4\xf4v\xe1&\u007f\x86$|\xc9\b\x81o.z\xd58\x8c\x95-\xb0\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf4v\xf2\xcbr\b\xa3.\x05\x1f\xd9N\xa8f)\x92c\x82\x87\xa2\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xf4{\xb14\xda0\xa8\x12\xd0\x03\xaf\x8d\u0338\x88\xf4K\xbfW$\x8a\x01\x19Y\xb7\xfe3\x95X\x00\x00\u07d4\xf4\x83\xf6\a\xa2\x1f\xcc(\x10\n\x01\x8cV\x8f\xfb\xe1@8\x04\x10\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf4\x8e\x1f\x13\xf6\xafM\x84\xb3q\xd7\xdeK'=\x03\xa2c'\x8e\x89 \x86\xac5\x10R`\x00\x00\xe0\x94\xf4\x9cG\xb3\xef\xd8knj[\xc9A\x8d\x1f\x9f\xec\x81Ki\xef\x8a\x04<3\xc1\x93ud\x80\x00\x00\xe0\x94\xf4\x9fo\x9b\xaa\xbc\x01\x8c\x8f\x8e\x11\x9e\x01\x15\xf4\x91\xfc\x92\xa8\xa4\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xf4\xa3g\xb1f\u0499\x1a+\xfd\xa9\xf5dc\xa0\x9f%,\x1b\x1d\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf4\xa5\x1f\xceJ\x1d[\x94\xb0q\x83\x89\xbaNx\x14\x13\x9c\xa78\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xf4\xa9\xd0\f\xef\xa9{zX\xef\x94\x17\xfcbg\xa5\x06\x909\xee\x89\x01.\x89(\u007f\xa7\x84\x00\x00\u07d4\xf4\xaa\xa3\xa6\x16>7\x06W{I\xc0v~\x94\x8ah\x1e\x16\xee\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf4\xb1bn$\xf3\v\xca\xd9'!\xb2\x93r\x89U\xa6\xe7\x9c\xcd\x1d0\x00\x00\u07d4\xf5U\xa2{\xb1\xe2\xfdN,\u01c4\xca\ue493\x9f\xc0n/\u0249lk\x93[\x8b\xbd@\x00\x00\u07d4\xf5X\xa2\xb2\xdd&\u0755\x93\xaa\xe0E1\xfd<<\u00c5Kg\x89\n\xbb\xcdN\xf3wX\x00\x00\u07d4\xf5`H\xdd!\x81\u0523od\xfc\xec\xc6!T\x81\xe4*\xbc\x15\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf5dB\xf6\x0e!i\x13\x95\u043f\xfa\xa9\x19M\xca\xff\x12\u2dc9\x0e\x189\x8ev\x01\x90\x00\x00\u07d4\xf5yqJE\xeb\x8fR\xc3\xd5{\xbd\xef\xd2\xc1[./\x11\u07c9T\x91YV\xc4\t`\x00\x00\u07d4\xf5\x93\xc6R\x85\xeek\xbdf7\U000fe3c9\xad@\u0509\xf6U\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xf5\x98\xdb.\t\xa8\xa5\xee}r\r+\\C\xbb\x12m\x11\xec\u0089\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xf5\x9d\xab\x1b\xf8\xdf\x112~a\xf9\xb7\xa1KV:\x96\xec5T\x8a\x01EB\xba\x12\xa37\xc0\x00\x00\xe0\x94\xf5\x9f\x9f\x02\xbb\u024e\xfe\t~\xab\xb7\x82\x10\x97\x90!\x89\x8b\xfd\x8a\x02\x1e\x17\x1a>\xc9\xf7,\x00\x00\u07d4\xf5\xa5E\x9f\xcd\xd5\xe5\xb2s\x83\r\xf8\x8e\xeaL\xb7}\xda\u07f9\x89\x04\t\xe5+H6\x9a\x00\x00\u07d4\xf5\xa7gj\xd1H\xae\x9c\x1e\xf8\xb6\xf5\xe5\xa0\xc2\xc4s\xbe\x85\v\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf5\xb0h\x98\x9d\xf2\x9c%5w\xd0@Z\xden\x0eu(\xf8\x9e\x89WG=\x05\u06ba\xe8\x00\x00\u07d4\xf5\xb6\xe9\x06\x1aN\xb0\x96\x16\aw\xe2gb\xcfH\xbd\u0635]\x89\r\xc5_\xdb\x17d{\x00\x00\u07d4\xf5\xcf\xfb\xbabN~\xb3!\xbc\x83\xc6\f\xa6\x81\x99\xb4\xe3fq\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf5\xd1ER\xb1\xdc\xe0\xd6\xdc\x1f2\r\xa6\xff\u02231\xcdo\f\x89Hz\x9a0E9D\x00\x00\xe0\x94\xf5\xd6\x1a\xc4\u0295G^[{\xff\xd5\xf2\xf6\x90\xb3\x16u\x96\x15\x8a\x06\x92\xae\x88\x97\b\x1d\x00\x00\x00\u07d4\xf5\xd9\xcf\x00\xd6X\xddEQzH\xa9\xd3\xf5\xf63T\x1aS=\x89\x06O_\xdfIOx\x00\x00\u07d4\xf5\xea\xdc\xd2\u0478ez\x12\x1f3\xc4X\xa8\xb1>v\xb6U&\x89\r\x8b\x0fZZ\xc2J\x00\x00\u07d4\xf6\a\xc2\x15\r>\x1b\x99\xf2O\xa1\xc7\xd5@\xad\xd3\\N\xbe\x1e\x89\xa7\xf1\xaa\a\xfc\x8f\xaa\x00\x00\u07d4\xf6\v\xd75T>k\xfd.\xa6\xf1\x1b\xffbs@\xbc\x03Z#\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf6\f\x1bE\xf1d\xb9X\x0e 
'Z\\9\xe1\xd7\x1e5\xf8\x91\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xf6\x0fb\xd797\x95?\xef5\x16\x9e\x11\xd8r\xd2\xea1~\xec\x8a\x01!\xeah\xc1\x14\xe5\x10\x00\x00\u07d4\xf6\x12\x83\xb4\xbd\x85\x04\x05\x8c\xa3`\u94d9\x9bb\xcb\xc8\xcdg\x89\r\xd2\xd5\xfc\xf3\xbc\x9c\x00\x00\u07d4\xf6\x17\xb9g\xb9\xbdH_v\x95\xd2\xefQ\xfbw\x92\u0618\xf5\x00\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xf6\x18\u0671\x04A\x14\x80\xa8c\xe6#\xfcU#-\x1aOH\xaa\x89\x0eh\x9emD\xb1f\x80\x00\u07d4\xf6\"\u5126b>\xaa\xf9\x9f+\xe4\x9eS\x80\xc5\xcb\xcf\\\u0609\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf62\xad\xffI\r\xa4\xb7-\x126\xd0KQ\x0ft\xd2\xfa\xa3\u0349K\xe4\xe7&{j\xe0\x00\x00\u07d4\xf69\xac1\u069fg'\x1b\xd1\x04\x02\xb7eN\\\xe7c\xbdG\x89\x15\xaf\x0fB\xba\xf9&\x00\x00\u07d4\xf6:W\x9b\xc3\xea\u00a9I\x04\x10\x12\x8d\xbc\xeb\xe6\xd9\u0782C\x89P\xc5\xe7a\xa4D\b\x00\x00\u07d4\xf6E\xdd|\x89\x00\x93\xe8\xe4\u022a\x92\xa6\xbb55\"\xd3\u0718\x89\aC\x9f\xa2\t\x9eX\x00\x00\xe0\x94\xf6H\xea\x89\xc2u%q\x01r\x94Ny\xed\xff\x84x\x03\xb7u\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xf6JJ\xc8\xd5@\xa9(\x9ch\xd9`\xd5\xfb|\xc4Zw\x83\x1c\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf6N\xcf!\x17\x93\x1cmSZ1\x1eO\xfe\xae\xf9\u0514\x05\xb8\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4\xf6O\xe0\x93\x9a\x8d\x1e\xea*\x0e\u035a\x970\xfdyX\xe31\t\x89\x01\x1d\xe1\xe6\xdbE\f\x00\x00\u07d4\xf6V\x16\xbe\x9c\x8by~t\x15\"|\x918\xfa\xa0\x89\x17B\u05c9*\xd3s\xcef\x8e\x98\x00\x00\u07d4\xf6W\xfc\xbeh.\xb4\xe8\xdb\x15.\u03c9$V\x00\vQ=\x15\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xf6X\x19\xacL\xc1L\x13\u007f\x05\xddyw\xc7\xda\xe0\x8d\x1aJ\xb5\x89\x05\x87\x88\u02d4\xb1\xd8\x00\x00\u07d4\xf6{\xb8\xe2\x11\x8b\xbc\u0550'fn\xed\xf6\x94>\xc9\xf8\x80\xa5\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf6\x84d\xbfd\xf2A\x13V\xe4\xd3%\x0e\xfe\xfe\\P\xa5\xf6[\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf6\x86x[\x89r\va\x14_\ua017\x8dj\u030e\v\xc1\x96\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf6\x8c^3\xfa\x97\x13\x9d\xf5\xb2\xe68\x86\xce4\xeb\xf3\u45dc\x89\xb3\xfaAi\xe2\xd8\xe0\x00\x00\u07d4\xf6\xa8cWW\xc5\xe8\xc14\xd2\r\x02\x8c\xf7x\u03c6\t\xe4j\x89O\x1dw/\xae\xc1|\x00\x00\u07d4\xf6\xb7\x82\xf4\xdc\xd7E\xa6\xc0\xe2\xe00`\x0e\x04\xa2K%\xe5B\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\xe0\x94\xf6\xbc7\xb1\u04a3x\x8dX\x9bm\xe2\x12\xdc\x17\x13\xb2\xf6\u738a\x01\x0f\f\xf0d\xddY \x00\x00\u07d4\xf6\xc3\u010a\x1a\xc0\xa3G\x99\xf0M\xb8n\u01e9u\xfewh\xf3\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf6\xd2]?=\x84m#\x9fR_\xa8\xca\xc9{\xc45x\u06ec\x890\x92\u007ft\xc9\xde\x00\x00\x00\u07d4\xf6\xea\xacp2\u0512\xef\x17\xfd`\x95\xaf\xc1\x1dcOV\xb3\x82\x89\x1b\x1bk\u05efd\xc7\x00\x00\xe0\x94\xf6\xea\xd6}\xbf[~\xb13X\xe1\x0f6\x18\x9dS\xe6C\xcf\u03ca\bxg\x83&\xea\xc9\x00\x00\x00\u07d4\xf6\xf1\xa4C\t\x05\x1ck%\xe4}\xff\x90\x9b\x17\x9b\xb9\xabY\x1c\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xf7\x03(\xef\x97b_\xe7E\xfa\xa4\x9e\xe0\xf9\u052a;\r\xfbi\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf7\n\x99\x8aq{3\x8d\x1d\u0658T@\x9b\x1a3\x8d\ue930\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf7\rcz\x84\\\x06\xdbl\u0711\xe67\x1c\xe7\xc48\x8ab\x8e\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf7\x15R\x13D\x98\x92tK\xc6\x0f.\x04@\a\x88\xbd\x04\x1f\u0749\x03\x9f\xba\xe8\xd0B\xdd\x00\x00\xe0\x94\xf7\x1bE4\xf2\x86\xe40\x93\xb1\xe1^\xfe\xa7I\xe7Y{\x8bW\x8a\x16\x1c\x13\xd34\x1c\x87(\x00\x00\u07d4\xf74\xec\x03rM\xde\xe5\xbbRy\xaa\x1a\xfc\xf6\x1b\f\xb4H\xa1\x89\xe5\xbf,\u0270\x97\x80\x00\x00\u07d4\xf76\u0716v\x00\x128\x8f\xe8\x8bf\xc0n\xfeW\xe0\xd7\xcf\n\x89q\xd7Z\xb9\xb9 P\x00\x00\u07d4\xf7:\xc4l ;\xe1S\x81\x11\xb1Q\xec\x82 
\u01c6\xd8AD\x89\x0f\xf77x\x17\xb8+\x80\x00\u07d4\xf7=\xd9\xc1B\xb7\x1b\xce\x11\xd0n0\xe7\xe7\xd02\xf2\uc71e\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf7A\x8a\xa0\xe7\x13\xd2H\"\x87v\xb2\xe7CB\"\xaeu\u3949lk\x93[\x8b\xbd@\x00\x00\u07d4\xf7Nn\x14S\x82\xb4\u06c2\x1f\xe0\xf2\u0643\x88\xf4V\t\u019f\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xf7P\f\x16o\x8b\xea/\x824v\x06\xe5\x02K\xe9\xe4\xf4\u0399\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf7W\xfc\x87 \xd3\xc4\xfaRw\a^`\xbd\\A\x1a\xeb\xd9w\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf7[\xb3\x9cy\x97y\xeb\xc0J3m&\r\xa61F\xed\x98\u0409\x01Z\xf1\u05cbX\xc4\x00\x00\xe0\x94\xf7h\xf3!\xfdd3\xd9kO5M<\xc1e,\x172\xf5\u007f\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xf7oi\xce\xe4\xfa\xa0\xa6;0\xae\x1ex\x81\xf4\xf7\x15ep\x10\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf7w6\x1a=\u062bb\xe5\xf1\xb9\xb0GV\x8c\xc0\xb5UpL\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf7|{\x84QI\xef\xba\x19\xe2a\xbc|u\x15y\b\xaf\xa9\x90\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf7\u007f\x95\x87\xffz-r\x95\xf1\xf5q\u0206\xbd3\x92jR|\x89lh\xcc\u041b\x02,\x00\x00\u07d4\xf7\x82X\xc1$\x81\xbc\xdd\u06f7*\x8c\xa0\xc0C\tra\xc6\u0149\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf7\x98\xd1m\xa4\xe4`\xc4`\xcdH_\xae\x0f\xa0Y\x97\b\ub08965\u026d\xc5\u07a0\x00\x00\u07d4\xf7\xa1\xad\xe2\xd0\xf5)\x12=\x10U\xf1\x9b\x17\x91\x9fV!Ng\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xf7\xac\xff\x93K\x84\xda\ti\xdc7\xa8\xfc\xf6C\xb7\xd7\xfb\xedA\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xf7\xb1Q\xcc^W\x1c\x17\xc7e9\xdb\xe9\x96L\xbbo\xe5\xdey\x89tq|\xfbh\x83\x10\x00\x00\u07d4\xf7\xb2\x9b\x82\x19\\\x88-\xabx\x97\u00ae\x95\xe7w\x10\xf5xu\x89w5Aa2\xdb\xfc\x00\x00\u07d4\xf7\xbcLD\x91\rZ\xed\xd6n\xd25U8\xa6\xb1\x93\xc3a\xec\x89\x05A\xde,-\x8db\x00\x00\u07d4\xf7\xc0\f\xdb\x1f\x02\x03\x10\u056c\xab{Ij\xaaD\xb7y\b^\x89Z\x87\xe7\xd7\xf5\xf6X\x00\x00\u07d4\xf7\xc1\xb4C\x96\x8b\x11{]\u0677UW/\xcd9\xca^\xc0K\x89\x18\xb9h\u0092\xf1\xb5\x00\x00\xe0\x94\xf7\xc5\x0f\x92*\xd1ka\xc6\u047a\xa0E\xed\x81h\x15\xba\u010f\x8a\x02\xa99j\x97\x84\xad}\x00\x00\u07d4\xf7\xc7\b\x01Pq\xd4\xfb\n:*\t\xa4]\x15c\x96\xe34\x9e\x89\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xf7\xcb\u06e6\xbel\xfeh\xdb\xc2<+\x0f\xf50\xee\x05\"o\x84\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf7\xd0\xd3\x10\xac\xea\x18@a8\xba\xaa\xbb\xfe\x05q\xe8\r\xe8_\x89Hz\x9a0E9D\x00\x00\u07d4\xf7\u05ef 
LV\xf3\x1f\xd9C\x98\xe4\r\xf1\x96K\u063f\x12<\x89\b!\xd2!\xb5)\x1f\x80\x00\u07d4\xf7\xdc%\x11\x96\xfb\u02f7|\x94}|\x19F\xb0\xffe\x02\x1c\xea\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xf7\xe4Z\x12\xaaq\x1cp\x9a\xce\xfe\x95\xf3;xa-*\xd2*\x8a\x0e\x06U\xe2\xf2k\xc9\x18\x00\x00\u07d4\xf7\xf4\x89\x8cLRm\x95_!\xf0U\xcbnG\xb9\x15\xe5\x19d\x89|\b`\xe5\xa8\r\xc0\x00\x00\u07d4\xf7\xf9\x1ez\xcb[\x81)\xa3\x06\x87|\xe3\x16\x8eoC\x8bf\xa1\x89\t\x8a}\x9b\x83\x14\xc0\x00\x00\u07d4\xf7\xfcE\xab\xf7oP\x88\xe2\u5d68\xd12\xf2\x8aMN\xc1\xc0\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8\x06:\xf4\xcc\x1d\xd9a\x9a\xb5\u063f\xf3\xfc\xd1\xfa\xa8H\x82!\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8\bnBf\x1e\xa9)\xd2\u0761\xablt\x8c\xe3\x05]\x11\x1e\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xf8\bw\x86\xb4-\xa0N\xd6\xd1\xe0\xfe&\xf6\xc0\xee\xfe\x1e\x9fZ\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xf8\r6\x19p/\xa5\x83\x8cH9\x18Y\xa89\xfb\x9c\xe7\x16\x0f\x89l\a\xa7\u0471np\x00\x00\u07d4\xf8\x14y\x9fm\xdfM\xcb)\xc7\xee\x87\x0eu\xf9\xcc-52m\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xf8\x15\xc1\n\x03-\x13\xc3K\x89v\xfan;\xd2\xc9\x13\x1a\x8b\xa9\x89Hz\x9a0E9D\x00\x00\u07d4\xf8\x16\"\xe5WW\xda\xeafu\x97]\xd958\xda}\x16\x99\x1e\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8$\xee3\x1eJ\xc3\xccXv\x939[W\xec\xf6%\xa6\xc0\u0089V\xc9]\xe8\xe8\xca\x1d\x00\x00\u07d4\xf8'\xd5n\xd2\xd3' \u052b\xf1\x03\xd6\xd0\xefM;\xcdU\x9b\x89\x01l\x80\x06W\x91\xa2\x80\x00\u07d4\xf8)\x85\x91R>P\xb1\x03\xf0\xb7\x01\xd6#\xcb\xf0\xf7EV\xf6\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf8H\xfc\xe9\xaba\x1c}\x99 n#\xfa\u019a\u0508\xb9O\xe1\x89\x02\xa1\x12\x9d\t6r\x00\x00\u07d4\xf8O\t\n\xdf?\x8d\xb7\u1533P\xfb\xb7u\x00i\x9ff\xfd\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf8Q\xb0\x10\xf63\xc4\n\xf1\xa8\xf0js\ubeabe\az\xb5\x89\xee\x86D/\xcd\x06\xc0\x00\x00\u07d4\xf8X\x17\x1a\x04\xd3W\xa1;IA\xc1n~U\xdd\u0514\x13)\x89\x02F\xa5!\x8f*\x00\x00\x00\u07d4\xf8[\xab\x1c\xb3q\x0f\xc0_\xa1\x9f\xfa\xc2.gR\x1a\v\xa2\x1d\x89l\x955\u007f\xa6\xb3l\x00\x00\u07d4\xf8j>\xa8\a\x1fp\x95\xc7\u06ca\x05\xaePz\x89)\u06f8v\x89\x126\xef\xcb\u02f3@\x00\x00\u07d4\xf8pL\x16\xd2\xfd[\xa3\xa2\xc0\x1d\x0e\xb2\x04\x84\xe6\xec\xfa1\t\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf8p\x99_\xe1\xe5\"2\x1duC7\xa4\\\f\x9d{8\x95\x1c\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf8s\xe5ze\xc9;n\x18\xcbu\xf0\xdc\a}[\x893\xdc\\\x89\n\xad\xec\x98?\xcf\xf4\x00\x00\u07d4\xf8ua\x9d\x8a#\xe4]\x89\x98\u0444\u0500\xc0t\x89p\x82*\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf8{\xb0{(\x9d\xf70\x1eT\xc0\xef\xdaj,\xf2\x91\xe8\x92\x00\x89K\xe4\xe7&{j\xe0\x00\x00\u0794\xf8\x89\x00\xdbsyU\xb1Q\x9b\x1a}\x17\n\x18\x86L\xe5\x90\xeb\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xf8\x8bX\xdb7B\vFL\v\xe8\x8bE\xee+\x95)\x0f\x8c\xfa\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xf8\x96+u\xdb]$\xc7\xe8\xb7\xce\xf1\x06\x8c>g\u03bb0\xa5\x89\x0f-\xc7\xd4\u007f\x15`\x00\x00\u07d4\xf8\xa0e\xf2\x87\xd9\x1dw\xcdbj\xf3\x8f\xfa\"\r\x9bU*+\x89g\x8a\x93 b\xe4\x18\x00\x00\u07d4\xf8\xa4\x9c\xa29\f\x1fm\\\x0ebQ;\a\x95qt?|\u0189\xa2\xa1]\tQ\x9b\xe0\x00\x00\u07d4\xf8\xa5\f\xee.h\x8c\xee\u3b24\u0522\x97%\xd4\a,\u0103\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8\xacJ9\xb5<\x110x \x97;D\x13e\xcf\xfeYof\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8\xae\x85{g\xa4\xa2\x89:?\xbe|z\x87\xff\x1c\x01\u01a6\xe7\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xf8\xbf\x9c\x04\x87NZw\xf3\x8fL8R~\x80\xc6v\xf7\xb8\x87\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8\xc7\xf3J8\xb3\x18\x01\xdaC\x064w\xb1+'\xd0\xf2\x03\xff\x89\x1a\u04ba\xbao\xefH\x00\x00\u07d4\xf8\xca3l\x8e\x91\xbd 
\xe3\x14\xc2\v-\xd4`\x8b\x9c\x8b\x94Y\x89-\u071b\u0173,x\x00\x00\u07d4\xf8\xd1t$\xc7g\xbe\xa3\x12\x05s\x9a+W\xa7'r\x14\uef89\x02F\xdd\xf9yvh\x00\x00\u07d4\xf8\xd5-\xcc_\x96\xcc(\x00{>\u02f4\t\xf7\xe2*dl\xaa\x89\b\x16\x90\xe1\x81(H\x00\x00\u07d4\xf8\xdc\xe8g\xf0\xa3\x9c[\xef\x9e\xeb\xa6\t\"\x9e\xfa\x02g\x8bl\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf8\xf2&\x14*B\x844\xab\x17\xa1\x86J%\x97\xf6J\xab/\x06\x89\tY\x8b/\xb2\xe9\xf2\x80\x00\u07d4\xf8\xf6d^\r\xeedK=\xad\x81\xd5q\uf6ef\x84\x00!\xad\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf9\x01\xc0\x0f\xc1\u06c8\xb6\x9cK\xc3%+\\\xa7\x0e\xa6\xee\\\xf6\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xf9=[\xcb\x06D\xb0\xcc\xe5\xfc\u0763C\xf5\x16\x8f\xfa\xb2\x87}\x89\vb\a\xb6}&\xf9\x00\x00\u07d4\xf9W\x0e\x92L\x95\u07bbpa6\x97\x92\xcf.\xfe\u00a8-^\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xf9d \x86\xb1\xfb\xaea\xa6\x80M\xbe_\xb1^\xc2\u04b57\xf4\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf9d\x88i\x85\x90\xdc;,UVB\xb8q4\x8d\xfa\x06z\u0549\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xf9d\u064d(\x170\xba5\xb2\xe3\xa3\x14yn{B\xfe\xdfg\x89S\xb0\x87`\x98\xd8\f\x00\x00\u07d4\xf9e\ri\x89\xf1\x99\xab\x1c\xc4ycm\xed0\xf2A\x02\x1fe\x89.\x14\x1e\xa0\x81\xca\b\x00\x00\xe0\x94\xf9h\x83X$Y\x90\x8c\x82v'\xe8o(\xe6F\xf9\xc7\xfcz\x8a\x01\u0127\x877\xcd\u03f8\x00\x00\u07d4\xf9kL\x00voSsj\x85t\xf8\"\xe6GL/!\xda-\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xf9r\x9dH(,\x9e\x87\x16m^\xef-\x01\xed\xa9\xdb\xf7\x88!\x89\x05k\x83\xdd\xc7(T\x80\x00\u07d4\xf9v~N\xcbJY\x80Ru\b\u05fe\xc3\xd4^Ld\x9c\x13\x89g\x8a\x93 b\xe4\x18\x00\x00\xe0\x94\xf9x\xb0%\xb6B3U\\\xc3\xc1\x9a\xda\u007fA\x99\xc94\x8b\xf7\x8aT\xb4\v\x1f\x85+\xda\x00\x00\x00\u07d4\xf9{V\xeb\u0577z\xbc\x9f\xba\u02eb\u0514\xb9\xd2\xc2!\xcd\x03\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xf9\x81\x1f\xa1\x9d\xad\xbf\x02\x9f\x8b\xfeV\x9a\xdb\x18\"\x8c\x80H\x1a\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xf9\x82Ps\fLa\xc5\u007f\x12\x985\xf2h\b\x94yEB\xf3\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xf9\x894gr\x99^\xc1\x90o\xaf\xfe\xba*\u007f\xe7\u079ck\xab\x8a\x01je\x02\xf1Z\x1eT\x00\x00\u07d4\xf9\x98\xca4\x11s\nl\xd1\x0etU\xb0A\x0f\xb0\xf6\xd3\xff\x80\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xf9\x9a\xeeDKW\x83\xc0\x93\xcf\xff\xd1\xc4c,\xf9\x90\x9f\xbb\x91\x1d/\x81\x92\xf8B\t\x89\x90\xf54`\x8ar\x88\x00\x00\u07d4\xf9\xbf\xb5\x9dS\x8a\xfcHt\xd4\xf5\x94\x1b\b\xc9s\x0e8\xe2K\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\u07d4\xf9\xdd#\x90\b\x18/\xb5\x19\xfb0\xee\xdd \x93\xfe\xd1c\x9b\xe8\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xf9\u07ba\xec\xb5\xf39\xbe\xeaH\x94\xe5 K\xfa4\r\x06\u007f%\x89ZB\x84Fs\xb1d\x00\x00\xe0\x94\xf9\xe3tG@lA!\x97\xb2\u2bbc\x00\x1dn0\u024c`\x8a\x01\xc4y\xbbCI\xc0\xee\x00\x00\u07d4\xf9\xe7\"/\xaa\xf0\xf4\xda@\xc1\u0124\x0607:\t\xbe\u05f6\x89\x9bO\u0730\x94V$\x00\x00\u07d4\xf9\xec\xe0\"\xbc\xcd,\x924i\x11\xe7\x9d\xd5\x03\x03\xc0\x1e\x01\x88\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfa\x00\xc3v\xe8\x9c\x05\u81c1z\x9d\xd0t\x8d\x96\xf3A\xaa\x89\x89\x10M\r\x00\u04b7\xf6\x00\x00\u07d4\xfa\f\x1a\x98\x8c\x8a\x17\xad5(\xeb(\xb3@\x9d\xaaX\"_&\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xfa\x10_\x1a\x11\xb6\xe4\xb1\xf5`\x12\xa2y\"\xe2\xac-\xa4\x81/\x8a\x02\x05\xb4\u07e1\xeetx\x00\x00\u07d4\xfa\x14/\xe4~\u0697\xe6P;8k\x18\xa2\xbe\xdds\u0335\xb1\x89.\x15:\xd8\x15H\x10\x00\x00\u07d4\xfa\x14\xb5f#J\xbe\xe70B\xc3\x1d!qq\x82\u02e1J\xa1\x89\x11\xc7\xea\x16.x 
\x00\x00\u07d4\xfa\x19\xd6\xf7\xa5\x0fO\a\x98\x93\xd1g\xbf\x14\xe2\x1d\x00s\u0456\x89\x1c\xbb:?\xf0\x8d\b\x00\x00\u07d4\xfa\x1f\x19q\xa7u\xc3PO\xefPy\xf6@\xc2\u013c\xe7\xac\x05\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfa'\x9b\xfd\x87g\xf9V\xbf\u007f\xa0\xbdV`\x16\x8d\xa7V\x86\xbd\x89\x90\xf54`\x8ar\x88\x00\x00\xe0\x94\xfa'\xccI\xd0\vl\x98s6\xa8u\xae9\xdaX\xfb\x04\x1b.\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\xfa(2\x99`=\x87X\xe8\u02b0\x82\x12],\x8f}DT)\x8a\x01[\xca\xcb\x1e\x05\x01\xae\x80\x00\u07d4\xfa+\xbc\xa1]?\u37ca2\x8e\x91\xf9\r\xa1Oz\xc6%=\x89\n\u05ce\xbcZ\xc6 \x00\x00\xe0\x94\xfa/\u049d\x03\xfe\xe9\xa0x\x93\xdf:&\x9fV\xb7/.\x1ed\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xfa3U2\x85\xa9sq\x9a\r_\x95o\xf8a\xb2\u061e\xd3\x04\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfa:\fK\x90?n\xa5.\xa7\xab{\x88c\xb6\xa6\x16\xadfP\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfa:\x1a\xa4H\x8b5\x1a\xa7V\f\xf5\xeec\n/\xd4\\2\"\x89/\xa4~j\xa74\r\x00\x00\u07d4\xfaA\tq\xad\"\x9c06\xf4\x1a\u03c5/*\u0259(\x19P\x89\u0633\x11\xa8\xdd\xfa|\x00\x00\u07d4\xfaD\xa8U\xe4\x04\xc8m\f\xa8\xef3$%\x1d\xfb4\x9cS\x9e\x89T\"S\xa1&\xce@\x00\x00\xe0\x94\xfaR\x01\xfe\x13B\xaf\x110{\x91B\xa0A$<\xa9./\t\x8a 8\x11j:\xc0C\x98\x00\x00\xe0\x94\xfa`\x86\x8a\xaf\xd4\xffL\\W\x91K\x8e\u054bBWs\u07e9\x8a\x01\xcf\xe5\xc8\b\xf3\x9f\xbc\x00\x00\u07d4\xfag\xb6{O7\xa0\x15\t\x15\x11\x0e\xde\a;\x05\xb8S\xbd\xa2\x89#\x19\xba\x94sq\xad\x00\x00\u07d4\xfah\xe0\xcb>\xdfQ\xf0\xa6\xf2\x11\u0272\xcb^\a<\x9b\xff\xe6\x89\x0f\xc969(\x01\xc0\x00\x00\xe0\x94\xfaj7\xf0\x18\xe9yg\x93\u007f\xc5\xe8a{\xa1\u05c6\xdd_w\x8a\x04<0\xfb\b\x84\xa9l\x00\x00\u07d4\xfav\x06C[5l\xee%{\xd2\xfc\xd3\xd9\xea\xcb<\xd1\xc4\xe1\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xfaz\xdff\v\x8d\x99\xce\x15\x93=|_\a/<\xbe\xb9\x9d3\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4\xfa\x86\xca'\xbf(T\u0648p\x83\u007f\xb6\xf6\xdf\xe4\xbfdS\xfc\x89\x11u~\x85%\xcf\x14\x80\x00\u07d4\xfa\x8c\xf4\xe6'i\x8c]W\x88\xab\xb7\x88\x04\x17\xe7P#\x13\x99\x89\xe6\x1a6\x96\xee\xf6\x10\x00\x00\u07d4\xfa\x8e;\x1f\x13C9\x00s}\xaa\xf1\xf6)\x9cH\x87\xf8[_\x89&\u009eG\u0104L\x00\x00\u07d4\xfa\x9e\xc8\xef\xe0\x86\x86\xfaX\xc1\x813Xr\xbai\x85`\ucac9lj\xccg\u05f1\xd4\x00\x00\u07d4\xfa\xad\x90]\x84|{#A\x8a\xee\xcb\xe3\xad\u06cd\xd3\xf8\x92J\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xfa\xae\xba\x8f\xc0\xbb\xdaU<\xa7.0\xef=s.&\xe8 A\x89H\x8d(*\xaf\xc9\xf6\x80\x00\u07d4\xfa\xb4\x87P\r\xf2\x0f\xb8>\xbe\xd9\x16y\x1dV\x17r\xad\xbe\xbf\x89lkLM\xa6\u077e\x00\x00\u07d4\xfa\xc5\u0294u\x80x\xfb\xfc\xcd\x19\xdb5X\xda~\u8827h\x897(\xa6+\r\xcf\xf6\x00\x00\u07d4\xfa\xd9j\xb6\xacv\x8a\xd5\t\x94R\xacGw\xbd\x1aG\xed\u010f\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xfa\xe7g\x19\xd9~\xacA\x87\x04(\xe9@'\x9d\x97\xddW\xb2\xf6\x8a\x14\u06f2\x19\\\xa2(\x90\x00\x00\u07d4\xfa\u8053pG\x89Zf\f\xf2)v\x0f'\xe6h(\xd6C\x89\t\xdd\xc1\xe3\xb9\x01\x18\x00\x00\u07d4\xfa\xe9,\x13p\xe9\u115a]\xf8;V\xd0\xf5\x86\xaa;@L\x89\x05\u0174\xf3\xd8C\x98\x00\x00\xe0\x94\xfa\xf5\xf0\xb7\xb6\xd5X\xf5\t\r\x9e\xa1\xfb-B%\x9cX`x\x8a\x01Z\xff\xb8B\fkd\x00\x00\xe0\x94\xfb\x12o\x0e\xc7i\xf4\x9d\xce\xfc\xa2\xf2\x00(dQX0\x84\xb8\x8a\x01\x0f\xcb\xc25\x03\x96\xbf\x00\x00\xe0\x94\xfb\x13^\xb1Z\x8b\xacr\xb6\x99\x154*`\xbb\xc0k~\a|\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xfb\"<\x1e\"\xea\xc1&\x9b2\xee\x15jS\x85\x92.\xd3o\xb8\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfb7\xcfkO\x81\xa9\xe2\"\xfb\xa2.\x9b\xd2KP\x98\xb73\u03c9\x02\x1auJm\xc5(\x00\x00\u07d4\xfb8`\xf4\x12\x1cC.\xbd\xc8\xecj\x031\xb1\xb7\ty.\x90\x89 
\x8c9J\xf1\u0208\x00\x00\u07d4\xfb9\x18\x9a\xf8v\xe7b\xc7\x1dl>t\x18\x93\xdf\"l\xed\u0589\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xfb:\v\rkjq\x8fo\xc0)*\x82]\xc9$z\x90\xa5\u0409\n\xd6\xdd\x19\x9e\x97[\x00\x00\xe0\x94\xfb?\xa1\xac\b\xab\xa9\xcc;\xf0\xfe\x9dH8 h\x8fe\xb4\x10\x8a\x06ZM\xa2]0\x16\xc0\x00\x00\u07d4\xfb?\xe0\x9b\xb86\x86\x15)\xd7Q\x8d\xa2v5\xf58PV\x15\x89K\xe3\x92\x16\xfd\xa0p\x00\x00\xe0\x94\xfbQ%\xbf\x0f^\xb0\xb6\xf0 \xe5k\xfc/\xdf=@,\t~\x8a\x01@a\xb9\xd7z^\x98\x00\x00\u07d4\xfbU\x18qL\xef\xc3m\x04\x86]\xe5\x91^\xf0\xffG\xdf\xe7C\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfb_\xfa\xa0\xf7aW&5x\x91GX\x18\x93\x9d 7\u03d6\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfbh\\\x15\xe49\x96^\xf6&\xbf\r\x83L\u0468\x9f+V\x95\x89\u0556{\xe4\xfc?\x10\x00\x00\u07d4\xfbtK\x95\x1d\tK1\x02b\xc8\xf9\x86\xc8`\u07da\xb1\xdee\x89\x02\xd1\xc5\x15\xf1\xcbJ\x80\x00\u07d4\xfby\xab\u06d2\\U\xb9\xf9\x8e\xfe\xefd\xcf\xc9\xeba\xf5\x1b\xb1\x89a@\xc0V\xfb\n\xc8\x00\x00\u07d4\xfb\x81\x13\xf9M\x91s\xee\xfdZ0s\xf5\x16\x80:\x10\xb2\x86\xae\x89\x04V9\x18$O@\x00\x00\u07d4\xfb\x84,\xa2\xc5\xef\x139\x17\xa26\xa0\u052c@i\x01\x10\xb08\x89\x10\x96\x9ab\xbe\x15\x88\x00\x00\u07d4\xfb\x91\xfb\x1aiUS\xf0\u018e!'m\xec\xf0\xb89\t\xb8m\x89\x05l\x006\x17\xafx\x00\x00\u07d4\xfb\x94s\xcfw\x125\n\x1f\xa09Rs\xfc\x80V\aR\xe4\xfb\x89\x06\xaf!\x98\xba\x85\xaa\x00\x00\xe0\x94\xfb\x94\x9cd\u007f\xdc\xfd%\x14\xc7\u054e1\xf2\x8aS-\x8cX3\x8a\x04<3\xc1\x93ud\x80\x00\x00\u07d4\xfb\xa5HmS\xc6\xe2@IBA\xab\xf8~C\xc7`\rA:\x89k\xbfaIIH4\x00\x00\u07d4\xfb\xb1a\xfe\x87_\t)\nK&+\xc6\x01\x10\x84\x8f\r\"&\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfb\xbb\xeb\u03fe#^W\xdd#\x06\xad\x1a\x9e\u0141\xc7\xf9\xf4\x8f\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94\xfb\xc0\x1d\xb5NG\xcd\xc3\xc48iJ\xb7\x17\xa8V\xc2?\xe6\xe9\x8a\x01\xcaqP\xab\x17OG\x00\x00\xe0\x94\xfb\xcf\xccJ{\x0f&\xcf&\xe9\xf33!2\xe2\xfcj#\af\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xfb\xe7\x16\"\xbc\xbd1\xc1\xa3iv\xe7\xe5\xf6p\xc0\u007f\xfe\x16\u0789\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xfb\xed\xe3,4\x9f3\x00\xefL\xd3;M\xe7\xdc\x18\xe4C\xd3&\x89\xabM\xcf9\x9a:`\x00\x00\u07d4\xfb\xf2\x04\xc8\x13\xf86\xd89b\u01c7\fx\b\xca4\u007f\xd3>\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xfb\xf7Y3\xe0\x1bu\xb1T\xef\x06i\ak\xe8\u007fb\xdf\xfa\xe1\x8a\x10\x84cr\xf2I\xd4\xc0\x00\x00\u07d4\xfc\x00\x96\xb2\x1e\x95\xac\xb8\xd6\x19\xd1v\xa4\xa1\xd8\xd5)\xba\xdb\xef\x89\x14\xd9i;\xcb\xec\x02\x80\x00\xe0\x94\xfc\x00\xa4 \xa3a\a\xdf\xd5\xf4\x95\x12\x8a_\u5af2\xdb\x0f4\x8a\x01C\x17\x9d\x86\x91\x10 \x00\x00\xe0\x94\xfc\x01\x8ai\n\xd6tm\xbe:\u03d7\x12\xdd\xcaR\xb6%\x009\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\u07d4\xfc\x02s@3\xe5\u007fpQ~\n\xfc~\xe6$a\xf0o\xad\x8e\x89\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4\xfc\x0e\xe6\xf7\u00b3qJ\xe9\x91lEVf\x05\xb6V\xf3$A\x89_h\xe8\x13\x1e\u03c0\x00\x00\u07d4\xfc\x10\xb7\xa6{2h\xd53\x1b\xfbj\x14\xde\xf5\xeaJ\x16,\xa3\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xfc\x15\u02d9\xa8\xd1\x03\v\x12w\n\xdd\x03:y\xee\r\f\x90\x8c\x89\x12\xfa\x00\xbdR\xe6$\x00\x00\u07d4\xfc)R\xb4\u011f\xed\u043c\x05(\xa3\bI^mj\x1cq\u0589lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xfc,\x1f\x88\x96\x1d\x01\x9c>\x9e\xa30\t\x15.\x06\x93\xfb\xf8\x8a\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\xe0\x94\xfc6\x11\x05\u0750\xf9\xed\xe5fI\x9di\xe9\x13\x03\x95\xf1*\u020aS\xa4\xfe/ 
N\x80\xe0\x00\x00\u07d4\xfc7/\xf6\x92|\xb3\x96\xd9\xcf)\x805\x00\x11\r\xa62\xbcR\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfc9\xbeA\tK\x19\x97\xd2\x16\x9e\x82d\xc2\u00fa\xa6\u025b\u0109lk\x93[\x8b\xbd@\x00\x00\u07d4\xfc=\"k\xb3jX\xf5&V\x88W\xb0\xbb\x12\xd1\t\xec\x93\x01\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfcC\x82\x9a\u01c7\xff\x88\xaa\xf1\x83\xba5*\xad\xbfZ\x15\xb1\x93\x89\u05ac\n+\x05R\xe0\x00\x00\u07d4\xfcI\xc1C\x9aA\u05b3\xcf&\xbbg\xe06R$\xe5\xe3\x8f_\x8966\u05ef^\u024e\x00\x00\u07d4\xfcU\x00\x82Q\x05\xcfq*1\x8a^\x9c;\xfci\u021d\f\x12\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\xe0\x94\xfcf\xfa\xba'\u007fK]\xe6J\xd4^\xb1\x9c1\xe0\f\xed>\u054a\x011\xbe\xb9%\xff\xd3 \x00\x00\xe0\x94\xfc~\"\xa5\x03\xecZ\xbe\x9b\b\xc5\v\xd1I\x99\xf5 \xfaH\x84\x8a\x01ZG}\xfb\xe1\xea\x14\x80\x00\u07d4\xfc\x82\x15\xa0\xa6\x99\x13\xf6*C\xbf\x1c\x85\x90\xb9\xdd\xcd\r\x8d\u06c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xfc\x98\x9c\xb4\x87\xbf\x1a}\x17\xe4\xc1\xb7\u0137\xaa\xfd\xdak\n\x8d\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\xe0\x94\xfc\x9b4td\xb2\xf9\x92\x9d\x80~\x03\x9d\xaeH\xd3\u064d\xe3y\x8a\x02\xf6\xf1\a\x80\xd2,\xc0\x00\x00\u07d4\xfc\xa4;\xbc#\xa0\xd3!\xba\x9eF\xb9)s\\\xe7\xd8\xef\f\x18\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfc\xa7>\xff\x87q\xc0\x10;\xa3\xcc\x1a\x9c%\x94H\xc7*\xbf\v\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfc\xad\xa3\x00(?k\xcc\x13J\x91Eg`\xb0\xd7}\xe4\x10\xe0\x89lk\x93[\x8b\xbd@\x00\x00\xe0\x94\xfc\xbc\\q\xac\xe7\x97AE\v\x01,\xf6\xb8\xd3\xf1}\xb6\x8ap\x8a\x02\x05\xb4\u07e1\xeetx\x00\x00\u07d4\xfc\xbd\x85\xfe\xeajuO\xcf4ID\x9e7\xff\x97\x84\xf7w<\x89\xa7J\xdai\xab\xd7x\x00\x00\xe0\x94\xfc\xc9\u0524&.z\x02z\xb7Q\x91\x10\xd8\x02\u0115\xce\xea9\x8a\x01YQ\x82\"K&H\x00\x00\xe0\x94\xfc\xcd\r\x1e\xce\xe2z\xdd\xea\x95\xf6\x85z\xee\xc8\u01e0K(\xee\x8a\x02\x1e\x19\xe0\u027a\xb2@\x00\x00\xe0\x94\xfc\u0434\x82|\xd2\b\xff\xbf^u\x9d\xba\x8c<\xc6\x1d\x8c,<\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xfc\xe0\x89c\\\xe9z\xba\xc0kD\x81\x9b\xe5\xbb\n>.\v7\x89\x05\x03\x92\nv0\xa7\x80\x00\u07d4\xfc\xf1\x99\xf8\xb8T\"/\x18.N\x1d\t\x9dN2>*\xae\x01\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfc\xfc:P\x04\xd6xa?\v6\xa6B&\x9a\u007f7\x1c?j\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfd\x19\x1a5\x15}x\x13s\xfbA\x1b\xf9\xf2R\x90\x04|^\xef\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfd\x1f\xaa4{\x0f\u0300L-\xa8l6\xd5\xf1\u044bp\x87\xbb\x89\x02\xd6\xeb$z\x96\xf6\x00\x00\u07d4\xfd\x1f\xb5\xa8\x9a\x89\xa7!\xb8yph\xfb\xc4\u007f>\x9dR\xe1I\x89\f\u0435\x83\u007f\xc6X\x00\x00\u07d4\xfd OOJ\xba%%\xbar\x8a\xfd\xf7\x87\x92\xcb\u07b75\xae\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfd'W\xcc5Q\xa0\x95\x87\x8d\x97\x87V\x15\xfe\fj2\xaa\x8a\x89 m\xb1R\x99\xbe\xac\x00\x00\u07d4\xfd(r\u045eW\x85<\xfa\x16\xef\xfe\x93\u0431\xd4{O\x93\xfb\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xfd))'\x1e\x9d \x95\xa2dv~{\r\xf5.\xa0\xd1\xd4\x00\x89\xa2\xa1\xeb%\x1bZ\xe4\x00\x00\u07d4\xfd7z8Rr\x90\f\xb46\xa3\xbbyb\xcd\xff\xe9?]\xad\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfd@$+\xb3Jp\x85^\xf0\xfd\x90\xf3\x80-\xec!6\xb3'\x89h\xa8u\a>)$\x00\x00\xe0\x94\xfdE,9i\xec\xe3\x80\x1cT 
\xf1\xcd\u02a1\xc7\x1e\xd2=\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\u07d4\xfdKU\x1fo\xdb\u0366\xc5\x11\xb5\xbb7\"P\xa6\xb7\x83\xe54\x89\x01\x1d\xe1\xe6\xdbE\f\x00\x00\u07d4\xfdK\x98\x95X\xae\x11\xbe\f;6\xe2\xd6\xf2\xa5J\x93C\xca.\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfdM\xe8\xe3t\x8a(\x9c\xf7\xd0`Q}\x9d88\x8d\xb0\x1f\xb8\x89\r\x8drkqw\xa8\x00\x00\u07d4\xfdZc\x15\u007f\x91O\u04d8\uac5c\x13}\xd9U\v\xb7q\\\x89\x05k\xc7^-c\x10\x00\x00\u07d4\xfd`\u04b5\xaf=5\xf7\xaa\xf0\u00d3\x05.y\xc4\xd8#\u0645\x89\x03\x0e\xb5\r.\x14\b\x00\x00\u07d4\xfdhm\xe5?\xa9\u007f\x99c\x9e%hT\x97 \xbcX\x8c\x9e\xfc\x89j\xc5\xc6-\x94\x86\a\x00\x00\u07d4\xfd~\u078fR@\xa0eA\xebi\x9dx,/\x9a\xfb!p\xf6\x89Hz\x9a0E9D\x00\x00\u07d4\xfd\x81+\u019f\xb1p\xefW\xe22~\x80\xaf\xfd\x14\xf8\xe4\xb6\u0489lk\x93[\x8b\xbd@\x00\x00\u07d4\xfd\x88\xd1\x14\"\x0f\b\x1c\xb3\xd5\xe1[\xe8\x15*\xb0sfWj\x89\x10CV\x1a\x88)0\x00\x00\u07d4\xfd\x91\x856\xa8\xef\xa6\xf6\xce\xfe\x1f\xa1\x159\x95\xfe\xf5\xe3=;\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xfd\x92\x0fr&\x82\xaf\xb5\xafE\x1b\x05D\xd4\xf4\x1b;\x9dWB\x89~R\x05j\x12?<\x00\x00\u07d4\xfd\x95y\xf1\x19\xbb\xc8\x19\xa0+a\u3348\x03\xc9B\xf2M2\x89\x05\xb9~\x90\x81\xd9@\x00\x00\u07d4\xfd\xa0\xce\x153\a\a\xf1\v\xce2\x01\x17- \x18\xb9\xdd\xeat\x89\x02\xd0A\xd7\x05\xa2\xc6\x00\x00\xe0\x94\xfd\xa3\x04(\x19\xaf>f)\x00\xe1\xb9+CX\xed\xa6\xe9%\x90\x8a\x19\a\xa2\x84\u054fc\xe0\x00\x00\u07d4\xfd\xa6\x81\x0e\xa5\xac\x98]o\xfb\xf1\xc5\x11\xf1\xc1B\xed\xcf\xdd\xf7\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xfd\xb39D\xf26\x06\x15\xe5\xbe#\x95w\u0221\x9b\xa5-\x98\x87\x89 \x9d\x92/RY\xc5\x00\x00\u07d4\xfd\xbaSY\xf7\xec;\xc7p\xacI\x97]\x84N\xc9qbV\xf1\x8965\u026d\xc5\u07a0\x00\x00\xe0\x94\xfd\xc4\xd4vZ\x94/[\xf9i1\xa9\xe8\xccz\xb8\xb7W\xffL\x8a\x12lG\x8a\x0e>\xa8`\x00\x00\xe0\x94\xfd\xcd]\x80\xb1\x05\x89zW\xab\xc4xev\x8b)\x00RB\x95\x8a\x01Z\xf1\u05cbX\xc4\x00\x00\x00\u0794\xfd\xd1\x19_y}O5q}\x15\xe6\xf9\x81\n\x9a?\xf5T`\x88\xfc\x93c\x92\x80\x1c\x00\x00\u07d4\xfd\xd5\x02\xa7N\x81;\u03e3U\xce\xda<\x17ojhq\xaf\u007f\x89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xfd\u357c\vm\\\xbbL\x1d\x8f\xea>\vK\xffc^\x9d\xb7\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfd\xea\xac*\xcf\x1d\x13\x8e\x19\xf2\xfc?\x9f\xb7E\x92\xe3\ud04a\x89$=M\x18\"\x9c\xa2\x00\x00\u07d4\xfd\xec\xc8-\xdf\xc5a\x92\xe2oV<=h\xcbTJ\x96\xbf\xed\x89\x17\xda:\x04\u01f3\xe0\x00\x00\u07d4\xfd\xf4#C\x01\x9b\v\fk\xf2`\xb1s\xaf\xab~E\xb9\xd6!\x89lj\xccg\u05f1\xd4\x00\x00\u07d4\xfd\xf4I\xf1\b\xc6\xfbOZ+\b\x1e\xed~E\u645eM%\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfd\xfda4\xc0J\x8a\xb7\xeb\x16\xf0\x06C\xf8\xfe\xd7\u06aa\ucc89\x15\xaf\x1dx\xb5\x8c@\x00\x00\u07d4\xfe\x00\xbfC\x99\x11\xa5S\x98-\xb68\x03\x92E\xbc\xf02\xdb\u0709\x15[\xd90\u007f\x9f\xe8\x00\x00\u07d4\xfe\x01n\xc1~\xc5\xf1\x0e;\xb9\x8f\xf4\xa1\xed\xa0E\x15v\x82\xab\x89\x14_T\x02\xe7\xb2\xe6\x00\x00\u07d4\xfe\x0e0\xe2\x14)\rt=\xd3\x0e\xb0\x82\xf1\xf0\xa5\"Z\xdea\x89\n\u05ce\xbcZ\xc6 \x00\x00\u07d4\xfe!\v\x8f\x04\xdcmOv!j\xcf\xcb\u055b\xa8;\xe9\xb60\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfe\"\xa0\xb3\x88f\x8d\x1a\xe2d>w\x1d\xac\xf3\x8aCB#\u0309\xd8\xdb^\xbd{&8\x00\x00\u07d4\xfe6&\x88\x84_\xa2D\u0300~K\x110\xeb7A\xa8\x05\x1e\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfe8'\xd5v0\u03c7a\xd5\x12y{\v\x85\x8eG\x8b\xbd\x12\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfeA\x8bB\x1a\x9cm76\x02y\x04u\xd20>\x11\xa7Y0\x897\b\xba\xed=h\x90\x00\x00\u07d4\xfeBI\x12yP\xe2\xf8\x96\xec\x0e~.=\x05Z\xab\x10U\x0f\x89$=M\x18\"\x9c\xa2\x00\x00\xe0\x94\xfeM\x84\x03!o\xd5qW+\xf1\xbd\xb0\x1d\x00W\x89x\u0588\x8a\x02\x15\xf85\xbcv\x9d\xa8\x00\x00\u07d4\xfeS\xb9I\x89\u0619d\xda 
aS\x95&\xbb\xe9y\xdd.\xa9\x89h\xa8u\a>)$\x00\x00\u07d4\xfeT\x9b\xbf\xe6G@\x18\x98\x92\x93%8\u06afF\u04b6\x1dO\x89\x02+\x1c\x8c\x12'\xa0\x00\x00\xe0\x94\xfea]\x97\\\b\x87\xe0\xc9\x11>\xc7)\x84 \xa7\x93\xaf\x8b\x96\x8a\x01\xb1\xaeMn.\xf5\x00\x00\x00\u07d4\xfee\xc4\x18\x8dy\"Wi\td D\xfd\xc5#\x95V\x01e\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xfei\u007f\xf2,\xa5G\xbf\xc9^3\xd9`\xda`\\gc\xf3[\x89G\xd4\x11\x9f\xd9`\x94\x00\x00\u07d4\xfej\x89[y\\\xb4\xbf\x85\x90=<\xe0\x9cZ\xa49S\u04ff\x89\xb8Pz\x82\a( \x00\x00\u07d4\xfeo_B\xb6\x19;\x1a\xd1b\x06\u4bf5#\x9dM}\xb4^\x89]\u0212\xaa\x111\xc8\x00\x00\u07d4\xfep\x11\xb6\x98\xbf3q\x13-tE\xb1\x9e\xb5\xb0\x945j\xee\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfe\x80\xe9#-\xea\xff\x19\xba\xf9\x98i\x88:K\xdf\x00\x04\xe5<\x89.b\xf2\ni\xbe@\x00\x00\u07d4\xfe\x8en6eW\r\xffz\x1b\xdaiz\xa5\x89\xc0\xb4\xe9\x02J\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfe\x8f\x1f\u072b\u007f\xbe\u0266\xa3\xfc\xc5\aa\x96\x00P\\6\xa3\x89\x01\x11du\x9f\xfb2\x00\x00\u07d4\xfe\x91\xec\xcf+\xd5f\xaf\xa1\x16\x96\xc5\x04\x9f\xa8Lic\nR\x89i*\xe8\x89p\x81\xd0\x00\x00\u07d4\xfe\x96\xc4\xcd8\x15b@\x1a\xa3*\x86\xe6[\x9dR\xfa\x8a\xee'\x89\x8f\x1d\\\x1c\xae7@\x00\x00\u07d4\xfe\x98\xc6d\xc3\xe4G\xa9^i\xbdX!q\xb7\x17n\xa2\xa6\x85\x89\xd8\xd7&\xb7\x17z\x80\x00\x00\u07d4\xfe\x9a\xd1.\xf0]m\x90&\x1f\x96\xc84\n\x03\x81\x97M\xf4w\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfe\x9c\x0f\xff\xef\xb8\x03\b\x12V\xc0\xcfMfY\xe6\xd3>\xb4\xfb\x89R\xd5B\x80O\x1c\xe0\x00\x00\u07d4\xfe\x9c\xfc;\xb2\x93\u0772\x85\xe6%\xf3X/t\xa6\xb0\xa5\xa6\u0349j\xcb=\xf2~\x1f\x88\x00\x00\xe0\x94\xfe\x9e\x11\x97\u05d7JvH\xdc\u01e01\x12\xa8\x8e\xdb\xc9\x04]\x8a\x01\n\xfc\x1a\xde;N\xd4\x00\x00\xe0\x94\xfe\xac\xa2\xactbK\xf3H\xda\u0258QC\xcf\xd6R\xa4\xbeU\x8a\x05\x89\u007f\u02f0)\x14\b\x80\x00\u07d4\xfe\xad\x18\x03\xe5\xe77\xa6\x8e\x18G-\x9a\xc7\x15\xf0\x99L\u00be\x89\x1b\x1a\xe4\xd6\xe2\xefP\x00\x00\u07d4\xfe\xb8\xb8\xe2\xafqj\xe4\x1f\xc7\xc0K\xcf)T\x01VF\x1ek\x89TQt\xa5(\xa7z\x00\x00\u07d4\xfe\xb9-0\xbf\x01\xff\x9a\x19\x01flUsS+\xfa\a\xee\xec\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xfe\xbc1s\xbc\x90r\x13cT\x00+{O\xb3\xbf\xc5?\"\xf1\x89\x14\x0e\xc8\x0f\xa7\xee\x88\x00\x00\u07d4\xfe\xbdH\xd0\xff\xdb\xd5el\xd5\xe6\x866:a\x14R(\xf2y\x89\x97\xc9\xceL\xf6\xd5\xc0\x00\x00\u07d4\xfe\xbd\x9f\x81\xcfx\xbd_\xb6\u0139\xa2K\xd4\x14\xbb\x9b\xfaLN\x89k\xe1\x0f\xb8\xedn\x13\x80\x00\u07d4\xfe\xc0o\xe2{D\u01c4\xb29n\xc9/{\x92:\xd1~\x90w\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xfe\xc1NT\x85\xde+>\xef^t\xc4aF\u06ceEN\x035\x89\t\xb4\x1f\xbf\x9e\n\xec\x00\x00\u07d4\xfe\xd8Gm\x10\u0544\xb3\x8b\xfag7`\x0e\xf1\x9d5\xc4\x1e\u0609b\xa9\x92\xe5:\n\xf0\x00\x00\u07d4\xfe\xef;n\xab\xc9J\xff\xd31\f\x1cM\x0ee7^\x13\x11\x19\x89\x01\x15\x8eF\t\x13\xd0\x00\x00\u07d4\xfe\xf0\x9dp$?9\xed\x8c\xd8\x00\xbf\x96QG\x9e\x8fJ\xca<\x89\n\u05ce\xbcZ\xc6 
\x00\x00\u07d4\xfe\xf3\xb3\u07ad\x1ai&\u051a\xa3+\x12\xc2*\xf5M\x9f\xf9\x85\x8965\u026d\xc5\u07a0\x00\x00\u07d4\xff\v|\xb7\x1d\xa9\xd4\xc1\xean\xcc(\xeb\xdaPLc\xf8/\u04498\x8a\x88]\xf2\xfcl\x00\x00\u07d4\xff\f\xc6\u73c9lk\x93[\x8b\xbd@\x00\x00\u07d4\xff'&)AH\xb8lx\xa97$\x97\xe4Y\x89\x8e\xd3\xfe\xe3\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xff=\xedz@\u04ef\xf0\u05e8\xc4_\xa6\x13j\xa0C=\xb4W\x89lh\xcc\u041b\x02,\x00\x00\u07d4\xff>\xeeW\xc3Mm\xae\x97\r\x8b1\x11\x17\xc55\x86\xcd5\x02\x89\\(=A\x03\x94\x10\x00\x00\u07d4\xff>\xf6\xba\x15\x1c!\xb5\x99\x86\xaed\xf6\xe8\"\x8b\u0262\xc73\x89lk\x93[\x8b\xbd@\x00\x00\u07d4\xffA\xd9\xe1\xb4\xef\xfe\x18\u0630\xd1\xf6?\xc4%_\xb4\xe0l=\x89Hz\x9a0E9D\x00\x00\u07d4\xffE\xcb4\xc9(6M\x9c\xc9\u063b\x0074ta\x8f\x06\xf3\x89\x05k\xc7^-c\x10\x00\x00\xe0\x94\xffI\xa7u\x81N\xc0\x00Q\xa7\x95\xa8u\xde$Y.\xa4\x00\u050a*Z\x05\x8f\u0095\xed\x00\x00\x00\u07d4\xffJ@\x8fP\xe9\xe7!F\xa2\x8c\xe4\xfc\x8d\x90'\x1f\x11n\x84\x89j\xcb=\xf2~\x1f\x88\x00\x00\u07d4\xffM\x9c\x84\x84\xc4\x10T\x89H\xa4\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x941\xb9\x8d\x14\x00{\xde\xe67)\x80\x86\x98\x8a\v\xbd1\x18E#\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" const goerliAllocData = "\xf9\x04\x06\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 \x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xe0\x94L*\xe4\x82Y5\x05\xf0\x16<\xde\xfc\a>\x81\xc6<\xdaA\a\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xa8\xe8\xf1G2e\x8eKQ\xe8q\x191\x05:\x8ai\xba\xf2\xb1\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe1\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\u08bdBX\xd2v\x887\xba\xa2j(\xfeq\xdc\a\x9f\x84\u01cbJG\xe3\xc1$H\xf4\xad\x00\x00\x00" const sepoliaAllocData = 
"\xf9\x01\xee\u0791i\x16\xa8{\x823?BE\x04f#\xb27\x94\xc6\\\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\x10\xf5\xd4XT\xe08\a\x14\x85\xac\x9e@#\b\u03c0\xd2\xd2\xfe\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\u0794y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\x88\r\u0db3\xa7d\x00\x00\xe0\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\x8b\u007f\tw\xbbO\x0f\xbepv\xfa\"\xbc$\xac\xa0CX?^\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xa2\xa6\xd949\x14O\xfeM'\xc9\xe0\x88\xdc\u0637\x83\x94bc\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xaa\xec\x869DA\xf9\x15\xbc\xe3\xe6\xab9\x99w\xe9\x90o;i\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\u1532\x1c3\xde\x1f\xab?\xa1T\x99\xc6+Y\xfe\f\xc3%\x00 \u044bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xbc\x11)Y6\xaay\u0554\x13\x9d\xe1\xb2\xe1&)AO;\u06ca\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xbe\xef2\xca[\x9a\x19\x8d'\xb4\xe0/LpC\x9f\xe6\x03V\u03ca\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94\xd7\xd7lX\xb3\xa5\x19\xe9\xfal\xc4\xd2-\xc0\x17%\x9b\u011f\x1e\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xd7\xed\xdbx\xed)[<\x96)$\x0e\x89$\xfb\x8d\x88t\xdd\u060a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xe2\xe2e\x90(\x147\x84\xd5W\xbc\xeco\xf3\xa0r\x10H\x88\n\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xf4|\xae\x1c\xf7\x9c\xa6u\x8b\xfcx}\xbd!\u6f7eq\x12\xb8\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00" -const KilnAllocData = `{ - "config": { - "chainId": 1337802, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "mergeForkBlock": 1000, - "terminalTotalDifficulty": 20000000000000 - }, - "alloc": { - "0x0000000000000000000000000000000000000000": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000001": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000003": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000004": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000005": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000006": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000007": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000008": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000009": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000000f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000010": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000011": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000012": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000013": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000014": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000015": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000016": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000017": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000018": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000019": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000001f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000020": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000021": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000022": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000023": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000024": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000025": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000026": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000027": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000028": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000029": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000002f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000030": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000031": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000032": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000033": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000034": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000035": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000036": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000037": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000038": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000039": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000003f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000040": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000041": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000042": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000043": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000044": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000045": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000046": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000047": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000048": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000049": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000004f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000050": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000051": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000052": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000053": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000054": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000055": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000056": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000057": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000058": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000059": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000005f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000060": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000061": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000062": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000063": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000064": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000065": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000066": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000067": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000068": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000069": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000006f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000070": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000071": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000072": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000073": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000074": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000075": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000076": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000077": { - "balance": "1" - }, - 
"0x0000000000000000000000000000000000000078": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000079": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000007f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000080": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000081": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000082": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000083": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000084": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000085": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000086": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000087": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000088": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000089": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000008f": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000090": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000091": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000092": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000093": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000094": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000095": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000096": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000097": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000098": { - "balance": "1" - }, - "0x0000000000000000000000000000000000000099": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009a": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009b": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009c": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009d": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009e": { - "balance": "1" - }, - "0x000000000000000000000000000000000000009f": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000a8": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000a9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000aa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ab": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ac": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ad": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ae": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000af": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000b9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ba": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000be": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000bf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000c9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ca": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ce": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000cf": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000d9": { - "balance": "1" - }, - 
"0x00000000000000000000000000000000000000da": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000db": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000dd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000de": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000df": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000e9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ea": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000eb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ec": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ed": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ee": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ef": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f0": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f1": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f2": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f3": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f4": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f5": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f6": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f7": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f8": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000f9": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fa": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fb": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fc": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fd": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000fe": { - "balance": "1" - }, - "0x00000000000000000000000000000000000000ff": { - "balance": "1" - }, - "0x4242424242424242424242424242424242424242": { - "balance": "0", - "code": 
"0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b
50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b50516040805160208181019490945280820192909252805180830382018152606
09092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b604080516008808252818301909252606091602082018180
36833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a26469706673582212201dd26f37a621703009abf16e77e69c93dc50c79db7f6cc37543e3e0e3decdc9764736f6c634300060b0033", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", - "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", - "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", - "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", - "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", - "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", - "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", - "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", - "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", - "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", - 
"0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", - "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", - "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", - "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", - "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", - "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", - "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", - "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", - "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", - "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", - "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", - "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", - "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", - "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", - "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", - "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", - "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", - "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", - "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", - "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", - "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" - } - }, - "0xf97e180c050e5Ab072211Ad2C213Eb5AEE4DF134": { - "balance": "10000000000000000000000000" - }, - "0x2cA5F489CC1Fd1CEC24747B64E8dE0F4A6A850E1": { - "balance": "10000000000000000000000000" - }, - "0x7203bd333a874D9d329050ecE393820fCD501eaA": { - "balance": "10000000000000000000000000" - }, - "0xA51918aA40D78Ff8be939bf0E8404252875c6aDF": { - "balance": "10000000000000000000000000" - }, - "0xAA81078e6b2121dd7A846690DFdD6b10d7658d8B": { - "balance": "10000000000000000000000000" - }, - "0xFA2d31D8f21c1D1633E9BEB641dF77D21D63ccDd": { - "balance": 
"10000000000000000000000000" - }, - "0xf751C9c6d60614226fE57D2cAD6e10C856a2ddA3": { - "balance": "10000000000000000000000000" - }, - "0x9cD16887f6A808AEaa65D3c840f059EeA4ca1319": { - "balance": "10000000000000000000000000" - }, - "0x2E07043584F11BFF0AC39c927665DF6c6ebaffFB": { - "balance": "10000000000000000000000000" - }, - "0x60e771E5eCA8E26690920de669520Da210D64A9B": { - "balance": "10000000000000000000000000" - }, - "0xFC4db92C2Cf77CE02fBfd7Da0346d2CbFA66aD59": { - "balance": "10000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x01", - "extraData": "", - "gasLimit": "0x400000", - "nonce": "0x1234", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0" - }` +const holeskyAllocData = "\xf9,\x85\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 \x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\x7f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u
00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\u0791i\x16\xa8{\x823?BE\x04f#\xb27\x94\xc6\\\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe1\x94\v\xe9I\x92\x8f\xf1\x99\xc9\xeb\xa9\xe1\x10\xdb!\n\xa5\xc9N\xfa\u040b|\x13\xbcK,\x13\x8e\u0344h\xa0\x03\x7f\x05\x8a\x9d\xaf\xady\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xf9!2\x94BBBBBBBBBBBBBBBBBBBB\x80\xf9!\x19\x80\xb9\x18\xd6`\x80`@R`\x046\x10a\x00?W`\x005`\xe0\x1c\x80c\x01\xff\u0267\x14a\x00DW\x80c\"\x89Q\x18\x14a\x00\xa4W\x80cb\x1f\xd10\x14a\x01\xbaW\x80c\xc5\xf2\x89/\x14a\x02DW[`\x00\x80\xfd[4\x80\x15a\x00PW`\x00\x80\xfd[Pa\x00\x90`\x04\x806\x03` \x81\x10\x15a\x00gW`\x00\x80\xfd[P5\x7f\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16a\x02kV[`@\x80Q\x91\x15\x15\x82RQ\x90\x81\x90\x03` \x01\x90\xf3[a\x01\xb8`\x04\x806\x03`\x80\x81\x10\x15a\x00\xbaW`\x00\x80\xfd[\x81\x01\x90` \x81\x01\x815d\x01\x00\x00\x00\x00\x81\x11\x15a\x00\xd5W`\x00\x80\xfd[\x82\x01\x83` \x82\x01\x11\x15a\x00\xe7W`\x00\x80\xfd[\x805\x90` \x01\x91\x84`\x01\x83\x02\x84\x01\x11d\x01\x00\x00\x00\x00\x83\x11\x17\x15a\x01\tW`\x00\x80\xfd[\x91\x93\x90\x92\x90\x91` \x81\x01\x905d\x01\x00\x00\x00\x00\x81\x11\x15a\x01'W`\x00\x80\xfd[\x82\x01\x83` \x82\x01\x11\x15a\x019W`\x00\x80\xfd[\x805\x90` \x01\x91\x84`\x01\x83\x02\x84\x01\x11d\x01\x00\x00\x00\x00\x83\x11\x17\x15a\x01[W`\x00\x80\xfd[\x91\x93\x90\x92\x90\x91` \x81\x01\x905d\x01\x00\x00\x00\x00\x81\x11\x15a\x01yW`\x00\x80\xfd[\x82\x01\x83` \x82\x01\x11\x15a\x01\x8bW`\x00\x80\xfd[\x805\x90` \x01\x91\x84`\x01\x83\x02\x84\x01\x11d\x01\x00\x00\x00\x00\x83\x11\x17\x15a\x01\xadW`\x00\x80\xfd[\x91\x93P\x91P5a\x03\x04V[\x00[4\x80\x15a\x01\xc6W`\x00\x80\xfd[Pa\x01\xcfa\x10\xb5V[`@\x80Q` \x80\x82R\x83Q\x81\x83\x01R\x83Q\x91\x92\x83\x92\x90\x83\x01\x91\x85\x01\x90\x80\x83\x83`\x00[\x83\x81\x10\x15a\x02\tW\x81\x81\x01Q\x83\x82\x01R` \x01a\x01\xf1V[PPPP\x90P\x90\x81\x01\x90`\x1f\x16\x80\x15a\x026W\x80\x82\x03\x80Q`\x01\x83` \x03a\x01\x00\n\x03\x19\x16\x81R` \x01\x91P[P\x92PPP`@Q\x80\x91\x03\x90\xf3[4\x80\x15a\x02PW`\x00\x80\xfd[Pa\x02Ya\x10\xc7V[`@\x80Q\x91\x82RQ\x90\x81\x90\x03` \x01\x90\xf3[`\x00\x7f\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x82\x16\x7f\x01\xff\u0267\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x80a\x02\xfeWP\x7f\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x82\x16\x7f\x85d\t\a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14[\x92\x91PPV[`0\x86\x14a\x03]W`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`&\x81R` \x01\x80a\x18\x05`&\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[` \x84\x14a\x03\xb6W`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`6\x81R` 
\x01\x80a\x17\x9c`6\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[``\x82\x14a\x04\x0fW`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`)\x81R` \x01\x80a\x18x`)\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[g\r\u0db3\xa7d\x00\x004\x10\x15a\x04pW`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`&\x81R` \x01\x80a\x18R`&\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[c;\x9a\xca\x004\x06\x15a\x04\xcdW`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`3\x81R` \x01\x80a\x17\xd2`3\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[c;\x9a\xca\x004\x04g\xff\xff\xff\xff\xff\xff\xff\xff\x81\x11\x15a\x055W`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`'\x81R` \x01\x80a\x18+`'\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[``a\x05@\x82a\x14\xbaV[\x90P\x7fd\x9b\xbcb\xd0\xe3\x13B\xaf\xeaN\\\xd8-@I\xe7\xe1\xee\x91/\xc0\x88\x9a\xa7\x90\x80;\xe3\x908\u0149\x89\x89\x89\x85\x8a\x8aa\x05u` Ta\x14\xbaV[`@\x80Q`\xa0\x80\x82R\x81\x01\x89\x90R\x90\x81\x90` \x82\x01\x90\x82\x01``\x83\x01`\x80\x84\x01`\xc0\x85\x01\x8e\x8e\x80\x82\x847`\x00\x83\x82\x01R`\x1f\x01\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x16\x90\x91\x01\x87\x81\x03\x86R\x8c\x81R` \x01\x90P\x8c\x8c\x80\x82\x847`\x00\x83\x82\x01\x81\x90R`\x1f\x90\x91\x01\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x16\x90\x92\x01\x88\x81\x03\x86R\x8cQ\x81R\x8cQ` \x91\x82\x01\x93\x91\x8e\x01\x92P\x90\x81\x90\x84\x90\x84\x90[\x83\x81\x10\x15a\x06HW\x81\x81\x01Q\x83\x82\x01R` \x01a\x060V[PPPP\x90P\x90\x81\x01\x90`\x1f\x16\x80\x15a\x06uW\x80\x82\x03\x80Q`\x01\x83` \x03a\x01\x00\n\x03\x19\x16\x81R` \x01\x91P[P\x86\x81\x03\x83R\x88\x81R` \x01\x89\x89\x80\x82\x847`\x00\x83\x82\x01\x81\x90R`\x1f\x90\x91\x01\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x16\x90\x92\x01\x88\x81\x03\x84R\x89Q\x81R\x89Q` \x91\x82\x01\x93\x91\x8b\x01\x92P\x90\x81\x90\x84\x90\x84\x90[\x83\x81\x10\x15a\x06\xefW\x81\x81\x01Q\x83\x82\x01R` \x01a\x06\xd7V[PPPP\x90P\x90\x81\x01\x90`\x1f\x16\x80\x15a\a\x1cW\x80\x82\x03\x80Q`\x01\x83` \x03a\x01\x00\n\x03\x19\x16\x81R` \x01\x91P[P\x9dPPPPPPPPPPPPPP`@Q\x80\x91\x03\x90\xa1`\x00`\x02\x8a\x8a`\x00`\x80\x1b`@Q` \x01\x80\x84\x84\x80\x82\x847\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x94\x16\x91\x90\x93\x01\x90\x81R`@\x80Q\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf0\x81\x84\x03\x01\x81R`\x10\x90\x92\x01\x90\x81\x90R\x81Q\x91\x95P\x93P\x83\x92P` \x85\x01\x91P\x80\x83\x83[` \x83\x10a\a\xfcW\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\a\xbfV[Q\x81Q` 
\x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\bYW=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\bnW`\x00\x80\xfd[PQ\x90P`\x00`\x02\x80a\b\x84`@\x84\x8a\x8ca\x16\xfeV[`@Q` \x01\x80\x83\x83\x80\x82\x847\x80\x83\x01\x92PPP\x92PPP`@Q` \x81\x83\x03\x03\x81R\x90`@R`@Q\x80\x82\x80Q\x90` \x01\x90\x80\x83\x83[` \x83\x10a\b\xf8W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\b\xbbV[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\tUW=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\tjW`\x00\x80\xfd[PQ`\x02a\t{\x89`@\x81\x8da\x16\xfeV[`@Q`\x00\x90` \x01\x80\x84\x84\x80\x82\x847\x91\x90\x91\x01\x92\x83RPP`@\x80Q\x80\x83\x03\x81R` \x92\x83\x01\x91\x82\x90R\x80Q\x90\x94P\x90\x92P\x82\x91\x84\x01\x90\x80\x83\x83[` \x83\x10a\t\xf4W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\t\xb7V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\nQW=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\nfW`\x00\x80\xfd[PQ`@\x80Q` \x81\x81\x01\x94\x90\x94R\x80\x82\x01\x92\x90\x92R\x80Q\x80\x83\x03\x82\x01\x81R``\x90\x92\x01\x90\x81\x90R\x81Q\x91\x92\x90\x91\x82\x91\x84\x01\x90\x80\x83\x83[` \x83\x10a\n\xdaW\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\n\x9dV[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\v7W=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\vLW`\x00\x80\xfd[PQ`@\x80Q` \x81\x01\x85\x81R\x92\x93P`\x00\x92`\x02\x92\x83\x92\x87\x92\x8f\x92\x8f\x92\x01\x83\x83\x80\x82\x847\x80\x83\x01\x92PPP\x93PPPP`@Q` \x81\x83\x03\x03\x81R\x90`@R`@Q\x80\x82\x80Q\x90` \x01\x90\x80\x83\x83[` \x83\x10a\v\xd9W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\v\x9cV[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\f6W=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\fKW`\x00\x80\xfd[PQ`@Q\x86Q`\x02\x91\x88\x91`\x00\x91\x88\x91` \x91\x82\x01\x91\x82\x91\x90\x86\x01\x90\x80\x83\x83[` 
\x83\x10a\f\xa9W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\flV[`\x01\x83` \x03a\x01\x00\n\x03\x80\x19\x82Q\x16\x81\x84Q\x16\x80\x82\x17\x85RPPPPPP\x90P\x01\x83g\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16g\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x81R`\x18\x01\x82\x81R` \x01\x93PPPP`@Q` \x81\x83\x03\x03\x81R\x90`@R`@Q\x80\x82\x80Q\x90` \x01\x90\x80\x83\x83[` \x83\x10a\rNW\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\r\x11V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\r\xabW=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\r\xc0W`\x00\x80\xfd[PQ`@\x80Q` \x81\x81\x01\x94\x90\x94R\x80\x82\x01\x92\x90\x92R\x80Q\x80\x83\x03\x82\x01\x81R``\x90\x92\x01\x90\x81\x90R\x81Q\x91\x92\x90\x91\x82\x91\x84\x01\x90\x80\x83\x83[` \x83\x10a\x0e4W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\r\xf7V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\x0e\x91W=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\x0e\xa6W`\x00\x80\xfd[PQ\x90P\x85\x81\x14a\x0f\x02W`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`T\x81R` \x01\x80a\x17H`T\x919``\x01\x91PP`@Q\x80\x91\x03\x90\xfd[` Tc\xff\xff\xff\xff\x11a\x0f`W`@Q\x7f\b\xc3y\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81R`\x04\x01\x80\x80` \x01\x82\x81\x03\x82R`!\x81R` \x01\x80a\x17'`!\x919`@\x01\x91PP`@Q\x80\x91\x03\x90\xfd[` \x80T`\x01\x01\x90\x81\x90U`\x00[` \x81\x10\x15a\x10\xa9W\x81`\x01\x16`\x01\x14\x15a\x0f\xa0W\x82`\x00\x82` \x81\x10a\x0f\x91W\xfe[\x01UPa\x10\xac\x95PPPPPPV[`\x02`\x00\x82` \x81\x10a\x0f\xafW\xfe[\x01T\x84`@Q` \x01\x80\x83\x81R` \x01\x82\x81R` \x01\x92PPP`@Q` \x81\x83\x03\x03\x81R\x90`@R`@Q\x80\x82\x80Q\x90` \x01\x90\x80\x83\x83[` \x83\x10a\x10%W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\x0f\xe8V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\x10\x82W=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\x10\x97W`\x00\x80\xfd[PQ\x92P`\x02\x82\x04\x91P`\x01\x01a\x0fnV[P\xfe[PPPPPPPV[``a\x10\xc2` Ta\x14\xbaV[\x90P\x90V[` T`\x00\x90\x81\x90\x81[` \x81\x10\x15a\x12\xf0W\x81`\x01\x16`\x01\x14\x15a\x11\xe6W`\x02`\x00\x82` \x81\x10a\x10\xf5W\xfe[\x01T\x84`@Q` \x01\x80\x83\x81R` \x01\x82\x81R` \x01\x92PPP`@Q` \x81\x83\x03\x03\x81R\x90`@R`@Q\x80\x82\x80Q\x90` 
\x01\x90\x80\x83\x83[` \x83\x10a\x11kW\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\x11.V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\x11\xc8W=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\x11\xddW`\x00\x80\xfd[PQ\x92Pa\x12\xe2V[`\x02\x83`!\x83` \x81\x10a\x11\xf6W\xfe[\x01T`@Q` \x01\x80\x83\x81R` \x01\x82\x81R` \x01\x92PPP`@Q` \x81\x83\x03\x03\x81R\x90`@R`@Q\x80\x82\x80Q\x90` \x01\x90\x80\x83\x83[` \x83\x10a\x12kW\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\x12.V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\x12\xc8W=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\x12\xddW`\x00\x80\xfd[PQ\x92P[`\x02\x82\x04\x91P`\x01\x01a\x10\xd1V[P`\x02\x82a\x12\xff` Ta\x14\xbaV[`\x00`@\x1b`@Q` \x01\x80\x84\x81R` \x01\x83\x80Q\x90` \x01\x90\x80\x83\x83[` \x83\x10a\x13ZW\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\x13\x1dV[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x95\x90\x95\x16\x92\x01\x91\x82RP`@\x80Q\x80\x83\x03\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\x01\x81R`\x18\x90\x92\x01\x90\x81\x90R\x81Q\x91\x95P\x93P\x83\x92\x85\x01\x91P\x80\x83\x83[` \x83\x10a\x14?W\x80Q\x82R\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\x90\x92\x01\x91` \x91\x82\x01\x91\x01a\x14\x02V[Q\x81Q` \x93\x84\x03a\x01\x00\n\x7f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x80\x19\x90\x92\x16\x91\x16\x17\x90R`@Q\x91\x90\x93\x01\x94P\x91\x92PP\x80\x83\x03\x81\x85Z\xfa\x15\x80\x15a\x14\x9cW=`\x00\x80>=`\x00\xfd[PPP`@Q=` \x81\x10\x15a\x14\xb1W`\x00\x80\xfd[PQ\x92PPP\x90V[`@\x80Q`\b\x80\x82R\x81\x83\x01\x90\x92R``\x91` \x82\x01\x81\x806\x837\x01\x90PP\x90P`\xc0\x82\x90\x1b\x80`\a\x1a`\xf8\x1b\x82`\x00\x81Q\x81\x10a\x14\xf4W\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x06\x1a`\xf8\x1b\x82`\x01\x81Q\x81\x10a\x157W\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x05\x1a`\xf8\x1b\x82`\x02\x81Q\x81\x10a\x15zW\xfe[` 
\x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x04\x1a`\xf8\x1b\x82`\x03\x81Q\x81\x10a\x15\xbdW\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x03\x1a`\xf8\x1b\x82`\x04\x81Q\x81\x10a\x16\x00W\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x02\x1a`\xf8\x1b\x82`\x05\x81Q\x81\x10a\x16CW\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x01\x1a`\xf8\x1b\x82`\x06\x81Q\x81\x10a\x16\x86W\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SP\x80`\x00\x1a`\xf8\x1b\x82`\a\x81Q\x81\x10a\x16\xc9W\xfe[` \x01\x01\x90~\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x19\x16\x90\x81`\x00\x1a\x90SPP\x91\x90PV[`\x00\x80\x85\x85\x11\x15a\x17\rW\x81\x82\xfd[\x83\x86\x11\x15a\x17\x19W\x81\x82\xfd[PP\x82\x01\x93\x91\x90\x92\x03\x91PV\xfeDepositContract: merkle tree fullDepositContract: reconstructed DepositData does not match supplied deposit_data_rootDepositContract: invalid withdrawal_credentials lengthDepositContract: deposit value not multiple of gweiDepositContract: invalid pubkey lengthDepositContract: deposit value too highDepositContract: deposit value too lowDepositContract: invalid signature length\xa2dipfsX\"\x12 \x1d\xd2o7\xa6!p0\t\xab\xf1nw\u6713\xdcP\u01dd\xb7\xf6\xcc7T>>\x0e=\xec\u0717dsolcC\x00\x06\v\x003\xf9\b<\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\"\xa0\xf5\xa5\xfdB\xd1j 0'\x98\xefn\xd3\t\x97\x9bC\x00=# 
\xd9\xf0\xe8\xea\x981\xa9'Y\xfbK\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00#\xa0\xdbV\x11N\x00\xfd\xd4\xc1\xf8\\\x89+\xf3Z\u0268\x92\x89\xaa\xec\xb1\xeb\u0429l\xde`jt\x8b]q\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\xa0\u01c0\t\xfd\xf0\x7f\xc5j\x11\xf1\"7\x06X\xa3S\xaa\xa5B\xedc\xe4LK\xc1_\xf4\xcd\x10Z\xb3<\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00%\xa0Sm\x98\x83\x7f-\xd1e\xa5]^\xea\xe9\x14\x85\x95Dr\xd5o$m\xf2V\xbf<\xae\x195*\x12<\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00&\xa0\x9e\xfd\xe0R\xaa\x15B\x9f\xae\x05\xba\xd4\u0431\xd7\xc6M\xa6M\x03\u05e1\x85JX\x8c,\xb8C\f\r0\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\xa0\u060d\xdf\xee\xd4\x00\xa8uU\x96\xb2\x19B\xc1I~\x11L0.a\x18)\x0f\x91\xe6w)v\x04\x1f\xa1\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(\xa0\x87\xeb\r\u06e5~5\xf6\u0486g8\x02\xa4\xafYu\xe2%\x06\xc7\xcfLd\xbbk\xe5\xee\x11R\x7f,\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00)\xa0&\x84dv\xfd_\xc5J]C8Qg\xc9QD\xf2d?S<\xc8[\xb9\xd1kx/\x8d}\xb1\x93\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00*\xa0Pm\x86X-%$\x05\xb8@\x01\x87\x92\xca\u04bf\x12Y\xf1\xefZ\xa5\xf8\x87\xe1<\xb2\xf0\tOQ\xe1\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\xa0\xff\xff\n\xd7\xe6Yw/\x954\xc1\x95\xc8\x15\xef\xc4\x01N\xf1\xe1\xda\xedD\x04\xc0c\x85\xd1\x11\x92\xe9+\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00,\xa0l\xf0A'\xdb\x05D\x1c\xd83\x10zR\xbe\x85(h\x89\x0eC\x17\xe6\xa0*\xb4v\x83\xaau\x96B \xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-\xa0\xb7\xd0_\x87_\x14\x00'\xefQ\x18\xa2${\xbb\x84\u038f/\x0f\x11#b0\x85\xda\xf7\x96\f2\x9f_\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00.\xa0\xdfj\xf5\xf5\xbb\xdbk\xe9\uf2a6\x18\u4fc0s\x96\bg\x17\x1e)go\x8b(M\xeaj\b\xa8^\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00/\xa0\xb5\x8d\x90\x0f^\x18.\x01t\u0285\x18.\xec\x9f:\t\xf6\xa6\xc0\xdfcw\xa5\x10\xd7\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x009\xa01 
o\xa8\nP\xbbj\xbe)\bPX\xf1b\x12!*`\xee\xc8\xf0I\xfe\u02d2\xd8\xc8\xe0\xa8K\xc0\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:\xa0!5+\xfe\xcb\xed\xdd\u94c3\x9faL=\xac\n>\xe3uC\xf9\xb4\x12\xb1a\x99\xdc\x15\x8e#\xb5D\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00;\xa0a\x9e1'$\xbbm|1S\xed\x9d\xe7\x91\xd7d\xa3f\xb3\x89\xaf\x13\u014b\xf8\xa8\xd9\x04\x81\xa4ge\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00<\xa0|\xdd)\x86&\x82Pb\x8d\f\x10\xe3\x85\u014ca\x91\xe6\xfb\xe0Q\x91\xbc\xc0O\x13?,\xear\xc1\xc4\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00=\xa0\x84\x890\xbd{\xa8\xca\xc5Fa\a!\x13\xfb'\x88i\xe0{\xb8X\x7f\x919)37M\x01{\xcb\xe1\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>\xa0\x88i\xff,\"\xb2\x8c\xc1\x05\x10\u06452\x92\x803(\xbeO\xb0\xe8\x04\x95\u8ecd'\x1f[\x88\x966\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00?\xa0\xb5\xfe(\xe7\x9f\x1b\x85\x0f\x86X$l\u9da1\u7d1f\xc0m\xb7\x14>\x8f\xe0\xb4\xf2\xb0\xc5R:\\\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\xa0\x98^\x92\x9fp\xaf(\u043d\u0469\n\x80\x8f\x97\x7fY||w\x8cH\x9e\x98\u04fd\x89\x10\xd3\x1a\xc0\xf7\xe1\x94F#\x96\u677f\xa4U\xf4\x05\xf4\u0742\xf3\x01J\xf8\x00;r\x8b\xa5o\xa5\xb9\x90\x19\xa5\xc8\x00\x00\x00\xe0\x94I\xdf<\xca&p\xeb\rY\x11F\xb1cY\xfe3nGo)\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94K\xc6V\xb3M\xe28\x96\xfa`i\u0246/5[t\x04\x01\xaf\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe0\x94M\v\x04\xb4\x05\u01b6,|\xfc:\xe5GYt~,\vFb\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94MIl\xcc(\x05\x8b\x1dt\xb7\xa1\x95Af>!\x15O\x9c\x84\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe1\x94P\x9avg\xac\x8d\x03 
\xe3ar\xc1\x92Pja\x88\xaa\x84\xf6\x8b|\x13\xbcK,\x13\xfa<]\xc1\xaa\x19;\xc6\x03=\xfd\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94jz\xa9\xb8\x82\xd5\v\xb7\xbc]\xa1\xa2Dq\x9c\x99\xf1/\x06\xa3\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe1\x94l\xc99|;8s\x9d\xac\xbf\xaah\xea\xd5\xf5\xd7{\xa5\xf4U\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe1\x94s\xb2\xe0\xe5E\x10#\x9e\"\u0313o\vJm\xe1\xac\xf0\xab\u078bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94v,\xa6,\xa2T\x9a\xd8\x06v;:\xa1\xea1|B\x9b\xdb\u068a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94w\x8f_\x13\u013ex\xa3\xa4\xd7\x14\x1b\xcb&\x99\x97\x02\xf4\a\u03cbR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\x83M\xbfZ\x03\xe2\x9c%\xbcUE\x9c\u039c\x02\x1e\xeb\xe6v\xad\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\x87]%\xeeK\xc6\x04\xc7\x1b\xafb6\xa8H\x8f\"9\x9b\xedK\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\u150d\xf7\x87\x8d5q\xbe\xf5\xe5\xa7D\xf9b\x87\xc8\xd2\x03\x86\xd7Z\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\x9eAZ\to\xf7vP\u0712]\xeaTe\x85\xb4\xad\xb3\"\xb6\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xa0vke\xa4\xf7\xb1\xday\xa1\xafy\xaciTV\uf886D\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xa2\x9b\x14JD\x9eAJG,`\u01ea\xf1\xaa\xff\xe3)\x02\x1d\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xa5S\x95Vk\vT9[2F\xf9j\v\xdcK\x8aH=\xf9\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xac\x9b\xa7/\xb6\x1a\xa7\xc3\x1a\x95\xdf\n\x8bn\xbaoA\xef\x87^\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xb0I\x8c\x15\x87\x9d\xb2\xeeTq\u0512l_\xaa%\u0260\x96\x83\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xb0J\xef*=-\x86\xb0\x10\x06\xcc\xd43\x9a.\x94=\x9cd\x80\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\u1531\x9f\xb4\xc1\xf2\x802~`\xed7\xb1\xdcn\xe7u3S\x93\x14\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xbb\x97{.\xe8\xa1\x11\u05c8\xb3G}$ x\u04387\xe7+\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xc2\x1c\xb9\u025c1m\x18c\x14/}\xd8m\xd5Im\x81\xa8\u058a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xc4s\xd4\x12\xdcR\xe3I\x86\"\t\x92L\x89\x81\xb2\xeeB\ah\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94\u010e#\xc5\xf6\xe1\xea\v\xae\xf6S\a4\xed\u00d6\x8fy\xaf.\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe1\x94\xc6\xe2E\x99\x91\xbf\xe2|\xcam\x86r/5\xda#\xa1\xe4\u02d7\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xc9\xca+\xa9\xa2}\xe1\xdbX\x9d\x8c3\xab\x8e\u07e2\x11\x1b1\xfb\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xd1\xf7~L\x1cE\x18n\x86S\u0109\xf9\x0e\x00\x8asYr\x96\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe2\x94\u04d9NM2\x02\xdd#\xc8I}\x7fu\xbf\x16G\xd1\xda\x1b\xb1\x8c\x01\x9d\x97\x1eO\xe8@\x1et\x00\x00\x00\xe0\x94\u0726\u9d0e\xa8j\xeb\xfd\xf9\x92\x99I\x12@B)kn4\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xe0\x99\x1e\x84@A\xbeo\x11\xb9\x9d\xa5\xb1\x14\xb6\xbc\xf8N\xbdW\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94\u08bdBX\xd2v\x887\xba\xa2j(\xfeq\xdc\a\x9f\x84\u01cbR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xea(\xd0\x02\x04/\u0649\x8d\r\xb0\x16\xbe\x97X\xee\xaf\xe3\\\x1e\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xef\xa7EO\x11\x16\x80yu\xa4u\vFi^\x96xP\xde]\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94\xfb\xfdo\xa9\xf7:\u01a0X\xe0\x12Y\x03L(\x00\x1b\xef\x82G\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00" diff --git a/core/genesis_test.go b/core/genesis_test.go index 0e966bab0..6a0f2df08 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -30,18 +30,24 @@ import ( "github.com/ethereum/go-ethereum/ethdb" 
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) func TestInvalidCliqueConfig(t *testing.T) { block := DefaultGoerliGenesisBlock() block.ExtraData = []byte{} db := rawdb.NewMemoryDatabase() - if _, err := block.Commit(db, trie.NewDatabase(db)); err == nil { + if _, err := block.Commit(db, trie.NewDatabase(db, nil)); err == nil { t.Fatal("Expected error on invalid clique config") } } func TestSetupGenesis(t *testing.T) { + testSetupGenesis(t, rawdb.HashScheme) + testSetupGenesis(t, rawdb.PathScheme) +} + +func testSetupGenesis(t *testing.T, scheme string) { var ( customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50") customg = Genesis{ @@ -53,6 +59,7 @@ func TestSetupGenesis(t *testing.T) { oldcustomg = customg ) oldcustomg.Config = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(2)} + tests := []struct { name string fn func(ethdb.Database) (*params.ChainConfig, common.Hash, error) @@ -63,7 +70,7 @@ func TestSetupGenesis(t *testing.T) { { name: "genesis without ChainConfig", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, trie.NewDatabase(db), new(Genesis)) + return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis)) }, wantErr: errGenesisNoConfig, wantConfig: params.AllEthashProtocolChanges, @@ -71,7 +78,7 @@ func TestSetupGenesis(t *testing.T) { { name: "no block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, trie.NewDatabase(db), nil) + return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -79,8 +86,8 @@ func TestSetupGenesis(t *testing.T) { { name: "mainnet block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - DefaultGenesisBlock().MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), nil) + DefaultGenesisBlock().MustCommit(db, trie.NewDatabase(db, newDbConfig(scheme))) + return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -88,8 +95,9 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - customg.MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), nil) + tdb := trie.NewDatabase(db, newDbConfig(scheme)) + customg.Commit(db, tdb) + return SetupGenesisBlock(db, tdb, nil) }, wantHash: customghash, wantConfig: customg.Config, @@ -97,8 +105,9 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == goerli", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - customg.MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), DefaultGoerliGenesisBlock()) + tdb := trie.NewDatabase(db, newDbConfig(scheme)) + customg.Commit(db, tdb) + return SetupGenesisBlock(db, tdb, DefaultGoerliGenesisBlock()) }, wantErr: &GenesisMismatchError{Stored: customghash, New: params.GoerliGenesisHash}, wantHash: params.GoerliGenesisHash, @@ -107,8 +116,9 @@ func TestSetupGenesis(t *testing.T) { { name: "compatible config in DB", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - oldcustomg.MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), &customg) 
+ tdb := trie.NewDatabase(db, newDbConfig(scheme)) + oldcustomg.Commit(db, tdb) + return SetupGenesisBlock(db, tdb, &customg) }, wantHash: customghash, wantConfig: customg.Config, @@ -118,16 +128,17 @@ func TestSetupGenesis(t *testing.T) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { // Commit the 'old' genesis block with Homestead transition at #2. // Advance to block #4, past the homestead transition block of customg. - genesis := oldcustomg.MustCommit(db) + tdb := trie.NewDatabase(db, newDbConfig(scheme)) + oldcustomg.Commit(db, tdb) - bc, _ := NewBlockChain(db, nil, &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) + bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) defer bc.Stop() - blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil) + _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil) bc.InsertChain(blocks) // This should return a compatibility error. - return SetupGenesisBlock(db, trie.NewDatabase(db), &customg) + return SetupGenesisBlock(db, tdb, &customg) }, wantHash: customghash, wantConfig: customg.Config, @@ -172,11 +183,11 @@ func TestGenesisHashes(t *testing.T) { }{ {DefaultGenesisBlock(), params.MainnetGenesisHash}, {DefaultGoerliGenesisBlock(), params.GoerliGenesisHash}, - {DefaultRinkebyGenesisBlock(), params.RinkebyGenesisHash}, {DefaultSepoliaGenesisBlock(), params.SepoliaGenesisHash}, } { // Test via MustCommit - if have := c.genesis.MustCommit(rawdb.NewMemoryDatabase()).Hash(); have != c.want { + db := rawdb.NewMemoryDatabase() + if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want { t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) } // Test via ToBlock @@ -194,7 +205,7 @@ func TestGenesis_Commit(t *testing.T) { } db := rawdb.NewMemoryDatabase() - genesisBlock := genesis.MustCommit(db) + genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) if genesis.Difficulty != nil { t.Fatalf("assumption wrong") @@ -243,3 +254,10 @@ func TestReadWriteGenesisAlloc(t *testing.T) { } } } + +func newDbConfig(scheme string) *trie.Config { + if scheme == rawdb.HashScheme { + return trie.HashDefaults + } + return &trie.Config{PathDB: pathdb.Defaults} +} diff --git a/core/headerchain_test.go b/core/headerchain_test.go index 08d19f695..2c0323e6f 100644 --- a/core/headerchain_test.go +++ b/core/headerchain_test.go @@ -73,7 +73,7 @@ func TestHeaderInsertion(t *testing.T) { db = rawdb.NewMemoryDatabase() gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges} ) - gspec.Commit(db, trie.NewDatabase(db)) + gspec.Commit(db, trie.NewDatabase(db, nil)) hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false }) if err != nil { t.Fatal(err) diff --git a/core/mkalloc.go b/core/mkalloc.go index e4c2ec0d8..12c40c14f 100644 --- a/core/mkalloc.go +++ b/core/mkalloc.go @@ -30,32 +30,55 @@ import ( "fmt" "math/big" "os" - "sort" "strconv" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/exp/slices" ) -type allocItem struct{ Addr, Balance *big.Int } +type allocItem struct { + Addr *big.Int + Balance *big.Int + Misc *allocItemMisc `rlp:"optional"` +} -type allocList []allocItem +type allocItemMisc struct { + Nonce uint64 + Code []byte + Slots []allocItemStorageItem +} 
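The new Misc field on allocItem above is tagged `rlp:"optional"`. As I read the go-ethereum rlp package, a trailing optional field that is nil is omitted from the encoding altogether, so allocs without code, nonce, or storage keep the legacy two-field layout. A toy round-trip under that assumption (the item/inner types here are made up for illustration, not the real allocItem):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

type inner struct {
	Nonce uint64
}

type item struct {
	Addr    uint64
	Balance uint64
	Misc    *inner `rlp:"optional"` // trailing optional field
}

func main() {
	// With Misc == nil the optional field is omitted, so the encoding
	// matches the legacy layout that had no third field at all.
	plain, _ := rlp.EncodeToBytes(item{Addr: 1, Balance: 2})
	rich, _ := rlp.EncodeToBytes(item{Addr: 1, Balance: 2, Misc: &inner{Nonce: 7}})
	fmt.Printf("plain: %x\nrich:  %x\n", plain, rich)

	// Decoding the short form leaves Misc nil.
	var out item
	if err := rlp.DecodeBytes(plain, &out); err != nil {
		panic(err)
	}
	fmt.Println("decoded Misc:", out.Misc) // <nil>
}
```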
-func (a allocList) Len() int { return len(a) } -func (a allocList) Less(i, j int) bool { return a[i].Addr.Cmp(a[j].Addr) < 0 } -func (a allocList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +type allocItemStorageItem struct { + Key common.Hash + Val common.Hash +} -func makelist(g *core.Genesis) allocList { - a := make(allocList, 0, len(g.Alloc)) +func makelist(g *core.Genesis) []allocItem { + items := make([]allocItem, 0, len(g.Alloc)) for addr, account := range g.Alloc { + var misc *allocItemMisc if len(account.Storage) > 0 || len(account.Code) > 0 || account.Nonce != 0 { - panic(fmt.Sprintf("can't encode account %x", addr)) + misc = &allocItemMisc{ + Nonce: account.Nonce, + Code: account.Code, + Slots: make([]allocItemStorageItem, 0, len(account.Storage)), + } + for key, val := range account.Storage { + misc.Slots = append(misc.Slots, allocItemStorageItem{key, val}) + } + slices.SortFunc(misc.Slots, func(a, b allocItemStorageItem) int { + return a.Key.Cmp(b.Key) + }) } bigAddr := new(big.Int).SetBytes(addr.Bytes()) - a = append(a, allocItem{bigAddr, account.Balance}) + items = append(items, allocItem{bigAddr, account.Balance, misc}) } - sort.Sort(a) - return a + slices.SortFunc(items, func(a, b allocItem) int { + return a.Addr.Cmp(b.Addr) + }) + return items } func makealloc(g *core.Genesis) string { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index b479655b0..97401d283 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -22,15 +22,16 @@ import ( "errors" "fmt" "math/big" - "sort" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/exp/slices" ) // ReadCanonicalHash retrieves the hash assigned to a canonical block number. @@ -380,7 +381,7 @@ func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header return nil } header := new(types.Header) - if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + if err := rlp.DecodeBytes(data, header); err != nil { log.Error("Invalid block header RLP", "hash", hash, "err", err) return nil } @@ -497,7 +498,7 @@ func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body { return nil } body := new(types.Body) - if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + if err := rlp.DecodeBytes(data, body); err != nil { log.Error("Invalid block body RLP", "hash", hash, "err", err) return nil } @@ -543,7 +544,7 @@ func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int { return nil } td := new(big.Int) - if err := rlp.Decode(bytes.NewReader(data), td); err != nil { + if err := rlp.DecodeBytes(data, td); err != nil { log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err) return nil } @@ -637,13 +638,19 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64, return nil } header := ReadHeader(db, hash, number) + var baseFee *big.Int if header == nil { baseFee = big.NewInt(0) } else { baseFee = header.BaseFee } - if err := receipts.DeriveFields(config, hash, number, time, baseFee, body.Transactions); err != nil { + // Compute effective blob gas price. 
+ var blobGasPrice *big.Int + if header != nil && header.ExcessBlobGas != nil { + blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas) + } + if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil { log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) return nil } @@ -724,7 +731,7 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t // ReadLogs retrieves the logs for all transactions in a block. In case // receipts is not found, a nil is returned. // Note: ReadLogs does not derive unstored log fields. -func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log { +func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log { // Retrieve the flattened receipt slice data := ReadReceiptsRLP(db, hash, number) if len(data) == 0 { @@ -836,23 +843,13 @@ type badBlock struct { Body *types.Body } -// badBlockList implements the sort interface to allow sorting a list of -// bad blocks by their number in the reverse order. -type badBlockList []*badBlock - -func (s badBlockList) Len() int { return len(s) } -func (s badBlockList) Less(i, j int) bool { - return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64() -} -func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // ReadBadBlock retrieves the bad block with the corresponding block hash. func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block { blob, err := db.Get(badBlockKey) if err != nil { return nil } - var badBlocks badBlockList + var badBlocks []*badBlock if err := rlp.DecodeBytes(blob, &badBlocks); err != nil { return nil } @@ -871,7 +868,7 @@ func ReadAllBadBlocks(db ethdb.Reader) []*types.Block { if err != nil { return nil } - var badBlocks badBlockList + var badBlocks []*badBlock if err := rlp.DecodeBytes(blob, &badBlocks); err != nil { return nil } @@ -889,7 +886,7 @@ func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) { if err != nil { log.Warn("Failed to load old bad blocks", "error", err) } - var badBlocks badBlockList + var badBlocks []*badBlock if len(blob) > 0 { if err := rlp.DecodeBytes(blob, &badBlocks); err != nil { log.Crit("Failed to decode old bad blocks", "error", err) @@ -905,7 +902,10 @@ func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) { Header: block.Header(), Body: block.Body(), }) - sort.Sort(sort.Reverse(badBlocks)) + slices.SortFunc(badBlocks, func(a, b *badBlock) int { + // Note: sorting in descending number order. 
+ return -a.Header.Number.Cmp(b.Header.Number) + }) if len(badBlocks) > badBlockToKeep { badBlocks = badBlocks[:badBlockToKeep] } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 32e38a81c..a7ceb7299 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -85,7 +85,7 @@ func TestBodyStorage(t *testing.T) { WriteBody(db, hash, 0, body) if entry := ReadBody(db, hash, 0); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) } if entry := ReadBodyRLP(db, hash, 0); entry == nil { @@ -139,7 +139,7 @@ func TestBlockStorage(t *testing.T) { } if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) } // Delete the block and verify the execution @@ -435,12 +435,12 @@ func checkReceiptsRLP(have, want types.Receipts) error { func TestAncientStorage(t *testing.T) { // Freezer style fast import the chain. frdir := t.TempDir() - db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) if err != nil { t.Fatalf("failed to create database with ancient backend") } defer db.Close() + // Create a test block block := types.NewBlockWithHeader(&types.Header{ Number: big.NewInt(0), @@ -736,7 +736,7 @@ func TestReadLogs(t *testing.T) { // Insert the receipt slice into the database and check presence WriteReceipts(db, hash, 0, receipts) - logs := ReadLogs(db, hash, 0, params.TestChainConfig) + logs := ReadLogs(db, hash, 0) if len(logs) == 0 { t.Fatalf("no logs returned") } diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go index c5447b16d..124389ba7 100644 --- a/core/rawdb/accessors_indexes_test.go +++ b/core/rawdb/accessors_indexes_test.go @@ -18,42 +18,18 @@ package rawdb import ( "bytes" - "hash" "math/big" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/internal/blocktest" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) -// testHasher is the helper tool for transaction/receipt list hashing. -// The original hasher is trie, in order to get rid of import cycle, -// use the testing hasher instead. 
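The alloc and bad-block sorts above both swap sort.Interface boilerplate for slices.SortFunc with a three-way comparator; negating a Cmp result flips the order to descending, as the bad-block list needs. A standalone sketch of the pattern:

```go
package main

import (
	"fmt"
	"math/big"

	"golang.org/x/exp/slices"
)

type entry struct{ Number *big.Int }

func main() {
	xs := []entry{{big.NewInt(2)}, {big.NewInt(9)}, {big.NewInt(5)}}

	// Ascending: the comparator returns <0, 0, >0, like (*big.Int).Cmp.
	slices.SortFunc(xs, func(a, b entry) int { return a.Number.Cmp(b.Number) })
	fmt.Println(xs[0].Number, xs[1].Number, xs[2].Number) // 2 5 9

	// Descending: negate the comparator, mirroring the bad-block sort.
	slices.SortFunc(xs, func(a, b entry) int { return -a.Number.Cmp(b.Number) })
	fmt.Println(xs[0].Number, xs[1].Number, xs[2].Number) // 9 5 2
}
```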
-type testHasher struct { - hasher hash.Hash -} - -func newHasher() *testHasher { - return &testHasher{hasher: sha3.NewLegacyKeccak256()} -} - -func (h *testHasher) Reset() { - h.hasher.Reset() -} - -func (h *testHasher) Update(key, val []byte) error { - h.hasher.Write(key) - h.hasher.Write(val) - return nil -} - -func (h *testHasher) Hash() common.Hash { - return common.BytesToHash(h.hasher.Sum(nil)) -} +var newTestHasher = blocktest.NewHasher // Tests that positional lookup metadata can be stored and retrieved. func TestLookupStorage(t *testing.T) { @@ -100,7 +76,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { @@ -142,7 +118,7 @@ func TestDeleteBloomBits(t *testing.T) { for i := uint(0); i < 2; i++ { for s := uint64(0); s < 2; s++ { WriteBloomBits(db, i, s, params.MainnetGenesisHash, []byte{0x01, 0x02}) - WriteBloomBits(db, i, s, params.RinkebyGenesisHash, []byte{0x01, 0x02}) + WriteBloomBits(db, i, s, params.SepoliaGenesisHash, []byte{0x01, 0x02}) } } check := func(bit uint, section uint64, head common.Hash, exist bool) { @@ -156,25 +132,25 @@ func TestDeleteBloomBits(t *testing.T) { } // Check the existence of written data. check(0, 0, params.MainnetGenesisHash, true) - check(0, 0, params.RinkebyGenesisHash, true) + check(0, 0, params.SepoliaGenesisHash, true) // Check the existence of deleted data. DeleteBloombits(db, 0, 0, 1) check(0, 0, params.MainnetGenesisHash, false) - check(0, 0, params.RinkebyGenesisHash, false) + check(0, 0, params.SepoliaGenesisHash, false) check(0, 1, params.MainnetGenesisHash, true) - check(0, 1, params.RinkebyGenesisHash, true) + check(0, 1, params.SepoliaGenesisHash, true) // Check the existence of deleted data. DeleteBloombits(db, 0, 0, 2) check(0, 0, params.MainnetGenesisHash, false) - check(0, 0, params.RinkebyGenesisHash, false) + check(0, 0, params.SepoliaGenesisHash, false) check(0, 1, params.MainnetGenesisHash, false) - check(0, 1, params.RinkebyGenesisHash, false) + check(0, 1, params.SepoliaGenesisHash, false) // Bit1 shouldn't be affect. 
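newTestHasher above aliases the helper that moved into internal/blocktest. Reconstructed from the deleted code, a standalone equivalent of that test hasher looks like this: plain Keccak256 over key/value pairs, standing in for the real trie-based list hasher to avoid an import cycle.

```go
package blocktest

import (
	"hash"

	"github.com/ethereum/go-ethereum/common"
	"golang.org/x/crypto/sha3"
)

// testHasher hashes key/value pairs with plain Keccak256; tests use it
// in place of the trie-backed list hasher.
type testHasher struct {
	hasher hash.Hash
}

// NewHasher returns a fresh Keccak256-backed test hasher.
func NewHasher() *testHasher {
	return &testHasher{hasher: sha3.NewLegacyKeccak256()}
}

func (h *testHasher) Reset() {
	h.hasher.Reset()
}

func (h *testHasher) Update(key, val []byte) error {
	h.hasher.Write(key)
	h.hasher.Write(val)
	return nil
}

func (h *testHasher) Hash() common.Hash {
	return common.BytesToHash(h.hasher.Sum(nil))
}
```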
check(1, 0, params.MainnetGenesisHash, true) - check(1, 0, params.RinkebyGenesisHash, true) + check(1, 0, params.SepoliaGenesisHash, true) check(1, 1, params.MainnetGenesisHash, true) - check(1, 1, params.RinkebyGenesisHash, true) + check(1, 1, params.SepoliaGenesisHash, true) } diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 2ff29d1ad..859566f72 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -111,10 +111,10 @@ const crashesToKeep = 10 func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) { var uncleanShutdowns crashList // Read old data - if data, err := db.Get(uncleanShutdownKey); err != nil { - log.Warn("Error reading unclean shutdown markers", "error", err) - } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil { - return nil, 0, err + if data, err := db.Get(uncleanShutdownKey); err == nil { + if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil { + return nil, 0, err + } } var discarded = uncleanShutdowns.Discarded var previous = make([]uint64, len(uncleanShutdowns.Recent)) diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 39900df23..9ce58e7d2 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -17,6 +17,8 @@ package rawdb import ( + "encoding/binary" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -92,3 +94,173 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { log.Crit("Failed to delete contract code", "err", err) } } + +// ReadStateID retrieves the state id with the provided state root. +func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 { + data, err := db.Get(stateIDKey(root)) + if err != nil || len(data) == 0 { + return nil + } + number := binary.BigEndian.Uint64(data) + return &number +} + +// WriteStateID writes the provided state lookup to database. +func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) { + var buff [8]byte + binary.BigEndian.PutUint64(buff[:], id) + if err := db.Put(stateIDKey(root), buff[:]); err != nil { + log.Crit("Failed to store state ID", "err", err) + } +} + +// DeleteStateID deletes the specified state lookup from the database. +func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) { + if err := db.Delete(stateIDKey(root)); err != nil { + log.Crit("Failed to delete state ID", "err", err) + } +} + +// ReadPersistentStateID retrieves the id of the persistent state from the database. +func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 { + data, _ := db.Get(persistentStateIDKey) + if len(data) != 8 { + return 0 + } + return binary.BigEndian.Uint64(data) +} + +// WritePersistentStateID stores the id of the persistent state into database. +func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) { + if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil { + log.Crit("Failed to store the persistent state ID", "err", err) + } +} + +// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at +// the last shutdown. +func ReadTrieJournal(db ethdb.KeyValueReader) []byte { + data, _ := db.Get(trieJournalKey) + return data +} + +// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at +// shutdown. 
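The state-id accessors above commit to a fixed 8-byte big-endian encoding, which is what lets ReadPersistentStateID reject corrupt entries by length alone. A quick stdlib round-trip of that convention:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode a state id exactly as WriteStateID does: 8 bytes, big endian.
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], 1337)
	fmt.Printf("encoded: %x\n", buf) // 0000000000000539

	// Decode it back; the fixed width means any blob whose length is
	// not exactly 8 can be discarded without parsing.
	fmt.Println("decoded:", binary.BigEndian.Uint64(buf[:]))
}
```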
+func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) { + if err := db.Put(trieJournalKey, journal); err != nil { + log.Crit("Failed to store tries journal", "err", err) + } +} + +// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at +// the last shutdown. +func DeleteTrieJournal(db ethdb.KeyValueWriter) { + if err := db.Delete(trieJournalKey); err != nil { + log.Crit("Failed to remove tries journal", "err", err) + } +} + +// ReadStateHistoryMeta retrieves the metadata corresponding to the specified +// state history. The freezer position is the id minus one, since state history +// ids start at one (id zero is reserved for the initial state). +func ReadStateHistoryMeta(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryMeta, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateHistoryMetaList retrieves a batch of meta objects with the specified +// start position and count. The freezer position is the id minus one, since +// state history ids start at one (id zero is reserved for the initial state). +func ReadStateHistoryMetaList(db ethdb.AncientReaderOp, start uint64, count uint64) ([][]byte, error) { + return db.AncientRange(stateHistoryMeta, start-1, count, 0) +} + +// ReadStateAccountIndex retrieves the account index of the specified state +// history. The freezer position is the id minus one, since state history ids +// start at one (id zero is reserved for the initial state). +func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryAccountIndex, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateStorageIndex retrieves the storage index of the specified state +// history. The freezer position is the id minus one, since state history ids +// start at one (id zero is reserved for the initial state). +func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryStorageIndex, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateAccountHistory retrieves the account data of the specified state +// history. The freezer position is the id minus one, since state history ids +// start at one (id zero is reserved for the initial state). +func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryAccountData, id-1) + if err != nil { + return nil + } + return blob +} + +// ReadStateStorageHistory retrieves the storage data of the specified state +// history. The freezer position is the id minus one, since state history ids +// start at one (id zero is reserved for the initial state). +func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte { + blob, err := db.Ancient(stateHistoryStorageData, id-1) + if err != nil { + return nil + } + return blob +}
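All the readers above share one indexing convention; a tiny sketch to make it explicit (the helper name is mine, not part of the change):

```go
package main

import "fmt"

// freezerIndex maps a state-history id to its freezer table position:
// id 0 denotes the initial state and is never stored, so the first
// stored history (id 1) lives at index 0.
func freezerIndex(id uint64) uint64 {
	return id - 1
}

func main() {
	for _, id := range []uint64{1, 2, 42} {
		fmt.Printf("state history id %d -> freezer index %d\n", id, freezerIndex(id))
	}
}
```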
+// ReadStateHistory retrieves the state history from the database with the +// provided id. The freezer position is the id minus one, since state history +// ids start at one (id zero is reserved for the initial state). +func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []byte, []byte, []byte, error) { + meta, err := db.Ancient(stateHistoryMeta, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + accountIndex, err := db.Ancient(stateHistoryAccountIndex, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + storageIndex, err := db.Ancient(stateHistoryStorageIndex, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + accountData, err := db.Ancient(stateHistoryAccountData, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + storageData, err := db.Ancient(stateHistoryStorageData, id-1) + if err != nil { + return nil, nil, nil, nil, nil, err + } + return meta, accountIndex, storageIndex, accountData, storageData, nil +} + +// WriteStateHistory writes the provided state history to the database. The +// freezer position is the id minus one, since state history ids start at one +// (id zero is reserved for the initial state). +func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) { + db.ModifyAncients(func(op ethdb.AncientWriteOp) error { + op.AppendRaw(stateHistoryMeta, id-1, meta) + op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex) + op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex) + op.AppendRaw(stateHistoryAccountData, id-1, accounts) + op.AppendRaw(stateHistoryStorageData, id-1, storages) + return nil + }) +} diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go index e87ad43c3..7a7374e16 100644 --- a/core/rawdb/accessors_sync.go +++ b/core/rawdb/accessors_sync.go @@ -17,8 +17,6 @@ package rawdb import ( - "bytes" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -53,7 +51,7 @@ func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header { return nil } header := new(types.Header) - if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + if err := rlp.DecodeBytes(data, header); err != nil { log.Error("Invalid skeleton header RLP", "number", number, "err", err) return nil } diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index e24021302..f5c2f8899 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -36,7 +36,7 @@ import ( // // Now this scheme is still kept for backward compatibility, and it will be used // for archive node and some other tries(e.g. light trie). -const HashScheme = "hashScheme" +const HashScheme = "hash" // PathScheme is the new path-based state scheme with which trie nodes are stored // in the disk with node path as the database key. This scheme will only store one @@ -44,23 +44,25 @@ const HashScheme = "hashScheme" // is native. At the same time, this scheme will put adjacent trie nodes in the same // area of the disk with good data locality property. But this scheme needs to rely // on extra state diffs to survive deep reorg. -const PathScheme = "pathScheme" +const PathScheme = "path" -// nodeHasher used to derive the hash of trie node. -type nodeHasher struct{ sha crypto.KeccakState } +// hasher is used to compute the Keccak-256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState } var hasherPool = sync.Pool{ - New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, + New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, } -func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) } -func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) } +func newHasher() *hasher { + return hasherPool.Get().(*hasher) +} -func (h *nodeHasher) hashData(data []byte) (n common.Hash) { - h.sha.Reset() - h.sha.Write(data) - h.sha.Read(n[:]) - return n +func (h *hasher) hash(data []byte) common.Hash { + return crypto.HashData(h.sha, data) +} + +func (h *hasher) release() { + hasherPool.Put(h) } // ReadAccountTrieNode retrieves the account trie node and the associated node @@ -70,9 +72,9 @@ func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.H if err != nil { return nil, common.Hash{} } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return data, hasher.hashData(data) + h := newHasher() + defer h.release() + return data, h.hash(data) } // HasAccountTrieNode checks the account trie node presence with the specified @@ -82,9 +84,9 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) if err != nil { return false } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return hasher.hashData(data) == hash + h := newHasher() + defer h.release() + return h.hash(data) == hash } // WriteAccountTrieNode writes the provided account trie node into database. @@ -108,9 +110,9 @@ func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path if err != nil { return nil, common.Hash{} } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return data, hasher.hashData(data) + h := newHasher() + defer h.release() + return data, h.hash(data) } // HasStorageTrieNode checks the storage trie node presence with the provided @@ -120,9 +122,9 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [ if err != nil { return false } - hasher := newNodeHasher() - defer returnHasherToPool(hasher) - return hasher.hashData(data) == hash + h := newHasher() + defer h.release() + return h.hash(data) == hash } // WriteStorageTrieNode writes the provided storage trie node into database. @@ -261,3 +263,25 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has panic(fmt.Sprintf("Unknown scheme %v", scheme)) } } + +// ReadStateScheme reads the state scheme of persistent state, or none +// if the state is not present in database. +func ReadStateScheme(db ethdb.Reader) string { + // Check if state in path-based scheme is present + blob, _ := ReadAccountTrieNode(db, nil) + if len(blob) != 0 { + return PathScheme + } + // In a hash-based scheme, the genesis state is consistently stored + // on the disk. To assess the scheme of the persistent state, it + // suffices to inspect the scheme of the genesis state. + header := ReadHeader(db, ReadCanonicalHash(db, 0), 0) + if header == nil { + return "" // empty datadir + } + blob = ReadLegacyTrieNode(db, header.Root) + if len(blob) == 0 { + return "" // no state in disk + } + return HashScheme +} diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index b0428c5f5..6f409fff1 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -16,6 +16,8 @@ package rawdb +import "path/filepath" + // The list of table names of chain freezer. 
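The hasher/release pair above recycles Keccak states through a sync.Pool rather than allocating one per lookup. A self-contained sketch of the same pattern, using the plain hash.Hash interface instead of crypto.KeccakState:

```go
package main

import (
	"fmt"
	"hash"
	"sync"

	"golang.org/x/crypto/sha3"
)

// hasherPool recycles Keccak256 states across calls; trie-node lookups
// are frequent enough that per-call allocation is worth avoiding.
var hasherPool = sync.Pool{
	New: func() interface{} { return sha3.NewLegacyKeccak256() },
}

func keccak256(data []byte) []byte {
	h := hasherPool.Get().(hash.Hash)
	defer hasherPool.Put(h)
	h.Reset()
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", keccak256([]byte("trie node")))
}
```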
const ( // ChainFreezerHeaderTable indicates the name of the freezer header table. @@ -44,10 +46,36 @@ var chainFreezerNoSnappy = map[string]bool{ ChainFreezerDifficultyTable: true, } +const ( + // stateHistoryTableSize defines the maximum size of freezer data files. + stateHistoryTableSize = 2 * 1000 * 1000 * 1000 + + // The names of the tables composing the state history freezer. + stateHistoryMeta = "history.meta" + stateHistoryAccountIndex = "account.index" + stateHistoryStorageIndex = "storage.index" + stateHistoryAccountData = "account.data" + stateHistoryStorageData = "storage.data" +) + +var stateFreezerNoSnappy = map[string]bool{ + stateHistoryMeta: true, + stateHistoryAccountIndex: false, + stateHistoryStorageIndex: false, + stateHistoryAccountData: false, + stateHistoryStorageData: false, +} + // The list of identifiers of ancient stores. var ( chainFreezerName = "chain" // the folder name of chain segment ancient store. + stateFreezerName = "state" // the folder name of state history ancient store. ) // freezers the collections of all builtin freezers. -var freezers = []string{chainFreezerName} +var freezers = []string{chainFreezerName, stateFreezerName} + +// NewStateFreezer initializes the freezer for state history. +func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) { + return NewResettableFreezer(filepath.Join(ancientDir, stateFreezerName), "eth/db/state", readOnly, stateHistoryTableSize, stateFreezerNoSnappy) +} diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 363a911ae..96bd9ee40 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -50,36 +50,58 @@ func (info *freezerInfo) size() common.StorageSize { return total } +func inspect(name string, order map[string]bool, reader ethdb.AncientReader) (freezerInfo, error) { + info := freezerInfo{name: name} + for t := range order { + size, err := reader.AncientSize(t) + if err != nil { + return freezerInfo{}, err + } + info.sizes = append(info.sizes, tableSize{name: t, size: common.StorageSize(size)}) + } + // Retrieve the number of last stored item + ancients, err := reader.Ancients() + if err != nil { + return freezerInfo{}, err + } + info.head = ancients - 1 + + // Retrieve the number of first stored item + tail, err := reader.Tail() + if err != nil { + return freezerInfo{}, err + } + info.tail = tail + return info, nil +} + // inspectFreezers inspects all freezers registered in the system. func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { var infos []freezerInfo for _, freezer := range freezers { switch freezer { case chainFreezerName: - // Chain ancient store is a bit special. It's always opened along - // with the key-value store, inspect the chain store directly. - info := freezerInfo{name: freezer} - // Retrieve storage size of every contained table.
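A hypothetical read-only use of the new state freezer, in the style of inspection tooling; the directory path is illustrative, since geth derives the real one from the node's datadir:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Open the "state" ancient store read-only; the path is made up,
	// geth resolves the real one relative to the chaindata directory.
	f, err := rawdb.NewStateFreezer("/tmp/example/chaindata/ancient", true)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	items, err := f.Ancients() // number of stored state histories
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state histories:", items)
}
```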
- for table := range chainFreezerNoSnappy { - size, err := db.AncientSize(table) - if err != nil { - return nil, err - } - info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)}) - } - // Retrieve the number of last stored item - ancients, err := db.Ancients() + info, err := inspect(chainFreezerName, chainFreezerNoSnappy, db) if err != nil { return nil, err } - info.head = ancients - 1 + infos = append(infos, info) - // Retrieve the number of first stored item - tail, err := db.Tail() + case stateFreezerName: + datadir, err := db.AncientDatadir() + if err != nil { + return nil, err + } + f, err := NewStateFreezer(datadir, true) + if err != nil { + return nil, err + } + defer f.Close() + + info, err := inspect(stateFreezerName, stateFreezerNoSnappy, f) if err != nil { return nil, err } - info.tail = tail infos = append(infos, info) default: diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index e1f515975..9580cd92a 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -34,7 +34,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -60,7 +60,7 @@ func TestChainIterator(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -111,7 +111,7 @@ func TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -138,7 +138,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e864bcb2e..7a7845638 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -123,13 +123,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e } // TruncateHead returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) TruncateHead(items uint64) error { - return errNotSupported +func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) { + return 0, errNotSupported } // TruncateTail returns an error as we don't have a backing chain freezer. 
-func (db *nofreezedb) TruncateTail(items uint64) error { - return errNotSupported +func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) { + return 0, errNotSupported } // Sync returns an error as we don't have a backing chain freezer. @@ -326,10 +326,10 @@ const ( dbLeveldb = "leveldb" ) -// hasPreexistingDb checks the given data directory whether a database is already +// PreexistingDatabase checks the given data directory whether a database is already // instantiated at that location, and if so, returns the type of database (or the // empty string). -func hasPreexistingDb(path string) string { +func PreexistingDatabase(path string) string { if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { return "" // No pre-existing db } @@ -352,6 +352,9 @@ type OpenOptions struct { Cache int // the capacity(in megabytes) of the data caching Handles int // number of files to be open simultaneously ReadOnly bool + // Ephemeral means that filesystem sync operations should be avoided: data integrity in the face of + // a crash is not important. This option should typically be used in tests. + Ephemeral bool } // openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble. @@ -367,14 +370,14 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { } // Retrieve any pre-existing database's type and use that or the requested one // as long as there's no conflict between the two types - existingDb := hasPreexistingDb(o.Directory) + existingDb := PreexistingDatabase(o.Directory) if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) } if o.Type == dbPebble || existingDb == dbPebble { if PebbleEnabled { log.Info("Using pebble as the backing database") - return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) } else { return nil, errors.New("db.engine 'pebble' not supported on this platform") } @@ -387,7 +390,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { // on supported platforms and LevelDB on anything else. 
if PebbleEnabled { log.Info("Defaulting to pebble as the backing database") - return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) } else { log.Info("Defaulting to leveldb as the backing database") return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) @@ -463,7 +466,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { tds stat numHashPairings stat hashNumPairings stat - tries stat + legacyTries stat + stateLookups stat + accountTries stat + storageTries stat codes stat txLookups stat accountSnaps stat @@ -504,8 +510,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { numHashPairings.Add(size) case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength): hashNumPairings.Add(size) - case len(key) == common.HashLength: - tries.Add(size) + case IsLegacyTrieNode(key, it.Value()): + legacyTries.Add(size) + case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength: + stateLookups.Add(size) + case IsAccountTrieNode(key): + accountTries.Add(size) + case IsStorageTrieNode(key): + storageTries.Add(size) case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength: codes.Add(size) case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength): @@ -543,6 +555,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey, + persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, } { if bytes.Equal(key, meta) { metadata.Add(size) @@ -571,7 +584,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()}, {"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()}, {"Key-Value store", "Contract codes", codes.Size(), codes.Count()}, - {"Key-Value store", "Trie nodes", tries.Size(), tries.Count()}, + {"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()}, + {"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()}, + {"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()}, + {"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()}, {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()}, {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()}, {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()}, diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go index 73bfeb208..1593e89bf 100644 --- a/core/rawdb/databases_64bit.go +++ b/core/rawdb/databases_64bit.go @@ -28,8 +28,8 @@ const PebbleEnabled = true // NewPebbleDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. 
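The Ephemeral option threads through to the pebble backend so tests can skip fsyncs. A sketch of how a test helper might request such a store, assuming the exported rawdb.Open entry point over OpenOptions:

```go
package rawdbtest

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// newEphemeralDB opens a throwaway key-value store for a test: no
// freezer attached, and no fsync thanks to the Ephemeral option.
func newEphemeralDB(t *testing.T) ethdb.Database {
	db, err := rawdb.Open(rawdb.OpenOptions{
		Type:      "pebble",    // or "" to reuse/auto-pick the engine
		Directory: t.TempDir(), // discarded when the test ends
		Cache:     16,          // megabytes
		Handles:   16,
		Ephemeral: true, // crash durability is irrelevant here
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { db.Close() })
	return db
}
```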
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { - db, err := pebble.New(file, cache, handles, namespace, readonly) +func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) { + db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral) if err != nil { return nil, err } diff --git a/core/rawdb/databases_non64bit.go b/core/rawdb/databases_non64bit.go index 1f10c2f52..fb0777a5e 100644 --- a/core/rawdb/databases_non64bit.go +++ b/core/rawdb/databases_non64bit.go @@ -29,6 +29,6 @@ const PebbleEnabled = false // NewPebbleDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. -func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { +func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) { return nil, errors.New("pebble is not supported on this platform") } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 48a7ba6ed..eb10ef54b 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -197,9 +197,10 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) { // AncientRange retrieves multiple items in sequence, starting from the index 'start'. // It will return -// - at most 'max' items, -// - at least 1 item (even if exceeding the maxByteSize), but will otherwise -// return as many items as fit into maxByteSize. +// - at most 'count' items, +// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), +// but will otherwise return as many items as fit into maxByteSize. +// - if maxBytes is not specified, 'count' items will be returned if they are present. func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { if table := f.tables[kind]; table != nil { return table.RetrieveItems(start, count, maxBytes) @@ -278,43 +279,46 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize } // TruncateHead discards any recent data above the provided threshold number. -func (f *Freezer) TruncateHead(items uint64) error { +// It returns the previous head number. +func (f *Freezer) TruncateHead(items uint64) (uint64, error) { if f.readonly { - return errReadOnly + return 0, errReadOnly } f.writeLock.Lock() defer f.writeLock.Unlock() - if f.frozen.Load() <= items { - return nil + oitems := f.frozen.Load() + if oitems <= items { + return oitems, nil } for _, table := range f.tables { if err := table.truncateHead(items); err != nil { - return err + return 0, err } } f.frozen.Store(items) - return nil + return oitems, nil } // TruncateTail discards any recent data below the provided threshold number. -func (f *Freezer) TruncateTail(tail uint64) error { +func (f *Freezer) TruncateTail(tail uint64) (uint64, error) { if f.readonly { - return errReadOnly + return 0, errReadOnly } f.writeLock.Lock() defer f.writeLock.Unlock() - if f.tail.Load() >= tail { - return nil + old := f.tail.Load() + if old >= tail { + return old, nil } for _, table := range f.tables { if err := table.truncateTail(tail); err != nil { - return err + return 0, err } } f.tail.Store(tail) - return nil + return old, nil } // Sync flushes all data tables to disk. 
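Returning the previous head from TruncateHead lets callers see what a truncation actually removed without re-reading the counter under a race. A minimal sketch of the new call shape, written against a narrowed-down interface rather than the concrete Freezer:

```go
package ancient

import "fmt"

// truncator models the new contract: TruncateHead reports the previous
// head count so callers can tell how many items were actually dropped.
type truncator interface {
	TruncateHead(items uint64) (uint64, error)
}

func truncateAndReport(f truncator, keep uint64) error {
	oitems, err := f.TruncateHead(keep)
	if err != nil {
		return err
	}
	if oitems > keep {
		fmt.Printf("dropped %d ancient items\n", oitems-keep)
	}
	return nil
}
```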
@@ -438,7 +442,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error { // TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration // process assumes no deletion at tail and needs to be modified to account for that. if table.itemOffset.Load() > 0 || table.itemHidden.Load() > 0 { - return fmt.Errorf("migration not supported for tail-deleted freezers") + return errors.New("migration not supported for tail-deleted freezers") } ancientsPath := filepath.Dir(table.index.Name()) // Set up new dir for the migrated table, the content of which diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go index f9a56c6de..0a3892bcd 100644 --- a/core/rawdb/freezer_resettable.go +++ b/core/rawdb/freezer_resettable.go @@ -170,7 +170,8 @@ func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) } // TruncateHead discards any recent data above the provided threshold number. -func (f *ResettableFreezer) TruncateHead(items uint64) error { +// It returns the previous head number. +func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() @@ -178,7 +179,8 @@ func (f *ResettableFreezer) TruncateHead(items uint64) error { } // TruncateTail discards any recent data below the provided threshold number. -func (f *ResettableFreezer) TruncateTail(tail uint64) error { +// It returns the previous value +func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) { f.lock.RLock() defer f.lock.RUnlock() diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index 928b37d70..fc6316c95 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -712,7 +712,7 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e if !t.noCompression { decompressedSize, _ = snappy.DecodedLen(item) } - if i > 0 && uint64(outputSize+decompressedSize) > maxBytes { + if i > 0 && maxBytes != 0 && uint64(outputSize+decompressedSize) > maxBytes { break } if !t.noCompression { @@ -730,8 +730,10 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e } // retrieveItems reads up to 'count' items from the table. It reads at least -// one item, but otherwise avoids reading more than maxBytes bytes. -// It returns the (potentially compressed) data, and the sizes. +// one item, but otherwise avoids reading more than maxBytes bytes. Freezer +// will ignore the size limitation and continuously allocate memory to store +// data if maxBytes is 0. It returns the (potentially compressed) data, and +// the sizes. func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []int, error) { t.lock.RLock() defer t.lock.RUnlock() @@ -752,25 +754,22 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i if start+count > items { count = items - start } - var ( - output = make([]byte, maxBytes) // Buffer to read data into - outputSize int // Used size of that buffer - ) + var output []byte // Buffer to read data into + if maxBytes != 0 { + output = make([]byte, 0, maxBytes) + } else { + output = make([]byte, 0, 1024) // initial buffer cap + } // readData is a helper method to read a single data item from disk. readData := func(fileId, start uint32, length int) error { - // In case a small limit is used, and the elements are large, may need to - // realloc the read-buffer when reading the first (and only) item. 
- if len(output) < length { - output = make([]byte, length) - } + output = grow(output, length) dataFile, exist := t.files[fileId] if !exist { return fmt.Errorf("missing data file %d", fileId) } - if _, err := dataFile.ReadAt(output[outputSize:outputSize+length], int64(start)); err != nil { + if _, err := dataFile.ReadAt(output[len(output)-length:], int64(start)); err != nil { return err } - outputSize += length return nil } // Read all the indexes in one go @@ -801,7 +800,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i } readStart = 0 } - if i > 0 && uint64(totalSize+size) > maxBytes { + if i > 0 && uint64(totalSize+size) > maxBytes && maxBytes != 0 { // About to break out due to byte limit being exceeded. We don't // read this last item, but we need to do the deferred reads now. if unreadSize > 0 { @@ -815,7 +814,7 @@ unreadSize += size totalSize += size sizes = append(sizes, size) - if i == len(indices)-2 || uint64(totalSize) > maxBytes { + if i == len(indices)-2 || (uint64(totalSize) > maxBytes && maxBytes != 0) { // Last item, need to do the read now if err := readData(secondIndex.filenum, readStart, unreadSize); err != nil { return nil, nil, err @@ -826,7 +825,7 @@ // Update metrics. t.readMeter.Mark(int64(totalSize)) - return output[:outputSize], sizes, nil + return output, sizes, nil } // has returns an indicator whether the specified number data is still accessible diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go index 5c4cc40ed..939d09394 100644 --- a/core/rawdb/freezer_table_test.go +++ b/core/rawdb/freezer_table_test.go @@ -994,6 +994,52 @@ func TestSequentialReadByteLimit(t *testing.T) { } } +// TestSequentialReadNoByteLimit tests batch reads when maxBytes is not specified. +// The freezer should return the requested items regardless of the size limitation. +func TestSequentialReadNoByteLimit(t *testing.T) { + rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge() + fname := fmt.Sprintf("batchread-3-%d", rand.Uint64()) + { // Fill table + f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false) + if err != nil { + t.Fatal(err) + } + // Write 10 bytes 30 times, + // Splitting it at every 100 bytes (10 items) + writeChunks(t, f, 30, 10) + f.Close() + } + for i, tc := range []struct { + items uint64 + want int + }{ + {1, 1}, + {30, 30}, + {31, 30}, + } { + { + f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false) + if err != nil { + t.Fatal(err) + } + items, err := f.RetrieveItems(0, tc.items, 0) + if err != nil { + t.Fatal(err) + } + if have, want := len(items), tc.want; have != want { + t.Fatalf("test %d: want %d items, have %d ", i, want, have) + } + for ii, have := range items { + want := getChunk(10, ii) + if !bytes.Equal(want, have) { + t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want) + } + } + f.Close() + } + } +} + func TestFreezerReadonly(t *testing.T) { tmpdir := os.TempDir() // Case 1: Check it fails on non-existent file. diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index c38c9b1a8..6183d2274 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -198,7 +198,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) { for i := 0; i < 10; i++ { // First reset and write 100 items.
- if err := f.TruncateHead(0); err != nil { + if _, err := f.TruncateHead(0); err != nil { t.Fatal("truncate failed:", err) } _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error { @@ -233,7 +233,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) { wg.Done() }() go func() { - truncateErr = f.TruncateHead(10) + _, truncateErr = f.TruncateHead(10) wg.Done() }() go func() { diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go index e7cce2920..1bbb50c49 100644 --- a/core/rawdb/freezer_utils.go +++ b/core/rawdb/freezer_utils.go @@ -117,3 +117,19 @@ func truncateFreezerFile(file *os.File, size int64) error { } return nil } + +// grow prepares the slice space for new item, and doubles the slice capacity +// if space is not enough. +func grow(buf []byte, n int) []byte { + if cap(buf)-len(buf) < n { + newcap := 2 * cap(buf) + if newcap-len(buf) < n { + newcap = len(buf) + n + } + nbuf := make([]byte, len(buf), newcap) + copy(nbuf, buf) + buf = nbuf + } + buf = buf[:len(buf)+n] + return buf +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 18722ed5d..7269fe5d5 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -43,6 +43,9 @@ var ( // headFinalizedBlockKey tracks the latest known finalized block hash. headFinalizedBlockKey = []byte("LastFinalized") + // persistentStateIDKey tracks the id of latest stored state(for path-based only). + persistentStateIDKey = []byte("LastStateID") + // lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead). lastPivotKey = []byte("LastPivot") @@ -70,6 +73,9 @@ var ( // skeletonSyncStatusKey tracks the skeleton sync status across restarts. skeletonSyncStatusKey = []byte("SkeletonSyncStatus") + // trieJournalKey tracks the in-memory trie node layers across restarts. + trieJournalKey = []byte("TrieJournal") + // txIndexTailKey tracks the oldest block whose transactions have been indexed. txIndexTailKey = []byte("TransactionIndexTail") @@ -104,6 +110,7 @@ var ( // Path-based storage scheme of merkle patricia trie. trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node + stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db @@ -188,7 +195,11 @@ func accountSnapshotKey(hash common.Hash) []byte { // storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash func storageSnapshotKey(accountHash, storageHash common.Hash) []byte { - return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...) + buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength) + n := copy(buf, SnapshotStoragePrefix) + n += copy(buf[n:], accountHash.Bytes()) + copy(buf[n:], storageHash.Bytes()) + return buf } // storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash @@ -240,6 +251,11 @@ func genesisStateSpecKey(hash common.Hash) []byte { return append(genesisPrefix, hash.Bytes()...) } +// stateIDKey = stateIDPrefix + root (32 bytes) +func stateIDKey(root common.Hash) []byte { + return append(stateIDPrefix, root.Bytes()...) +} + // accountTrieNodeKey = trieNodeAccountPrefix + nodePath. func accountTrieNodeKey(path []byte) []byte { return append(trieNodeAccountPrefix, path...) 
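The readData rewrite above relies on the grow helper added in freezer_utils.go. A quick standalone check of its doubling behavior (the helper body is reproduced verbatim so the example runs on its own):

```go
package main

import "fmt"

// grow extends buf by n bytes, reallocating with at least double the
// capacity when the spare room is insufficient (same logic as above).
func grow(buf []byte, n int) []byte {
	if cap(buf)-len(buf) < n {
		newcap := 2 * cap(buf)
		if newcap-len(buf) < n {
			newcap = len(buf) + n
		}
		nbuf := make([]byte, len(buf), newcap)
		copy(nbuf, buf)
		buf = nbuf
	}
	return buf[:len(buf)+n]
}

func main() {
	buf := make([]byte, 0, 1024) // the initial cap used by retrieveItems
	for _, n := range []int{100, 1000, 100} {
		buf = grow(buf, n)
		fmt.Printf("len=%d cap=%d\n", len(buf), cap(buf))
	}
	// len=100 cap=1024, len=1100 cap=2048, len=1200 cap=2048
}
```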
@@ -247,7 +263,11 @@ func accountTrieNodeKey(path []byte) []byte { // storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath. func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte { - return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...) + buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path)) + n := copy(buf, trieNodeStoragePrefix) + n += copy(buf[n:], accountHash.Bytes()) + copy(buf[n:], path) + return buf } // IsLegacyTrieNode reports whether a provided database entry is a legacy trie @@ -261,9 +281,10 @@ func IsLegacyTrieNode(key []byte, val []byte) bool { return bytes.Equal(key, crypto.Keccak256(val)) } -// IsAccountTrieNode reports whether a provided database entry is an account -// trie node in path-based state scheme. -func IsAccountTrieNode(key []byte) (bool, []byte) { +// ResolveAccountTrieNodeKey reports whether a provided database entry is an +// account trie node in path-based state scheme, and returns the resolved +// node path if so. +func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) { if !bytes.HasPrefix(key, trieNodeAccountPrefix) { return false, nil } @@ -276,9 +297,17 @@ func IsAccountTrieNode(key []byte) (bool, []byte) { return true, key[len(trieNodeAccountPrefix):] } -// IsStorageTrieNode reports whether a provided database entry is a storage +// IsAccountTrieNode reports whether a provided database entry is an account // trie node in path-based state scheme. -func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) { +func IsAccountTrieNode(key []byte) bool { + ok, _ := ResolveAccountTrieNodeKey(key) + return ok +} + +// ResolveStorageTrieNode reports whether a provided database entry is a storage +// trie node in path-based state scheme, and returns the resolved account hash +// and node path if so. +func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) { if !bytes.HasPrefix(key, trieNodeStoragePrefix) { return false, common.Hash{}, nil } @@ -294,3 +323,10 @@ func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) { accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength]) return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:] } + +// IsStorageTrieNode reports whether a provided database entry is a storage +// trie node in path-based state scheme. +func IsStorageTrieNode(key []byte) bool { + ok, _, _ := ResolveStorageTrieNode(key) + return ok +} diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 6d6fa0555..1895f61da 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -97,13 +97,13 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err e // TruncateHead is a noop passthrough that just forwards the request to the underlying // database. -func (t *table) TruncateHead(items uint64) error { +func (t *table) TruncateHead(items uint64) (uint64, error) { return t.db.TruncateHead(items) } // TruncateTail is a noop passthrough that just forwards the request to the underlying // database. 
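storageSnapshotKey and storageTrieNodeKey above switch from chained appends to a sized make plus copy, guaranteeing a single allocation and avoiding any accidental write into a shared backing array. The general pattern, sketched with placeholder inputs:

```go
package main

import "fmt"

// buildKey concatenates prefix+a+b with exactly one allocation, the
// same shape as the rewritten storage key builders.
func buildKey(prefix, a, b []byte) []byte {
	buf := make([]byte, len(prefix)+len(a)+len(b))
	n := copy(buf, prefix)
	n += copy(buf[n:], a)
	copy(buf[n:], b)
	return buf
}

func main() {
	key := buildKey([]byte("O"), []byte{0xaa, 0xbb}, []byte{0x01})
	fmt.Printf("%x\n", key) // 4faabb01
}
```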
-func (t *table) TruncateTail(items uint64) error { +func (t *table) TruncateTail(items uint64) (uint64, error) { return t.db.TruncateTail(items) } diff --git a/core/state/database.go b/core/state/database.go index ace462165..9467c8f72 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" @@ -43,21 +44,21 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. - OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) + OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) // CopyTrie returns an independent copy of the given trie. CopyTrie(Trie) Trie // ContractCode retrieves a particular contract's code. - ContractCode(addrHash, codeHash common.Hash) ([]byte, error) + ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error) // ContractCodeSize retrieves a particular contract's code size. - ContractCodeSize(addrHash, codeHash common.Hash) (int, error) + ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) // DiskDB returns the underlying key-value disk database. DiskDB() ethdb.KeyValueStore - // TrieDB retrieves the low level trie database used for data storage. + // TrieDB returns the underlying trie database for managing trie nodes. TrieDB() *trie.Database } @@ -93,6 +94,10 @@ type Trie interface { // in the trie with provided address. UpdateAccount(address common.Address, account *types.StateAccount) error + // UpdateContractCode abstracts code write to the trie. It is expected + // to be moved to the stateWriter interface when the latter is ready. + UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error + // DeleteStorage removes any existing value for key from the trie. If a node // was not found in the database, a trie.MissingNodeError is returned. DeleteStorage(addr common.Address, key []byte) error @@ -110,11 +115,12 @@ type Trie interface { // The returned nodeset can be nil if the trie is clean (nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with the new root and updated trie database for subsequent usage - Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) + Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) // NodeIterator returns an iterator that returns nodes of the trie. Iteration - // starts at the key after the given start key. - NodeIterator(startKey []byte) trie.NodeIterator + // starts at the key after the given start key. An error will be returned + // if it fails to create the node iterator. + NodeIterator(startKey []byte) (trie.NodeIterator, error) // Prove constructs a Merkle proof for key. The result contains all encoded nodes // on the path to the value at key. The value itself is also included in the last @@ -123,7 +129,7 @@ // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key.
- Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error + Prove(key []byte, proofDb ethdb.KeyValueWriter) error } // NewDatabase creates a backing store for state. The returned database is safe for @@ -141,7 +147,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { disk: db, codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: trie.NewDatabaseWithConfig(db, config), + triedb: trie.NewDatabase(db, config), } } @@ -172,8 +178,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { } // OpenStorageTrie opens the storage trie of an account. -func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) { - tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb) +func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) { + tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) if err != nil { return nil, err } @@ -191,7 +197,7 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { } // ContractCode retrieves a particular contract's code. -func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { +func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) { code, _ := db.codeCache.Get(codeHash) if len(code) > 0 { return code, nil @@ -208,7 +214,7 @@ // ContractCodeWithPrefix retrieves a particular contract's code. If the // code can't be found in the cache, the existence is then checked with the **new** // db scheme. -func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) { +func (db *cachingDB) ContractCodeWithPrefix(address common.Address, codeHash common.Hash) ([]byte, error) { code, _ := db.codeCache.Get(codeHash) if len(code) > 0 { return code, nil @@ -223,11 +229,11 @@ } // ContractCodeSize retrieves a particular contract's code size.
-func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { +func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { return cached, nil } - code, err := db.ContractCode(addrHash, codeHash) + code, err := db.ContractCode(addr, codeHash) return len(code), err } diff --git a/core/state/dump.go b/core/state/dump.go index 96a2ac3d3..9ce6cd394 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -140,7 +140,11 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] log.Info("Trie dumping started", "root", s.trie.Hash()) c.OnRoot(s.trie.Hash()) - it := trie.NewIterator(s.trie.NodeIterator(conf.Start)) + trieIt, err := s.trie.NodeIterator(conf.Start) + if err != nil { + return nil + } + it := trie.NewIterator(trieIt) for it.Next() { var data types.StateAccount if err := rlp.DecodeBytes(it.Value, &data); err != nil { @@ -167,18 +171,23 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] } else { address = &addr } - obj := newObject(s, addr, data) + obj := newObject(s, addr, &data) if !conf.SkipCode { - account.Code = obj.Code(s.db) + account.Code = obj.Code() } if !conf.SkipStorage { account.Storage = make(map[common.Hash]string) - tr, err := obj.getTrie(s.db) + tr, err := obj.getTrie() if err != nil { log.Error("Failed to load storage trie", "err", err) continue } - storageIt := trie.NewIterator(tr.NodeIterator(nil)) + trieIt, err := tr.NodeIterator(nil) + if err != nil { + log.Error("Failed to create trie iterator", "err", err) + continue + } + storageIt := trie.NewIterator(trieIt) for storageIt.Next() { _, content, _, err := rlp.Split(storageIt.Value) if err != nil { diff --git a/core/state/iterator.go b/core/state/iterator.go index 79bb650ab..683efd73d 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -18,6 +18,7 @@ package state import ( "bytes" + "errors" "fmt" "github.com/ethereum/go-ethereum/common" @@ -27,7 +28,8 @@ import ( ) // nodeIterator is an iterator to traverse the entire state trie post-order, -// including all of the contract code and contract state tries. +// including all of the contract code and contract state tries. The preimage is +// required in order to resolve the contract address.
type nodeIterator struct { state *StateDB // State being iterated @@ -74,8 +76,12 @@ func (it *nodeIterator) step() error { return nil } // Initialize the iterator if we've just started + var err error if it.stateIt == nil { - it.stateIt = it.state.trie.NodeIterator(nil) + it.stateIt, err = it.state.trie.NodeIterator(nil) + if err != nil { + return err + } } // If we had data nodes previously, we surely have at least state nodes if it.dataIt != nil { @@ -106,21 +112,31 @@ func (it *nodeIterator) step() error { } // Otherwise we've reached an account node, initiate data iteration var account types.StateAccount - if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { + if err := rlp.DecodeBytes(it.stateIt.LeafBlob(), &account); err != nil { return err } - dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root) + // Look up the preimage of the account hash + preimage := it.state.trie.GetKey(it.stateIt.LeafKey()) + if preimage == nil { + return errors.New("account address is not available") + } + address := common.BytesToAddress(preimage) + + // Traverse the storage slots belonging to the account + dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root) + if err != nil { + return err + } + it.dataIt, err = dataTrie.NodeIterator(nil) if err != nil { return err } - it.dataIt = dataTrie.NodeIterator(nil) if !it.dataIt.Next(true) { it.dataIt = nil } if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { it.codeHash = common.BytesToHash(account.CodeHash) - addrHash := common.BytesToHash(it.stateIt.LeafKey()) - it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash)) + it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash)) if err != nil { return fmt.Errorf("code %x: %v", account.CodeHash, err) } diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index c8e1a181e..73cc22490 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -26,9 +26,14 @@ import ( // Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) { + testNodeIteratorCoverage(t, rawdb.HashScheme) + testNodeIteratorCoverage(t, rawdb.PathScheme) +} + +func testNodeIteratorCoverage(t *testing.T, scheme string) { // Create some arbitrary test state to iterate - db, sdb, root, _ := makeTestState() - sdb.TrieDB().Commit(root, false) + db, sdb, ndb, root, _ := makeTestState(scheme) + ndb.Commit(root, false) state, err := New(root, sdb, nil) if err != nil { @@ -48,7 +53,7 @@ func TestNodeIteratorCoverage(t *testing.T) { ) it := db.NewIterator(nil, nil) for it.Next() { - ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value()) + ok, hash := isTrieNode(scheme, it.Key(), it.Value()) if !ok { continue } @@ -90,11 +95,11 @@ func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { return true, common.BytesToHash(key) } } else { - ok, _ := rawdb.IsAccountTrieNode(key) + ok := rawdb.IsAccountTrieNode(key) if ok { return true, crypto.Keccak256Hash(val) } - ok, _, _ = rawdb.IsStorageTrieNode(key) + ok = rawdb.IsStorageTrieNode(key) if ok { return true, crypto.Keccak256Hash(val) } diff --git a/core/state/journal.go b/core/state/journal.go index 1722fb4c0..137ec7639 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -90,12 +90,19 @@ type ( account *common.Address } resetObjectChange struct { + account *common.Address prev *stateObject prevdestruct bool + prevAccount []byte + prevStorage map[common.Hash][]byte + + prevAccountOriginExist bool + prevAccountOrigin []byte + prevStorageOrigin map[common.Hash][]byte } - suicideChange struct { + selfDestructChange struct { account *common.Address - prev bool // whether account had already suicided + prev bool // whether account had already self-destructed prevbalance *big.Int } @@ -159,21 +166,33 @@ func (ch resetObjectChange) revert(s *StateDB) { if !ch.prevdestruct { delete(s.stateObjectsDestruct, ch.prev.address) } + if ch.prevAccount != nil { + s.accounts[ch.prev.addrHash] = ch.prevAccount + } + if ch.prevStorage != nil { + s.storages[ch.prev.addrHash] = ch.prevStorage + } + if ch.prevAccountOriginExist { + s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin + } + if ch.prevStorageOrigin != nil { + s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin + } } func (ch resetObjectChange) dirtied() *common.Address { - return nil + return ch.account } -func (ch suicideChange) revert(s *StateDB) { +func (ch selfDestructChange) revert(s *StateDB) { obj := s.getStateObject(*ch.account) if obj != nil { - obj.suicided = ch.prev + obj.selfDestructed = ch.prev obj.setBalance(ch.prevbalance) } } -func (ch suicideChange) dirtied() *common.Address { +func (ch selfDestructChange) dirtied() *common.Address { return ch.account } diff --git a/core/state/metrics.go b/core/state/metrics.go index e702ef3a8..64c651461 100644 --- a/core/state/metrics.go +++ b/core/state/metrics.go @@ -27,4 +27,11 @@ var ( storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) + + slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil) + slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil) + slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil) + slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil) + slotDeletionSize = 
metrics.NewRegisteredMeter("state/delete/storage/size", nil) + slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil) ) diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index a8142c51f..5acf54f64 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -57,7 +57,6 @@ const ( // Config includes all the configurations for pruning. type Config struct { Datadir string // The directory of the state database - Cachedir string // The directory of state clean cache BloomSize uint64 // The Megabytes of memory allocated to bloom-filter } @@ -86,13 +85,16 @@ func NewPruner(db ethdb.Database, config Config) (*Pruner, error) { if headBlock == nil { return nil, errors.New("failed to load head block") } + // Offline pruning is only supported in legacy hash based scheme. + triedb := trie.NewDatabase(db, trie.HashDefaults) + snapconfig := snapshot.Config{ CacheSize: 256, Recovery: false, NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Root()) + snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root()) if err != nil { return nil, err // The relevant snapshot(s) might not exist } @@ -241,7 +243,7 @@ func (p *Pruner) Prune(root common.Hash) error { return err } if stateBloomRoot != (common.Hash{}) { - return RecoverPruning(p.config.Datadir, p.db, p.config.Cachedir) + return RecoverPruning(p.config.Datadir, p.db) } // If the target state root is not specified, use the HEAD-127 as the // target. The reason for picking it is: @@ -266,7 +268,7 @@ func (p *Pruner) Prune(root common.Hash) error { // is the presence of root can indicate the presence of the // entire trie. if !rawdb.HasLegacyTrieNode(p.db, root) { - // The special case is for clique based networks(rinkeby, goerli + // The special case is for clique based networks(goerli // and some other private networks), it's possible that two // consecutive blocks will have same root. In this case snapshot // difflayer won't be created. So HEAD-127 may not paired with @@ -299,12 +301,6 @@ func (p *Pruner) Prune(root common.Hash) error { log.Info("Selecting user-specified state as the pruning target", "root", root) } } - // Before start the pruning, delete the clean trie cache first. - // It's necessary otherwise in the next restart we will hit the - // deleted state root in the "clean cache" so that the incomplete - // state is picked for usage. - deleteCleanTrieCache(p.config.Cachedir) - // All the state roots of the middle layer should be forcibly pruned, // otherwise the dangling state will be left. middleRoots := make(map[common.Hash]struct{}) @@ -342,7 +338,7 @@ func (p *Pruner) Prune(root common.Hash) error { // pruning can be resumed. What's more if the bloom filter is constructed, the // pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left // in the disk. -func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error { +func RecoverPruning(datadir string, db ethdb.Database) error { stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir) if err != nil { return err @@ -368,7 +364,9 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Root()) + // Offline pruning is only supported in legacy hash based scheme. 
+ triedb := trie.NewDatabase(db, trie.HashDefaults) + snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root()) if err != nil { return err // The relevant snapshot(s) might not exist } @@ -378,12 +376,6 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err } log.Info("Loaded state bloom filter", "path", stateBloomPath) - // Before start the pruning, delete the clean trie cache first. - // It's necessary otherwise in the next restart we will hit the - // deleted state root in the "clean cache" so that the incomplete - // state is picked for usage. - deleteCleanTrieCache(trieCachePath) - // All the state roots of the middle layers should be forcibly pruned, // otherwise the dangling state will be left. var ( @@ -416,11 +408,14 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if genesis == nil { return errors.New("missing genesis block") } - t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db)) + t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults)) + if err != nil { + return err + } + accIter, err := t.NodeIterator(nil) if err != nil { return err } - accIter := t.NodeIterator(nil) for accIter.Next(true) { hash := accIter.Hash() @@ -437,11 +432,14 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { } if acc.Root != types.EmptyRootHash { id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root) - storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db)) + storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults)) + if err != nil { + return err + } + storageIter, err := storageTrie.NodeIterator(nil) if err != nil { return err } - storageIter := storageTrie.NodeIterator(nil) for storageIter.Next(true) { hash := storageIter.Hash() if hash != (common.Hash{}) { @@ -491,23 +489,3 @@ func findBloomFilter(datadir string) (string, common.Hash, error) { } return stateBloomPath, stateBloomRoot, nil } - -const warningLog = ` - -WARNING! - -The clean trie cache is not found. Please delete it by yourself after the -pruning. Remember don't start the Geth without deleting the clean trie cache -otherwise the entire database may be damaged! - -Check the command description "geth snapshot prune-state --help" for more details. -` - -func deleteCleanTrieCache(path string) { - if !common.FileExist(path) { - log.Warn(warningLog) - return - } - os.RemoveAll(path) - log.Info("Deleted trie clean cache", "path", path) -} diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go deleted file mode 100644 index b5634972a..000000000 --- a/core/state/snapshot/account.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package snapshot - -import ( - "bytes" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" -) - -// Account is a modified version of a state.Account, where the root is replaced -// with a byte slice. This format can be used to represent full-consensus format -// or slim-snapshot format which replaces the empty root and code hash as nil -// byte slice. -type Account struct { - Nonce uint64 - Balance *big.Int - Root []byte - CodeHash []byte -} - -// SlimAccount converts a state.Account content into a slim snapshot account -func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account { - slim := Account{ - Nonce: nonce, - Balance: balance, - } - if root != types.EmptyRootHash { - slim.Root = root[:] - } - if !bytes.Equal(codehash, types.EmptyCodeHash[:]) { - slim.CodeHash = codehash - } - return slim -} - -// SlimAccountRLP converts a state.Account content into a slim snapshot -// version RLP encoded. -func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte { - data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash)) - if err != nil { - panic(err) - } - return data -} - -// FullAccount decodes the data on the 'slim RLP' format and return -// the consensus format account. -func FullAccount(data []byte) (Account, error) { - var account Account - if err := rlp.DecodeBytes(data, &account); err != nil { - return Account{}, err - } - if len(account.Root) == 0 { - account.Root = types.EmptyRootHash[:] - } - if len(account.CodeHash) == 0 { - account.CodeHash = types.EmptyCodeHash[:] - } - return account, nil -} - -// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format. 
-func FullAccountRLP(data []byte) ([]byte, error) { - account, err := FullAccount(data) - if err != nil { - return nil, err - } - return rlp.EncodeToBytes(account) -} diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index 0e583e626..1e683f76c 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -17,7 +17,6 @@ package snapshot import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -301,7 +300,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou fullData []byte ) if leafCallback == nil { - fullData, err = FullAccountRLP(it.(AccountIterator).Account()) + fullData, err = types.FullAccountRLP(it.(AccountIterator).Account()) if err != nil { return stop(err) } @@ -313,7 +312,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou return stop(err) } // Fetch the next account and process it concurrently - account, err := FullAccount(it.(AccountIterator).Account()) + account, err := types.FullAccount(it.(AccountIterator).Account()) if err != nil { return stop(err) } @@ -323,7 +322,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou results <- err return } - if !bytes.Equal(account.Root, subroot.Bytes()) { + if account.Root != subroot { results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot) return } diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 203d66f93..b6aca599c 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -21,14 +21,15 @@ import ( "fmt" "math" "math/rand" - "sort" "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" bloomfilter "github.com/holiman/bloomfilter/v2" + "golang.org/x/exp/slices" ) var ( @@ -272,7 +273,7 @@ func (dl *diffLayer) Stale() bool { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. -func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { +func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { data, err := dl.AccountRLP(hash) if err != nil { return nil, err @@ -280,7 +281,7 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { if len(data) == 0 { // can be both nil and []byte{} return nil, nil } - account := new(Account) + account := new(types.SlimAccount) if err := rlp.DecodeBytes(data, account); err != nil { panic(err) } @@ -292,8 +293,8 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { // // Note the returned account is not a copy, please don't modify it. func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { - dl.lock.RLock() // Check staleness before reaching further. 
+ dl.lock.RLock() if dl.Stale() { dl.lock.RUnlock() return nil, ErrSnapshotStale @@ -524,7 +525,7 @@ func (dl *diffLayer) AccountList() []common.Hash { dl.accountList = append(dl.accountList, hash) } } - sort.Sort(hashes(dl.accountList)) + slices.SortFunc(dl.accountList, common.Hash.Cmp) dl.memory += uint64(len(dl.accountList) * common.HashLength) return dl.accountList } @@ -562,7 +563,7 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) for k := range storageMap { storageList = append(storageList, k) } - sort.Sort(hashes(storageList)) + slices.SortFunc(storageList, common.Hash.Cmp) dl.storageList[accountHash] = storageList dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength) return storageList, destructed diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go index 7cbf6e293..513f0f5ab 100644 --- a/core/state/snapshot/disklayer.go +++ b/core/state/snapshot/disklayer.go @@ -23,6 +23,7 @@ import ( "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" @@ -65,7 +66,7 @@ func (dl *diskLayer) Stale() bool { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. -func (dl *diskLayer) Account(hash common.Hash) (*Account, error) { +func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) { data, err := dl.AccountRLP(hash) if err != nil { return nil, err @@ -73,7 +74,7 @@ func (dl *diskLayer) Account(hash common.Hash) (*Account, error) { if len(data) == 0 { // can be both nil and []byte{} return nil, nil } - account := new(Account) + account := new(types.SlimAccount) if err := rlp.DecodeBytes(data, account); err != nil { panic(err) } diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 4703b45df..40264b092 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -20,7 +20,6 @@ import ( "bytes" "errors" "fmt" - "math/big" "time" "github.com/VictoriaMetrics/fastcache" @@ -257,7 +256,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [ if origin == nil { origin = common.Hash{}.Bytes() } - if err := tr.Prove(origin, 0, proof); err != nil { + if err := tr.Prove(origin, proof); err != nil { log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err) return &proofResult{ keys: keys, @@ -268,7 +267,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [ }, nil } if last != nil { - if err := tr.Prove(last, 0, proof); err != nil { + if err := tr.Prove(last, proof); err != nil { log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err) return &proofResult{ keys: keys, @@ -357,14 +356,18 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi var resolver trie.NodeResolver if len(result.keys) > 0 { mdb := rawdb.NewMemoryDatabase() - tdb := trie.NewDatabase(mdb) + tdb := trie.NewDatabase(mdb, trie.HashDefaults) + defer tdb.Close() snapTrie := trie.NewEmpty(tdb) for i, key := range result.keys { snapTrie.Update(key, result.vals[i]) } - root, nodes := snapTrie.Commit(false) + root, nodes, err := snapTrie.Commit(false) + if err != nil { + return false, nil, err + } if nodes != nil { - tdb.Update(root, types.EmptyRootHash, 
trienode.NewWithNodeSet(nodes)) + tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) tdb.Commit(root, false) } resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte { @@ -383,8 +386,6 @@ } var ( trieMore bool - nodeIt = tr.NodeIterator(origin) - iter = trie.NewIterator(nodeIt) kvkeys, kvvals = result.keys, result.vals // counters @@ -398,7 +399,12 @@ start = time.Now() internal time.Duration ) + nodeIt, err := tr.NodeIterator(origin) + if err != nil { + return false, nil, err + } nodeIt.AddResolver(resolver) + iter := trie.NewIterator(nodeIt) for iter.Next() { if last != nil && bytes.Compare(iter.Key, last) > 0 { @@ -573,12 +579,7 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er return nil } // Retrieve the current account and flatten it into the internal format - var acc struct { - Nonce uint64 - Balance *big.Int - Root common.Hash - CodeHash []byte - } + var acc types.StateAccount if err := rlp.DecodeBytes(val, &acc); err != nil { log.Crit("Invalid account encountered during snapshot creation", "err", err) } @@ -594,7 +595,7 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er } snapRecoveredAccountMeter.Mark(1) } else { - data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash) + data := types.SlimAccountRLP(acc) dataLen = len(data) rawdb.WriteAccountSnapshot(ctx.batch, account, data) snapGeneratedAccountMeter.Mark(1) @@ -640,7 +641,7 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er origin := common.CopyBytes(accMarker) for { id := trie.StateTrieID(dl.root) - exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP) + exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, types.FullAccountRLP) if err != nil { return err // The procedure is aborted, either by external signal or internal error. } diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index a17ae05e5..07016b675 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" ) @@ -45,15 +47,20 @@ func hashData(input []byte) common.Hash { // Tests snapshot generation from an empty database. func TestGeneration(t *testing.T) { + testGeneration(t, rawdb.HashScheme) + testGeneration(t, rawdb.PathScheme) +} + +func testGeneration(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also have the same 3-slot storage trie attached.
- var helper = newHelper() + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -79,22 +86,27 @@ func TestGeneration(t *testing.T) { // Tests that snapshot generation with existent flat state. func TestGenerateExistentState(t *testing.T) { + testGenerateExistentState(t, rawdb.HashScheme) + testGenerateExistentState(t, rawdb.PathScheme) +} + +func testGenerateExistentState(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. 
- var helper = newHelper() + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) root, snap := helper.CommitAndGenerate() @@ -148,9 +160,15 @@ type testHelper struct { nodes *trienode.MergedNodeSet } -func newHelper() *testHelper { +func newHelper(scheme string) *testHelper { diskdb := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(diskdb) + config := &trie.Config{} + if scheme == rawdb.PathScheme { + config.PathDB = &pathdb.Config{} // disable caching + } else { + config.HashDB = &hashdb.Config{} // disable caching + } + triedb := trie.NewDatabase(diskdb, config) accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb) return &testHelper{ diskdb: diskdb, @@ -160,18 +178,17 @@ func newHelper() *testHelper { } } -func (t *testHelper) addTrieAccount(acckey string, acc *Account) { +func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) { val, _ := rlp.EncodeToBytes(acc) t.accTrie.MustUpdate([]byte(acckey), val) } -func (t *testHelper) addSnapAccount(acckey string, acc *Account) { - val, _ := rlp.EncodeToBytes(acc) +func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) { key := hashData([]byte(acckey)) - rawdb.WriteAccountSnapshot(t.diskdb, key, val) + rawdb.WriteAccountSnapshot(t.diskdb, key, types.SlimAccountRLP(*acc)) } -func (t *testHelper) addAccount(acckey string, acc *Account) { +func (t *testHelper) addAccount(acckey string, acc *types.StateAccount) { t.addTrieAccount(acckey, acc) t.addSnapAccount(acckey, acc) } 
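A side note on the addSnapAccount change above: the helper now persists the slim snapshot encoding via types.SlimAccountRLP rather than the consensus RLP of the account, matching what the generator itself writes. A hedged sketch of the difference, assuming a go-ethereum version that ships the types.SlimAccountRLP helper this patch series relies on:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	acc := types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(1),
		Root:     types.EmptyRootHash,         // elided (nil) in the slim encoding
		CodeHash: types.EmptyCodeHash.Bytes(), // elided (nil) in the slim encoding
	}
	full, _ := rlp.EncodeToBytes(&acc) // consensus encoding, what addSnapAccount used to write
	slim := types.SlimAccountRLP(acc)  // snapshot encoding, what it writes now
	fmt.Printf("full: %d bytes, slim: %d bytes\n", len(full), len(slim))
}

One practical difference: an empty storage root or code hash is stored as nil in the slim form, so fixtures built this way no longer embed the 32-byte empty hashes that the old helper encoded verbatim.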
@@ -183,28 +200,28 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) } } -func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) []byte { +func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) common.Hash { id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash) stTrie, _ := trie.NewStateTrie(id, t.triedb) for i, k := range keys { stTrie.MustUpdate([]byte(k), []byte(vals[i])) } if !commit { - return stTrie.Hash().Bytes() + return stTrie.Hash() } - root, nodes := stTrie.Commit(false) + root, nodes, _ := stTrie.Commit(false) if nodes != nil { t.nodes.Merge(nodes) } - return root.Bytes() + return root } func (t *testHelper) Commit() common.Hash { - root, nodes := t.accTrie.Commit(true) + root, nodes, _ := t.accTrie.Commit(true) if nodes != nil { t.nodes.Merge(nodes) } - t.triedb.Update(root, types.EmptyRootHash, t.nodes) + t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil) t.triedb.Commit(root, false) return root } @@ -234,31 +251,36 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) { // - extra slots in the middle // - extra slots in the end func TestGenerateExistentStateWithWrongStorage(t *testing.T) { - helper := newHelper() + testGenerateExistentStateWithWrongStorage(t, rawdb.HashScheme) + testGenerateExistentStateWithWrongStorage(t, rawdb.PathScheme) +} + +func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) { + helper := newHelper(scheme) // Account one, empty root but non-empty database - helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) // Account two, non empty root but empty database stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Miss slots { // Account three, non empty root but misses slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"}) // Account four, non empty root but misses slots in the middle helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"}) // Account five, non empty root but misses slots in the end 
helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"}) } @@ -266,22 +288,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account six, non empty root but wrong slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"}) // Account seven, non empty root but wrong slots in the middle helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"}) // Account eight, non empty root but wrong slots in the end helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"}) // Account 9, non empty root but rotated slots helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"}) } @@ -289,17 +311,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account 10, non empty root but extra slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"}) // Account 11, non empty root but extra slots in the middle helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - 
helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"}) // Account 12, non empty root but extra slots in the end helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"}) } @@ -326,7 +348,12 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { // - wrong accounts // - extra accounts func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { - helper := newHelper() + testGenerateExistentStateWithWrongAccounts(t, rawdb.HashScheme) + testGenerateExistentStateWithWrongAccounts(t, rawdb.PathScheme) +} + +func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) { + helper := newHelper(scheme) helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -339,25 +366,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { // Missing accounts, only in the trie { - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning - helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle - helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning + helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle + helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End } // Wrong accounts { - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", 
&types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) } // Extra accounts, only in the snap { - helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning - helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle - helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end + helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning + helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle + helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // after the end } root, snap := helper.CommitAndGenerate() @@ -381,20 +408,27 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { // Tests that snapshot generation errors out correctly in case of a missing trie // node in the account trie. func TestGenerateCorruptAccountTrie(t *testing.T) { + testGenerateCorruptAccountTrie(t, rawdb.HashScheme) + testGenerateCorruptAccountTrie(t, rawdb.PathScheme) +} + +func testGenerateCorruptAccountTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // without any storage slots to keep the test smaller. - helper := newHelper() + helper := newHelper(scheme) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978 - // Delete an account trie leaf and ensure the generator chokes - helper.triedb.Commit(root, false) - helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes()) + // Delete an account trie node and ensure the generator chokes + targetPath := []byte{0xc} + targetHash := 
common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7") + + rawdb.DeleteTrieNode(helper.diskdb, common.Hash{}, targetPath, targetHash, scheme) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root) select { @@ -415,21 +449,30 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // trie node for a storage trie. It's similar to internal corruption but it is // handled differently inside the generator. func TestGenerateMissingStorageTrie(t *testing.T) { + testGenerateMissingStorageTrie(t, rawdb.HashScheme) + testGenerateMissingStorageTrie(t, rawdb.PathScheme) +} + +func testGenerateMissingStorageTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - helper := newHelper() - - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + var ( + acc1 = hashData([]byte("acc-1")) + acc3 = hashData([]byte("acc-3")) + helper = newHelper(scheme) + ) + stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 root := helper.Commit() - // Delete a storage trie root and ensure the generator chokes - helper.diskdb.Delete(stRoot) + // Delete storage trie root of account one and three. + rawdb.DeleteTrieNode(helper.diskdb, acc1, nil, stRoot, scheme) + rawdb.DeleteTrieNode(helper.diskdb, acc3, nil, stRoot, scheme) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root) select { @@ -449,21 +492,29 @@ func TestGenerateMissingStorageTrie(t *testing.T) { // Tests that snapshot generation errors out correctly in case of a missing trie // node in a storage trie. 
func TestGenerateCorruptStorageTrie(t *testing.T) { + testGenerateCorruptStorageTrie(t, rawdb.HashScheme) + testGenerateCorruptStorageTrie(t, rawdb.PathScheme) +} + +func testGenerateCorruptStorageTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - helper := newHelper() + helper := newHelper(scheme) - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 root := helper.Commit() - // Delete a storage trie leaf and ensure the generator chokes - helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes()) + // Delete a node in the storage trie. + targetPath := []byte{0x4} + targetHash := common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371") + rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-1")), targetPath, targetHash, scheme) + rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-3")), targetPath, targetHash, scheme) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root) select { @@ -482,7 +533,12 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { // Tests that snapshot generation when an extra account with storage exists in the snap state. 
func TestGenerateWithExtraAccounts(t *testing.T) { - helper := newHelper() + testGenerateWithExtraAccounts(t, rawdb.HashScheme) + testGenerateWithExtraAccounts(t, rawdb.PathScheme) +} + +func testGenerateWithExtraAccounts(t *testing.T, scheme string) { + helper := newHelper(scheme) { // Account one in the trie stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), @@ -490,7 +546,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -510,7 +566,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -550,10 +606,15 @@ func enableLogging() { // Tests that snapshot generation when an extra account with storage exists in the snap state. func TestGenerateWithManyExtraAccounts(t *testing.T) { + testGenerateWithManyExtraAccounts(t, rawdb.HashScheme) + testGenerateWithManyExtraAccounts(t, rawdb.PathScheme) +} + +func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) { if false { enableLogging() } - helper := newHelper() + helper := newHelper(scheme) { // Account one in the trie stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), @@ -561,7 +622,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -575,7 +636,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { { // 100 accounts exist only in snapshot for i := 0; i < 1000; i++ { - acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} + acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(fmt.Sprintf("acc-%d", i))) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -606,13 +667,18 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { // So in trie, we iterate 2 entries 0x03, 0x07. We create the 0x07 in the database and abort the procedure, because the trie is exhausted. // But in the database, we still have the stale storage slots 0x04, 0x05. They are not iterated yet, but the procedure is finished. 
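The extra-account fixtures are planted by RLP-encoding an account and writing it straight into the snapshot table, with no trie entry behind it, which is precisely the inconsistency the generator must detect and wipe. A sketch distilled from the fixture code above (the helper name is hypothetical):

```go
package snapshot

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/rlp"
)

// writeSnapOnlyAccount plants an account in the snapshot layer only, with no
// corresponding trie entry: exactly the stale data the generator must wipe.
func writeSnapOnlyAccount(db ethdb.KeyValueWriter, key common.Hash, balance int64) {
	acc := &types.StateAccount{
		Balance:  big.NewInt(balance),
		Root:     types.EmptyRootHash,
		CodeHash: types.EmptyCodeHash.Bytes(),
	}
	val, _ := rlp.EncodeToBytes(acc) // encoding this struct cannot fail
	rawdb.WriteAccountSnapshot(db, key, val)
}
```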
func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { + testGenerateWithExtraBeforeAndAfter(t, rawdb.HashScheme) + testGenerateWithExtraBeforeAndAfter(t, rawdb.PathScheme) +} + +func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) { accountCheckRange = 3 if false { enableLogging() } - helper := newHelper() + helper := newHelper(scheme) { - acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} + acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val) @@ -643,13 +709,18 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { // TestGenerateWithMalformedSnapdata tests what happes if we have some junk // in the snapshot database, which cannot be parsed back to an account func TestGenerateWithMalformedSnapdata(t *testing.T) { + testGenerateWithMalformedSnapdata(t, rawdb.HashScheme) + testGenerateWithMalformedSnapdata(t, rawdb.PathScheme) +} + +func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) { accountCheckRange = 3 if false { enableLogging() } - helper := newHelper() + helper := newHelper(scheme) { - acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} + acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) @@ -680,15 +751,20 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) { } func TestGenerateFromEmptySnap(t *testing.T) { + testGenerateFromEmptySnap(t, rawdb.HashScheme) + testGenerateFromEmptySnap(t, rawdb.PathScheme) +} + +func testGenerateFromEmptySnap(t *testing.T, scheme string) { //enableLogging() accountCheckRange = 10 storageCheckRange = 20 - helper := newHelper() + helper := newHelper(scheme) // Add 1K accounts to the trie for i := 0; i < 400; i++ { stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount(fmt.Sprintf("acc-%d", i), - &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) } root, snap := helper.CommitAndGenerate() t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4 @@ -715,8 +791,13 @@ func TestGenerateFromEmptySnap(t *testing.T) { // This hits a case where the snap verification passes, but there are more elements in the trie // which we must also add. func TestGenerateWithIncompleteStorage(t *testing.T) { + testGenerateWithIncompleteStorage(t, rawdb.HashScheme) + testGenerateWithIncompleteStorage(t, rawdb.PathScheme) +} + +func testGenerateWithIncompleteStorage(t *testing.T, scheme string) { storageCheckRange = 4 - helper := newHelper() + helper := newHelper(scheme) stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"} stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"} // We add 8 accounts, each one is missing exactly one of the storage slots. 
This means @@ -725,7 +806,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) { for i := 0; i < 8; i++ { accKey := fmt.Sprintf("acc-%d", i) stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true) - helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) var moddedKeys []string var moddedVals []string for ii := 0; ii < 8; ii++ { @@ -814,14 +895,19 @@ func populateDangling(disk ethdb.KeyValueStore) { // // This test will populate some dangling storages to see if they can be cleaned up. func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { - var helper = newHelper() + testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.HashScheme) + testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.PathScheme) +} + +func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string) { + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) @@ -849,14 +935,19 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { // // This test will populate some dangling storages to see if they can be cleaned up. 
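The per-account slot selection elided above presumably skips slot `i` for account `i`, so each of the eight accounts lacks exactly one snapshot entry and generation has to heal a different gap every time. A hedged reconstruction as a standalone helper (`dropOne` is a hypothetical name):

```go
package snapshot

// dropOne returns copies of keys/vals with the entry at index skip removed.
// In the test above, account i would seed its snapshot storage with
// dropOne(stKeys, stVals, i), leaving exactly one slot for the generator to
// recover from the storage trie. Hedged reconstruction of the elided logic.
func dropOne(keys, vals []string, skip int) (modKeys, modVals []string) {
	for i := range keys {
		if i == skip {
			continue
		}
		modKeys = append(modKeys, keys[i])
		modVals = append(modVals, vals[i])
	}
	return modKeys, modVals
}
```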
func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { - var helper = newHelper() + testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.HashScheme) + testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.PathScheme) +} + +func testGenerateBrokenSnapshotWithDanglingStorage(t *testing.T, scheme string) { + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) populateDangling(helper.diskdb) diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go index 1a042c7cd..0502d9cf8 100644 --- a/core/state/snapshot/iterator_fast.go +++ b/core/state/snapshot/iterator_fast.go @@ -22,6 +22,7 @@ import ( "sort" "github.com/ethereum/go-ethereum/common" + "golang.org/x/exp/slices" ) // weightedIterator is a iterator with an assigned weight. It is used to prioritise @@ -32,32 +33,25 @@ type weightedIterator struct { priority int } -// weightedIterators is a set of iterators implementing the sort.Interface. -type weightedIterators []*weightedIterator - -// Len implements sort.Interface, returning the number of active iterators. -func (its weightedIterators) Len() int { return len(its) } - -// Less implements sort.Interface, returning which of two iterators in the stack -// is before the other. -func (its weightedIterators) Less(i, j int) bool { +func (it *weightedIterator) Cmp(other *weightedIterator) int { // Order the iterators primarily by the account hashes - hashI := its[i].it.Hash() - hashJ := its[j].it.Hash() + hashI := it.it.Hash() + hashJ := other.it.Hash() switch bytes.Compare(hashI[:], hashJ[:]) { case -1: - return true + return -1 case 1: - return false + return 1 } // Same account/storage-slot in multiple layers, split by priority - return its[i].priority < its[j].priority -} - -// Swap implements sort.Interface, swapping two entries in the iterator stack. 
-func (its weightedIterators) Swap(i, j int) { - its[i], its[j] = its[j], its[i] + if it.priority < other.priority { + return -1 + } + if it.priority > other.priority { + return 1 + } + return 0 } // fastIterator is a more optimized multi-layer iterator which maintains a @@ -69,7 +63,7 @@ type fastIterator struct { curAccount []byte curSlot []byte - iterators weightedIterators + iterators []*weightedIterator initiated bool account bool fail error @@ -167,7 +161,7 @@ func (fi *fastIterator) init() { } } // Re-sort the entire list - sort.Sort(fi.iterators) + slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) }) fi.initiated = false } diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index c1a4cc3d4..4d070208f 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -305,7 +305,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error { } if baseRoot := rawdb.ReadSnapshotRoot(db); baseRoot != parent { log.Warn("Loaded snapshot journal", "diskroot", baseRoot, "diffs", "unmatched") - return fmt.Errorf("mismatched disk and diff layers") + return errors.New("mismatched disk and diff layers") } for { var ( diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 2e57a059d..e30a0005c 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -102,7 +103,7 @@ type Snapshot interface { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. - Account(hash common.Hash) (*Account, error) + Account(hash common.Hash) (*types.SlimAccount, error) // AccountRLP directly retrieves the account RLP associated with a particular // hash in the snapshot slim data format. @@ -563,7 +564,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer { // Ensure we don't delete too much data blindly (contract can be // huge). It's ok to flush, the root will go missing in case of a // crash and we'll detect and regenerate the snapshot. - if batch.ValueSize() > ethdb.IdealBatchSize { + if batch.ValueSize() > 64*1024*1024 { if err := batch.Write(); err != nil { log.Crit("Failed to write storage deletions", "err", err) } @@ -589,7 +590,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer { // Ensure we don't write too much data blindly. It's ok to flush, the // root will go missing in case of a crash and we'll detect and regen // the snapshot. - if batch.ValueSize() > ethdb.IdealBatchSize { + if batch.ValueSize() > 64*1024*1024 { if err := batch.Write(); err != nil { log.Crit("Failed to write storage deletions", "err", err) } @@ -851,3 +852,21 @@ func (t *Tree) DiskRoot() common.Hash { return t.diskRoot() } + +// Size returns the memory usage of the diff layers above the disk layer and the +// dirty nodes buffered in the disk layer. Currently, the implementation uses a +// special diff layer (the first) as an aggregator simulating a dirty buffer, so +// the second return will always be 0. However, this will be made consistent with +// the pathdb, which will require a second return. 
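The iterator rework above is part of a wider migration off `sort.Interface`: the `Len`/`Less`/`Swap` boilerplate is replaced by a three-way `Cmp` plus a single `slices.SortFunc` call. A condensed sketch of the resulting shape, using a simplified stand-in for `*weightedIterator`:

```go
package snapshot

import (
	"bytes"

	"golang.org/x/exp/slices"

	"github.com/ethereum/go-ethereum/common"
)

// item stands in for *weightedIterator: ordered by hash first, then by
// priority as the tie-breaker, mirroring Cmp above.
type item struct {
	hash     common.Hash
	priority int
}

func (a *item) cmp(b *item) int {
	if c := bytes.Compare(a.hash[:], b.hash[:]); c != 0 {
		return c
	}
	// Same hash in multiple layers: the lower priority sorts first.
	switch {
	case a.priority < b.priority:
		return -1
	case a.priority > b.priority:
		return 1
	}
	return 0
}

func sortItems(its []*item) {
	// One SortFunc call replaces the Len/Less/Swap trio of sort.Interface.
	slices.SortFunc(its, func(a, b *item) int { return a.cmp(b) })
}
```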
+func (t *Tree) Size() (diffs common.StorageSize, buf common.StorageSize) { + t.lock.RLock() + defer t.lock.RUnlock() + + var size common.StorageSize + for _, layer := range t.layers { + if layer, ok := layer.(*diffLayer); ok { + size += common.StorageSize(layer.memory) + } + } + return size, 0 +} diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index 038bd6df6..b66799757 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -43,11 +43,10 @@ func randomHash() common.Hash { // randomAccount generates a random account and returns it RLP encoded. func randomAccount() []byte { - root := randomHash() - a := Account{ + a := &types.StateAccount{ Balance: big.NewInt(rand.Int63()), Nonce: rand.Uint64(), - Root: root[:], + Root: randomHash(), CodeHash: types.EmptyCodeHash[:], } data, _ := rlp.EncodeToBytes(a) @@ -465,7 +464,7 @@ func TestReadStateDuringFlattening(t *testing.T) { snap := snaps.Snapshot(common.HexToHash("0xa3")) // Register the testing hook to access the state after flattening - var result = make(chan *Account) + var result = make(chan *types.SlimAccount) snaps.onFlatten = func() { // Spin up a thread to read the account from the pre-created // snapshot handler. It's expected to be blocked. diff --git a/core/state/snapshot/sort.go b/core/state/snapshot/sort.go deleted file mode 100644 index 88841231d..000000000 --- a/core/state/snapshot/sort.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package snapshot - -import ( - "bytes" - - "github.com/ethereum/go-ethereum/common" -) - -// hashes is a helper to implement sort.Interface. -type hashes []common.Hash - -// Len is the number of elements in the collection. -func (hs hashes) Len() int { return len(hs) } - -// Less reports whether the element with index i should sort before the element -// with index j. -func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 } - -// Swap swaps the elements with indexes i and j. 
-func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } diff --git a/core/state/snapshot/utils.go b/core/state/snapshot/utils.go index fa1f216e6..62f073d2e 100644 --- a/core/state/snapshot/utils.go +++ b/core/state/snapshot/utils.go @@ -23,9 +23,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" ) // CheckDanglingStorage iterates the snap storage data, and verifies that all @@ -98,8 +98,8 @@ func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error { baseRoot := rawdb.ReadSnapshotRoot(db) fmt.Printf("Disklayer: Root: %x\n", baseRoot) if data := rawdb.ReadAccountSnapshot(db, hash); data != nil { - account := new(Account) - if err := rlp.DecodeBytes(data, account); err != nil { + account, err := types.FullAccount(data) + if err != nil { panic(err) } fmt.Printf("\taccount.nonce: %d\n", account.Nonce) @@ -129,8 +129,8 @@ func CheckJournalAccount(db ethdb.KeyValueStore, hash common.Hash) error { } fmt.Printf("Disklayer+%d: Root: %x, parent %x\n", depth, root, pRoot) if data, ok := accounts[hash]; ok { - account := new(Account) - if err := rlp.DecodeBytes(data, account); err != nil { + account, err := types.FullAccount(data) + if err != nil { panic(err) } fmt.Printf("\taccount.nonce: %d\n", account.Nonce) diff --git a/core/state/state_object.go b/core/state/state_object.go index 1e28b4c12..d42d2c34d 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -57,29 +57,38 @@ func (s Storage) Copy() Storage { // stateObject represents an Ethereum account which is being modified. // // The usage pattern is as follows: -// First you need to obtain a state object. -// Account values can be accessed and modified through the object. -// Finally, call commitTrie to write the modified storage trie into a database. +// - First you need to obtain a state object. +// - Account values as well as storages can be accessed and modified through the object. +// - Finally, call commit to return the changes of storage trie and update account data. type stateObject struct { - address common.Address - addrHash common.Hash // hash of ethereum address of the account - data types.StateAccount db *StateDB + address common.Address // address of ethereum account + addrHash common.Hash // hash of ethereum address of the account + origin *types.StateAccount // Account original data without any change applied, nil means it was not existent + data types.StateAccount // Account data with all mutations applied in the scope of block // Write caches. trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded - originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction + originStorage Storage // Storage cache of original entries to dedup rewrites pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block - dirtyStorage Storage // Storage entries that have been modified in the current transaction execution + dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction // Cache flags. - // When an object is marked suicided it will be deleted from the trie - // during the "update" phase of the state transition. 
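`CheckJournalAccount` now goes through `types.FullAccount` instead of `rlp.DecodeBytes` into the deleted package-local `Account`: snapshot entries are stored in the slim format, and `FullAccount` re-expands them, restoring the canonical empty root and code hash the slim encoding omits. A hedged usage sketch (the helper is hypothetical):

```go
package snapshot

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// printSnapAccount reads one slim-encoded snapshot account and expands it
// back into a full StateAccount for inspection. Illustrative only.
func printSnapAccount(db ethdb.KeyValueReader, hash common.Hash) error {
	data := rawdb.ReadAccountSnapshot(db, hash)
	if data == nil {
		return fmt.Errorf("no snapshot entry for %x", hash)
	}
	account, err := types.FullAccount(data)
	if err != nil {
		return err
	}
	fmt.Printf("nonce=%d balance=%v root=%x\n", account.Nonce, account.Balance, account.Root)
	return nil
}
```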
dirtyCode bool // true if the code was updated - suicided bool - deleted bool + + // Flag whether the account was marked as self-destructed. The self-destructed account + // is still accessible in the scope of same transaction. + selfDestructed bool + + // Flag whether the account was marked as deleted. A self-destructed account + // or an account that is considered as empty will be marked as deleted at + // the end of transaction and no longer accessible anymore. + deleted bool + + // Flag whether the object was created in the current transaction + created bool } // empty returns whether the account is considered empty. @@ -88,21 +97,17 @@ func (s *stateObject) empty() bool { } // newObject creates a state object. -func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject { - if data.Balance == nil { - data.Balance = new(big.Int) - } - if data.CodeHash == nil { - data.CodeHash = types.EmptyCodeHash.Bytes() - } - if data.Root == (common.Hash{}) { - data.Root = types.EmptyRootHash +func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject { + origin := acct + if acct == nil { + acct = types.NewEmptyStateAccount() } return &stateObject{ db: db, address: address, addrHash: crypto.Keccak256Hash(address[:]), - data: data, + origin: origin, + data: *acct, originStorage: make(Storage), pendingStorage: make(Storage), dirtyStorage: make(Storage), @@ -114,8 +119,8 @@ func (s *stateObject) EncodeRLP(w io.Writer) error { return rlp.Encode(w, &s.data) } -func (s *stateObject) markSuicided() { - s.suicided = true +func (s *stateObject) markSelfdestructed() { + s.selfDestructed = true } func (s *stateObject) touch() { @@ -132,17 +137,15 @@ func (s *stateObject) touch() { // getTrie returns the associated storage trie. The trie will be opened // if it's not loaded previously. An error will be returned if trie can't // be loaded. -func (s *stateObject) getTrie(db Database) (Trie, error) { +func (s *stateObject) getTrie() (Trie, error) { if s.trie == nil { // Try fetching from prefetcher first - // We don't prefetch empty tries if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no - // prefetcher + // When the miner is creating the pending state, there is no prefetcher s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) } if s.trie == nil { - tr, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root) + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root) if err != nil { return nil, err } @@ -153,18 +156,18 @@ func (s *stateObject) getTrie(db Database) (Trie, error) { } // GetState retrieves a value from the account storage trie. -func (s *stateObject) GetState(db Database, key common.Hash) common.Hash { +func (s *stateObject) GetState(key common.Hash) common.Hash { // If we have a dirty value for this state entry, return it value, dirty := s.dirtyStorage[key] if dirty { return value } // Otherwise return the entry's original value - return s.GetCommittedState(db, key) + return s.GetCommittedState(key) } // GetCommittedState retrieves a value from the committed account storage trie. 
-func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash { +func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // If we have a pending write or clean cached, return that if value, pending := s.pendingStorage[key]; pending { return value @@ -183,8 +186,9 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } // If no live objects are available, attempt to use snapshots var ( - enc []byte - err error + enc []byte + err error + value common.Hash ) if s.db.snap != nil { start := time.Now() @@ -192,16 +196,23 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has if metrics.EnabledExpensive { s.db.SnapshotStorageReads += time.Since(start) } + if len(enc) > 0 { + _, content, _, err := rlp.Split(enc) + if err != nil { + s.db.setError(err) + } + value.SetBytes(content) + } } // If the snapshot is unavailable or reading from it fails, load from the database. if s.db.snap == nil || err != nil { start := time.Now() - tr, err := s.getTrie(db) + tr, err := s.getTrie() if err != nil { s.db.setError(err) return common.Hash{} } - enc, err = tr.GetStorage(s.address, key.Bytes()) + val, err := tr.GetStorage(s.address, key.Bytes()) if metrics.EnabledExpensive { s.db.StorageReads += time.Since(start) } @@ -209,23 +220,16 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has s.db.setError(err) return common.Hash{} } - } - var value common.Hash - if len(enc) > 0 { - _, content, _, err := rlp.Split(enc) - if err != nil { - s.db.setError(err) - } - value.SetBytes(content) + value.SetBytes(val) } s.originStorage[key] = value return value } // SetState updates a value in account storage. -func (s *stateObject) SetState(db Database, key, value common.Hash) { +func (s *stateObject) SetState(key, value common.Hash) { // If the new value is the same as old, don't set - prev := s.GetState(db, key) + prev := s.GetState(key) if prev == value { return } @@ -260,12 +264,17 @@ func (s *stateObject) finalise(prefetch bool) { } } -// updateTrie writes cached storage modifications into the object's storage trie. -// It will return nil if the trie has not been loaded and no changes have been -// made. An error will be returned if the trie can't be loaded/updated correctly. -func (s *stateObject) updateTrie(db Database) (Trie, error) { +// updateTrie is responsible for persisting cached storage changes into the +// object's storage trie. In case the storage trie is not yet loaded, this +// function will load the trie automatically. If any issues arise during the +// loading or updating of the trie, an error will be returned. Furthermore, +// this function will return the mutated storage trie, or nil if there is no +// storage change at all. 
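In the reworked `GetCommittedState` above, only the snapshot branch still carries RLP framing; `Trie.GetStorage` now returns the decoded content directly. A standalone sketch of the snapshot-side decode, lifted from that branch:

```go
package state

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeSnapshotSlot turns a snapshot-stored slot (an RLP string of the
// left-trimmed value) back into a full 32-byte hash. The trie path needs no
// such step anymore. Illustrative distillation of the branch above.
func decodeSnapshotSlot(enc []byte) (common.Hash, error) {
	var value common.Hash
	if len(enc) > 0 {
		_, content, _, err := rlp.Split(enc)
		if err != nil {
			return common.Hash{}, err
		}
		value.SetBytes(content)
	}
	return value, nil
}
```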
+func (s *stateObject) updateTrie() (Trie, error) { // Make sure all dirty slots are finalized into the pending storage area - s.finalise(false) // Don't prefetch anymore, pull directly if need be + s.finalise(false) + + // Short circuit if nothing changed, don't bother with hashing anything if len(s.pendingStorage) == 0 { return s.trie, nil } @@ -276,23 +285,24 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) { // The snapshot storage map for the object var ( storage map[common.Hash][]byte - hasher = s.db.hasher + origin map[common.Hash][]byte ) - tr, err := s.getTrie(db) + tr, err := s.getTrie() if err != nil { s.db.setError(err) return nil, err } - // Insert all the pending updates into the trie + // Insert all the pending storage updates into the trie usedStorage := make([][]byte, 0, len(s.pendingStorage)) for key, value := range s.pendingStorage { // Skip noop changes, persist actual changes if value == s.originStorage[key] { continue } + prev := s.originStorage[key] s.originStorage[key] = value - var v []byte + var encoded []byte // rlp-encoded value to be used by the snapshot if (value == common.Hash{}) { if err := tr.DeleteStorage(s.address, key[:]); err != nil { s.db.setError(err) @@ -301,44 +311,58 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) { s.db.StorageDeleted += 1 } else { // Encoding []byte cannot fail, ok to ignore the error. - v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) - if err := tr.UpdateStorage(s.address, key[:], v); err != nil { + trimmed := common.TrimLeftZeroes(value[:]) + encoded, _ = rlp.EncodeToBytes(trimmed) + if err := tr.UpdateStorage(s.address, key[:], trimmed); err != nil { s.db.setError(err) return nil, err } s.db.StorageUpdated += 1 } - // If state snapshotting is active, cache the data til commit - if s.db.snap != nil { - if storage == nil { - // Retrieve the old storage map, if available, create a new one otherwise - if storage = s.db.snapStorage[s.addrHash]; storage == nil { - storage = make(map[common.Hash][]byte) - s.db.snapStorage[s.addrHash] = storage - } + // Cache the mutated storage slots until commit + if storage == nil { + if storage = s.db.storages[s.addrHash]; storage == nil { + storage = make(map[common.Hash][]byte) + s.db.storages[s.addrHash] = storage } - storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted } + khash := crypto.HashData(s.db.hasher, key[:]) + storage[khash] = encoded // encoded will be nil if it's deleted + + // Cache the original value of mutated storage slots + if origin == nil { + if origin = s.db.storagesOrigin[s.address]; origin == nil { + origin = make(map[common.Hash][]byte) + s.db.storagesOrigin[s.address] = origin + } + } + // Track the original value of slot only if it's mutated first time + if _, ok := origin[khash]; !ok { + if prev == (common.Hash{}) { + origin[khash] = nil // nil if it was not present previously + } else { + // Encoding []byte cannot fail, ok to ignore the error. + b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:])) + origin[khash] = b + } + } + // Cache the items for preloading usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure } if s.db.prefetcher != nil { s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) } - if len(s.pendingStorage) > 0 { - s.pendingStorage = make(Storage) - } + s.pendingStorage = make(Storage) // reset pending map return tr, nil } -// UpdateRoot sets the trie root to the current root hash of. 
An error -// will be returned if trie root hash is not computed correctly. -func (s *stateObject) updateRoot(db Database) { - tr, err := s.updateTrie(db) - if err != nil { - return - } - // If nothing changed, don't bother with hashing anything - if tr == nil { +// updateRoot flushes all cached storage mutations to trie, recalculating the +// new storage trie root. +func (s *stateObject) updateRoot() { + // Flush cached storage mutations into trie, short circuit if any error + // is occurred or there is not change in the trie. + tr, err := s.updateTrie() + if err != nil || tr == nil { return } // Track the amount of time wasted on hashing the storage trie @@ -348,23 +372,30 @@ func (s *stateObject) updateRoot(db Database) { s.data.Root = tr.Hash() } -// commitTrie submits the storage changes into the storage trie and re-computes -// the root. Besides, all trie changes will be collected in a nodeset and returned. -func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) { - tr, err := s.updateTrie(db) - if err != nil { - return nil, err - } - // If nothing changed, don't bother with committing anything - if tr == nil { +// commit obtains a set of dirty storage trie nodes and updates the account data. +// The returned set can be nil if nothing to commit. This function assumes all +// storage mutations have already been flushed into trie by updateRoot. +func (s *stateObject) commit() (*trienode.NodeSet, error) { + // Short circuit if trie is not even loaded, don't bother with committing anything + if s.trie == nil { + s.origin = s.data.Copy() return nil, nil } // Track the amount of time wasted on committing the storage trie if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) } - root, nodes := tr.Commit(false) + // The trie is currently in an open state and could potentially contain + // cached mutations. Call commit to acquire a set of nodes that have been + // modified, the set can be nil if nothing to commit. + root, nodes, err := s.trie.Commit(false) + if err != nil { + return nil, err + } s.data.Root = root + + // Update original account data after commit + s.origin = s.data.Copy() return nodes, nil } @@ -404,18 +435,24 @@ func (s *stateObject) setBalance(amount *big.Int) { } func (s *stateObject) deepCopy(db *StateDB) *stateObject { - stateObject := newObject(db, s.address, s.data) - if s.trie != nil { - stateObject.trie = db.db.CopyTrie(s.trie) + obj := &stateObject{ + db: db, + address: s.address, + addrHash: s.addrHash, + origin: s.origin, + data: s.data, } - stateObject.code = s.code - stateObject.dirtyStorage = s.dirtyStorage.Copy() - stateObject.originStorage = s.originStorage.Copy() - stateObject.pendingStorage = s.pendingStorage.Copy() - stateObject.suicided = s.suicided - stateObject.dirtyCode = s.dirtyCode - stateObject.deleted = s.deleted - return stateObject + if s.trie != nil { + obj.trie = db.db.CopyTrie(s.trie) + } + obj.code = s.code + obj.dirtyStorage = s.dirtyStorage.Copy() + obj.originStorage = s.originStorage.Copy() + obj.pendingStorage = s.pendingStorage.Copy() + obj.selfDestructed = s.selfDestructed + obj.dirtyCode = s.dirtyCode + obj.deleted = s.deleted + return obj } // @@ -428,14 +465,14 @@ func (s *stateObject) Address() common.Address { } // Code returns the contract code associated with this object, if any. 
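Taken together, the storage lifecycle now has three explicit phases: `finalise` moves dirty slots to pending, `updateRoot` (via `updateTrie`) flushes pending slots into the trie and rehashes, and `commit` only collects the dirty trie nodes while refreshing the account's origin. A hedged sketch of the call order a caller is expected to follow (an illustrative wrapper, not part of the patch):

```go
package state

import "github.com/ethereum/go-ethereum/trie/trienode"

// commitObject sketches the per-account order implied above: flush cached
// storage mutations and rehash first, then collect the dirty trie nodes.
// commit deliberately assumes updateRoot already ran; it never re-applies
// pending storage itself.
func commitObject(obj *stateObject) (*trienode.NodeSet, error) {
	obj.updateRoot() // finalises pending storage and recomputes data.Root
	return obj.commit()
}
```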
-func (s *stateObject) Code(db Database) []byte { +func (s *stateObject) Code() []byte { if s.code != nil { return s.code } if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return nil } - code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) + code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) } @@ -446,14 +483,14 @@ func (s *stateObject) Code(db Database) []byte { // CodeSize returns the size of the contract code associated with this object, // or zero if none. This method is an almost mirror of Code, but uses a cache // inside the database to avoid loading codes seen recently. -func (s *stateObject) CodeSize(db Database) int { +func (s *stateObject) CodeSize() int { if s.code != nil { return len(s.code) } if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return 0 } - size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) + size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) } @@ -461,7 +498,7 @@ func (s *stateObject) CodeSize(db Database) int { } func (s *stateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.Code(s.db.db) + prevcode := s.Code() s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -499,3 +536,7 @@ func (s *stateObject) Balance() *big.Int { func (s *stateObject) Nonce() uint64 { return s.data.Nonce } + +func (s *stateObject) Root() common.Hash { + return s.data.Root +} diff --git a/core/state/state_test.go b/core/state/state_test.go index 15e603726..2553133de 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -30,21 +30,22 @@ import ( "github.com/ethereum/go-ethereum/trie" ) -type stateTest struct { +type stateEnv struct { db ethdb.Database state *StateDB } -func newStateTest() *stateTest { +func newStateEnv() *stateEnv { db := rawdb.NewMemoryDatabase() sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil) - return &stateTest{db: db, state: sdb} + return &stateEnv{db: db, state: sdb} } func TestDump(t *testing.T) { db := rawdb.NewMemoryDatabase() - sdb, _ := New(types.EmptyRootHash, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil) - s := &stateTest{db: db, state: sdb} + tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) + sdb, _ := New(types.EmptyRootHash, tdb, nil) + s := &stateEnv{db: db, state: sdb} // generate a few entries obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01})) @@ -57,9 +58,10 @@ func TestDump(t *testing.T) { // write some of them to the trie s.state.updateStateObject(obj1) s.state.updateStateObject(obj2) - s.state.Commit(false) + root, _ := s.state.Commit(0, false) // check that DumpToCollector contains the state objects that are in trie + s.state, _ = New(root, tdb, nil) got := string(s.state.Dump(nil)) want := `{ "root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2", @@ -95,8 +97,9 @@ func TestDump(t *testing.T) { func TestIterativeDump(t *testing.T) { db := rawdb.NewMemoryDatabase() - sdb, _ := New(types.EmptyRootHash, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil) - s := &stateTest{db: db, state: sdb} + tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) + sdb, _ := New(types.EmptyRootHash, tdb, nil) + s := &stateEnv{db: db, state: sdb} // generate a few entries obj1 := 
s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01})) @@ -111,7 +114,8 @@ func TestIterativeDump(t *testing.T) { // write some of them to the trie s.state.updateStateObject(obj1) s.state.updateStateObject(obj2) - s.state.Commit(false) + root, _ := s.state.Commit(0, false) + s.state, _ = New(root, tdb, nil) b := &bytes.Buffer{} s.state.IterativeDump(nil, json.NewEncoder(b)) @@ -129,14 +133,14 @@ func TestIterativeDump(t *testing.T) { } func TestNull(t *testing.T) { - s := newStateTest() + s := newStateEnv() address := common.HexToAddress("0x823140710bf13990e4500136726d8b55") s.state.CreateAccount(address) //value := common.FromHex("0x823140710bf13990e4500136726d8b55") var value common.Hash s.state.SetState(address, common.Hash{}, value) - s.state.Commit(false) + s.state.Commit(0, false) if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) { t.Errorf("expected empty current value, got %x", value) @@ -151,7 +155,7 @@ func TestSnapshot(t *testing.T) { var storageaddr common.Hash data1 := common.BytesToHash([]byte{42}) data2 := common.BytesToHash([]byte{43}) - s := newStateTest() + s := newStateEnv() // snapshot the genesis state genesis := s.state.Snapshot() @@ -182,7 +186,7 @@ func TestSnapshot(t *testing.T) { } func TestSnapshotEmpty(t *testing.T) { - s := newStateTest() + s := newStateEnv() s.state.RevertToSnapshot(s.state.Snapshot()) } @@ -204,11 +208,11 @@ func TestSnapshot2(t *testing.T) { so0.SetBalance(big.NewInt(42)) so0.SetNonce(43) so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'}) - so0.suicided = false + so0.selfDestructed = false so0.deleted = false state.setStateObject(so0) - root, _ := state.Commit(false) + root, _ := state.Commit(0, false) state, _ = New(root, state.db, state.snaps) // and one with deleted == true @@ -216,7 +220,7 @@ func TestSnapshot2(t *testing.T) { so1.SetBalance(big.NewInt(52)) so1.SetNonce(53) so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'}) - so1.suicided = true + so1.selfDestructed = true so1.deleted = true state.setStateObject(so1) @@ -230,8 +234,8 @@ func TestSnapshot2(t *testing.T) { so0Restored := state.getStateObject(stateobjaddr0) // Update lazily-loaded values before comparing. - so0Restored.GetState(state.db, storageaddr) - so0Restored.Code(state.db) + so0Restored.GetState(storageaddr) + so0Restored.Code() // non-deleted is equal (restored) compareStateObjects(so0Restored, so0, t) diff --git a/core/state/statedb.go b/core/state/statedb.go index 6f61f68bb..e280309a7 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,7 +18,6 @@ package state import ( - "errors" "fmt" "math/big" "sort" @@ -32,9 +31,15 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +const ( + // storageDeleteLimit denotes the highest permissible memory allocation + // employed for contract storage deletion. 
+ storageDeleteLimit = 512 * 1024 * 1024 ) type revision struct { @@ -42,42 +47,42 @@ type revision struct { journalIndex int } -type proofList [][]byte - -func (n *proofList) Put(key []byte, value []byte) error { - *n = append(*n, value) - return nil -} - -func (n *proofList) Delete(key []byte) error { - panic("not supported") -} - // StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: +// // * Contracts // * Accounts +// +// Once the state is committed, tries cached in stateDB (including account +// trie, storage tries) will no longer be functional. A new state instance +// must be created with new root and updated database for accessing post- +// commit states. type StateDB struct { db Database prefetcher *triePrefetcher trie Trie hasher crypto.KeccakState + snaps *snapshot.Tree // Nil if snapshot is not available + snap snapshot.Snapshot // Nil if snapshot is not available // originalRoot is the pre-state root, before any changes were made. // It will be updated when the Commit is called. originalRoot common.Hash - snaps *snapshot.Tree - snap snapshot.Snapshot - snapAccounts map[common.Hash][]byte - snapStorage map[common.Hash]map[common.Hash][]byte + // These maps hold the state changes (including the corresponding + // original value) that occurred in this **block**. + accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding + storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format + accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding + storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format - // This map holds 'live' objects, which will get modified while processing a state transition. + // This map holds 'live' objects, which will get modified while processing + // a state transition. stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution - stateObjectsDestruct map[common.Address]struct{} // State objects destructed in the block + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value // DB error. // State objects are used by the consensus core and VM which are @@ -91,11 +96,13 @@ type StateDB struct { // The refund counter, also used by state transitioning. refund uint64 + // The tx context and all occurred logs in the scope of transaction. thash common.Hash txIndex int logs map[common.Hash][]*types.Log logSize uint + // Preimages occurred seen by VM in the scope of block. preimages map[common.Hash][]byte // Per-transaction access list @@ -128,6 +135,9 @@ type StateDB struct { StorageUpdated int AccountDeleted int StorageDeleted int + + // Testing hooks + onCommit func(states *triestate.Set) // Hook invoked when commit is performed } // New creates a new state from a given trie. 
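The struct comment above codifies a rule the updated tests already follow: once `Commit` runs, the cached tries are stale and further access must go through a fresh `StateDB` opened at the new root; `Commit` now also takes the block number, per the earlier test changes. A hedged helper capturing the pattern:

```go
package state

import "github.com/ethereum/go-ethereum/common"

// commitAndReopen flushes the state at the given block number and returns a
// fresh StateDB rooted at the committed root; the old handle must not be
// reused. Mirrors the `root, _ := s.state.Commit(0, false); s.state, _ =
// New(root, tdb, nil)` pattern from the test updates above.
func commitAndReopen(s *StateDB, db Database, block uint64, deleteEmpty bool) (*StateDB, common.Hash, error) {
	root, err := s.Commit(block, deleteEmpty)
	if err != nil {
		return nil, common.Hash{}, err
	}
	fresh, err := New(root, db, nil)
	if err != nil {
		return nil, common.Hash{}, err
	}
	return fresh, root, nil
}
```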
@@ -141,10 +151,14 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) trie: tr, originalRoot: root, snaps: snaps, + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountsOrigin: make(map[common.Address][]byte), + storagesOrigin: make(map[common.Address]map[common.Hash][]byte), stateObjects: make(map[common.Address]*stateObject), stateObjectsPending: make(map[common.Address]struct{}), stateObjectsDirty: make(map[common.Address]struct{}), - stateObjectsDestruct: make(map[common.Address]struct{}), + stateObjectsDestruct: make(map[common.Address]*types.StateAccount), logs: make(map[common.Hash][]*types.Log), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -153,10 +167,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) hasher: crypto.NewKeccakState(), } if sdb.snaps != nil { - if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { - sdb.snapAccounts = make(map[common.Hash][]byte) - sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) - } + sdb.snap = sdb.snaps.Snapshot(root) } // Start PluGeth section if sdb.snap == nil { @@ -265,7 +276,7 @@ func (s *StateDB) SubRefund(gas uint64) { } // Exist reports whether the given account address exists in the state. -// Notably this also returns true for suicided accounts. +// Notably this also returns true for self-destructed accounts. func (s *StateDB) Exist(addr common.Address) bool { return s.getStateObject(addr) != nil } @@ -286,6 +297,7 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { return common.Big0 } +// GetNonce retrieves the nonce from the given address or 0 if object not found func (s *StateDB) GetNonce(addr common.Address) uint64 { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -295,6 +307,16 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { return 0 } +// GetStorageRoot retrieves the storage root from the given address or empty +// if object not found. +func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.Root() + } + return common.Hash{} +} + // TxIndex returns the current transaction index set by Prepare. func (s *StateDB) TxIndex() int { return s.txIndex @@ -303,7 +325,7 @@ func (s *StateDB) TxIndex() int { func (s *StateDB) GetCode(addr common.Address) []byte { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.Code(s.db) + return stateObject.Code() } return nil } @@ -311,7 +333,7 @@ func (s *StateDB) GetCode(addr common.Address) []byte { func (s *StateDB) GetCodeSize(addr common.Address) int { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.CodeSize(s.db) + return stateObject.CodeSize() } return 0 } @@ -328,45 +350,16 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.GetState(s.db, hash) + return stateObject.GetState(hash) } return common.Hash{} } -// GetProof returns the Merkle proof for a given account. -func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { - return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) -} - -// GetProofByHash returns the Merkle proof for a given account. 
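`GetStorageRoot` is a new accessor with two distinct sentinel results: the zero hash for a missing account versus `types.EmptyRootHash` for an existing account with empty storage (the latter because `newObject` now defaults to `types.NewEmptyStateAccount`). A hedged sketch of a caller distinguishing them (the helper is hypothetical):

```go
package state

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// hasStorage reports whether an account exists and carries a non-empty
// storage trie; both sentinel roots must be filtered out.
func hasStorage(s *StateDB, addr common.Address) bool {
	root := s.GetStorageRoot(addr)
	return root != (common.Hash{}) && root != types.EmptyRootHash
}
```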
-func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { - var proof proofList - err := s.trie.Prove(addrHash[:], 0, &proof) - return proof, err -} - -// GetStorageProof returns the Merkle proof for given storage slot. -func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { - trie, err := s.StorageTrie(a) - if err != nil { - return nil, err - } - if trie == nil { - return nil, errors.New("storage trie for requested address does not exist") - } - var proof proofList - err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - if err != nil { - return nil, err - } - return proof, nil -} - // GetCommittedState retrieves a value from the given account's committed storage trie. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.GetCommittedState(s.db, hash) + return stateObject.GetCommittedState(hash) } return common.Hash{} } @@ -376,25 +369,10 @@ func (s *StateDB) Database() Database { return s.db } -// StorageTrie returns the storage trie of an account. The return value is a copy -// and is nil for non-existent accounts. An error will be returned if storage trie -// is existent but can't be loaded correctly. -func (s *StateDB) StorageTrie(addr common.Address) (Trie, error) { - stateObject := s.getStateObject(addr) - if stateObject == nil { - return nil, nil - } - cpy := stateObject.deepCopy(s) - if _, err := cpy.updateTrie(s.db); err != nil { - return nil, err - } - return cpy.getTrie(s.db) -} - -func (s *StateDB) HasSuicided(addr common.Address) bool { +func (s *StateDB) HasSelfDestructed(addr common.Address) bool { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.suicided + return stateObject.selfDestructed } return false } @@ -443,44 +421,59 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) { func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { - stateObject.SetState(s.db, key, value) + stateObject.SetState(key, value) } } // SetStorage replaces the entire storage for the specified account with given -// storage. This function should only be used for debugging. +// storage. This function should only be used for debugging and the mutations +// must be discarded afterwards. func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { // SetStorage needs to wipe existing storage. We achieve this by pretending // that the account self-destructed earlier in this block, by flagging // it in stateObjectsDestruct. The effect of doing so is that storage lookups // will not hit disk, since it is assumed that the disk-data is belonging // to a previous incarnation of the object. - s.stateObjectsDestruct[addr] = struct{}{} + // + // TODO(rjl493456442) this function should only be supported by 'unwritable' + // state and all mutations made should all be discarded afterwards. + if _, ok := s.stateObjectsDestruct[addr]; !ok { + s.stateObjectsDestruct[addr] = nil + } stateObject := s.GetOrNewStateObject(addr) for k, v := range storage { - stateObject.SetState(s.db, k, v) + stateObject.SetState(k, v) } } -// Suicide marks the given account as suicided. +// SelfDestruct marks the given account as selfdestructed. // This clears the account balance. 
// // The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after Suicide. -func (s *StateDB) Suicide(addr common.Address) bool { +// getStateObject will return a non-nil account after SelfDestruct. +func (s *StateDB) SelfDestruct(addr common.Address) { stateObject := s.getStateObject(addr) if stateObject == nil { - return false + return } - s.journal.append(suicideChange{ + s.journal.append(selfDestructChange{ account: &addr, - prev: stateObject.suicided, + prev: stateObject.selfDestructed, prevbalance: new(big.Int).Set(stateObject.Balance()), }) - stateObject.markSuicided() + stateObject.markSelfdestructed() stateObject.data.Balance = new(big.Int) +} - return true +func (s *StateDB) Selfdestruct6780(addr common.Address) { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return + } + + if stateObject.created { + s.SelfDestruct(addr) + } } // SetTransientState sets transient storage for a given account. It @@ -525,13 +518,24 @@ func (s *StateDB) updateStateObject(obj *stateObject) { if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } + if obj.dirtyCode { + s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) + } + // Cache the data until commit. Note, this update mechanism is not symmetric + // to the deletion, because whereas it is enough to track account updates + // at commit time, deletions need tracking at transaction boundary level to + // ensure we capture state clearing. + s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - // If state snapshotting is active, cache the data til commit. Note, this - // update mechanism is not symmetric to the deletion, because whereas it is - // enough to track account updates at commit time, deletions need tracking - // at transaction boundary level to ensure we capture state clearing. - if s.snap != nil { - s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash) + // Track the original value of mutated account, nil means it was not present. + // Skip if it has been tracked (because updateStateObject may be called + // multiple times in a block). + if _, ok := s.accountsOrigin[obj.address]; !ok { + if obj.origin == nil { + s.accountsOrigin[obj.address] = nil + } else { + s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin) + } } } @@ -610,7 +614,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { } } // Insert into the live set - obj := newObject(s, addr, *data) + obj := newObject(s, addr, data) s.setStateObject(obj) return obj } @@ -632,20 +636,40 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject { // the given address, it is overwritten and returned as the second return value. func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! 
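The `accountsOrigin` bookkeeping in `updateStateObject` above follows a first-write-wins rule: the pre-block encoding of an account is recorded only on its first mutation, with `nil` marking an account that did not previously exist, which is the invariant later consumers of the original values rely on. A condensed sketch of the rule (the helper is hypothetical):

```go
package state

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// trackAccountOrigin records the pre-block ("origin") encoding of an account
// the first time it is mutated in the block; later mutations leave the entry
// untouched so it keeps pointing at the true pre-state. nil marks an account
// that did not exist before the block.
func trackAccountOrigin(origins map[common.Address][]byte, addr common.Address, origin *types.StateAccount) {
	if _, ok := origins[addr]; ok {
		return // already tracked earlier in the block
	}
	if origin == nil {
		origins[addr] = nil
		return
	}
	origins[addr] = types.SlimAccountRLP(*origin)
}
```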
- - var prevdestruct bool - if prev != nil { - _, prevdestruct = s.stateObjectsDestruct[prev.address] - if !prevdestruct { - s.stateObjectsDestruct[prev.address] = struct{}{} - } - } - newobj = newObject(s, addr, types.StateAccount{}) + newobj = newObject(s, addr, nil) if prev == nil { s.journal.append(createObjectChange{account: &addr}) } else { - s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) + // The original account should be marked as destructed and all cached + // account and storage data should be cleared as well. Note, it must + // be done here, otherwise the destruction event of "original account" + // will be lost. + _, prevdestruct := s.stateObjectsDestruct[prev.address] + if !prevdestruct { + s.stateObjectsDestruct[prev.address] = prev.origin + } + // There may be some cached account/storage data already since IntermediateRoot + // will be called for each transaction before byzantium fork which will always + // cache the latest account/storage data. + prevAccount, ok := s.accountsOrigin[prev.address] + s.journal.append(resetObjectChange{ + account: &addr, + prev: prev, + prevdestruct: prevdestruct, + prevAccount: s.accounts[prev.addrHash], + prevStorage: s.storages[prev.addrHash], + prevAccountOriginExist: ok, + prevAccountOrigin: prevAccount, + prevStorageOrigin: s.storagesOrigin[prev.address], + }) + delete(s.accounts, prev.addrHash) + delete(s.storages, prev.addrHash) + delete(s.accountsOrigin, prev.address) + delete(s.storagesOrigin, prev.address) } + + newobj.created = true + s.setStateObject(newobj) if prev != nil && !prev.deleted { return newobj, prev @@ -670,39 +694,6 @@ func (s *StateDB) CreateAccount(addr common.Address) { } } -func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { - so := db.getStateObject(addr) - if so == nil { - return nil - } - tr, err := so.getTrie(db.db) - if err != nil { - return err - } - it := trie.NewIterator(tr.NodeIterator(nil)) - - for it.Next() { - key := common.BytesToHash(db.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage[key]; dirty { - if !cb(key, value) { - return nil - } - continue - } - - if len(it.Value) > 0 { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return err - } - if !cb(key, common.BytesToHash(content)) { - return nil - } - } - } - return nil -} - // Copy creates a deep, independent copy of the state. // Snapshots of the copied state cannot be applied to the copy. 
func (s *StateDB) Copy() *StateDB {
@@ -711,16 +702,27 @@ func (s *StateDB) Copy() *StateDB {
 		db:                   s.db,
 		trie:                 s.db.CopyTrie(s.trie),
 		originalRoot:         s.originalRoot,
+		accounts:             make(map[common.Hash][]byte),
+		storages:             make(map[common.Hash]map[common.Hash][]byte),
+		accountsOrigin:       make(map[common.Address][]byte),
+		storagesOrigin:       make(map[common.Address]map[common.Hash][]byte),
 		stateObjects:         make(map[common.Address]*stateObject, len(s.journal.dirties)),
 		stateObjectsPending:  make(map[common.Address]struct{}, len(s.stateObjectsPending)),
 		stateObjectsDirty:    make(map[common.Address]struct{}, len(s.journal.dirties)),
-		stateObjectsDestruct: make(map[common.Address]struct{}, len(s.stateObjectsDestruct)),
+		stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)),
 		refund:               s.refund,
 		logs:                 make(map[common.Hash][]*types.Log, len(s.logs)),
 		logSize:              s.logSize,
 		preimages:            make(map[common.Hash][]byte, len(s.preimages)),
 		journal:              newJournal(),
 		hasher:               crypto.NewKeccakState(),
+
+		// In order for the block producer to be able to use and make additions
+		// to the snapshot tree, we need to copy that as well. Otherwise, any
+		// block mined by ourselves will cause gaps in the tree, and force the
+		// miner to operate trie-backed only.
+		snaps: s.snaps,
+		snap:  s.snap,
 	}
 	// Copy the dirty states, logs, and preimages
 	for addr := range s.journal.dirties {
@@ -754,10 +756,18 @@ func (s *StateDB) Copy() *StateDB {
 		}
 		state.stateObjectsDirty[addr] = struct{}{}
 	}
-	// Deep copy the destruction flag.
-	for addr := range s.stateObjectsDestruct {
-		state.stateObjectsDestruct[addr] = struct{}{}
+	// Deep copy the destruction markers.
+	for addr, value := range s.stateObjectsDestruct {
+		state.stateObjectsDestruct[addr] = value
 	}
+	// Deep copy the state changes made in the scope of the block
+	// along with their original values.
+	state.accounts = copySet(s.accounts)
+	state.storages = copy2DSet(s.storages)
+	state.accountsOrigin = copySet(s.accountsOrigin)
+	state.storagesOrigin = copy2DSet(s.storagesOrigin)
+
+	// Deep copy the logs that occurred in the scope of the block
 	for hash, logs := range s.logs {
 		cpy := make([]*types.Log, len(logs))
 		for i, l := range logs {
@@ -766,6 +776,7 @@ func (s *StateDB) Copy() *StateDB {
 		}
 		state.logs[hash] = cpy
 	}
+	// Deep copy the preimages that occurred in the scope of the block
 	for hash, preimage := range s.preimages {
 		state.preimages[hash] = preimage
 	}
@@ -784,28 +795,6 @@ func (s *StateDB) Copy() *StateDB {
 	if s.prefetcher != nil {
 		state.prefetcher = s.prefetcher.copy()
 	}
-	if s.snaps != nil {
-		// In order for the miner to be able to use and make additions
-		// to the snapshot tree, we need to copy that as well.
- // Otherwise, any block mined by ourselves will cause gaps in the tree, - // and force the miner to operate trie-backed only - state.snaps = s.snaps - state.snap = s.snap - - // deep copy needed - state.snapAccounts = make(map[common.Hash][]byte, len(s.snapAccounts)) - for k, v := range s.snapAccounts { - state.snapAccounts[k] = v - } - state.snapStorage = make(map[common.Hash]map[common.Hash][]byte, len(s.snapStorage)) - for k, v := range s.snapStorage { - temp := make(map[common.Hash][]byte, len(v)) - for kk, vv := range v { - temp[kk] = vv - } - state.snapStorage[k] = temp - } - } return state } @@ -854,24 +843,26 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // Thus, we can safely ignore it here continue } - if obj.suicided || (deleteEmptyObjects && obj.empty()) { + if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { obj.deleted = true // We need to maintain account deletions explicitly (will remain - // set indefinitely). - s.stateObjectsDestruct[obj.address] = struct{}{} - - // If state snapshotting is active, also mark the destruction there. + // set indefinitely). Note only the first occurred self-destruct + // event is tracked. + if _, ok := s.stateObjectsDestruct[obj.address]; !ok { + s.stateObjectsDestruct[obj.address] = obj.origin + } // Note, we can't do this only at the end of a block because multiple // transactions within the same block might self destruct and then // resurrect an account; but the snapshotter needs both events. - if s.snap != nil { - delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - } + delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) + delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) } else { obj.finalise(true) // Prefetch slots in the background } + obj.created = false s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -915,7 +906,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // to pull useful data from disk. for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot(s.db) + obj.updateRoot() } } // Now we're about to start to write changes to the trie. The trie is so far @@ -966,8 +957,223 @@ func (s *StateDB) clearJournalAndRefund() { s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries } +// fastDeleteStorage is the function that efficiently deletes the storage trie +// of a specific account. It leverages the associated state snapshot for fast +// storage iteration and constructs trie node deletion markers by creating +// stack trie with iterated slots. 
+func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
+	iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
+	if err != nil {
+		return false, 0, nil, nil, err
+	}
+	defer iter.Release()
+
+	var (
+		size  common.StorageSize
+		nodes = trienode.NewNodeSet(addrHash)
+		slots = make(map[common.Hash][]byte)
+	)
+	stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+		nodes.AddNode(path, trienode.NewDeleted())
+		size += common.StorageSize(len(path))
+	})
+	for iter.Next() {
+		if size > storageDeleteLimit {
+			return true, size, nil, nil, nil
+		}
+		slot := common.CopyBytes(iter.Slot())
+		if err := iter.Error(); err != nil { // error might occur after Slot function
+			return false, 0, nil, nil, err
+		}
+		size += common.StorageSize(common.HashLength + len(slot))
+		slots[iter.Hash()] = slot
+
+		if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
+			return false, 0, nil, nil, err
+		}
+	}
+	if err := iter.Error(); err != nil { // error might occur during iteration
+		return false, 0, nil, nil, err
+	}
+	if stack.Hash() != root {
+		return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
+	}
+	return false, size, slots, nodes, nil
+}
+
+// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
+// employed when the associated state snapshot is not available. It iterates the
+// storage slots along with all internal trie nodes via trie directly.
+func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
+	tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root)
+	if err != nil {
+		return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+	}
+	it, err := tr.NodeIterator(nil)
+	if err != nil {
+		return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+	}
+	var (
+		size  common.StorageSize
+		nodes = trienode.NewNodeSet(addrHash)
+		slots = make(map[common.Hash][]byte)
+	)
+	for it.Next(true) {
+		if size > storageDeleteLimit {
+			return true, size, nil, nil, nil
+		}
+		if it.Leaf() {
+			slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
+			size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
+			continue
+		}
+		if it.Hash() == (common.Hash{}) {
+			continue
+		}
+		size += common.StorageSize(len(it.Path()))
+		nodes.AddNode(it.Path(), trienode.NewDeleted())
+	}
+	if err := it.Error(); err != nil {
+		return false, 0, nil, nil, err
+	}
+	return false, size, slots, nodes, nil
+}
+
+// deleteStorage deletes the storage trie of a designated account. The deletion
+// may be aborted if the storage size is excessively large, since deleting it in
+// one go could lead to an out-of-memory panic. The function attempts the
+// efficient strategy if the associated state snapshot is reachable; otherwise,
+// it resorts to the less-efficient approach.
+func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
+	var (
+		start   = time.Now()
+		err     error
+		aborted bool
+		size    common.StorageSize
+		slots   map[common.Hash][]byte
+		nodes   *trienode.NodeSet
+	)
+	// The fast approach can fail if the snapshot is not fully
+	// generated, or is internally corrupted.
Fall back to the slow
+	// one just in case.
+	if s.snap != nil {
+		aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
+	}
+	if s.snap == nil || err != nil {
+		aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
+	}
+	if err != nil {
+		return false, nil, nil, err
+	}
+	if metrics.EnabledExpensive {
+		if aborted {
+			slotDeletionSkip.Inc(1)
+		}
+		n := int64(len(slots))
+		if n > slotDeletionMaxCount.Value() {
+			slotDeletionMaxCount.Update(n)
+		}
+		if int64(size) > slotDeletionMaxSize.Value() {
+			slotDeletionMaxSize.Update(int64(size))
+		}
+		slotDeletionTimer.UpdateSince(start)
+		slotDeletionCount.Mark(n)
+		slotDeletionSize.Mark(int64(size))
+	}
+	return aborted, slots, nodes, nil
+}
+
+// handleDestruction processes all destruction markers and deletes the account
+// and associated storage slots if necessary. There are four possible situations
+// here:
+//
+//   - the account was non-existent and is marked as destructed
+//
+//   - the account was non-existent and is marked as destructed,
+//     however, it's resurrected later in the same block.
+//
+//   - the account was existent and is marked as destructed
+//
+//   - the account was existent and is marked as destructed,
+//     however it's resurrected later in the same block.
+//
+// In case (a), nothing needs to be deleted; the nil-to-nil transition can be
+// ignored.
+//
+// In case (b), nothing needs to be deleted; nil is used as the original value
+// for the newly created account and its storage.
+//
+// In case (c), the **original** account along with its storages should be
+// deleted, with their values tracked as the original values.
+//
+// In case (d), the **original** account along with its storages should be
+// deleted, with their values tracked as the original values.
+func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
+	// Short circuit if geth is running in hash mode. This procedure can consume
+	// considerable time, and storage deletion isn't supported in hash mode, so
+	// preemptively avoid the unnecessary expense.
+	incomplete := make(map[common.Address]struct{})
+	if s.db.TrieDB().Scheme() == rawdb.HashScheme {
+		return incomplete, nil
+	}
+	for addr, prev := range s.stateObjectsDestruct {
+		// The original account was non-existent, and it's marked as destructed
+		// in the scope of the block. It can be case (a) or (b).
+		// - for (a), skip it without doing anything.
+		// - for (b), track the account's original value as nil. It may overwrite
+		//   the data cached in s.accountsOrigin set by 'updateStateObject'.
+		addrHash := crypto.Keccak256Hash(addr[:])
+		if prev == nil {
+			if _, ok := s.accounts[addrHash]; ok {
+				s.accountsOrigin[addr] = nil // case (b)
+			}
+			continue
+		}
+		// It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
+		s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d)
+
+		// Short circuit if the storage was empty.
+		if prev.Root == types.EmptyRootHash {
+			continue
+		}
+		// Remove the storage slots belonging to the account.
+		aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
+		if err != nil {
+			return nil, fmt.Errorf("failed to delete storage, err: %w", err)
+		}
+		// The storage is too huge to handle, skip it but mark as incomplete.
+		// For case (d), the account might be resurrected with a few slots
+		// created. In this case, wipe the entire storage state diff because
+		// of the aborted deletion.
+ if aborted { + incomplete[addr] = struct{}{} + delete(s.storagesOrigin, addr) + continue + } + if s.storagesOrigin[addr] == nil { + s.storagesOrigin[addr] = slots + } else { + // It can overwrite the data in s.storagesOrigin[addrHash] set by + // 'object.updateTrie'. + for key, val := range slots { + s.storagesOrigin[addr][key] = val + } + } + if err := nodes.Merge(set); err != nil { + return nil, err + } + } + return incomplete, nil +} + // Commit writes the state to the underlying in-memory trie database. -func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { +// Once the state is committed, tries cached in stateDB (including account +// trie, storage tries) will no longer be functional. A new state instance +// must be created with new root and updated database for accessing post- +// commit states. +// +// The associated block number of the state transition is also provided +// for more chain context. +func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { // Short circuit in case any database failure occurred earlier. if s.dbErr != nil { return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) @@ -984,6 +1190,13 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { nodes = trienode.NewMergedNodeSet() codeWriter = s.db.DiskDB().NewBatch() ) + // Handle all state deletions first + incomplete, err := s.handleDestruction(nodes) + if err != nil { + return common.Hash{}, err + } + // Handle all state updates afterwards + // begin PluGeth injection codeUpdates := make(map[common.Hash][]byte) // end PluGeth injection @@ -1002,25 +1215,10 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if err != nil { return common.Hash{}, err } - // Merge the dirty nodes of storage trie into global set. - if set != nil { - if err := nodes.Merge(set); err != nil { - return common.Hash{}, err - } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted - } + updates, deleted := set.Size() + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deleted } - // If the contract is destructed, the storage is still left in the - // database as dangling data. Theoretically it's should be wiped from - // database as well, but in hash-based-scheme it's extremely hard to - // determine that if the trie nodes are also referenced by other storage, - // and in path-based-scheme some technical challenges are still unsolved. - // Although it won't affect the correctness but please fix it TODO(rjl493456442). 
- } - if len(s.stateObjectsDirty) > 0 { - s.stateObjectsDirty = make(map[common.Address]struct{}) } if codeWriter.ValueSize() > 0 { if err := codeWriter.Write(); err != nil { @@ -1032,7 +1230,10 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if metrics.EnabledExpensive { start = time.Now() } - root, set := s.trie.Commit(true) + root, set, err := s.trie.Commit(true) + if err != nil { + return common.Hash{}, err + } // Merge the dirty nodes of account trie into global set if set != nil { if err := nodes.Merge(set); err != nil { @@ -1078,10 +1279,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if metrics.EnabledExpensive { s.SnapshotCommits += time.Since(start) } - s.snap, s.snapAccounts, s.snapStorage = nil, nil, nil - } - if len(s.stateObjectsDestruct) > 0 { - s.stateObjectsDestruct = make(map[common.Address]struct{}) + s.snap = nil } if root == (common.Hash{}) { root = types.EmptyRootHash @@ -1092,14 +1290,25 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { } if root != origin { start := time.Now() - if err := s.db.TrieDB().Update(root, origin, nodes); err != nil { + set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete) + if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { return common.Hash{}, err } s.originalRoot = root if metrics.EnabledExpensive { s.TrieDBCommits += time.Since(start) } + if s.onCommit != nil { + s.onCommit(set) + } } + // Clear all internal flags at the end of commit operation. + s.accounts = make(map[common.Hash][]byte) + s.storages = make(map[common.Hash]map[common.Hash][]byte) + s.accountsOrigin = make(map[common.Address][]byte) + s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) + s.stateObjectsDirty = make(map[common.Address]struct{}) + s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) return root, nil } @@ -1180,7 +1389,7 @@ func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addre } // convertAccountSet converts a provided account set from address keyed to hash keyed. -func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.Hash]struct{} { +func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { ret := make(map[common.Hash]struct{}, len(set)) for addr := range set { obj, exist := s.stateObjects[addr] @@ -1192,3 +1401,24 @@ func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common. } return ret } + +// copySet returns a deep-copied set. +func copySet[k comparable](set map[k][]byte) map[k][]byte { + copied := make(map[k][]byte, len(set)) + for key, val := range set { + copied[key] = common.CopyBytes(val) + } + return copied +} + +// copy2DSet returns a two-dimensional deep-copied set. +func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte { + copied := make(map[k]map[common.Hash][]byte, len(set)) + for addr, subset := range set { + copied[addr] = make(map[common.Hash][]byte, len(subset)) + for key, val := range subset { + copied[addr][key] = common.CopyBytes(val) + } + } + return copied +} diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go new file mode 100644 index 000000000..c4704257c --- /dev/null +++ b/core/state/statedb_fuzz_test.go @@ -0,0 +1,392 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"math/big"
+	"math/rand"
+	"reflect"
+	"strings"
+	"testing"
+	"testing/quick"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state/snapshot"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+	"github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+// A stateTest checks that the state changes are correctly captured. Instances
+// of this test with pseudorandom content are created by Generate.
+//
+// The test works as follows:
+//
+// A list of states is created by applying actions. The state changes between
+// each state instance are tracked and verified.
+type stateTest struct {
+	addrs   []common.Address // all account addresses
+	actions [][]testAction   // modifications to the state, grouped by block
+	chunk   int              // The number of actions per chunk
+	err     error            // failure details are reported through this field
+}
+
+// newStateTestAction creates a random action that changes state.
+func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction { + actions := []testAction{ + { + name: "SetBalance", + fn: func(a testAction, s *StateDB) { + s.SetBalance(addr, big.NewInt(a.args[0])) + }, + args: make([]int64, 1), + }, + { + name: "SetNonce", + fn: func(a testAction, s *StateDB) { + s.SetNonce(addr, uint64(a.args[0])) + }, + args: make([]int64, 1), + }, + { + name: "SetState", + fn: func(a testAction, s *StateDB) { + var key, val common.Hash + binary.BigEndian.PutUint16(key[:], uint16(a.args[0])) + binary.BigEndian.PutUint16(val[:], uint16(a.args[1])) + s.SetState(addr, key, val) + }, + args: make([]int64, 2), + }, + { + name: "SetCode", + fn: func(a testAction, s *StateDB) { + code := make([]byte, 16) + binary.BigEndian.PutUint64(code, uint64(a.args[0])) + binary.BigEndian.PutUint64(code[8:], uint64(a.args[1])) + s.SetCode(addr, code) + }, + args: make([]int64, 2), + }, + { + name: "CreateAccount", + fn: func(a testAction, s *StateDB) { + s.CreateAccount(addr) + }, + }, + { + name: "Selfdestruct", + fn: func(a testAction, s *StateDB) { + s.SelfDestruct(addr) + }, + }, + } + var nonRandom = index != -1 + if index == -1 { + index = r.Intn(len(actions)) + } + action := actions[index] + var names []string + if !action.noAddr { + names = append(names, addr.Hex()) + } + for i := range action.args { + if nonRandom { + action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero + } else { + action.args[i] = rand.Int63n(10000) + } + names = append(names, fmt.Sprint(action.args[i])) + } + action.name += " " + strings.Join(names, ", ") + return action +} + +// Generate returns a new snapshot test of the given size. All randomness is +// derived from r. +func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value { + addrs := make([]common.Address, 5) + for i := range addrs { + addrs[i][0] = byte(i) + } + actions := make([][]testAction, rand.Intn(5)+1) + + for i := 0; i < len(actions); i++ { + actions[i] = make([]testAction, size) + for j := range actions[i] { + if j == 0 { + // Always include a set balance action to make sure + // the state changes are not empty. 
+ actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0) + continue + } + actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1) + } + } + chunk := int(math.Sqrt(float64(size))) + if size > 0 && chunk == 0 { + chunk = 1 + } + return reflect.ValueOf(&stateTest{ + addrs: addrs, + actions: actions, + chunk: chunk, + }) +} + +func (test *stateTest) String() string { + out := new(bytes.Buffer) + for i, actions := range test.actions { + fmt.Fprintf(out, "---- block %d ----\n", i) + for j, action := range actions { + if j%test.chunk == 0 { + fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk) + } + fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name) + } + } + return out.String() +} + +func (test *stateTest) run() bool { + var ( + roots []common.Hash + accountList []map[common.Address][]byte + storageList []map[common.Address]map[common.Hash][]byte + onCommit = func(states *triestate.Set) { + accountList = append(accountList, copySet(states.Accounts)) + storageList = append(storageList, copy2DSet(states.Storages)) + } + disk = rawdb.NewMemoryDatabase() + tdb = trie.NewDatabase(disk, &trie.Config{PathDB: pathdb.Defaults}) + sdb = NewDatabaseWithNodeDB(disk, tdb) + byzantium = rand.Intn(2) == 0 + ) + defer disk.Close() + defer tdb.Close() + + var snaps *snapshot.Tree + if rand.Intn(3) == 0 { + snaps, _ = snapshot.New(snapshot.Config{ + CacheSize: 1, + Recovery: false, + NoBuild: false, + AsyncBuild: false, + }, disk, tdb, types.EmptyRootHash) + } + for i, actions := range test.actions { + root := types.EmptyRootHash + if i != 0 { + root = roots[len(roots)-1] + } + state, err := New(root, sdb, snaps) + if err != nil { + panic(err) + } + state.onCommit = onCommit + + for i, action := range actions { + if i%test.chunk == 0 && i != 0 { + if byzantium { + state.Finalise(true) // call finalise at the transaction boundary + } else { + state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary + } + } + action.fn(action, state) + } + if byzantium { + state.Finalise(true) // call finalise at the transaction boundary + } else { + state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary + } + nroot, err := state.Commit(0, true) // call commit at the block boundary + if err != nil { + panic(err) + } + if nroot == root { + return true // filter out non-change state transition + } + roots = append(roots, nroot) + } + for i := 0; i < len(test.actions); i++ { + root := types.EmptyRootHash + if i != 0 { + root = roots[i-1] + } + test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i]) + if test.err != nil { + return false + } + } + return true +} + +// verifyAccountCreation this function is called once the state diff says that +// specific account was not present. 
A series of checks is then performed to
+// ensure the state diff is correct, including:
+//
+//   - the account was indeed not present in the trie
+//   - the account is present in the new trie; a nil->nil transition is regarded as invalid
+//   - the slots transition is correct
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
+	// Verify account change
+	addrHash := crypto.Keccak256Hash(addr.Bytes())
+	oBlob, err := otr.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	nBlob, err := ntr.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	if len(oBlob) != 0 {
+		return fmt.Errorf("unexpected account in old trie, %x", addrHash)
+	}
+	if len(nBlob) == 0 {
+		return fmt.Errorf("missing account in new trie, %x", addrHash)
+	}
+
+	// Verify storage changes
+	var nAcct types.StateAccount
+	if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+		return err
+	}
+	// Account has no slots; an empty slot set is expected
+	if nAcct.Root == types.EmptyRootHash {
+		if len(slots) != 0 {
+			return fmt.Errorf("unexpected slot changes %x", addrHash)
+		}
+		return nil
+	}
+	// Account has slots; ensure all new slots are contained
+	st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db)
+	if err != nil {
+		return err
+	}
+	for key, val := range slots {
+		st.Update(key.Bytes(), val)
+	}
+	if st.Hash() != types.EmptyRootHash {
+		return errors.New("invalid slot changes")
+	}
+	return nil
+}
+
+// verifyAccountUpdate is called once the state diff says that a specific
+// account was present. A series of checks is performed to ensure the state
+// diff is correct, including:
+//
+//   - the account was indeed present in the trie
+//   - the account in the old trie matches the provided value
+//   - the slots transition is correct
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
+	// Verify account change
+	addrHash := crypto.Keccak256Hash(addr.Bytes())
+	oBlob, err := otr.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	nBlob, err := ntr.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	if len(oBlob) == 0 {
+		return fmt.Errorf("missing account in old trie, %x", addrHash)
+	}
+	full, err := types.FullAccountRLP(origin)
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(full, oBlob) {
+		return fmt.Errorf("account value is not matched, %x", addrHash)
+	}
+
+	// Decode accounts
+	var (
+		oAcct types.StateAccount
+		nAcct types.StateAccount
+		nRoot common.Hash
+	)
+	if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil {
+		return err
+	}
+	if len(nBlob) == 0 {
+		nRoot = types.EmptyRootHash
+	} else {
+		if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+			return err
+		}
+		nRoot = nAcct.Root
+	}
+
+	// Verify storage
+	st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db)
+	if err != nil {
+		return err
+	}
+	for key, val := range slots {
+		st.Update(key.Bytes(), val)
+	}
+	if st.Hash() != oAcct.Root {
+		return errors.New("invalid slot changes")
+	}
+	return nil
+}
+
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
+	otr, err := trie.New(trie.StateTrieID(root), db)
+	if err != nil {
+		return err
+	}
+	ntr, err := trie.New(trie.StateTrieID(next), db)
+	if err != nil {
+		return err
+	}
+	for addr, account :=
range accountsOrigin { + var err error + if len(account) == 0 { + err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr]) + } else { + err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr]) + } + if err != nil { + return err + } + } + return nil +} + +func TestStateChanges(t *testing.T) { + config := &quick.Config{MaxCount: 1000} + err := quick.Check((*stateTest).run, config) + if cerr, ok := err.(*quick.CheckError); ok { + test := cerr.In[0].(*stateTest) + t.Errorf("%v:\n%s", test.err, test) + } else if err != nil { + t.Error(err) + } +} diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index e73b1b8c6..ad829a0c8 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -19,6 +19,7 @@ package state import ( "bytes" "encoding/binary" + "errors" "fmt" "math" "math/big" @@ -31,15 +32,26 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/holiman/uint256" ) // Tests that updating a state trie does not leak any database writes prior to // actually committing the state. func TestUpdateLeaks(t *testing.T) { // Create an empty state database - db := rawdb.NewMemoryDatabase() - state, _ := New(types.EmptyRootHash, NewDatabase(db), nil) + var ( + db = rawdb.NewMemoryDatabase() + tdb = trie.NewDatabase(db, nil) + ) + state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil) // Update it with some accounts for i := byte(0); i < 255; i++ { @@ -55,7 +67,7 @@ func TestUpdateLeaks(t *testing.T) { } root := state.IntermediateRoot(false) - if err := state.Database().TrieDB().Commit(root, false); err != nil { + if err := tdb.Commit(root, false); err != nil { t.Errorf("can not commit trie %v to persistent database", root.Hex()) } @@ -73,8 +85,10 @@ func TestIntermediateLeaks(t *testing.T) { // Create two state databases, one transitioning to the final state, the other final from the beginning transDb := rawdb.NewMemoryDatabase() finalDb := rawdb.NewMemoryDatabase() - transState, _ := New(types.EmptyRootHash, NewDatabase(transDb), nil) - finalState, _ := New(types.EmptyRootHash, NewDatabase(finalDb), nil) + transNdb := trie.NewDatabase(transDb, nil) + finalNdb := trie.NewDatabase(finalDb, nil) + transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil) + finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil) modify := func(state *StateDB, addr common.Address, i, tweak byte) { state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak))) @@ -102,19 +116,19 @@ func TestIntermediateLeaks(t *testing.T) { } // Commit and cross check the databases. 
- transRoot, err := transState.Commit(false) + transRoot, err := transState.Commit(0, false) if err != nil { t.Fatalf("failed to commit transition state: %v", err) } - if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil { + if err = transNdb.Commit(transRoot, false); err != nil { t.Errorf("can not commit trie %v to persistent database", transRoot.Hex()) } - finalRoot, err := finalState.Commit(false) + finalRoot, err := finalState.Commit(0, false) if err != nil { t.Fatalf("failed to commit final state: %v", err) } - if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil { + if err = finalNdb.Commit(finalRoot, false); err != nil { t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex()) } @@ -297,9 +311,9 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { }, }, { - name: "Suicide", + name: "SelfDestruct", fn: func(a testAction, s *StateDB) { - s.Suicide(addr) + s.SelfDestruct(addr) }, }, { @@ -436,6 +450,43 @@ func (test *snapshotTest) run() bool { return true } +func forEachStorage(s *StateDB, addr common.Address, cb func(key, value common.Hash) bool) error { + so := s.getStateObject(addr) + if so == nil { + return nil + } + tr, err := so.getTrie() + if err != nil { + return err + } + trieIt, err := tr.NodeIterator(nil) + if err != nil { + return err + } + it := trie.NewIterator(trieIt) + + for it.Next() { + key := common.BytesToHash(s.trie.GetKey(it.Key)) + if value, dirty := so.dirtyStorage[key]; dirty { + if !cb(key, value) { + return nil + } + continue + } + + if len(it.Value) > 0 { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return err + } + if !cb(key, common.BytesToHash(content)) { + return nil + } + } + } + return nil +} + // checkEqual checks that methods of state and checkstate return the same values. func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { for _, addr := range test.addrs { @@ -449,7 +500,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { } // Check basic accessor methods. checkeq("Exist", state.Exist(addr), checkstate.Exist(addr)) - checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr)) + checkeq("HasSelfdestructed", state.HasSelfDestructed(addr), checkstate.HasSelfDestructed(addr)) checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr)) checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr)) checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr)) @@ -457,10 +508,10 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { checkeq("GetCodeSize", state.GetCodeSize(addr), checkstate.GetCodeSize(addr)) // Check storage. 
if obj := state.getStateObject(addr); obj != nil { - state.ForEachStorage(addr, func(key, value common.Hash) bool { + forEachStorage(state, addr, func(key, value common.Hash) bool { return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) }) - checkstate.ForEachStorage(addr, func(key, value common.Hash) bool { + forEachStorage(checkstate, addr, func(key, value common.Hash) bool { return checkeq("GetState("+key.Hex()+")", checkstate.GetState(addr, key), value) }) } @@ -481,9 +532,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { } func TestTouchDelete(t *testing.T) { - s := newStateTest() + s := newStateEnv() s.state.GetOrNewStateObject(common.Address{}) - root, _ := s.state.Commit(false) + root, _ := s.state.Commit(0, false) s.state, _ = New(root, s.state.db, s.state.snaps) snapshot := s.state.Snapshot() @@ -518,7 +569,8 @@ func TestCopyOfCopy(t *testing.T) { // // See https://github.com/ethereum/go-ethereum/issues/20106. func TestCopyCommitCopy(t *testing.T) { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + tdb := NewDatabase(rawdb.NewMemoryDatabase()) + state, _ := New(types.EmptyRootHash, tdb, nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -555,20 +607,6 @@ func TestCopyCommitCopy(t *testing.T) { if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } - - copyOne.Commit(false) - if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42) - } - if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) { - t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello")) - } - if val := copyOne.GetState(addr, skey); val != sval { - t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) - } - if val := copyOne.GetCommittedState(addr, skey); val != sval { - t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) - } // Copy the copy and check the balance once more copyTwo := copyOne.Copy() if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { @@ -580,8 +618,23 @@ func TestCopyCommitCopy(t *testing.T) { if val := copyTwo.GetState(addr, skey); val != sval { t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval) } - if val := copyTwo.GetCommittedState(addr, skey); val != sval { - t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) + if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { + t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval) + } + // Commit state, ensure states can be loaded from disk + root, _ := state.Commit(0, false) + state, _ = New(root, tdb, nil) + if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42) + } + if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) { + t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello")) + } + if val := state.GetState(addr, skey); val != sval { + t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want 
%x", val, sval) + } + if val := state.GetCommittedState(addr, skey); val != sval { + t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval) } } @@ -641,19 +694,6 @@ func TestCopyCopyCommitCopy(t *testing.T) { if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } - copyTwo.Commit(false) - if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42) - } - if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) { - t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello")) - } - if val := copyTwo.GetState(addr, skey); val != sval { - t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) - } - if val := copyTwo.GetCommittedState(addr, skey); val != sval { - t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) - } // Copy the copy-copy and check the balance once more copyThree := copyTwo.Copy() if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { @@ -665,11 +705,56 @@ func TestCopyCopyCommitCopy(t *testing.T) { if val := copyThree.GetState(addr, skey); val != sval { t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval) } - if val := copyThree.GetCommittedState(addr, skey); val != sval { + if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval) } } +// TestCommitCopy tests the copy from a committed state is not functional. +func TestCommitCopy(t *testing.T) { + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) + + // Create an account and check if the retrieved balance is correct + addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") + skey := common.HexToHash("aaa") + sval := common.HexToHash("bbb") + + state.SetBalance(addr, big.NewInt(42)) // Change the account trie + state.SetCode(addr, []byte("hello")) // Change an external metadata + state.SetState(addr, skey, sval) // Change the storage trie + + if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42) + } + if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) { + t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello")) + } + if val := state.GetState(addr, skey); val != sval { + t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval) + } + if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) { + t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{}) + } + // Copy the committed state database, the copied one is not functional. 
+ state.Commit(0, true) + copied := state.Copy() + if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("unexpected balance: have %v", balance) + } + if code := copied.GetCode(addr); code != nil { + t.Fatalf("unexpected code: have %x", code) + } + if val := copied.GetState(addr, skey); val != (common.Hash{}) { + t.Fatalf("unexpected storage slot: have %x", val) + } + if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) { + t.Fatalf("unexpected storage slot: have %x", val) + } + if !errors.Is(copied.Error(), trie.ErrCommitted) { + t.Fatalf("unexpected state error, %v", copied.Error()) + } +} + // TestDeleteCreateRevert tests a weird state transition corner case that we hit // while changing the internals of StateDB. The workflow is that a contract is // self-destructed, then in a follow-up transaction (but same block) it's created @@ -685,11 +770,11 @@ func TestDeleteCreateRevert(t *testing.T) { addr := common.BytesToAddress([]byte("so")) state.SetBalance(addr, big.NewInt(1)) - root, _ := state.Commit(false) + root, _ := state.Commit(0, false) state, _ = New(root, state.db, state.snaps) // Simulate self-destructing in one transaction, then create-reverting in another - state.Suicide(addr) + state.SelfDestruct(addr) state.Finalise(true) id := state.Snapshot() @@ -697,7 +782,7 @@ func TestDeleteCreateRevert(t *testing.T) { state.RevertToSnapshot(id) // Commit the entire state and make sure we don't crash and have the correct state - root, _ = state.Commit(true) + root, _ = state.Commit(0, true) state, _ = New(root, state.db, state.snaps) if state.getStateObject(addr) != nil { @@ -709,9 +794,28 @@ func TestDeleteCreateRevert(t *testing.T) { // the Commit operation fails with an error // If we are missing trie nodes, we should not continue writing to the trie func TestMissingTrieNodes(t *testing.T) { + testMissingTrieNodes(t, rawdb.HashScheme) + testMissingTrieNodes(t, rawdb.PathScheme) +} + +func testMissingTrieNodes(t *testing.T, scheme string) { // Create an initial state with a few accounts - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) + var ( + triedb *trie.Database + memDb = rawdb.NewMemoryDatabase() + ) + if scheme == rawdb.PathScheme { + triedb = trie.NewDatabase(memDb, &trie.Config{PathDB: &pathdb.Config{ + CleanCacheSize: 0, + DirtyCacheSize: 0, + }}) // disable caching + } else { + triedb = trie.NewDatabase(memDb, &trie.Config{HashDB: &hashdb.Config{ + CleanCacheSize: 0, + }}) // disable caching + } + db := NewDatabaseWithNodeDB(memDb, triedb) + var root common.Hash state, _ := New(types.EmptyRootHash, db, nil) addr := common.BytesToAddress([]byte("so")) @@ -721,10 +825,10 @@ func TestMissingTrieNodes(t *testing.T) { a2 := common.BytesToAddress([]byte("another")) state.SetBalance(a2, big.NewInt(100)) state.SetCode(a2, []byte{1, 2, 4}) - root, _ = state.Commit(false) + root, _ = state.Commit(0, false) t.Logf("root: %x", root) // force-flush - state.Database().TrieDB().Cap(0) + triedb.Commit(root, false) } // Create a new state on the old root state, _ = New(root, db, nil) @@ -745,7 +849,7 @@ func TestMissingTrieNodes(t *testing.T) { } // Modify the state state.SetBalance(addr, big.NewInt(2)) - root, err := state.Commit(false) + root, err := state.Commit(0, false) if err == nil { t.Fatalf("expected error, got root :%x", root) } @@ -931,7 +1035,8 @@ func TestFlushOrderDataLoss(t *testing.T) { // Create a state trie with many accounts and slots var ( memdb = rawdb.NewMemoryDatabase() - statedb = NewDatabase(memdb) + 
triedb = trie.NewDatabase(memdb, nil) + statedb = NewDatabaseWithNodeDB(memdb, triedb) state, _ = New(types.EmptyRootHash, statedb, nil) ) for a := byte(0); a < 10; a++ { @@ -940,15 +1045,15 @@ func TestFlushOrderDataLoss(t *testing.T) { state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s}) } } - root, err := state.Commit(false) + root, err := state.Commit(0, false) if err != nil { t.Fatalf("failed to commit state trie: %v", err) } - statedb.TrieDB().Reference(root, common.Hash{}) - if err := statedb.TrieDB().Cap(1024); err != nil { + triedb.Reference(root, common.Hash{}) + if err := triedb.Cap(1024); err != nil { t.Fatalf("failed to cap trie dirty cache: %v", err) } - if err := statedb.TrieDB().Commit(root, false); err != nil { + if err := triedb.Commit(root, false); err != nil { t.Fatalf("failed to commit state trie: %v", err) } // Reopen the state trie from flushed disk and verify it @@ -998,3 +1103,91 @@ func TestStateDBTransientStorage(t *testing.T) { t.Fatalf("transient storage mismatch: have %x, want %x", got, value) } } + +func TestResetObject(t *testing.T) { + var ( + disk = rawdb.NewMemoryDatabase() + tdb = trie.NewDatabase(disk, nil) + db = NewDatabaseWithNodeDB(disk, tdb) + snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash) + state, _ = New(types.EmptyRootHash, db, snaps) + addr = common.HexToAddress("0x1") + slotA = common.HexToHash("0x1") + slotB = common.HexToHash("0x2") + ) + // Initialize account with balance and storage in first transaction. + state.SetBalance(addr, big.NewInt(1)) + state.SetState(addr, slotA, common.BytesToHash([]byte{0x1})) + state.IntermediateRoot(true) + + // Reset account and mutate balance and storages + state.CreateAccount(addr) + state.SetBalance(addr, big.NewInt(2)) + state.SetState(addr, slotB, common.BytesToHash([]byte{0x2})) + root, _ := state.Commit(0, true) + + // Ensure the original account is wiped properly + snap := snaps.Snapshot(root) + slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes())) + if len(slot) != 0 { + t.Fatalf("Unexpected storage slot") + } + slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes())) + if !bytes.Equal(slot, []byte{0x2}) { + t.Fatalf("Unexpected storage slot value %v", slot) + } +} + +func TestDeleteStorage(t *testing.T) { + var ( + disk = rawdb.NewMemoryDatabase() + tdb = trie.NewDatabase(disk, nil) + db = NewDatabaseWithNodeDB(disk, tdb) + snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash) + state, _ = New(types.EmptyRootHash, db, snaps) + addr = common.HexToAddress("0x1") + ) + // Initialize account and populate storage + state.SetBalance(addr, big.NewInt(1)) + state.CreateAccount(addr) + for i := 0; i < 1000; i++ { + slot := common.Hash(uint256.NewInt(uint64(i)).Bytes32()) + value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32()) + state.SetState(addr, slot, value) + } + root, _ := state.Commit(0, true) + // Init phase done, create two states, one with snap and one without + fastState, _ := New(root, db, snaps) + slowState, _ := New(root, db, nil) + + obj := fastState.GetOrNewStateObject(addr) + storageRoot := obj.data.Root + + _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) + if err != nil { + t.Fatal(err) + } + + _, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) + if err != nil { + t.Fatal(err) + } + check := func(set 
*trienode.NodeSet) string {
+		var a []string
+		set.ForEachWithOrder(func(path string, n *trienode.Node) {
+			if n.Hash != (common.Hash{}) {
+				t.Fatal("delete should have empty hashes")
+			}
+			if len(n.Blob) != 0 {
+				t.Fatal("delete should have empty blobs")
+			}
+			a = append(a, fmt.Sprintf("%x", path))
+		})
+		return strings.Join(a, ",")
+	}
+	slowRes := check(slowNodes)
+	fastRes := check(fastNodes)
+	if slowRes != fastRes {
+		t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes)
+	}
+}
diff --git a/core/state/sync.go b/core/state/sync.go
index 61097c646..d6775e889 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -17,8 +17,6 @@
 package state
 
 import (
-	"bytes"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -45,7 +43,7 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(k
 		}
 	}
 	var obj types.StateAccount
-	if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
+	if err := rlp.DecodeBytes(leaf, &obj); err != nil {
 		return err
 	}
 	syncer.AddSubTrie(obj.Root, path, parent, parentPath, onSlot)
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 6e9d9342e..6196e7781 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -28,6 +28,8 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
 )
 
 // testAccount is the data associated with an account used by the state tests.
@@ -39,10 +41,17 @@ type testAccount struct {
 }
 
 // makeTestState creates a sample test state to test node-wise reconstruction.
-func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
+func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, common.Hash, []*testAccount) {
 	// Create an empty state
+	config := &trie.Config{Preimages: true}
+	if scheme == rawdb.PathScheme {
+		config.PathDB = pathdb.Defaults
+	} else {
+		config.HashDB = hashdb.Defaults
+	}
 	db := rawdb.NewMemoryDatabase()
-	sdb := NewDatabase(db)
+	nodeDb := trie.NewDatabase(db, config)
+	sdb := NewDatabaseWithNodeDB(db, nodeDb)
 	state, _ := New(types.EmptyRootHash, sdb, nil)
 
 	// Fill it with some arbitrary data
@@ -64,27 +73,30 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
 		if i%5 == 0 {
 			for j := byte(0); j < 5; j++ {
 				hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
-				obj.SetState(sdb, hash, hash)
+				obj.SetState(hash, hash)
 			}
 		}
-		state.updateStateObject(obj)
 		accounts = append(accounts, acc)
 	}
-	root, _ := state.Commit(false)
+	root, _ := state.Commit(0, false)
 
 	// Return the generated state
-	return db, sdb, root, accounts
+	return db, sdb, nodeDb, root, accounts
 }
 
 // checkStateAccounts cross references a reconstructed state with an expected
// account array.
-func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) { +func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) { + var config trie.Config + if scheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults + } // Check root availability and state contents - state, err := New(root, NewDatabase(db), nil) + state, err := New(root, NewDatabaseWithConfig(db, &config), nil) if err != nil { t.Fatalf("failed to create state trie at %x: %v", root, err) } - if err := checkStateConsistency(db, root); err != nil { + if err := checkStateConsistency(db, scheme, root); err != nil { t.Fatalf("inconsistent state trie at %x: %v", root, err) } for i, acc := range accounts { @@ -100,28 +112,13 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou } } -// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present. -func checkTrieConsistency(db ethdb.Database, root common.Hash) error { - if v, _ := db.Get(root[:]); v == nil { - return nil // Consider a non existent state consistent. - } - trie, err := trie.New(trie.StateTrieID(root), trie.NewDatabase(db)) - if err != nil { - return err - } - it := trie.NodeIterator(nil) - for it.Next(true) { - } - return it.Error() -} - // checkStateConsistency checks that all data of a state root is present. -func checkStateConsistency(db ethdb.Database, root common.Hash) error { - // Create and iterate a state trie rooted in a sub-node - if _, err := db.Get(root.Bytes()); err != nil { - return nil // Consider a non existent state consistent. +func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error { + config := &trie.Config{Preimages: true} + if scheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults } - state, err := New(root, NewDatabase(db), nil) + state, err := New(root, NewDatabaseWithConfig(db, config), nil) if err != nil { return err } @@ -133,8 +130,14 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { // Tests that an empty state is not scheduled for syncing. func TestEmptyStateSync(t *testing.T) { - db := trie.NewDatabase(rawdb.NewMemoryDatabase()) - sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, db.Scheme()) + dbA := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) + dbB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults}) + + sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbA.Scheme()) + if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { + t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes) + } + sync = NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbB.Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes) } @@ -143,22 +146,28 @@ func TestEmptyStateSync(t *testing.T) { // Tests that given a root hash, a state can sync iteratively on a single thread, // requesting retrieval tasks and returning all of them in one go. 
func TestIterativeStateSyncIndividual(t *testing.T) { - testIterativeStateSync(t, 1, false, false) + testIterativeStateSync(t, 1, false, false, rawdb.HashScheme) + testIterativeStateSync(t, 1, false, false, rawdb.PathScheme) } func TestIterativeStateSyncBatched(t *testing.T) { - testIterativeStateSync(t, 100, false, false) + testIterativeStateSync(t, 100, false, false, rawdb.HashScheme) + testIterativeStateSync(t, 100, false, false, rawdb.PathScheme) } func TestIterativeStateSyncIndividualFromDisk(t *testing.T) { - testIterativeStateSync(t, 1, true, false) + testIterativeStateSync(t, 1, true, false, rawdb.HashScheme) + testIterativeStateSync(t, 1, true, false, rawdb.PathScheme) } func TestIterativeStateSyncBatchedFromDisk(t *testing.T) { - testIterativeStateSync(t, 100, true, false) + testIterativeStateSync(t, 100, true, false, rawdb.HashScheme) + testIterativeStateSync(t, 100, true, false, rawdb.PathScheme) } func TestIterativeStateSyncIndividualByPath(t *testing.T) { - testIterativeStateSync(t, 1, false, true) + testIterativeStateSync(t, 1, false, true, rawdb.HashScheme) + testIterativeStateSync(t, 1, false, true, rawdb.PathScheme) } func TestIterativeStateSyncBatchedByPath(t *testing.T) { - testIterativeStateSync(t, 100, false, true) + testIterativeStateSync(t, 100, false, true, rawdb.HashScheme) + testIterativeStateSync(t, 100, false, true, rawdb.PathScheme) } // stateElement represents the element in the state trie(bytecode or trie node). @@ -169,17 +178,17 @@ type stateElement struct { syncPath trie.SyncPath } -func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { +func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, scheme string) { // Create a random state to copy - _, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) if commit { - srcDb.TrieDB().Commit(srcRoot, false) + ndb.Commit(srcRoot, false) } - srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB()) + srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), ndb) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) var ( nodeElements []stateElement @@ -194,9 +203,11 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { }) } for i := 0; i < len(codes); i++ { - codeElements = append(codeElements, stateElement{ - code: codes[i], - }) + codeElements = append(codeElements, stateElement{code: codes[i]}) + } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) } for len(nodeElements)+len(codeElements) > 0 { var ( @@ -204,7 +215,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { codeResults = make([]trie.CodeSyncResult, len(codeElements)) ) for i, element := range codeElements { - data, err := srcDb.ContractCode(common.Hash{}, element.code) + data, err := srcDb.ContractCode(common.Address{}, element.code) if err != nil { t.Fatalf("failed to retrieve contract bytecode for hash %x", element.code) } @@ -224,7 +235,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err) } id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root) - stTrie, err := trie.New(id, srcDb.TrieDB()) + stTrie, err := trie.New(id, ndb) if 
err != nil { t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err) } @@ -235,7 +246,8 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data} } } else { - data, err := srcDb.TrieDB().Node(node.hash) + owner, inner := trie.ResolvePath([]byte(node.path)) + data, err := reader.Node(owner, inner, node.hash) if err != nil { t.Fatalf("failed to retrieve node data for key %v", []byte(node.path)) } @@ -274,19 +286,28 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { }) } } + // Copy the preimages from source db in order to traverse the state. + srcDb.TrieDB().WritePreimages() + copyPreimages(srcDisk, dstDb) + // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that the trie scheduler can correctly reconstruct the state even if only // partial results are returned, and the others sent only later. func TestIterativeDelayedStateSync(t *testing.T) { + testIterativeDelayedStateSync(t, rawdb.HashScheme) + testIterativeDelayedStateSync(t, rawdb.PathScheme) +} + +func testIterativeDelayedStateSync(t *testing.T, scheme string) { // Create a random state to copy - _, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) var ( nodeElements []stateElement @@ -301,9 +322,11 @@ func TestIterativeDelayedStateSync(t *testing.T) { }) } for i := 0; i < len(codes); i++ { - codeElements = append(codeElements, stateElement{ - code: codes[i], - }) + codeElements = append(codeElements, stateElement{code: codes[i]}) + } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) } for len(nodeElements)+len(codeElements) > 0 { // Sync only half of the scheduled nodes if len(codeElements) > 0 { codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1) for i, element := range codeElements[:len(codeResults)] { - data, err := srcDb.ContractCode(common.Hash{}, element.code) + data, err := srcDb.ContractCode(common.Address{}, element.code) if err != nil { t.Fatalf("failed to retrieve contract bytecode for %x", element.code) } @@ -328,7 +351,8 @@ func TestIterativeDelayedStateSync(t *testing.T) { if len(nodeElements) > 0 { nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1) for i, element := range nodeElements[:len(nodeResults)] { - data, err := srcDb.TrieDB().Node(element.hash) + owner, inner := trie.ResolvePath([]byte(element.path)) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", element.hash) } @@ -363,23 +387,33 @@ func TestIterativeDelayedStateSync(t *testing.T) { }) } } + // Copy the preimages from source db in order to traverse the state.
+ srcDb.TrieDB().WritePreimages() + copyPreimages(srcDisk, dstDb) + // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that given a root hash, a trie can sync iteratively on a single thread, // requesting retrieval tasks and returning all of them in one go, however in a // random order. -func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) } -func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomStateSync(t, 100) } +func TestIterativeRandomStateSyncIndividual(t *testing.T) { + testIterativeRandomStateSync(t, 1, rawdb.HashScheme) + testIterativeRandomStateSync(t, 1, rawdb.PathScheme) +} +func TestIterativeRandomStateSyncBatched(t *testing.T) { + testIterativeRandomStateSync(t, 100, rawdb.HashScheme) + testIterativeRandomStateSync(t, 100, rawdb.PathScheme) +} -func testIterativeRandomStateSync(t *testing.T, count int) { +func testIterativeRandomStateSync(t *testing.T, count int, scheme string) { // Create a random state to copy - _, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -394,12 +428,16 @@ func testIterativeRandomStateSync(t *testing.T, count int) { for _, hash := range codes { codeQueue[hash] = struct{}{} } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) + } for len(nodeQueue)+len(codeQueue) > 0 { // Fetch all the queued nodes in a random order if len(codeQueue) > 0 { results := make([]trie.CodeSyncResult, 0, len(codeQueue)) for hash := range codeQueue { - data, err := srcDb.ContractCode(common.Hash{}, hash) + data, err := srcDb.ContractCode(common.Address{}, hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } @@ -414,7 +452,8 @@ func testIterativeRandomStateSync(t *testing.T, count int) { if len(nodeQueue) > 0 { results := make([]trie.NodeSyncResult, 0, len(nodeQueue)) for path, element := range nodeQueue { - data, err := srcDb.TrieDB().Node(element.hash) + owner, inner := trie.ResolvePath([]byte(element.path)) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path) } @@ -426,7 +465,6 @@ func testIterativeRandomStateSync(t *testing.T, count int) { } } } - // Feed the retrieved results back and queue new tasks batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { t.Fatalf("failed to commit data: %v", err) @@ -447,19 +485,28 @@ func testIterativeRandomStateSync(t *testing.T, count int) { codeQueue[hash] = struct{}{} } } + // Copy the preimages from source db in order to traverse the state. + srcDb.TrieDB().WritePreimages() + copyPreimages(srcDisk, dstDb) + // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that the trie scheduler can correctly reconstruct the state even if only // partial results are returned (Even those randomly), others sent only later. 
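Every variant above drives the same scheduler loop: ask the scheduler what is still missing, serve those items from the source database, feed the results back, then commit a batch to the destination. Condensed to its node-sync core, one round looks roughly like this — a sketch reusing the locals from the tests above (`sched`, `reader`, `dstDb`, `count`), with code-hash fetching and the requeue step elided:

```go
// One fetch/process/commit round of the state sync scheduler, mirroring the
// loop bodies in the tests above (illustrative fragment, not a new test).
paths, nodes, _ := sched.Missing(count) // paths and hashes still wanted
for i, hash := range nodes {
    owner, inner := trie.ResolvePath([]byte(paths[i]))
    data, err := reader.Node(owner, inner, hash) // serve from the source trie reader
    if err != nil {
        t.Fatalf("failed to retrieve node %x: %v", hash, err)
    }
    if err := sched.ProcessNode(trie.NodeSyncResult{Path: paths[i], Data: data}); err != nil {
        t.Fatalf("failed to process result: %v", err)
    }
}
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil { // flush completed sub-tries to the batch
    t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
```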
func TestIterativeRandomDelayedStateSync(t *testing.T) { + testIterativeRandomDelayedStateSync(t, rawdb.HashScheme) + testIterativeRandomDelayedStateSync(t, rawdb.PathScheme) +} + +func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) { // Create a random state to copy - _, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -474,6 +521,10 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { for _, hash := range codes { codeQueue[hash] = struct{}{} } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) + } for len(nodeQueue)+len(codeQueue) > 0 { // Sync only half of the scheduled nodes, even those in random order if len(codeQueue) > 0 { @@ -481,7 +532,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { for hash := range codeQueue { delete(codeQueue, hash) - data, err := srcDb.ContractCode(common.Hash{}, hash) + data, err := srcDb.ContractCode(common.Address{}, hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } @@ -502,7 +553,8 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { for path, element := range nodeQueue { delete(nodeQueue, path) - data, err := srcDb.TrieDB().Node(element.hash) + owner, inner := trie.ResolvePath([]byte(element.path)) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", element.hash) } @@ -537,15 +589,24 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { codeQueue[hash] = struct{}{} } } + // Copy the preimages from source db in order to traverse the state. + srcDb.TrieDB().WritePreimages() + copyPreimages(srcDisk, dstDb) + // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that at any point in time during a sync, only complete sub-tries are in // the database. 
func TestIncompleteStateSync(t *testing.T) { + testIncompleteStateSync(t, rawdb.HashScheme) + testIncompleteStateSync(t, rawdb.PathScheme) +} + +func testIncompleteStateSync(t *testing.T, scheme string) { // Create a random state to copy - db, srcDb, srcRoot, srcAccounts := makeTestState() + db, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // isCodeLookup to save some hashing var isCode = make(map[common.Hash]struct{}) @@ -555,17 +616,20 @@ func TestIncompleteStateSync(t *testing.T) { } } isCode[types.EmptyCodeHash] = struct{}{} - checkTrieConsistency(db, srcRoot) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) var ( addedCodes []common.Hash addedPaths []string addedHashes []common.Hash ) + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not available %x", srcRoot) + } nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) paths, nodes, codes := sched.Missing(1) @@ -584,7 +648,7 @@ func TestIncompleteStateSync(t *testing.T) { if len(codeQueue) > 0 { results := make([]trie.CodeSyncResult, 0, len(codeQueue)) for hash := range codeQueue { - data, err := srcDb.ContractCode(common.Hash{}, hash) + data, err := srcDb.ContractCode(common.Address{}, hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", hash) } @@ -598,12 +662,11 @@ func TestIncompleteStateSync(t *testing.T) { } } } - var nodehashes []common.Hash if len(nodeQueue) > 0 { results := make([]trie.NodeSyncResult, 0, len(nodeQueue)) for path, element := range nodeQueue { owner, inner := trie.ResolvePath([]byte(element.path)) - data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", element.hash) } @@ -613,7 +676,6 @@ func TestIncompleteStateSync(t *testing.T) { addedPaths = append(addedPaths, element.path) addedHashes = append(addedHashes, element.hash) } - nodehashes = append(nodehashes, element.hash) } // Process each of the state nodes for _, result := range results { @@ -628,13 +690,6 @@ func TestIncompleteStateSync(t *testing.T) { } batch.Write() - for _, root := range nodehashes { - // Can't use checkStateConsistency here because subtrie keys may have odd - // length and crash in LeafKey. - if err := checkTrieConsistency(dstDb, root); err != nil { - t.Fatalf("state inconsistent: %v", err) - } - } // Fetch the next batch to retrieve nodeQueue = make(map[string]stateElement) codeQueue = make(map[common.Hash]struct{}) @@ -650,16 +705,19 @@ func TestIncompleteStateSync(t *testing.T) { codeQueue[hash] = struct{}{} } } + // Copy the preimages from source db in order to traverse the state. 
+ srcDb.TrieDB().WritePreimages() + copyPreimages(db, dstDb) + // Sanity check that removing any node from the database is detected for _, node := range addedCodes { val := rawdb.ReadCode(dstDb, node) rawdb.DeleteCode(dstDb, node) - if err := checkStateConsistency(dstDb, srcRoot); err == nil { + if err := checkStateConsistency(dstDb, ndb.Scheme(), srcRoot); err == nil { t.Errorf("trie inconsistency not caught, missing: %x", node) } rawdb.WriteCode(dstDb, node, val) } - scheme := srcDb.TrieDB().Scheme() for i, path := range addedPaths { owner, inner := trie.ResolvePath([]byte(path)) hash := addedHashes[i] @@ -668,9 +726,21 @@ func TestIncompleteStateSync(t *testing.T) { t.Error("missing trie node") } rawdb.DeleteTrieNode(dstDb, owner, inner, hash, scheme) - if err := checkStateConsistency(dstDb, srcRoot); err == nil { + if err := checkStateConsistency(dstDb, scheme, srcRoot); err == nil { t.Errorf("trie inconsistency not caught, missing: %v", path) } rawdb.WriteTrieNode(dstDb, owner, inner, hash, val, scheme) } } + +func copyPreimages(srcDb, dstDb ethdb.Database) { + it := srcDb.NewIterator(rawdb.PreimagePrefix, nil) + defer it.Release() + + preimages := make(map[common.Hash][]byte) + for it.Next() { + hash := it.Key()[len(rawdb.PreimagePrefix):] + preimages[common.BytesToHash(hash)] = common.CopyBytes(it.Value()) + } + rawdb.WritePreimages(dstDb, preimages) +} diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 844f72fc1..4e8fd1e10 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -302,7 +302,7 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root) + trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) return diff --git a/core/state_processor.go b/core/state_processor.go index d2db9382b..97130c700 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,12 +17,14 @@ package core import ( + "errors" "fmt" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -82,6 +84,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vmenv = vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg) signer = types.MakeSigner(p.config, header.Number, header.Time) ) + if beaconRoot := block.BeaconRoot(); beaconRoot != nil { + ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) + } // begin PluGeth code injection pluginPreProcessBlock(block) blockTracer.PreProcessBlock(block) @@ -119,7 +124,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg // Fail if Shanghai not enabled and len(withdrawals) is non-zero. withdrawals := block.Withdrawals() if len(withdrawals) > 0 && !p.config.IsShanghai(block.Number(), block.Time()) { - return nil, nil, 0, fmt.Errorf("withdrawals before shanghai") + return nil, nil, 0, errors.New("withdrawals before shanghai") } // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) @@ -162,6 +167,11 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta receipt.TxHash = tx.Hash() receipt.GasUsed = result.UsedGas + if tx.Type() == types.BlobTxType { + receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) + receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas) + } + // If the transaction created a contract, store the creation address in the receipt. if msg.To == nil { receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) @@ -187,6 +197,26 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo } // Create a new context to be used in the EVM environment blockContext := NewEVMBlockContext(header, bc, author) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) + vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg) return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } + +// ProcessBeaconBlockRoot applies the EIP-4788 system call to the beacon block root +// contract. This method is exported to be used in tests. +func ProcessBeaconBlockRoot(beaconRoot common.Hash, vmenv *vm.EVM, statedb *state.StateDB) { + // If EIP-4788 is enabled, we need to invoke the beaconroot storage contract with + // the new root + msg := &Message{ + From: params.SystemAddress, + GasLimit: 30_000_000, + GasPrice: common.Big0, + GasFeeCap: common.Big0, + GasTipCap: common.Big0, + To: &params.BeaconRootsStorageAddress, + Data: beaconRoot[:], + } + vmenv.Reset(NewEVMTxContext(msg), statedb) + statedb.AddAddressToAccessList(params.BeaconRootsStorageAddress) + _, _, _ = vmenv.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.Big0) + statedb.Finalise(true) +} diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 5dbeed97a..aade2f6d7 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -26,13 +26,15 @@ import ( "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -45,19 +47,23 @@ func u64(val uint64) *uint64 { return &val } func TestStateProcessorErrors(t *testing.T) { var ( config = &params.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), +
PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, + ShanghaiTime: new(uint64), + CancunTime: new(uint64), } signer = types.LatestSigner(config) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") @@ -89,6 +95,22 @@ func TestStateProcessorErrors(t *testing.T) { }), signer, key1) return tx } + var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, hashes []common.Hash) *types.Transaction { + tx, err := types.SignTx(types.NewTx(&types.BlobTx{ + Nonce: nonce, + GasTipCap: uint256.MustFromBig(gasTipCap), + GasFeeCap: uint256.MustFromBig(gasFeeCap), + Gas: gasLimit, + To: to, + BlobHashes: hashes, + Value: new(uint256.Int), + }), signer, key1) + if err != nil { + t.Fatal(err) + } + return tx + } + { // Tests against a 'recent' chain definition var ( db = rawdb.NewMemoryDatabase() @@ -105,8 +127,10 @@ func TestStateProcessorErrors(t *testing.T) { }, }, } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) + tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} ) + defer blockchain.Stop() bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) tooBigNumber := new(big.Int).Set(bigNumber) @@ -209,8 +233,26 @@ func TestStateProcessorErrors(t *testing.T) { }, want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", }, + { // ErrMaxInitCodeSizeExceeded + txs: []*types.Transaction{ + mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.InitialBaseFee), tooBigInitCode[:]), + }, + want: "could not apply tx 0 [0xd491405f06c92d118dd3208376fcee18a57c54bc52063ee4a26b1cf296857c25]: max initcode size exceeded: code size 49153 limit 49152", + }, + { // ErrIntrinsicGas: Not enough gas to cover init code + txs: []*types.Transaction{ + mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.InitialBaseFee), make([]byte, 320)), + }, + want: "could not apply tx 0 [0xfd49536a9b323769d8472fcb3ebb3689b707a349379baee3e2ee3fe7baae06a1]: intrinsic gas too low: have 54299, want 54300", + }, + { // ErrBlobFeeCapTooLow + txs: []*types.Transaction{ + mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}), + }, + want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 875000000", + }, } { - block := GenerateBadBlock(gspec.ToBlock(), ethash.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -284,7 +326,7 @@ func TestStateProcessorErrors(t *testing.T) { }, }, } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, 
nil) + blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) ) defer blockchain.Stop() for i, tt := range []struct { @@ -298,73 +340,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1", }, } { - block := GenerateBadBlock(gspec.ToBlock(), ethash.NewFaker(), tt.txs, gspec.Config) - _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") - } - if have, want := err.Error(), tt.want; have != want { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) - } - } - } - - // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (EIP-3860) enabled. - { - var ( - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{ - Config: &params.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - ArrowGlacierBlock: big.NewInt(0), - GrayGlacierBlock: big.NewInt(0), - MergeNetsplitBlock: big.NewInt(0), - TerminalTotalDifficulty: big.NewInt(0), - TerminalTotalDifficultyPassed: true, - ShanghaiTime: u64(0), - }, - Alloc: GenesisAlloc{ - common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - }, - } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) - tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} - smallInitCode = [320]byte{} - ) - defer blockchain.Stop() - for i, tt := range []struct { - txs []*types.Transaction - want string - }{ - { // ErrMaxInitCodeSizeExceeded - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 500000, common.Big0, misc.CalcBaseFee(config, genesis.Header()), tooBigInitCode[:]), - }, - want: "could not apply tx 0 [0x832b54a6c3359474a9f504b1003b2cc1b6fcaa18e4ef369eb45b5d40dad6378f]: max initcode size exceeded: code size 49153 limit 49152", - }, - { // ErrIntrinsicGas: Not enough gas to cover init code - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 54299, common.Big0, misc.CalcBaseFee(config, genesis.Header()), smallInitCode[:]), - }, - want: "could not apply tx 0 [0x39b7436cb432d3662a25626474282c5c4c1a213326fd87e4e18a91477bae98b2]: intrinsic gas too low: have 54299, want 54300", - }, - } { - block := GenerateBadBlock(genesis, beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -401,7 +377,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr UncleHash: types.EmptyUncleHash, } if config.IsLondon(header.Number) { - header.BaseFee = misc.CalcBaseFee(config, parent.Header()) + header.BaseFee = eip1559.CalcBaseFee(config, parent.Header()) } if config.IsShanghai(header.Number, header.Time) { header.WithdrawalsHash = &types.EmptyWithdrawalsHash @@ -412,6 +388,7 @@
func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr hasher := sha3.NewLegacyKeccak256() hasher.Write(header.Number.Bytes()) var cumulativeGas uint64 + var nBlobs int for _, tx := range txs { txh := tx.Hash() hasher.Write(txh[:]) @@ -420,8 +397,23 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr receipt.GasUsed = tx.Gas() receipts = append(receipts, receipt) cumulativeGas += tx.Gas() + nBlobs += len(tx.BlobHashes()) } header.Root = common.BytesToHash(hasher.Sum(nil)) + if config.IsCancun(header.Number, header.Time) { + var pExcess, pUsed = uint64(0), uint64(0) + if parent.ExcessBlobGas() != nil { + pExcess = *parent.ExcessBlobGas() + pUsed = *parent.BlobGasUsed() + } + excess := eip4844.CalcExcessBlobGas(pExcess, pUsed) + used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) + header.ExcessBlobGas = &excess + header.BlobGasUsed = &used + + beaconRoot := common.HexToHash("0xbeac00") + header.ParentBeaconRoot = &beaconRoot + } // Assemble and return the final block for sealing if config.IsShanghai(header.Number, header.Time) { return types.NewBlockWithWithdrawals(header, txs, nil, receipts, []*types.Withdrawal{}, trie.NewStackTrie(nil)) diff --git a/core/state_transition.go b/core/state_transition.go index df30ee223..cb9287a82 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -17,12 +17,14 @@ package core import ( + "errors" "fmt" "math" "math/big" "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -125,16 +127,18 @@ func toWordSize(size uint64) uint64 { // A Message contains the data derived from a single transaction that is relevant to state // processing. type Message struct { - To *common.Address - From common.Address - Nonce uint64 - Value *big.Int - GasLimit uint64 - GasPrice *big.Int - GasFeeCap *big.Int - GasTipCap *big.Int - Data []byte - AccessList types.AccessList + To *common.Address + From common.Address + Nonce uint64 + Value *big.Int + GasLimit uint64 + GasPrice *big.Int + GasFeeCap *big.Int + GasTipCap *big.Int + Data []byte + AccessList types.AccessList + BlobGasFeeCap *big.Int + BlobHashes []common.Hash // When SkipAccountChecks is true, the message nonce is not checked against the // account nonce in state. It also disables checking that the sender is an EOA. @@ -155,6 +159,8 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In Data: tx.Data(), AccessList: tx.AccessList(), SkipAccountChecks: false, + BlobHashes: tx.BlobHashes(), + BlobGasFeeCap: tx.BlobGasFeeCap(), } // If baseFee provided, set gasPrice to effectiveGasPrice. 
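Before the hunk resumes below, the rule the comment refers to is worth stating: once a block's base fee is known, a dynamic-fee message pays min(gasFeeCap, baseFee + gasTipCap) per gas. A minimal self-contained illustration — the helper and the numbers are mine, not geth API; 875000000 is the first-block base fee that also appears in the test expectations above:

```go
package main

import (
    "fmt"
    "math/big"
)

// effectiveGasPrice returns min(feeCap, baseFee+tipCap), the per-gas price a
// dynamic-fee transaction actually pays once the base fee is known.
func effectiveGasPrice(baseFee, tipCap, feeCap *big.Int) *big.Int {
    price := new(big.Int).Add(baseFee, tipCap)
    if price.Cmp(feeCap) > 0 {
        return new(big.Int).Set(feeCap)
    }
    return price
}

func main() {
    baseFee := big.NewInt(875_000_000)
    tip := big.NewInt(2_000_000_000)
    feeCap := big.NewInt(1_000_000_000)
    fmt.Println(effectiveGasPrice(baseFee, tip, feeCap)) // 1000000000, capped by feeCap
}
```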
if baseFee != nil { @@ -228,12 +234,24 @@ func (st *StateTransition) to() common.Address { func (st *StateTransition) buyGas() error { mgval := new(big.Int).SetUint64(st.msg.GasLimit) mgval = mgval.Mul(mgval, st.msg.GasPrice) - balanceCheck := mgval + balanceCheck := new(big.Int).Set(mgval) if st.msg.GasFeeCap != nil { - balanceCheck = new(big.Int).SetUint64(st.msg.GasLimit) + balanceCheck.SetUint64(st.msg.GasLimit) balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) balanceCheck.Add(balanceCheck, st.msg.Value) } + if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { + if blobGas := st.blobGasUsed(); blobGas > 0 { + // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap + blobBalanceCheck := new(big.Int).SetUint64(blobGas) + blobBalanceCheck.Mul(blobBalanceCheck, st.msg.BlobGasFeeCap) + balanceCheck.Add(balanceCheck, blobBalanceCheck) + // Pay for blobGasUsed * actual blob fee + blobFee := new(big.Int).SetUint64(blobGas) + blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) + mgval.Add(mgval, blobFee) + } + } if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want) } @@ -295,6 +313,29 @@ func (st *StateTransition) preCheck() error { } } } + // Check the blob version validity + if msg.BlobHashes != nil { + if len(msg.BlobHashes) == 0 { + return errors.New("blob transaction missing blob hashes") + } + for i, hash := range msg.BlobHashes { + if hash[0] != params.BlobTxHashVersion { + return fmt.Errorf("blob %d hash version mismatch (have %d, supported %d)", + i, hash[0], params.BlobTxHashVersion) + } + } + } + + if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { + if st.blobGasUsed() > 0 { + // Check that the user is paying at least the current blob fee + blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) + if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { + return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) + } + } + } + return st.buyGas() } @@ -433,3 +474,8 @@ func (st *StateTransition) refundGas(refundQuotient uint64) { func (st *StateTransition) gasUsed() uint64 { return st.initialGas - st.gasRemaining } + +// blobGasUsed returns the amount of blob gas used by the message. +func (st *StateTransition) blobGasUsed() uint64 { + return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob) +} diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go new file mode 100644 index 000000000..042ff3be2 --- /dev/null +++ b/core/txpool/blobpool/blobpool.go @@ -0,0 +1,1528 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
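Before moving into the new pool below, the arithmetic that the buyGas hunk above introduces for Cancun deserves spelling out: the balance check must cover blobGasUsed × BlobGasFeeCap (the worst case the sender signed for), while the amount actually charged uses the block's current blob fee. A hedged sketch — eip4844.CalcBlobFee and params.BlobTxBlobGasPerBlob are the identifiers used in the hunk, the wrapper function is illustrative only:

```go
// blobCharges mirrors the split in buyGas: reserve is what the balance check
// demands up front, charge is what is actually added to the purchase amount.
func blobCharges(blobHashes []common.Hash, blobGasFeeCap *big.Int, excessBlobGas uint64) (reserve, charge *big.Int) {
    blobGas := new(big.Int).SetUint64(uint64(len(blobHashes)) * params.BlobTxBlobGasPerBlob)
    reserve = new(big.Int).Mul(blobGas, blobGasFeeCap)                     // worst case the sender signed for
    charge = new(big.Int).Mul(blobGas, eip4844.CalcBlobFee(excessBlobGas)) // actual deduction at the current blob fee
    return reserve, charge
}
```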
+ +// Package blobpool implements the EIP-4844 blob transaction pool. +package blobpool + +import ( + "container/heap" + "errors" + "fmt" + "math" + "math/big" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/billy" + "github.com/holiman/uint256" +) + +const ( + // blobSize is the protocol constrained byte size of a single blob in a + // transaction. There can be multiple of these embedded into a single tx. + blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement + + // maxBlobsPerTransaction is the maximum number of blobs a single transaction + // is allowed to contain. Whilst the spec states it's unlimited, the block + // data slots are protocol bound, which implicitly also limit this. + maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob + + // txAvgSize is an approximate byte size of a transaction metadata to avoid + // tiny overflows causing all txs to move a shelf higher, wasting disk space. + txAvgSize = 4 * 1024 + + // txMaxSize is the maximum size a single transaction can have, outside + // the included blobs. Since blob transactions are pulled instead of pushed, + // and only a small metadata is kept in ram, the rest is on disk, there is + // no critical limit that should be enforced. Still, capping it to some sane + // limit can never hurt. + txMaxSize = 1024 * 1024 + + // maxTxsPerAccount is the maximum number of blob transactions admitted from + // a single account. The limit is enforced to minimize the DoS potential of + // a private tx cancelling publicly propagated blobs. + // + // Note, transactions resurrected by a reorg are also subject to this limit, + // so pushing it down too aggressively might make resurrections non-functional. + maxTxsPerAccount = 16 + + // pendingTransactionStore is the subfolder containing the currently queued + // blob transactions. + pendingTransactionStore = "queue" + + // limboedTransactionStore is the subfolder containing the currently included + // but not yet finalized transaction blobs. + limboedTransactionStore = "limbo" +) + +// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and +// schedule the blob transactions into the following blocks. Only ever add the +// bare minimum needed fields to keep the size down (and thus number of entries +// larger with the same memory consumption). 
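Plugging the mainnet Cancun parameters (assumed here from the params package) into the constants above makes the sizes concrete, which helps when weighing the disk-churn arguments in the design notes that follow:

```go
const (
    // blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
    blobSize = 4096 * 32 // 131072 bytes, i.e. 128 KiB per blob

    // maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
    maxBlobsPerTransaction = 786432 / 131072 // 6 blobs per block, hence at most 6 per tx
)
```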
+type blobTxMeta struct { + hash common.Hash // Transaction hash to maintain the lookup table + id uint64 // Storage ID in the pool's persistent store + size uint32 // Byte size in the pool's persistent store + + nonce uint64 // Needed to prioritize inclusion order within an account + costCap *uint256.Int // Needed to validate cumulative balance sufficiency + execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump + execFeeCap *uint256.Int // Needed to validate replacement price bump + blobFeeCap *uint256.Int // Needed to validate replacement price bump + + basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap + blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap + + evictionExecTip *uint256.Int // Worst gas tip across all previous nonces + evictionExecFeeJumps float64 // Worst base fee (converted to fee jumps) across all previous nonces + evictionBlobFeeJumps float64 // Worst blob fee (converted to fee jumps) across all previous nonces +} + +// newBlobTxMeta retrieves the indexed metadata fields from a blob transaction +// and assembles a helper struct to track in memory. +func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta { + meta := &blobTxMeta{ + hash: tx.Hash(), + id: id, + size: size, + nonce: tx.Nonce(), + costCap: uint256.MustFromBig(tx.Cost()), + execTipCap: uint256.MustFromBig(tx.GasTipCap()), + execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), + blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), + } + meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) + meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap) + + return meta +} + +// BlobPool is the transaction pool dedicated to EIP-4844 blob transactions. +// +// Blob transactions are special snowflakes that are designed for a very specific +// purpose (rollups) and are expected to adhere to that specific use case. These +// behavioural expectations allow us to design a transaction pool that is more robust +// (i.e. resending issues) and more resilient to DoS attacks (e.g. replace-flush +// attacks) than the generic tx pool. These improvements will also mean, however, +// that we enforce a significantly more aggressive strategy on entering and exiting +// the pool: +// +// - Blob transactions are large. With the initial design aiming for 128KB blobs, +// we must ensure that these only traverse the network the absolute minimum +// number of times. Broadcasting to sqrt(peers) is out of the question, rather +// these should only ever be announced and the remote side should request it if +// it wants to. +// +// - Block blob-space is limited. With blocks being capped to a few blob txs, we +// can make use of the very low expected churn rate within the pool. Notably, +// we should be able to use a persistent disk backend for the pool, solving +// the tx resend issue that plagues the generic tx pool, as long as there's no +// artificial churn (i.e. pool wars). +// +// - Purpose of blobs are layer-2s. Layer-2s are meant to use blob transactions to +// commit to their own current state, which is independent of Ethereum mainnet +// (state, txs). This means that there's no reason for blob tx cancellation or +// replacement, apart from a potential basefee / miner tip adjustment. +// +// - Replacements are expensive. Given their size, propagating a replacement +// blob transaction to an existing one should be aggressively discouraged.
+// Whilst generic transactions can start at 1 Wei gas cost and require a 10% +// fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei) +// and a more aggressive bump (100%). +// +// - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge +// DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate +// already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob +// txs are disallowed; c) the presence of blob transactions exclude non-blob +// transactions. +// +// - Malicious cancellations are possible. Although the pool might prevent txs +// that cancel blobs, blocks might contain such transaction (malicious miner +// or flashbotter). The pool should cap the total number of blob transactions +// per account as to prevent propagating too much data before cancelling it +// via a normal transaction. It should nonetheless be high enough to support +// resurrecting reorged transactions. Perhaps 4-16. +// +// - Local txs are meaningless. Mining pools historically used local transactions +// for payouts or for backdoor deals. With 1559 in place, the basefee usually +// dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs +// retain the 1559 2D gas pricing (and introduce on top a dynamic blob gas fee), +// so locality is moot. With a disk backed blob pool avoiding the resend issue, +// there's also no need to save own transactions for later. +// +// - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow +// blob txs containing 0 blobs. In practice, admitting such txs into the pool +// breaks the low-churn invariant as blob constraints don't apply anymore. Even +// though we could accept blocks containing such txs, a reorg would require moving +// them back into the blob pool, which can break invariants. +// +// - Dropping blobs needs delay. When normal transactions are included, they +// are immediately evicted from the pool since they are contained in the +// including block. Blobs however are not included in the execution chain, +// so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs, +// blobs are retained on disk until they are finalised. +// +// - Blobs can arrive via flashbots. Blocks might contain blob transactions we +// have never seen on the network. Since we cannot recover them from blocks +// either, the engine_newPayload needs to give them to us, and we cache them +// until finality to support reorgs without tx losses. +// +// Whilst some constraints above might sound overly aggressive, the general idea is +// that the blob pool should work robustly for its intended use case and whilst +// anyone is free to use blob transactions for arbitrary non-rollup use cases, +// they should not be allowed to run amok the network. +// +// Implementation wise there are a few interesting design choices: +// +// - Adding a transaction to the pool blocks until persisted to disk. This is +// viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at +// peak), so natural churn is a couple MB per block. Replacements doing O(n) +// updates are forbidden and transaction propagation is pull based (i.e. no +// pileup of pending data). +// +// - When transactions are chosen for inclusion, the primary criteria is the +// signer tip (and having a basefee/data fee high enough of course). However, +// same-tip transactions will be split by their basefee/datafee, preferring +// those that are closer to the current network limits. 
The idea being that +// very relaxed ones can be included even if the fees go up, when the closer +// ones could already be invalid. +// +// When the pool eventually reaches saturation, some old transactions - that may +// never execute - will need to be evicted in favor of newer ones. The eviction +// strategy is quite complex: +// +// - Exceeding capacity evicts the highest-nonce of the account with the lowest +// paying blob transaction anywhere in the pooled nonce-sequence, as that tx +// would be executed the furthest in the future and is thus blocking anything +// after it. The smallest is deliberately not evicted to avoid a nonce-gap. +// +// - Analogously, if the pool is full, the consideration price of a new tx for +// evicting an old one is the smallest price in the entire nonce-sequence of +// the account. This avoids malicious users DoSing the pool with seemingly +// high paying transactions hidden behind a low-paying blocked one. +// +// - Since blob transactions have 3 price parameters: execution tip, execution +// fee cap and data fee cap, there's no singular parameter to create a total +// price ordering on. What's more, since the base fee and blob fee can move +// independently of one another, there's no pre-defined way to combine them +// into a stable order either. This leads to a multi-dimensional problem to +// solve after every block. +// +// - The first observation is that comparing 1559 base fees or 4844 blob fees +// needs to happen in the context of their dynamism. Since these fees jump +// up or down in ~1.125 multipliers (at max) across blocks, comparing fees +// in two transactions should be based on log1.125(fee) to eliminate noise. +// +// - The second observation is that the basefee and blobfee move independently, +// so there's no way to split mixed txs on their own (A has higher base fee, +// B has higher blob fee). Rather than look at the absolute fees, the useful +// metric is the max time it can take to exceed the transaction's fee caps. +// Specifically, we're interested in the number of jumps needed to go from +// the current fee to the transaction's cap: +// +// jumps = log1.125(txfee) - log1.125(basefee) +// +// - The third observation is that the base fee tends to hover around rather +// than swing wildly. The number of jumps needed from the current fee starts +// to get less relevant the higher it is. To remove the noise here too, the +// pool will use log(jumps) as the delta for comparing transactions. +// +// delta = sign(jumps) * log(abs(jumps)) +// +// - To establish a total order, we need to reduce the dimensionality of the +// two base fees (log jumps) to a single value. The interesting aspect from +// the pool's perspective is how fast will a tx get executable (fees going +// down, crossing the smaller negative jump counter) or non-executable (fees +// going up, crossing the smaller positive jump counter). As such, the pool +// cares only about the min of the two delta values for eviction priority. +// +// priority = min(delta-basefee, delta-blobfee) +// +// - The above very aggressive dimensionality and noise reduction should result +// in transactions being grouped into a small number of buckets, the further +// the fees the larger the buckets. This is good because it allows us to use +// the miner tip meaningfully as a splitter.
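Since the three formulas above are easy to misread in comment form, here is the same noise-reduction pipeline as plain float math. This is a toy restatement for intuition only, assuming `import "math"`; in the pool this role is played by dynamicFeeJumps and friends on uint256 fees, and the next note refines the priority by clamping positive values to zero:

```go
// feeJumps expresses a fee as a count of maximal ~1.125x per-block fee
// adjustments, i.e. log base 1.125 of the fee.
func feeJumps(fee float64) float64 {
    return math.Log(fee) / math.Log(1.125)
}

// evictionDelta dampens the jump distance between the current fee and a
// transaction's cap: delta = sign(jumps) * log(abs(jumps)).
func evictionDelta(current, txCap float64) float64 {
    jumps := feeJumps(txCap) - feeJumps(current)
    if jumps == 0 {
        return 0 // avoid log(0)
    }
    delta := math.Log(math.Abs(jumps))
    if jumps < 0 {
        return -delta
    }
    return delta
}

// priority picks whichever fee dimension would flip executability first:
// min(delta-basefee, delta-blobfee).
func priority(deltaBasefee, deltaBlobfee float64) float64 {
    return math.Min(deltaBasefee, deltaBlobfee)
}
```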
+// +// - For the scenario where the pool does not contain non-executable blob txs +// anymore, it does not make sense to grant a later eviction priority to txs +// with high fee caps since it could enable pool wars. As such, any positive +// priority will be grouped together. +// +// priority = min(delta-basefee, delta-blobfee, 0) +// +// Optimisation tradeoffs: +// +// - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob +// cap). Maintaining these values across all transactions from the account is +// problematic as each transaction replacement or inclusion would require a +// rescan of all other transactions to recalculate the minimum. Instead, the +// pool maintains a rolling minimum across the nonce range. Updating all the +// minimums will need to be done only starting at the swapped in/out nonce +// and leading up to the first no-change. +type BlobPool struct { + config Config // Pool configuration + reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools + + store billy.Database // Persistent data store for the tx metadata and blobs + stored uint64 // Useful data size of all transactions on disk + limbo *limbo // Persistent data store for the non-finalized blobs + + signer types.Signer // Transaction signer to use for sender recovery + chain BlockChain // Chain object to access the state through + + head *types.Header // Current head of the chain + state *state.StateDB // Current state at the head of the chain + gasTip *uint256.Int // Currently accepted minimum gas tip + + lookup map[common.Hash]uint64 // Lookup table mapping hashes to tx billy entries + index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce + spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts + evict *evictHeap // Heap of cheapest accounts for eviction when full + + eventFeed event.Feed // Event feed to send out new tx events on pool inclusion + eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination + + lock sync.RWMutex // Mutex protecting the pool during reorg handling +} + +// New creates a new blob transaction pool to gather, sort and filter inbound +// blob transactions from the network. +func New(config Config, chain BlockChain) *BlobPool { + // Sanitize the input to ensure no vulnerable gas prices are set + config = (&config).sanitize() + + // Create the transaction pool with its initial settings + return &BlobPool{ + config: config, + signer: types.LatestSigner(chain.Config()), + chain: chain, + lookup: make(map[common.Hash]uint64), + index: make(map[common.Address][]*blobTxMeta), + spent: make(map[common.Address]*uint256.Int), + } +} + +// Filter returns whether the given transaction can be consumed by the blob pool. +func (p *BlobPool) Filter(tx *types.Transaction) bool { + return tx.Type() == types.BlobTxType +} + +// Init sets the gas price needed to keep a transaction in the pool and the chain +// head to allow balance / nonce checks. The transaction journal will be loaded +// from disk and filtered based on the provided starting settings. 
+func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error { + p.reserve = reserve + + var ( + queuedir string + limbodir string + ) + if p.config.Datadir != "" { + queuedir = filepath.Join(p.config.Datadir, pendingTransactionStore) + if err := os.MkdirAll(queuedir, 0700); err != nil { + return err + } + limbodir = filepath.Join(p.config.Datadir, limboedTransactionStore) + if err := os.MkdirAll(limbodir, 0700); err != nil { + return err + } + } + state, err := p.chain.StateAt(head.Root) + if err != nil { + return err + } + p.head, p.state = head, state + + // Index all transactions on disk and delete anything unprocessable + var fails []uint64 + index := func(id uint64, size uint32, blob []byte) { + if p.parseTransaction(id, size, blob) != nil { + fails = append(fails, id) + } + } + store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index) + if err != nil { + return err + } + p.store = store + + if len(fails) > 0 { + log.Warn("Dropping invalidated blob transactions", "ids", fails) + for _, id := range fails { + if err := p.store.Delete(id); err != nil { + p.Close() + return err + } + } + } + // Sort the indexed transactions by nonce and delete anything gapped, create + // the eviction heap of anyone still standing + for addr := range p.index { + p.recheck(addr, nil) + } + var ( + basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head)) + blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice)) + ) + if p.head.ExcessBlobGas != nil { + blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas)) + } + p.evict = newPriceHeap(basefee, blobfee, &p.index) + + // Pool initialized, attach the blob limbo to it to track blobs included + // recently but not yet finalized + p.limbo, err = newLimbo(limbodir) + if err != nil { + p.Close() + return err + } + // Set the configured gas tip, triggering a filtering of anything just loaded + basefeeGauge.Update(int64(basefee.Uint64())) + blobfeeGauge.Update(int64(blobfee.Uint64())) + + p.SetGasTip(gasTip) + + // Since the user might have modified their pool's capacity, evict anything + // above the current allowance + for p.stored > p.config.Datacap { + p.drop() + } + // Update the metrics and return the constructed pool + datacapGauge.Update(int64(p.config.Datacap)) + p.updateStorageMetrics() + return nil +} + +// Close closes down the underlying persistent store. +func (p *BlobPool) Close() error { + var errs []error + if err := p.limbo.Close(); err != nil { + errs = append(errs, err) + } + if err := p.store.Close(); err != nil { + errs = append(errs, err) + } + p.eventScope.Close() + + switch { + case errs == nil: + return nil + case len(errs) == 1: + return errs[0] + default: + return fmt.Errorf("%v", errs) + } +} + +// parseTransaction is a callback method on pool creation that gets called for +// each transaction on disk to create the in-memory metadata index. +func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error { + tx := new(types.Transaction) + if err := rlp.DecodeBytes(blob, tx); err != nil { + // This path is impossible unless the disk data representation changes + // across restarts. For that ever improbable case, recover gracefully + // by ignoring this data entry.
+ log.Error("Failed to decode blob pool entry", "id", id, "err", err) + return err + } + if tx.BlobTxSidecar() == nil { + log.Error("Missing sidecar in blob pool entry", "id", id, "hash", tx.Hash()) + return errors.New("missing blob sidecar") + } + + meta := newBlobTxMeta(id, size, tx) + + sender, err := p.signer.Sender(tx) + if err != nil { + // This path is impossible unless the signature validity changes across + // restarts. For that ever unprobable case, recover gracefully by ignoring + // this data entry. + log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err) + return err + } + if _, ok := p.index[sender]; !ok { + if err := p.reserve(sender, true); err != nil { + return err + } + p.index[sender] = []*blobTxMeta{} + p.spent[sender] = new(uint256.Int) + } + p.index[sender] = append(p.index[sender], meta) + p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap) + + p.lookup[meta.hash] = meta.id + p.stored += uint64(meta.size) + + return nil +} + +// recheck verifies the pool's content for a specific account and drops anything +// that does not fit anymore (dangling or filled nonce, overdraft). +func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) { + // Sort the transactions belonging to the account so reinjects can be simpler + txs := p.index[addr] + if inclusions != nil && txs == nil { // during reorgs, we might find new accounts + return + } + sort.Slice(txs, func(i, j int) bool { + return txs[i].nonce < txs[j].nonce + }) + // If there is a gap between the chain state and the blob pool, drop + // all the transactions as they are non-executable. Similarly, if the + // entire tx range was included, drop all. + var ( + next = p.state.GetNonce(addr) + gapped = txs[0].nonce > next + filled = txs[len(txs)-1].nonce < next + ) + if gapped || filled { + var ( + ids []uint64 + nonces []uint64 + ) + for i := 0; i < len(txs); i++ { + ids = append(ids, txs[i].id) + nonces = append(nonces, txs[i].nonce) + + p.stored -= uint64(txs[i].size) + delete(p.lookup, txs[i].hash) + + // Included transactions blobs need to be moved to the limbo + if filled && inclusions != nil { + p.offload(addr, txs[i].nonce, txs[i].id, inclusions) + } + } + delete(p.index, addr) + delete(p.spent, addr) + if inclusions != nil { // only during reorgs will the heap will be initialized + heap.Remove(p.evict, p.evict.index[addr]) + } + p.reserve(addr, false) + + if gapped { + log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids) + } else { + log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids) + } + for _, id := range ids { + if err := p.store.Delete(id); err != nil { + log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err) + } + } + return + } + // If there is overlap between the chain state and the blob pool, drop + // anything below the current state + if txs[0].nonce < next { + var ( + ids []uint64 + nonces []uint64 + ) + for txs[0].nonce < next { + ids = append(ids, txs[0].id) + nonces = append(nonces, txs[0].nonce) + + p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap) + p.stored -= uint64(txs[0].size) + delete(p.lookup, txs[0].hash) + + // Included transactions blobs need to be moved to the limbo + if inclusions != nil { + p.offload(addr, txs[0].nonce, txs[0].id, inclusions) + } + txs = txs[1:] + } + log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", 
len(txs)) + for _, id := range ids { + if err := p.store.Delete(id); err != nil { + log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err) + } + } + p.index[addr] = txs + } + // Iterate over the transactions to initialize their eviction thresholds + // and to detect any nonce gaps + txs[0].evictionExecTip = txs[0].execTipCap + txs[0].evictionExecFeeJumps = txs[0].basefeeJumps + txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps + + for i := 1; i < len(txs); i++ { + // If there's no nonce gap, initialize the eviction thresholds as the + // minimum between the cumulative thresholds and the current tx fees + if txs[i].nonce == txs[i-1].nonce+1 { + txs[i].evictionExecTip = txs[i-1].evictionExecTip + if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 { + txs[i].evictionExecTip = txs[i].execTipCap + } + txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps + if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps { + txs[i].evictionExecFeeJumps = txs[i].basefeeJumps + } + txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps + if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps { + txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps + } + continue + } + // Sanity check that there's no double nonce. This case would be a coding + // error, but better to know about it + if txs[i].nonce == txs[i-1].nonce { + log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce) + } + // Otherwise if there's a nonce gap evict all later transactions + var ( + ids []uint64 + nonces []uint64 + ) + for j := i; j < len(txs); j++ { + ids = append(ids, txs[j].id) + nonces = append(nonces, txs[j].nonce) + + p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap) + p.stored -= uint64(txs[j].size) + delete(p.lookup, txs[j].hash) + } + txs = txs[:i] + + log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids) + for _, id := range ids { + if err := p.store.Delete(id); err != nil { + log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err) + } + } + p.index[addr] = txs + break + } + // Ensure that there's no over-draft; this is expected to happen when some + // transactions get included without publishing on the network + var ( + balance = uint256.MustFromBig(p.state.GetBalance(addr)) + spent = p.spent[addr] + ) + if spent.Cmp(balance) > 0 { + // Evict the highest nonce transactions until the pending set falls under + // the account's available balance + var ( + ids []uint64 + nonces []uint64 + ) + for p.spent[addr].Cmp(balance) > 0 { + last := txs[len(txs)-1] + txs[len(txs)-1] = nil + txs = txs[:len(txs)-1] + + ids = append(ids, last.id) + nonces = append(nonces, last.nonce) + + p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap) + p.stored -= uint64(last.size) + delete(p.lookup, last.hash) + } + if len(txs) == 0 { + delete(p.index, addr) + delete(p.spent, addr) + if inclusions != nil { // only during reorgs will the heap be initialized + heap.Remove(p.evict, p.evict.index[addr]) + } + p.reserve(addr, false) + } else { + p.index[addr] = txs + } + log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids) + for _, id := range ids { + if err := p.store.Delete(id); err != nil { + log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err) + } + } + } + // Sanity check that no account can have more queued transactions than the + // DoS protection threshold.
+ // Sanity check that no account can have more queued transactions than the
+ // DoS protection threshold.
+ if len(txs) > maxTxsPerAccount {
+ // Evict the highest nonce transactions until the pending set falls under
+ // the account's transaction cap
+ var (
+ ids []uint64
+ nonces []uint64
+ )
+ for len(txs) > maxTxsPerAccount {
+ last := txs[len(txs)-1]
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ ids = append(ids, last.id)
+ nonces = append(nonces, last.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
+ p.stored -= uint64(last.size)
+ delete(p.lookup, last.hash)
+ }
+ p.index[addr] = txs
+
+ log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ }
+ }
+ // Included cheap transactions might have left the remaining ones better off
+ // from an eviction point of view; fix any potential issues in the heap.
+ if _, ok := p.index[addr]; ok && inclusions != nil {
+ heap.Fix(p.evict, p.evict.index[addr])
+ }
+}
+
+// offload removes a tracked blob transaction from the pool and moves it into the
+// limbo for tracking until finality.
+//
+// The method may log errors for various unexpected scenarios, but will not return
+// any of them, since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
+ data, err := p.store.Get(id)
+ if err != nil {
+ log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ var tx types.Transaction
+ if err = rlp.DecodeBytes(data, &tx); err != nil {
+ log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
+ return
+ }
+ block, ok := inclusions[tx.Hash()]
+ if !ok {
+ log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
+ return
+ }
+ if err := p.limbo.push(&tx, block); err != nil {
+ log.Warn("Failed to offload blob tx into limbo", "err", err)
+ return
+ }
+}
+
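offload, together with reinject and the finalize call in Reset below, defines the limbo's whole contract: push blobs when their transaction is included, pull them back if a reorg drops the block, and delete everything at or below finality. A minimal in-memory sketch of that contract (an assumed, simplified shape; the real limbo persists serialized transactions to a billy datastore like the pool itself):

```go
package main

import "fmt"

type limbo struct {
	groups map[uint64]map[string][]byte // inclusion block -> tx hash -> blob data
	index  map[string]uint64            // tx hash -> inclusion block
}

// push tracks an included transaction's blobs until finality.
func (l *limbo) push(hash string, blobs []byte, block uint64) {
	if l.groups[block] == nil {
		l.groups[block] = make(map[string][]byte)
	}
	l.groups[block][hash] = blobs
	l.index[hash] = block
}

// pull recovers the blobs of a reorged-out transaction for reinjection.
func (l *limbo) pull(hash string) ([]byte, bool) {
	block, ok := l.index[hash]
	if !ok {
		return nil, false
	}
	blobs := l.groups[block][hash]
	delete(l.groups[block], hash)
	delete(l.index, hash)
	return blobs, true
}

// finalize drops everything included at or below the finalized block, since
// those transactions can no longer be reorged out.
func (l *limbo) finalize(final uint64) {
	for block, group := range l.groups {
		if block > final {
			continue
		}
		for hash := range group {
			delete(l.index, hash)
		}
		delete(l.groups, block)
	}
}

func main() {
	l := &limbo{groups: map[uint64]map[string][]byte{}, index: map[string]uint64{}}
	l.push("0xaa", []byte("blob"), 100)
	l.finalize(100)
	_, ok := l.pull("0xaa")
	fmt.Println("recoverable after finality:", ok) // false
}
```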
+// Reset implements txpool.SubPool, allowing the blob pool's internal state to be
+// kept in sync with the main transaction pool's internal state.
+func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
+ waitStart := time.Now()
+ p.lock.Lock()
+ resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
+ defer p.lock.Unlock()
+
+ defer func(start time.Time) {
+ resettimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ statedb, err := p.chain.StateAt(newHead.Root)
+ if err != nil {
+ log.Error("Failed to reset blobpool state", "err", err)
+ return
+ }
+ p.head = newHead
+ p.state = statedb
+
+ // Run the reorg between the old and new head and figure out which accounts
+ // need to be rechecked and which transactions need to be readded
+ if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
+ for addr, txs := range reinject {
+ // Blindly push all the lost transactions back into the pool
+ for _, tx := range txs {
+ p.reinject(addr, tx.Hash())
+ }
+ // Recheck the account's pooled transactions to drop included and
+ // invalidated ones
+ p.recheck(addr, inclusions)
+ }
+ }
+ // Flush out any blobs from limbo that are older than the latest finality
+ if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
+ p.limbo.finalize(p.chain.CurrentFinalBlock())
+ }
+ // Reset the price heap for the new set of basefee/blobfee pairs
+ var (
+ basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead))
+ blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
+ )
+ if newHead.ExcessBlobGas != nil {
+ blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*newHead.ExcessBlobGas))
+ }
+ p.evict.reinit(basefee, blobfee, false)
+
+ basefeeGauge.Update(int64(basefee.Uint64()))
+ blobfeeGauge.Update(int64(blobfee.Uint64()))
+ p.updateStorageMetrics()
+}
+
+// reorg assembles all the transactors and missing transactions between an old
+// and new head to figure out which account's tx set needs to be rechecked and
+// which transactions need to be requeued.
+//
+// The transaction-to-block inclusion info is also returned to allow tracking
+// just-included transactions by block number in the limbo.
+func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*types.Transaction, map[common.Hash]uint64) {
+ // If the pool was not yet initialized, don't do anything
+ if oldHead == nil {
+ return nil, nil
+ }
+ // If the reorg is too deep, avoid doing it (will happen during snap sync)
+ oldNum := oldHead.Number.Uint64()
+ newNum := newHead.Number.Uint64()
+
+ if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+ return nil, nil
+ }
+ // Reorg seems shallow enough to pull all transactions into memory
+ var (
+ transactors = make(map[common.Address]struct{})
+ discarded = make(map[common.Address][]*types.Transaction)
+ included = make(map[common.Address][]*types.Transaction)
+ inclusions = make(map[common.Hash]uint64)
+
+ rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+ add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+ )
+ if add == nil {
+ // If the new head is nil, it means that something happened between
+ // the firing of the newhead event and _now_: most likely a
+ // reorg caused by sync reversion or an explicit sethead back to an
+ // earlier block.
+ log.Warn("Blobpool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
+ return nil, nil
+ }
+ if rem == nil {
+ // This can happen if a setHead is performed, where we simply discard
+ // the old head from the chain. If that is the case, we don't have the
+ // lost transactions anymore, and there's nothing to add.
+ if newNum >= oldNum { + // If we reorged to a same or higher number, then it's not a case + // of setHead + log.Warn("Blobpool reset with missing old head", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + return nil, nil + } + // If the reorg ended up on a lower number, it's indicative of setHead + // being the cause + log.Debug("Skipping blobpool reset caused by setHead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + return nil, nil + } + // Both old and new blocks exist, traverse through the progression chain + // and accumulate the transactors and transactions + for rem.NumberU64() > add.NumberU64() { + for _, tx := range rem.Transactions() { + from, _ := p.signer.Sender(tx) + + discarded[from] = append(discarded[from], tx) + transactors[from] = struct{}{} + } + if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash()) + return nil, nil + } + } + for add.NumberU64() > rem.NumberU64() { + for _, tx := range add.Transactions() { + from, _ := p.signer.Sender(tx) + + included[from] = append(included[from], tx) + inclusions[tx.Hash()] = add.NumberU64() + transactors[from] = struct{}{} + } + if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash()) + return nil, nil + } + } + for rem.Hash() != add.Hash() { + for _, tx := range rem.Transactions() { + from, _ := p.signer.Sender(tx) + + discarded[from] = append(discarded[from], tx) + transactors[from] = struct{}{} + } + if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash()) + return nil, nil + } + for _, tx := range add.Transactions() { + from, _ := p.signer.Sender(tx) + + included[from] = append(included[from], tx) + inclusions[tx.Hash()] = add.NumberU64() + transactors[from] = struct{}{} + } + if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash()) + return nil, nil + } + } + // Generate the set of transactions per address to pull back into the pool, + // also updating the rest along the way + reinject := make(map[common.Address][]*types.Transaction) + for addr := range transactors { + // Generate the set that was lost to reinject into the pool + lost := make([]*types.Transaction, 0, len(discarded[addr])) + for _, tx := range types.TxDifference(discarded[addr], included[addr]) { + if p.Filter(tx) { + lost = append(lost, tx) + } + } + reinject[addr] = lost + + // Update the set that was already reincluded to track the blocks in limbo + for _, tx := range types.TxDifference(included[addr], discarded[addr]) { + if p.Filter(tx) { + p.limbo.update(tx.Hash(), inclusions[tx.Hash()]) + } + } + } + return reinject, inclusions +} + +// reinject blindly pushes a transaction previously included in the chain - and +// just reorged out - into the pool. The transaction is assumed valid (having +// been in the chain), thus the only validation needed is nonce sorting and over- +// draft checks after injection. 
+//
+// Note, the method will not initialize the eviction cache values as those will
+// be initialized once for all transactions belonging to an account after all
+// individual transactions have been injected back into the pool.
+func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
+ // Retrieve the associated blob from the limbo. Without the blobs, we cannot
+ // add the transaction back into the pool as it is not mineable.
+ tx, err := p.limbo.pull(txhash)
+ if err != nil {
+ log.Error("Blobs unavailable, dropping reorged tx", "err", err)
+ return
+ }
+ // TODO: seems like an easy optimization here would be getting the serialized tx
+ // from limbo instead of re-serializing it here.
+
+ // Serialize the transaction back into the primary datastore.
+ blob, err := rlp.EncodeToBytes(tx)
+ if err != nil {
+ log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+ return
+ }
+ id, err := p.store.Put(blob)
+ if err != nil {
+ log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
+ return
+ }
+
+ // Update the indices and metrics
+ meta := newBlobTxMeta(id, p.store.Size(id), tx)
+ if _, ok := p.index[addr]; !ok {
+ if err := p.reserve(addr, true); err != nil {
+ log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
+ return
+ }
+ p.index[addr] = []*blobTxMeta{meta}
+ p.spent[addr] = meta.costCap
+ p.evict.Push(addr)
+ } else {
+ p.index[addr] = append(p.index[addr], meta)
+ p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
+ }
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size)
+}
+
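Reset, reorg and reinject above together form the pool's chain-tracking path; at the heart of it, reorg is the classic two-cursor ancestor walk. Stripped of the transaction bookkeeping, it reduces to the following standalone sketch (illustrative block type, not the pool's BlockChain interface):

```go
package main

import "fmt"

type block struct {
	number uint64
	hash   string
	parent *block
}

// commonAncestor walks the deeper cursor up first, then both in lockstep,
// until the two sides converge on a shared hash.
func commonAncestor(rem, add *block) *block {
	for rem.number > add.number {
		rem = rem.parent // blocks leaving the canonical chain: txs discarded
	}
	for add.number > rem.number {
		add = add.parent // blocks entering the canonical chain: txs included
	}
	for rem.hash != add.hash {
		rem, add = rem.parent, add.parent
	}
	return rem
}

func main() {
	root := &block{number: 1, hash: "a"}
	oldHead := &block{number: 2, hash: "b", parent: root}
	newHead := &block{number: 3, hash: "c'", parent: &block{number: 2, hash: "b'", parent: root}}
	fmt.Println("fork point:", commonAncestor(oldHead, newHead).hash) // "a"
}
```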
+// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
+// to be kept in sync with the main transaction pool's gas requirements.
+func (p *BlobPool) SetGasTip(tip *big.Int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ // Store the new minimum gas tip
+ old := p.gasTip
+ p.gasTip = uint256.MustFromBig(tip)
+
+ // If the min miner fee increased, remove transactions below the new threshold
+ if old == nil || p.gasTip.Cmp(old) > 0 {
+ for addr, txs := range p.index {
+ for i, tx := range txs {
+ if tx.execTipCap.Cmp(p.gasTip) < 0 {
+ // Drop the offending transaction
+ var (
+ ids = []uint64{tx.id}
+ nonces = []uint64{tx.nonce}
+ )
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+ p.stored -= uint64(tx.size)
+ delete(p.lookup, tx.hash)
+ txs[i] = nil
+
+ // Drop everything afterwards, no gaps allowed
+ for j, tx := range txs[i+1:] {
+ ids = append(ids, tx.id)
+ nonces = append(nonces, tx.nonce)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap)
+ p.stored -= uint64(tx.size)
+ delete(p.lookup, tx.hash)
+ txs[i+1+j] = nil
+ }
+ // Clear out the dropped transactions from the index
+ if i > 0 {
+ p.index[addr] = txs[:i]
+ heap.Fix(p.evict, p.evict.index[addr])
+ } else {
+ delete(p.index, addr)
+ delete(p.spent, addr)
+
+ heap.Remove(p.evict, p.evict.index[addr])
+ p.reserve(addr, false)
+ }
+ // Clear out the transactions from the data store
+ log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+ for _, id := range ids {
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete dropped transaction", "id", id, "err", err)
+ }
+ }
+ break
+ }
+ }
+ }
+ }
+ log.Debug("Blobpool tip threshold updated", "tip", tip)
+ pooltipGauge.Update(tip.Int64())
+ p.updateStorageMetrics()
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (p *BlobPool) validateTx(tx *types.Transaction) error {
+ // Ensure the transaction adheres to basic pool filters (type, size, tip) and
+ // consensus rules
+ baseOpts := &txpool.ValidationOptions{
+ Config: p.chain.Config(),
+ Accept: 1 << types.BlobTxType,
+ MaxSize: txMaxSize,
+ MinTip: p.gasTip.ToBig(),
+ }
+ if err := txpool.ValidateTransaction(tx, p.head, p.signer, baseOpts); err != nil {
+ return err
+ }
+ // Ensure the transaction adheres to the stateful pool filters (nonce, balance)
+ stateOpts := &txpool.ValidationOptionsWithState{
+ State: p.state,
+
+ FirstNonceGap: func(addr common.Address) uint64 {
+ // Nonce gaps are not permitted in the blob pool, the first gap will
+ // be the next nonce shifted by however many transactions we already
+ // have pooled.
+ return p.state.GetNonce(addr) + uint64(len(p.index[addr]))
+ },
+ UsedAndLeftSlots: func(addr common.Address) (int, int) {
+ have := len(p.index[addr])
+ if have >= maxTxsPerAccount {
+ return have, 0
+ }
+ return have, maxTxsPerAccount - have
+ },
+ ExistingExpenditure: func(addr common.Address) *big.Int {
+ if spent := p.spent[addr]; spent != nil {
+ return spent.ToBig()
+ }
+ return new(big.Int)
+ },
+ ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
+ next := p.state.GetNonce(addr)
+ if uint64(len(p.index[addr])) > nonce-next {
+ return p.index[addr][int(nonce-next)].costCap.ToBig()
+ }
+ return nil
+ },
+ }
+ if err := txpool.ValidateTransactionWithState(tx, p.signer, stateOpts); err != nil {
+ return err
+ }
+ // If the transaction replaces an existing one, ensure that price bumps are
+ // adhered to.
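The replacement rule that the code below enforces is easiest to see with numbers: each of the three caps must first strictly exceed the queued transaction's cap, and then also clear a percentage bump computed with truncating integer division. A minimal sketch of that minimum-cap arithmetic (values illustrative):

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// minReplacementCap computes prev * (100 + bump) / 100 with truncating
// division, mirroring the multiplier/onehundred arithmetic in validateTx.
func minReplacementCap(prev *uint256.Int, priceBump uint64) *uint256.Int {
	multiplier := uint256.NewInt(100 + priceBump)
	onehundred := uint256.NewInt(100)
	return new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev), onehundred)
}

func main() {
	prev := uint256.NewInt(40) // queued tx's cap (wei)
	min := minReplacementCap(prev, 100)
	fmt.Println("replacement needs at least", min) // 80: a 100% bump doubles every cap
}
```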
+ var (
+ from, _ = p.signer.Sender(tx) // already validated above
+ next = p.state.GetNonce(from)
+ )
+ if uint64(len(p.index[from])) > tx.Nonce()-next {
+ // Account can support the replacement, but the price bump must also be met
+ prev := p.index[from][int(tx.Nonce()-next)]
+ switch {
+ case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
+ return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
+ case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
+ return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
+ case tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
+ return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
+ }
+ var (
+ multiplier = uint256.NewInt(100 + p.config.PriceBump)
+ onehundred = uint256.NewInt(100)
+
+ minGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), onehundred)
+ minGasTipCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), onehundred)
+ minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), onehundred)
+ )
+ switch {
+ case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
+ return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
+ case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
+ return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
+ case tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
+ return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
+ }
+ }
+ return nil
+}
+
+// Has returns an indicator whether subpool has a transaction cached with the
+// given hash.
+func (p *BlobPool) Has(hash common.Hash) bool {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ _, ok := p.lookup[hash]
+ return ok
+}
+
+// Get returns a transaction if it is contained in the pool, or nil otherwise.
+func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
+ // Track the amount of time waiting to retrieve a fully resolved blob tx from
+ // the pool and the amount of time actually spent on pulling the data from disk.
+ getStart := time.Now()
+ p.lock.RLock()
+ getwaitHist.Update(time.Since(getStart).Nanoseconds())
+ defer p.lock.RUnlock()
+
+ defer func(start time.Time) {
+ gettimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ // Pull the blob from disk and return an assembled response
+ id, ok := p.lookup[hash]
+ if !ok {
+ return nil
+ }
+ data, err := p.store.Get(id)
+ if err != nil {
+ log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
+ return nil
+ }
+ item := new(types.Transaction)
+ if err = rlp.DecodeBytes(data, item); err != nil {
+ log.Error("Blobs corrupted for tracked transaction", "hash", hash, "id", id, "err", err)
+ return nil
+ }
+ return item
+}
+
+// Add inserts a set of blob transactions into the pool if they pass validation (both
+// consensus validity and pool restrictions).
+func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
+ errs := make([]error, len(txs))
+ for i, tx := range txs {
+ errs[i] = p.add(tx)
+ }
+ return errs
+}
+
+// add inserts a new blob transaction into the pool if it passes validation (both
+// consensus validity and pool restrictions).
+func (p *BlobPool) add(tx *types.Transaction) (err error) {
+ // The blob pool blocks on adding a transaction. This is because blob txs are
+ // only ever pulled from the network, so this method will act as the overload
+ // protection for fetches.
+ waitStart := time.Now()
+ p.lock.Lock()
+ addwaitHist.Update(time.Since(waitStart).Nanoseconds())
+ defer p.lock.Unlock()
+
+ defer func(start time.Time) {
+ addtimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ // Ensure the transaction is valid from all perspectives
+ if err := p.validateTx(tx); err != nil {
+ log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+ return err
+ }
+ // If the address is not yet known, request exclusivity to track the account
+ // only by this subpool until all transactions are evicted
+ from, _ := types.Sender(p.signer, tx) // already validated above
+ if _, ok := p.index[from]; !ok {
+ if err := p.reserve(from, true); err != nil {
+ return err
+ }
+ defer func() {
+ // If the transaction is rejected by some post-validation check, remove
+ // the lock on the reservation set.
+ //
+ // Note, `err` here is the named error return, which will be initialized
+ // by a return statement before running deferred methods. Take care with
+ // removing or subscoping err as it will break this clause.
+ if err != nil {
+ p.reserve(from, false)
+ }
+ }()
+ }
+ // Transaction permitted into the pool from a nonce and cost perspective,
+ // insert it into the database and update the indices
+ blob, err := rlp.EncodeToBytes(tx)
+ if err != nil {
+ log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
+ return err
+ }
+ id, err := p.store.Put(blob)
+ if err != nil {
+ return err
+ }
+ meta := newBlobTxMeta(id, p.store.Size(id), tx)
+
+ var (
+ next = p.state.GetNonce(from)
+ offset = int(tx.Nonce() - next)
+ newacc = false
+ )
+ var oldEvictionExecFeeJumps, oldEvictionBlobFeeJumps float64
+ if txs, ok := p.index[from]; ok {
+ oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps
+ oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps
+ }
+ if len(p.index[from]) > offset {
+ // Transaction replaces a previously queued one
+ prev := p.index[from][offset]
+ if err := p.store.Delete(prev.id); err != nil {
+ // Shitty situation, but try to recover gracefully instead of going boom
+ log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err)
+ }
+ // Update the transaction index
+ p.index[from][offset] = meta
+ p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap)
+ p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+
+ delete(p.lookup, prev.hash)
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size) - uint64(prev.size)
+ } else {
+ // Transaction extends previously scheduled ones
+ p.index[from] = append(p.index[from], meta)
+ if _, ok := p.spent[from]; !ok {
+ p.spent[from] = new(uint256.Int)
+ newacc = true
+ }
+ p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap)
+ p.lookup[meta.hash] = meta.id
+ p.stored += uint64(meta.size)
+ }
+ // Recompute the rolling eviction fields. In case of a replacement, this will
+ // recompute all subsequent fields. In case of an append, this will only do
+ // the fresh calculation.
+ txs := p.index[from]
+
+ for i := offset; i < len(txs); i++ {
+ // The first transaction will always use itself
+ if i == 0 {
+ txs[0].evictionExecTip = txs[0].execTipCap
+ txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
+ txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
+
+ continue
+ }
+ // Subsequent transactions will use a rolling calculation
+ txs[i].evictionExecTip = txs[i-1].evictionExecTip
+ if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
+ txs[i].evictionExecTip = txs[i].execTipCap
+ }
+ txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
+ if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
+ txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
+ }
+ txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
+ if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
+ txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ }
+ // Update the eviction heap with the new information:
+ // - If the transaction is from a new account, add it to the heap
+ // - If the account had a singleton tx replaced, update the heap (new price caps)
+ // - If the account has a transaction replaced or appended, update the heap if significantly changed
+ switch {
+ case newacc:
+ heap.Push(p.evict, from)
+
+ case len(txs) == 1: // 1 tx and not a new acc, must be replacement
+ heap.Fix(p.evict, p.evict.index[from])
+
+ default: // replacement or new append
+ evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[len(txs)-1].evictionExecFeeJumps
+ evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[len(txs)-1].evictionBlobFeeJumps
+
+ if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
+ heap.Fix(p.evict, p.evict.index[from])
+ }
+ }
+ // If the pool went over the allowed data limit, evict transactions until
+ // we're again below the threshold
+ for p.stored > p.config.Datacap {
+ p.drop()
+ }
+ p.updateStorageMetrics()
+
+ return nil
+}
+
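The 0.001 tolerance above only makes sense once "jumps" are pinned down: the pool expresses fee caps as logarithms in base 1.125, i.e. the number of maximal 12.5% base-fee steps needed to reach the cap, so a 0.001 difference is a negligible fraction of a single step. A standalone sketch of the conversion (the helper name is illustrative), reproducing the constants quoted in TestOpenIndex further down:

```go
package main

import (
	"fmt"
	"math"
)

// dynamicFeeJumps converts a fee cap in wei into 12.5% "jumps", the change of
// base being why a single math.Log suffices for any fee magnitude.
func dynamicFeeJumps(fee float64) float64 {
	return math.Log(fee) / math.Log(1.125)
}

func main() {
	for _, fee := range []float64{100, 90, 200} {
		fmt.Printf("fee cap %4.0f => %.3f jumps\n", fee, dynamicFeeJumps(fee))
	}
	// 100 => 39.098, 90 => 38.204, 200 => 44.983, matching the commented
	// expectations in the tests below.
}
```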
+// drop removes the worst transaction from the pool. It is primarily used when a
+// freshly added transaction overflows the pool and needs to evict something. The
+// method is also called on startup if the user resizes their storage; that might
+// be an expensive run, but it should be fine-ish.
+func (p *BlobPool) drop() {
+ // Peek at the account with the worst transaction set to evict from (Go's heap
+ // stores the minimum at index zero of the heap slice) and retrieve its last
+ // transaction.
+ var (
+ from = p.evict.addrs[0] // cannot call drop on empty pool
+
+ txs = p.index[from]
+ drop = txs[len(txs)-1]
+ last = len(txs) == 1
+ )
+ // Remove the transaction from the pool's index
+ if last {
+ delete(p.index, from)
+ delete(p.spent, from)
+ p.reserve(from, false)
+ } else {
+ txs[len(txs)-1] = nil
+ txs = txs[:len(txs)-1]
+
+ p.index[from] = txs
+ p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
+ }
+ p.stored -= uint64(drop.size)
+ delete(p.lookup, drop.hash)
+
+ // Remove the transaction from the pool's eviction heap:
+ // - If the entire account was dropped, pop off the address
+ // - Otherwise, if the new tail has better eviction caps, fix the heap
+ if last {
+ heap.Pop(p.evict)
+ } else {
+ tail := txs[len(txs)-1] // new tail, surely exists
+
+ evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
+ evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
+
+ if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
+ heap.Fix(p.evict, 0)
+ }
+ }
+ // Remove the transaction from the data store
+ log.Warn("Evicting overflowed blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+ if err := p.store.Delete(drop.id); err != nil {
+ log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
+ }
+}
+
+// Pending retrieves all currently processable transactions, grouped by origin
+// account and sorted by nonce.
+func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+ // Track the amount of time waiting to retrieve the list of pending blob txs
+ // from the pool and the amount of time actually spent on assembling the data.
+ // The latter will be pretty much moot, but we keep it for symmetry across all
+ // user operations.
+ pendStart := time.Now()
+ p.lock.RLock()
+ pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
+ defer p.lock.RUnlock()
+
+ defer func(start time.Time) {
+ pendtimeHist.Update(time.Since(start).Nanoseconds())
+ }(time.Now())
+
+ pending := make(map[common.Address][]*txpool.LazyTransaction)
+ for addr, txs := range p.index {
+ var lazies []*txpool.LazyTransaction
+ for _, tx := range txs {
+ lazies = append(lazies, &txpool.LazyTransaction{
+ Pool: p,
+ Hash: tx.hash,
+ Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
+ GasFeeCap: tx.execFeeCap.ToBig(),
+ GasTipCap: tx.execTipCap.ToBig(),
+ })
+ }
+ if len(lazies) > 0 {
+ pending[addr] = lazies
+ }
+ }
+ return pending
+}
+
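Pending hands out lazy handles rather than full transactions: only the hash and price caps are materialized, and the heavy blob payload stays on disk until a consumer commits to a transaction. A sketch of the intended two-phase consumption, assuming resolution goes through the embedded Pool back-reference's Get (one plausible way to use the handle, not necessarily the miner's actual code path):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/txpool"
)

// resolvePending filters on the cheap cached caps first and only pulls the
// blob-carrying transaction from disk for entries actually selected.
func resolvePending(pending map[common.Address][]*txpool.LazyTransaction) {
	for addr, lazies := range pending {
		for _, lazy := range lazies {
			// GasFeeCap/GasTipCap are available without touching the store
			if lazy.GasTipCap.Sign() == 0 {
				continue // skip zero-tip entries, illustrative policy only
			}
			if tx := lazy.Pool.Get(lazy.Hash); tx != nil {
				fmt.Println("resolved blob tx", tx.Hash(), "from", addr)
			}
		}
	}
}

func main() {
	resolvePending(nil) // no pool wired up in this sketch
}
```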
+// updateStorageMetrics retrieves a bunch of stats from the data store and pushes
+// them out as metrics.
+func (p *BlobPool) updateStorageMetrics() {
+ stats := p.store.Infos()
+
+ var (
+ dataused uint64
+ datareal uint64
+ slotused uint64
+
+ oversizedDataused uint64
+ oversizedDatagaps uint64
+ oversizedSlotused uint64
+ oversizedSlotgaps uint64
+ )
+ for _, shelf := range stats.Shelves {
+ slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+ slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+ dataused += slotDataused
+ datareal += slotDataused + slotDatagaps
+ slotused += shelf.FilledSlots
+
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+
+ if shelf.SlotSize/blobSize > maxBlobsPerTransaction {
+ oversizedDataused += slotDataused
+ oversizedDatagaps += slotDatagaps
+ oversizedSlotused += shelf.FilledSlots
+ oversizedSlotgaps += shelf.GappedSlots
+ }
+ }
+ datausedGauge.Update(int64(dataused))
+ datarealGauge.Update(int64(datareal))
+ slotusedGauge.Update(int64(slotused))
+
+ oversizedDatausedGauge.Update(int64(oversizedDataused))
+ oversizedDatagapsGauge.Update(int64(oversizedDatagaps))
+ oversizedSlotusedGauge.Update(int64(oversizedSlotused))
+ oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps))
+
+ p.updateLimboMetrics()
+}
+
+// updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
+// them out as metrics.
+func (p *BlobPool) updateLimboMetrics() {
+ stats := p.limbo.store.Infos()
+
+ var (
+ dataused uint64
+ datareal uint64
+ slotused uint64
+ )
+ for _, shelf := range stats.Shelves {
+ slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize)
+ slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize)
+
+ dataused += slotDataused
+ datareal += slotDataused + slotDatagaps
+ slotused += shelf.FilledSlots
+
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
+ metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
+ }
+ limboDatausedGauge.Update(int64(dataused))
+ limboDatarealGauge.Update(int64(datareal))
+ limboSlotusedGauge.Update(int64(slotused))
+}
+
+// SubscribeTransactions registers a subscription of NewTxsEvent and
+// starts sending events to the given channel.
+func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
+ return p.eventScope.Track(p.eventFeed.Subscribe(ch))
+}
+
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
+func (p *BlobPool) Nonce(addr common.Address) uint64 {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if txs, ok := p.index[addr]; ok {
+ return txs[len(txs)-1].nonce + 1
+ }
+ return p.state.GetNonce(addr)
+}
+
+// Stats retrieves the current pool stats, namely the number of pending and the
+// number of queued (non-executable) transactions.
+func (p *BlobPool) Stats() (int, int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ var pending int
+ for _, txs := range p.index {
+ pending += len(txs)
+ }
+ return pending, 0 // No non-executable txs in the blob pool
+}
+
+// Content retrieves the data content of the transaction pool, returning all the
+// pending as well as queued transactions, grouped by account and sorted by nonce.
+//
+// For the blob pool, this method will return nothing for now.
+// TODO(karalabe): Abstract out the returned metadata.
+func (p *BlobPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
+ return make(map[common.Address][]*types.Transaction), make(map[common.Address][]*types.Transaction)
+}
+
+// ContentFrom retrieves the data content of the transaction pool, returning the
+// pending as well as queued transactions of this address, grouped by nonce.
+//
+// For the blob pool, this method will return nothing for now.
+// TODO(karalabe): Abstract out the returned metadata.
+func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
+ return []*types.Transaction{}, []*types.Transaction{}
+}
+
+// Locals retrieves the accounts currently considered local by the pool.
+//
+// There is no notion of local accounts in the blob pool.
+func (p *BlobPool) Locals() []common.Address {
+ return []common.Address{}
+}
+
+// Status returns the known status (unknown/pending/queued) of a transaction
+// identified by its hash.
+func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
+ if p.Has(hash) {
+ return txpool.TxStatusPending
+ }
+ return txpool.TxStatusUnknown
+}
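The test file below exercises the pool through one recurring lifecycle: seed transactions into the on-disk store, build a state database, construct the pool with New, replay the store via Init, assert, then Close. Condensed into a helper sketch (the no-op reserver here is a stand-in for the asserting makeAddressReserver defined in the tests):

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/txpool/blobpool"
)

// openPool constructs a blob pool over a pre-seeded datadir and replays the
// persisted transactions against the chain's current head.
func openPool(chain blobpool.BlockChain, datadir string) (*blobpool.BlobPool, error) {
	pool := blobpool.New(blobpool.Config{Datadir: datadir}, chain)

	// A no-op address reserver; real callers pass one that enforces subpool
	// exclusivity (the tests panic on violations instead).
	reserver := func(common.Address, bool) error { return nil }

	if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), reserver); err != nil {
		return nil, err
	}
	return pool, nil
}

func main() {}
```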
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go new file mode 100644 index 000000000..8914301e1 --- /dev/null +++ b/core/txpool/blobpool/blobpool_test.go @@ -0,0 +1,1251 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/sha256"
+ "errors"
+ "math"
+ "math/big"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/billy"
+ "github.com/holiman/uint256"
+)
+
+var (
+ emptyBlob = kzg4844.Blob{}
+ emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+ emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+ emptyBlobVHash = blobHash(emptyBlobCommit)
+)
+
+func blobHash(commit kzg4844.Commitment) common.Hash {
+ hasher := sha256.New()
+ hasher.Write(commit[:])
+ hash := hasher.Sum(nil)
+
+ var vhash common.Hash
+ vhash[0] = params.BlobTxHashVersion
+ copy(vhash[1:], hash[1:])
+
+ return vhash
+}
+
+// Chain configuration with Cancun enabled.
+//
+// TODO(karalabe): replace with params.MainnetChainConfig after Cancun.
+var testChainConfig *params.ChainConfig
+
+func init() {
+ testChainConfig = new(params.ChainConfig)
+ *testChainConfig = *params.MainnetChainConfig
+
+ testChainConfig.CancunTime = new(uint64)
+ *testChainConfig.CancunTime = uint64(time.Now().Unix())
+}
+
+// testBlockChain is a mock of the live chain for testing the pool.
+type testBlockChain struct {
+ config *params.ChainConfig
+ basefee *uint256.Int
+ blobfee *uint256.Int
+ statedb *state.StateDB
+}
+
+func (bc *testBlockChain) Config() *params.ChainConfig {
+ return bc.config
+}
+
+func (bc *testBlockChain) CurrentBlock() *types.Header {
+ // Yolo, life is too short to invert eip1559.CalcBaseFee and eip4844.CalcBlobFee,
+ // just binary search them.
+
+ // A base fee of 5714 ETH translates into a 21000-gas intrinsic cost that is
+ // higher than all the ether in existence on mainnet; use that as a cap for
+ // the tests.
+ var (
+ blockNumber = new(big.Int).Add(bc.config.LondonBlock, big.NewInt(1))
+ blockTime = *bc.config.CancunTime + 1
+ gasLimit = uint64(30_000_000)
+ )
+ lo := new(big.Int)
+ hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
+
+ for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+ mid := new(big.Int).Add(lo, hi)
+ mid.Div(mid, big.NewInt(2))
+
+ if eip1559.CalcBaseFee(bc.config, &types.Header{
+ Number: blockNumber,
+ GasLimit: gasLimit,
+ GasUsed: 0,
+ BaseFee: mid,
+ }).Cmp(bc.basefee.ToBig()) > 0 {
+ hi = mid
+ } else {
+ lo = mid
+ }
+ }
+ baseFee := lo
+
+ // An excess blob gas of 2^27 translates into a blob fee higher than all the
+ // ether in existence on mainnet; use that as a cap for the tests.
+ lo = new(big.Int)
+ hi = new(big.Int).Exp(big.NewInt(2), big.NewInt(27), nil)
+
+ for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
+ mid := new(big.Int).Add(lo, hi)
+ mid.Div(mid, big.NewInt(2))
+
+ if eip4844.CalcBlobFee(mid.Uint64()).Cmp(bc.blobfee.ToBig()) > 0 {
+ hi = mid
+ } else {
+ lo = mid
+ }
+ }
+ excessBlobGas := lo.Uint64()
+
+ return &types.Header{
+ Number: blockNumber,
+ Time: blockTime,
+ GasLimit: gasLimit,
+ BaseFee: baseFee,
+ ExcessBlobGas: &excessBlobGas,
+ }
+}
+
+func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
+ return &types.Header{
+ Number: big.NewInt(0),
+ }
+}
+
+func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ return nil
+}
+
+func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
+ return bc.statedb, nil
+}
+
+// makeAddressReserver is a utility method to sanity check that accounts are
+// properly reserved by the blobpool (no duplicate reserves or unreserves).
+func makeAddressReserver() txpool.AddressReserver {
+ var (
+ reserved = make(map[common.Address]struct{})
+ lock sync.Mutex
+ )
+ return func(addr common.Address, reserve bool) error {
+ lock.Lock()
+ defer lock.Unlock()
+
+ _, exists := reserved[addr]
+ if reserve {
+ if exists {
+ panic("already reserved")
+ }
+ reserved[addr] = struct{}{}
+ return nil
+ }
+ if !exists {
+ panic("not reserved")
+ }
+ delete(reserved, addr)
+ return nil
+ }
+}
+
+// makeTx is a utility method to construct a random blob transaction and sign it
+// with a valid key, only setting the interesting fields from the perspective of
+// the blob pool.
+func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
+ blobtx := makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap)
+ return types.MustSignNewTx(key, types.LatestSigner(testChainConfig), blobtx)
+}
+
+// makeUnsignedTx is a utility method to construct a random blob transaction
+// without signing it.
+func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
+ return &types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ Nonce: nonce,
+ GasTipCap: uint256.NewInt(gasTipCap),
+ GasFeeCap: uint256.NewInt(gasFeeCap),
+ Gas: 21000,
+ BlobFeeCap: uint256.NewInt(blobFeeCap),
+ BlobHashes: []common.Hash{emptyBlobVHash},
+ Value: uint256.NewInt(100),
+ Sidecar: &types.BlobTxSidecar{
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ }
+}
+
+// verifyPoolInternals iterates over all the transactions in the pool and checks
+// that sort orders, calculated fields and accumulated fields are correct.
+func verifyPoolInternals(t *testing.T, pool *BlobPool) {
+ // Mark this method as a helper to remove from stack traces
+ t.Helper()
+
+ // Verify that all items in the index are present in the lookup and nothing more
+ seen := make(map[common.Hash]struct{})
+ for addr, txs := range pool.index {
+ for _, tx := range txs {
+ if _, ok := seen[tx.hash]; ok {
+ t.Errorf("duplicate hash #%x in transaction index: address %s, nonce %d", tx.hash, addr, tx.nonce)
+ }
+ seen[tx.hash] = struct{}{}
+ }
+ }
+ for hash, id := range pool.lookup {
+ if _, ok := seen[hash]; !ok {
+ t.Errorf("lookup entry missing from transaction index: hash #%x, id %d", hash, id)
+ }
+ delete(seen, hash)
+ }
+ for hash := range seen {
+ t.Errorf("indexed transaction hash #%x missing from lookup table", hash)
+ }
+ // Verify that transactions are sorted per account and contain no nonce gaps
+ for addr, txs := range pool.index {
+ for i := 1; i < len(txs); i++ {
+ if txs[i].nonce != txs[i-1].nonce+1 {
+ t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
+ }
+ }
+ }
+ // Verify that calculated eviction thresholds are correct
+ for addr, txs := range pool.index {
+ if !txs[0].evictionExecTip.Eq(txs[0].execTipCap) {
+ t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, 0, txs[0].evictionExecTip, txs[0].execTipCap)
+ }
+ if math.Abs(txs[0].evictionExecFeeJumps-txs[0].basefeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionExecFeeJumps, txs[0].basefeeJumps)
+ }
+ if math.Abs(txs[0].evictionBlobFeeJumps-txs[0].blobfeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionBlobFeeJumps, txs[0].blobfeeJumps)
+ }
+ for i := 1; i < len(txs); i++ {
+ wantExecTip := txs[i-1].evictionExecTip
+ if wantExecTip.Gt(txs[i].execTipCap) {
+ wantExecTip = txs[i].execTipCap
+ }
+ if !txs[i].evictionExecTip.Eq(wantExecTip) {
+ t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
+ }
+
+ wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
+ if wantExecFeeJumps > txs[i].basefeeJumps {
+ wantExecFeeJumps = txs[i].basefeeJumps
+ }
+ if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
+ }
+
+ wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
+ if wantBlobFeeJumps > txs[i].blobfeeJumps {
+ wantBlobFeeJumps = txs[i].blobfeeJumps
+ }
+ if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
+ t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
+ }
+ }
+ }
+ // Verify that account balance accumulations are correct
+ for addr, txs := range pool.index {
+ spent := new(uint256.Int)
+ for _, tx := range txs {
+ spent.Add(spent, tx.costCap)
+ }
+ if !pool.spent[addr].Eq(spent) {
+ t.Errorf("addr %v expenditure mismatch: have %d, want %d", addr, pool.spent[addr], spent)
+ }
+ }
+ // Verify that pool storage size is correct
+ var stored uint64
+ for _, txs := range pool.index {
+ for _, tx := range txs {
+ stored += uint64(tx.size)
+ }
+ }
+ if pool.stored != stored {
+ t.Errorf("pool storage mismatch: have %d, want %d", pool.stored, stored)
+ }
+ // Verify the price heap internals
+ verifyHeapInternals(t, pool.evict)
+}
+
+// Tests that transactions can be loaded from disk on startup and that they are
+// correctly discarded if invalid.
+//
+// - 1. A transaction that cannot be decoded must be dropped
+// - 2. A transaction that cannot be recovered (bad signature) must be dropped
+// - 3. All transactions after a nonce gap must be dropped
+// - 4. All transactions after an underpriced one (including it) must be dropped
+func TestOpenDrops(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a malformed transaction to verify that decoding errors (or format
+ // changes) are handled gracefully (case 1)
+ malformed, _ := store.Put([]byte("this is a badly encoded transaction"))
+
+ // Insert a transaction with a bad signature to verify that stale junk after
+ // potential hard-forks can get evicted (case 2)
+ tx := types.NewTx(&types.BlobTx{
+ ChainID: uint256.MustFromBig(testChainConfig.ChainID),
+ GasTipCap: new(uint256.Int),
+ GasFeeCap: new(uint256.Int),
+ Gas: 0,
+ Value: new(uint256.Int),
+ Data: nil,
+ BlobFeeCap: new(uint256.Int),
+ V: new(uint256.Int),
+ R: new(uint256.Int),
+ S: new(uint256.Int),
+ })
+ blob, _ := rlp.EncodeToBytes(tx)
+ badsig, _ := store.Put(blob)
+
+ // Insert a sequence of transactions with a nonce gap in between to verify
+ // that anything gapped will get evicted (case 3)
+ var (
+ gapper, _ = crypto.GenerateKey()
+
+ valids = make(map[uint64]struct{})
+ gapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5
+ tx := makeTx(nonce, 1, 1, 1, gapper)
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ if nonce < 2 {
+ valids[id] = struct{}{}
+ } else {
+ gapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with a gapped starting nonce to verify
+ // that the entire set will get dropped.
+ var (
+ dangler, _ = crypto.GenerateKey()
+ dangling = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
+ tx := makeTx(nonce, 1, 1, 1, dangler)
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ dangling[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with already passed nonces to verify
+ // that the entire set will get dropped.
+ var (
+ filler, _ = crypto.GenerateKey()
+ filled = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
+ tx := makeTx(nonce, 1, 1, 1, filler)
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ filled[id] = struct{}{}
+ }
+ // Insert a sequence of transactions with partially passed nonces to verify
+ // that the included part of the set will get dropped
+ var (
+ overlapper, _ = crypto.GenerateKey()
+ overlapped = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
+ tx := makeTx(nonce, 1, 1, 1, overlapper)
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ if nonce >= 2 {
+ valids[id] = struct{}{}
+ } else {
+ overlapped[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions with an underpriced first to verify that
+ // the entire set will get dropped (case 4).
+ var (
+ underpayer, _ = crypto.GenerateKey()
+ underpaid = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #0 underpriced
+ var tx *types.Transaction
+ if i == 0 {
+ tx = makeTx(uint64(i), 0, 0, 0, underpayer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, underpayer)
+ }
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ underpaid[id] = struct{}{}
+ }
+
+ // Insert a sequence of transactions with an underpriced one in between to
+ // verify that it and anything newly gapped will get evicted (case 4).
+ var (
+ outpricer, _ = crypto.GenerateKey()
+ outpriced = make(map[uint64]struct{})
+ )
+ for i := 0; i < 5; i++ { // make #2 underpriced
+ var tx *types.Transaction
+ if i == 2 {
+ tx = makeTx(uint64(i), 0, 0, 0, outpricer)
+ } else {
+ tx = makeTx(uint64(i), 1, 1, 1, outpricer)
+ }
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ if i < 2 {
+ valids[id] = struct{}{}
+ } else {
+ outpriced[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions fully overdrafted to verify that the
+ // entire set will get invalidated.
+ var (
+ exceeder, _ = crypto.GenerateKey()
+ exceeded = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 0 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 0 {
+ tx = makeTx(nonce, 1, 100, 1, exceeder)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, exceeder)
+ }
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ exceeded[id] = struct{}{}
+ }
+ // Insert a sequence of transactions partially overdrafted to verify that part
+ // of the set will get invalidated.
+ var (
+ overdrafter, _ = crypto.GenerateKey()
+ overdrafted = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} { // nonce 1 overdrafts the account
+ var tx *types.Transaction
+ if nonce == 1 {
+ tx = makeTx(nonce, 1, 100, 1, overdrafter)
+ } else {
+ tx = makeTx(nonce, 1, 1, 1, overdrafter)
+ }
+ blob, _ := rlp.EncodeToBytes(tx)
+
+ id, _ := store.Put(blob)
+ if nonce < 1 {
+ valids[id] = struct{}{}
+ } else {
+ overdrafted[id] = struct{}{}
+ }
+ }
+ // Insert a sequence of transactions overflowing the account cap to verify
+ // that part of the set will get invalidated.
+ var ( + overcapper, _ = crypto.GenerateKey() + overcapped = make(map[uint64]struct{}) + ) + for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ { + blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, overcapper)) + + id, _ := store.Put(blob) + if nonce < maxTxsPerAccount { + valids[id] = struct{}{} + } else { + overcapped[id] = struct{}{} + } + } + store.Close() + + // Create a blob pool out of the pre-seeded data + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil) + statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), big.NewInt(1000000)) + statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), big.NewInt(1000000)) + statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), big.NewInt(1000000)) + statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3) + statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), big.NewInt(1000000)) + statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2) + statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), big.NewInt(1000000)) + statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), big.NewInt(1000000)) + statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), big.NewInt(1000000)) + statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), big.NewInt(1000000)) + statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), big.NewInt(10000000)) + statedb.Commit(0, true) + + chain := &testBlockChain{ + config: testChainConfig, + basefee: uint256.NewInt(params.InitialBaseFee), + blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice), + statedb: statedb, + } + pool := New(Config{Datadir: storage}, chain) + if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { + t.Fatalf("failed to create blob pool: %v", err) + } + defer pool.Close() + + // Verify that the malformed (case 1), badly signed (case 2) and gapped (case + // 3) txs have been deleted from the pool + alive := make(map[uint64]struct{}) + for _, txs := range pool.index { + for _, tx := range txs { + switch tx.id { + case malformed: + t.Errorf("malformed RLP transaction remained in storage") + case badsig: + t.Errorf("invalidly signed transaction remained in storage") + default: + if _, ok := dangling[tx.id]; ok { + t.Errorf("dangling transaction remained in storage: %d", tx.id) + } else if _, ok := filled[tx.id]; ok { + t.Errorf("filled transaction remained in storage: %d", tx.id) + } else if _, ok := overlapped[tx.id]; ok { + t.Errorf("overlapped transaction remained in storage: %d", tx.id) + } else if _, ok := gapped[tx.id]; ok { + t.Errorf("gapped transaction remained in storage: %d", tx.id) + } else if _, ok := underpaid[tx.id]; ok { + t.Errorf("underpaid transaction remained in storage: %d", tx.id) + } else if _, ok := outpriced[tx.id]; ok { + t.Errorf("outpriced transaction remained in storage: %d", tx.id) + } else if _, ok := exceeded[tx.id]; ok { + t.Errorf("fully overdrafted transaction remained in storage: %d", tx.id) + } else if _, ok := overdrafted[tx.id]; ok { + t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id) + } else if _, ok := overcapped[tx.id]; ok { + t.Errorf("overcapped transaction remained in storage: %d", tx.id) + } else { + alive[tx.id] = struct{}{} + } + } + } + } + // Verify that the rest of the transactions remained alive + if len(alive) != len(valids) { + t.Errorf("valid transaction count mismatch: have %d, want %d", len(alive), len(valids)) + } + for id := range 
alive {
+ if _, ok := valids[id]; !ok {
+ t.Errorf("extra transaction %d", id)
+ }
+ }
+ for id := range valids {
+ if _, ok := alive[id]; !ok {
+ t.Errorf("missing transaction %d", id)
+ }
+ }
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+}
+
+// Tests that transactions loaded from disk are indexed correctly.
+//
+// - 1. Transactions must be grouped by sender, sorted by nonce
+// - 2. Eviction thresholds are calculated correctly for the sequences
+// - 3. Balance usage of an account is the total across all transactions
+func TestOpenIndex(t *testing.T) {
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+ // Create a temporary folder for the persistent backend
+ storage, _ := os.MkdirTemp("", "blobpool-")
+ defer os.RemoveAll(storage)
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+ // Insert a sequence of transactions with varying price points to check that
+ // the cumulative minimum will be maintained.
+ var (
+ key, _ = crypto.GenerateKey()
+ addr = crypto.PubkeyToAddress(key.PublicKey)
+
+ txExecTipCaps = []uint64{10, 25, 5, 7, 1, 100}
+ txExecFeeCaps = []uint64{100, 90, 200, 10, 80, 300}
+ txBlobFeeCaps = []uint64{55, 66, 77, 33, 22, 11}
+
+ //basefeeJumps = []float64{39.098, 38.204, 44.983, 19.549, 37.204, 48.426} // log 1.125 (exec fee cap)
+ //blobfeeJumps = []float64{34.023, 35.570, 36.879, 29.686, 26.243, 20.358} // log 1.125 (blob fee cap)
+
+ evictExecTipCaps = []uint64{10, 10, 5, 5, 1, 1}
+ evictExecFeeJumps = []float64{39.098, 38.204, 38.204, 19.549, 19.549, 19.549} // min(log 1.125 (exec fee cap))
+ evictBlobFeeJumps = []float64{34.023, 34.023, 34.023, 29.686, 26.243, 20.358} // min(log 1.125 (blob fee cap))
+
+ totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + blobSize*(55+66+77+33+22+11) + 100*6) // 21000 gas x price + 128KB x blobprice + value
+ )
+ for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load
+ tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key)
+ blob, _ := rlp.EncodeToBytes(tx)
+ store.Put(blob)
+ }
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ statedb.AddBalance(addr, big.NewInt(1_000_000_000))
+ statedb.Commit(0, true)
+
+ chain := &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(params.InitialBaseFee),
+ blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain)
+ if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+ defer pool.Close()
+
+ // Verify that the transactions have been sorted by nonce (case 1)
+ for i := 0; i < len(pool.index[addr]); i++ {
+ if pool.index[addr][i].nonce != uint64(i) {
+ t.Errorf("tx %d nonce mismatch: have %d, want %d", i, pool.index[addr][i].nonce, uint64(i))
+ }
+ }
+ // Verify that the cumulative fee minimums have been correctly calculated (case 2)
+ for i, cap := range
evictExecTipCaps { + if !pool.index[addr][i].evictionExecTip.Eq(uint256.NewInt(cap)) { + t.Errorf("eviction tip cap %d mismatch: have %d, want %d", i, pool.index[addr][i].evictionExecTip, cap) + } + } + for i, jumps := range evictExecFeeJumps { + if math.Abs(pool.index[addr][i].evictionExecFeeJumps-jumps) > 0.001 { + t.Errorf("eviction fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionExecFeeJumps, jumps) + } + } + for i, jumps := range evictBlobFeeJumps { + if math.Abs(pool.index[addr][i].evictionBlobFeeJumps-jumps) > 0.001 { + t.Errorf("eviction blob fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionBlobFeeJumps, jumps) + } + } + // Verify that the balance usage has been correctly calculated (case 3) + if !pool.spent[addr].Eq(totalSpent) { + t.Errorf("expenditure mismatch: have %d, want %d", pool.spent[addr], totalSpent) + } + // Verify all the calculated pool internals. Interestingly, this is **not** + // a duplication of the above checks, this actually validates the verifier + // using the above already hard coded checks. + // + // Do not remove this, nor alter the above to be generic. + verifyPoolInternals(t, pool) +} + +// Tests that after indexing all the loaded transactions from disk, a price heap +// is correctly constructed based on the head basefee and blobfee. +func TestOpenHeap(t *testing.T) { + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + + // Create a temporary folder for the persistent backend + storage, _ := os.MkdirTemp("", "blobpool-") + defer os.RemoveAll(storage) + + os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) + store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) + + // Insert a few transactions from a few accounts. To remove randomness from + // the heap initialization, use a deterministic account/tx/priority ordering. 
+// Tests that after indexing all the loaded transactions from disk, a price heap
+// is correctly constructed based on the head basefee and blobfee.
+func TestOpenHeap(t *testing.T) {
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+	// Create a temporary folder for the persistent backend
+	storage, _ := os.MkdirTemp("", "blobpool-")
+	defer os.RemoveAll(storage)
+
+	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+	// Insert a few transactions from a few accounts. To remove randomness from
+	// the heap initialization, use a deterministic account/tx/priority ordering.
+	var (
+		key1, _ = crypto.GenerateKey()
+		key2, _ = crypto.GenerateKey()
+		key3, _ = crypto.GenerateKey()
+
+		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+		addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+	)
+	if bytes.Compare(addr1[:], addr2[:]) > 0 {
+		key1, addr1, key2, addr2 = key2, addr2, key1, addr1
+	}
+	if bytes.Compare(addr1[:], addr3[:]) > 0 {
+		key1, addr1, key3, addr3 = key3, addr3, key1, addr1
+	}
+	if bytes.Compare(addr2[:], addr3[:]) > 0 {
+		key2, addr2, key3, addr3 = key3, addr3, key2, addr2
+	}
+	var (
+		tx1 = makeTx(0, 1, 1000, 90, key1)
+		tx2 = makeTx(0, 1, 800, 70, key2)
+		tx3 = makeTx(0, 1, 1500, 110, key3)
+
+		blob1, _ = rlp.EncodeToBytes(tx1)
+		blob2, _ = rlp.EncodeToBytes(tx2)
+		blob3, _ = rlp.EncodeToBytes(tx3)
+
+		heapOrder = []common.Address{addr2, addr1, addr3}
+		heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2}
+	)
+	store.Put(blob1)
+	store.Put(blob2)
+	store.Put(blob3)
+	store.Close()
+
+	// Create a blob pool out of the pre-seeded data
+	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+	statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+	statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+	statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+	statedb.Commit(0, true)
+
+	chain := &testBlockChain{
+		config:  testChainConfig,
+		basefee: uint256.NewInt(1050),
+		blobfee: uint256.NewInt(105),
+		statedb: statedb,
+	}
+	pool := New(Config{Datadir: storage}, chain)
+	if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+		t.Fatalf("failed to create blob pool: %v", err)
+	}
+	defer pool.Close()
+
+	// Verify that the heap's internal state matches the expectations
+	for i, addr := range pool.evict.addrs {
+		if addr != heapOrder[i] {
+			t.Errorf("slot %d mismatch: have %v, want %v", i, addr, heapOrder[i])
+		}
+	}
+	for addr, i := range pool.evict.index {
+		if i != heapIndex[addr] {
+			t.Errorf("index for %v mismatch: have %d, want %d", addr, i, heapIndex[addr])
+		}
+	}
+	// Verify all the calculated pool internals. Interestingly, this is **not**
+	// a duplication of the above checks, this actually validates the verifier
+	// using the above already hard coded checks.
+	//
+	// Do not remove this, nor alter the above to be generic.
+	verifyPoolInternals(t, pool)
+}
+
+// Tests that after the pool's previous state is loaded back, any transactions
+// over the new storage cap will get dropped.
+func TestOpenCap(t *testing.T) {
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+	// Create a temporary folder for the persistent backend
+	storage, _ := os.MkdirTemp("", "blobpool-")
+	defer os.RemoveAll(storage)
+
+	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+	// Insert a few transactions from a few accounts
+	var (
+		key1, _ = crypto.GenerateKey()
+		key2, _ = crypto.GenerateKey()
+		key3, _ = crypto.GenerateKey()
+
+		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+		addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+		tx1 = makeTx(0, 1, 1000, 100, key1)
+		tx2 = makeTx(0, 1, 800, 70, key2)
+		tx3 = makeTx(0, 1, 1500, 110, key3)
+
+		blob1, _ = rlp.EncodeToBytes(tx1)
+		blob2, _ = rlp.EncodeToBytes(tx2)
+		blob3, _ = rlp.EncodeToBytes(tx3)
+
+		keep = []common.Address{addr1, addr3}
+		drop = []common.Address{addr2}
+		size = uint64(2 * (txAvgSize + blobSize))
+	)
+	store.Put(blob1)
+	store.Put(blob2)
+	store.Put(blob3)
+	store.Close()
+
+	// Verify pool capping twice: first by reducing the data cap, then restarting
+	// with a high cap to ensure everything was persisted previously
+	for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
+		// Create a blob pool out of the pre-seeded data, but cap it to 2 blob transactions
+		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+		statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
+		statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
+		statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
+		statedb.Commit(0, true)
+
+		chain := &testBlockChain{
+			config:  testChainConfig,
+			basefee: uint256.NewInt(1050),
+			blobfee: uint256.NewInt(105),
+			statedb: statedb,
+		}
+		pool := New(Config{Datadir: storage, Datacap: datacap}, chain)
+		if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+			t.Fatalf("failed to create blob pool: %v", err)
+		}
+		// Verify that enough transactions have been dropped to get the pool's size
+		// under the requested limit
+		if len(pool.index) != len(keep) {
+			t.Errorf("tracked account count mismatch: have %d, want %d", len(pool.index), len(keep))
+		}
+		for _, addr := range keep {
+			if _, ok := pool.index[addr]; !ok {
+				t.Errorf("expected account %v missing from pool", addr)
+			}
+		}
+		for _, addr := range drop {
+			if _, ok := pool.index[addr]; ok {
+				t.Errorf("unexpected account %v present in pool", addr)
+			}
+		}
+		if pool.stored != size {
+			t.Errorf("pool stored size mismatch: have %v, want %v", pool.stored, size)
+		}
+		// Verify all the calculated pool internals. Interestingly, this is **not**
+		// a duplication of the above checks, this actually validates the verifier
+		// using the above already hard coded checks.
+		//
+		// Do not remove this, nor alter the above to be generic.
+		verifyPoolInternals(t, pool)
+
+		pool.Close()
+	}
+}
+
+// Tests that adding a transaction will correctly store it in the persistent
+// store and update all the indices.
+//
+// Note, this test mostly checks the pool's transaction shuffling logic and
+// things specific to the blob pool. It does not do an exhaustive transaction
+// validity check.
+func TestAdd(t *testing.T) {
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+	// seed is a helper tuple to seed an initial state db and pool
+	type seed struct {
+		balance uint64
+		nonce   uint64
+		txs     []*types.BlobTx
+	}
+
+	// addtx is a helper sender/tx tuple to represent a new tx addition
+	type addtx struct {
+		from string
+		tx   *types.BlobTx
+		err  error
+	}
+
+	tests := []struct {
+		seeds map[string]seed
+		adds  []addtx
+	}{
+		// Transactions from new accounts should be accepted if their initial
+		// nonce matches the expected one from the statedb. Higher or lower must
+		// be rejected.
+		{
+			seeds: map[string]seed{
+				"alice":  {balance: 21100 + blobSize},
+				"bob":    {balance: 21100 + blobSize, nonce: 1},
+				"claire": {balance: 21100 + blobSize},
+				"dave":   {balance: 21100 + blobSize, nonce: 1},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: accept nonce 0
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  nil,
+				},
+				{ // Old account, 1 tx in chain, 0 pending: accept nonce 1
+					from: "bob",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, no previous txs: reject nonce 1
+					from: "claire",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  core.ErrNonceTooHigh,
+				},
+				{ // Old account, 1 tx in chain, 0 pending: reject nonce 0
+					from: "dave",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  core.ErrNonceTooLow,
+				},
+				{ // Old account, 1 tx in chain, 0 pending: reject nonce 2
+					from: "dave",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  core.ErrNonceTooHigh,
+				},
+			},
+		},
+		// Transactions from already pooled accounts should only be accepted if
+		// the nonces are contiguous (ignore prices for now, will check later)
+		{
+			seeds: map[string]seed{
+				"alice": {
+					balance: 1000000,
+					txs: []*types.BlobTx{
+						makeUnsignedTx(0, 1, 1, 1),
+					},
+				},
+				"bob": {
+					balance: 1000000,
+					nonce:   1,
+					txs: []*types.BlobTx{
+						makeUnsignedTx(1, 1, 1, 1),
+					},
+				},
+			},
+			adds: []addtx{
+				{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 tx pending: accept nonce 1
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 txs pending: reject nonce 3
+					from: "alice",
+					tx:   makeUnsignedTx(3, 1, 1, 1),
+					err:  core.ErrNonceTooHigh,
+				},
+				{ // New account, 2 txs pending: accept nonce 2
+					from: "alice",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 3 txs pending: accept nonce 3 now
+					from: "alice",
+					tx:   makeUnsignedTx(3, 1, 1, 1),
+					err:  nil,
+				},
+				{ // Old account, 1 tx in chain, 1 tx pending: reject replacement nonce 1 (ignore price for now)
+					from: "bob",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
+					from: "bob",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  nil,
+				},
+			},
+		},
+		// Transactions should only be accepted into the pool if the cumulative
+		// expenditure doesn't overflow the account balance
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 63299 + 3*blobSize}, // 3 tx - 1 wei
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: accept nonce 0 with 21100 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx with 21100 wei spent: accept nonce 1 with 21100 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 pooled tx with 42200 wei spent: reject nonce 2 with 21100 wei spend (1 wei overflow)
+					from: "alice",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  core.ErrInsufficientFunds,
+				},
+			},
+		},
+		// Transactions should only be accepted into the pool if the total count
+		// from the same account doesn't overflow the pool limits
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 10000000},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs, 16 slots left: accept nonce 0
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx, 15 slots left: accept nonce 1
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 pooled tx, 14 slots left: accept nonce 2
+					from: "alice",
+					tx:   makeUnsignedTx(2, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 3 pooled tx, 13 slots left: accept nonce 3
+					from: "alice",
+					tx:   makeUnsignedTx(3, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 4 pooled tx, 12 slots left: accept nonce 4
+					from: "alice",
+					tx:   makeUnsignedTx(4, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 5 pooled tx, 11 slots left: accept nonce 5
+					from: "alice",
+					tx:   makeUnsignedTx(5, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 6 pooled tx, 10 slots left: accept nonce 6
+					from: "alice",
+					tx:   makeUnsignedTx(6, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 7 pooled tx, 9 slots left: accept nonce 7
+					from: "alice",
+					tx:   makeUnsignedTx(7, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 8 pooled tx, 8 slots left: accept nonce 8
+					from: "alice",
+					tx:   makeUnsignedTx(8, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 9 pooled tx, 7 slots left: accept nonce 9
+					from: "alice",
+					tx:   makeUnsignedTx(9, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 10 pooled tx, 6 slots left: accept nonce 10
+					from: "alice",
+					tx:   makeUnsignedTx(10, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 11 pooled tx, 5 slots left: accept nonce 11
+					from: "alice",
+					tx:   makeUnsignedTx(11, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 12 pooled tx, 4 slots left: accept nonce 12
+					from: "alice",
+					tx:   makeUnsignedTx(12, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 13 pooled tx, 3 slots left: accept nonce 13
+					from: "alice",
+					tx:   makeUnsignedTx(13, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 14 pooled tx, 2 slots left: accept nonce 14
+					from: "alice",
+					tx:   makeUnsignedTx(14, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 15 pooled tx, 1 slot left: accept nonce 15
+					from: "alice",
+					tx:   makeUnsignedTx(15, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 16 pooled tx, 0 slots left: accept nonce 15 replacement
+					from: "alice",
+					tx:   makeUnsignedTx(15, 10, 10, 10),
+					err:  nil,
+				},
+				{ // New account, 16 pooled tx, 0 slots left: reject nonce 16 with overcap
+					from: "alice",
+					tx:   makeUnsignedTx(16, 1, 1, 1),
+					err:  txpool.ErrAccountLimitExceeded,
+				},
+			},
+		},
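The wei figures quoted in these cases all come from one worst-case formula: intrinsic gas times the exec fee cap, plus blob gas times the blob fee cap, plus the transferred value. A quick sketch of that bookkeeping (the 21000 gas, 131072-byte blob and 100 wei transfer value are assumptions matching the constants these tests rely on):

    package main

    import "fmt"

    // spendOf mirrors the worst-case charge the pool books against an account
    // for one blob tx (sketch only; constants assumed, see the lead-in).
    func spendOf(gasFeeCap, blobFeeCap, value uint64) uint64 {
        const (
            txGas    = 21000  // intrinsic gas of a plain transfer
            blobSize = 131072 // blob gas per blob, EIP-4844
        )
        return txGas*gasFeeCap + blobSize*blobFeeCap + value
    }

    func main() {
        fmt.Println(spendOf(2, 1, 100)) // 173172, as quoted in the cases below
        fmt.Println(spendOf(1, 1, 100)) // 152172
    }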
+		// Previously existing transactions should be allowed to be replaced iff
+		// the new cumulative expenditure can be covered by the account and the
+		// prices are bumped all around (no percentage check here).
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 2*100 + 5*21000 + 3*blobSize},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: reject nonce 0 with 341172 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 20, 1),
+					err:  core.ErrInsufficientFunds,
+				},
+				{ // New account, no previous txs: accept nonce 0 with 173172 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 2, 1),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx with 173172 wei spent: accept nonce 1 with 152172 wei spend
+					from: "alice",
+					tx:   makeUnsignedTx(1, 1, 1, 1),
+					err:  nil,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with 599684 wei spend (173072 extra) (would overflow balance at nonce 1)
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 5, 2),
+					err:  core.ErrInsufficientFunds,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gastip-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 1, 3, 2),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gascap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 2, 2),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-blobcap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 4, 1),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 2 pooled tx with 325344 wei spent: accept nonce 0 with 84100 wei spend (42000 extra)
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 4, 2),
+					err:  nil,
+				},
+			},
+		},
+		// Previously existing transactions should be allowed to be replaced iff
+		// the new prices are bumped by a sufficient amount.
+		{
+			seeds: map[string]seed{
+				"alice": {balance: 100 + 8*21000 + 4*blobSize},
+			},
+			adds: []addtx{
+				{ // New account, no previous txs: accept nonce 0
+					from: "alice",
+					tx:   makeUnsignedTx(0, 2, 4, 2),
+					err:  nil,
+				},
+				{ // New account, 1 pooled tx: reject nonce 0 with low-gastip-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 3, 8, 4),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 pooled tx: reject nonce 0 with low-gascap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 4, 6, 4),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 pooled tx: reject nonce 0 with low-blobcap-bump
+					from: "alice",
+					tx:   makeUnsignedTx(0, 4, 8, 3),
+					err:  txpool.ErrReplaceUnderpriced,
+				},
+				{ // New account, 1 pooled tx: accept nonce 0 with all-bumps
+					from: "alice",
+					tx:   makeUnsignedTx(0, 4, 8, 4),
+					err:  nil,
+				},
+			},
+		},
+	}
+	for i, tt := range tests {
+		// Create a temporary folder for the persistent backend
+		storage, _ := os.MkdirTemp("", "blobpool-")
+		defer os.RemoveAll(storage) // late defer, still ok
+
+		os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+		store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
+
+		// Insert the seed transactions for the pool startup
+		var (
+			keys  = make(map[string]*ecdsa.PrivateKey)
+			addrs = make(map[string]common.Address)
+		)
+		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+		for acc, seed := range tt.seeds {
+			// Generate a new random key/address for the seed account
+			keys[acc], _ = crypto.GenerateKey()
+			addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
+
+			// Seed the state database with this account
+			statedb.AddBalance(addrs[acc], new(big.Int).SetUint64(seed.balance))
+			statedb.SetNonce(addrs[acc], seed.nonce)
+
+			// Sign the seed transactions and store them in the data store
+			for _, tx := range seed.txs {
+				signed := types.MustSignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
+				blob, _ := rlp.EncodeToBytes(signed)
+				store.Put(blob)
+			}
+		}
+		statedb.Commit(0, true)
+		store.Close()
+
+		// Create a blob pool out of the pre-seeded data
+		chain := &testBlockChain{
+			config:  testChainConfig,
+			basefee: uint256.NewInt(1050),
+			blobfee: uint256.NewInt(105),
+			statedb: statedb,
+		}
+		pool := New(Config{Datadir: storage}, chain)
+		if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+			t.Fatalf("test %d: failed to create blob pool: %v", i, err)
+		}
+		verifyPoolInternals(t, pool)
+
+		// Add each transaction one by one, verifying the pool internals in between
+		for j, add := range tt.adds {
+			signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
+			if err := pool.add(signed); !errors.Is(err, add.err) {
+				t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
+			}
+			verifyPoolInternals(t, pool)
+		}
+		// Verify the pool internals and close down the test
+		verifyPoolInternals(t, pool)
+		pool.Close()
+	}
+}
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
new file mode 100644
index 000000000..99a2002a3
--- /dev/null
+++ b/core/txpool/blobpool/config.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// Config are the configuration parameters of the blob transaction pool.
+type Config struct {
+	Datadir   string // Data directory containing the currently executable blobs
+	Datacap   uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
+	PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+	Datadir:   "blobpool",
+	Datacap:   10 * 1024 * 1024 * 1024,
+	PriceBump: 100, // either have patience or be aggressive, no mushy ground
+}
+
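DefaultConfig is also what an all-zero Config degrades to through the sanitize method below; a standalone sketch of those defaulting rules (the type is mirrored here for illustration, with the logging elided):

    package main

    import "fmt"

    // config mirrors the blobpool Config fields that sanitize guards (sketch).
    type config struct {
        Datacap   uint64
        PriceBump uint64
    }

    var defaults = config{Datacap: 10 * 1024 * 1024 * 1024, PriceBump: 100}

    // sanitize mimics the defaulting rules: anything below 1 falls back.
    func (c config) sanitize() config {
        if c.Datacap < 1 {
            c.Datacap = defaults.Datacap
        }
        if c.PriceBump < 1 {
            c.PriceBump = defaults.PriceBump
        }
        return c
    }

    func main() {
        fmt.Printf("%+v\n", config{}.sanitize()) // {Datacap:10737418240 PriceBump:100}
    }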
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (config *Config) sanitize() Config {
+	conf := *config
+	if conf.Datacap < 1 {
+		log.Warn("Sanitizing invalid blobpool storage cap", "provided", conf.Datacap, "updated", DefaultConfig.Datacap)
+		conf.Datacap = DefaultConfig.Datacap
+	}
+	if conf.PriceBump < 1 {
+		log.Warn("Sanitizing invalid blobpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
+		conf.PriceBump = DefaultConfig.PriceBump
+	}
+	return conf
+}
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
new file mode 100644
index 000000000..7607a911c
--- /dev/null
+++ b/core/txpool/blobpool/evictheap.go
@@ -0,0 +1,146 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"bytes"
+	"container/heap"
+	"math"
+	"sort"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/holiman/uint256"
+)
+
+// evictHeap is a helper data structure to keep track of the cheapest bottleneck
+// transaction from each account to determine which account to evict from.
+//
+// The heap internally tracks a slice of cheapest transactions from each account
+// and a mapping from addresses to indices for direct removals/updates.
+//
+// The goal of the heap is to decide which account has the worst bottleneck to
+// evict transactions from.
+type evictHeap struct {
+	metas *map[common.Address][]*blobTxMeta // Pointer to the blob pool's index for price retrievals
+
+	basefeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the base fee
+	blobfeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the blob fee
+
+	addrs []common.Address       // Heap of addresses to retrieve the cheapest out of
+	index map[common.Address]int // Indices into the heap for replacements
+}
+
+// newPriceHeap creates a new heap of the cheapest accounts in the blob pool to
+// evict from in case of oversaturation.
+func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
+	heap := &evictHeap{
+		metas: index,
+		index: make(map[common.Address]int),
+	}
+	// Populate the heap in account sort order. Not really needed in practice,
+	// but it makes the heap initialization deterministic and less annoying to
+	// test in unit tests.
+	addrs := make([]common.Address, 0, len(*index))
+	for addr := range *index {
+		addrs = append(addrs, addr)
+	}
+	sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i][:], addrs[j][:]) < 0 })
+
+	for _, addr := range addrs {
+		heap.index[addr] = len(heap.addrs)
+		heap.addrs = append(heap.addrs, addr)
+	}
+	heap.reinit(basefee, blobfee, true)
+	return heap
+}
+
+// reinit updates the pre-calculated dynamic fee jumps in the price heap and runs
+// the sorting algorithm from scratch on the entire heap.
+func (h *evictHeap) reinit(basefee *uint256.Int, blobfee *uint256.Int, force bool) {
+	// If the update is mostly the same as the old, don't sort pointlessly
+	basefeeJumps := dynamicFeeJumps(basefee)
+	blobfeeJumps := dynamicFeeJumps(blobfee)
+
+	if !force && math.Abs(h.basefeeJumps-basefeeJumps) < 0.01 && math.Abs(h.blobfeeJumps-blobfeeJumps) < 0.01 { // TODO(karalabe): 0.01 enough, maybe should be smaller? Maybe this optimization is moot?
+		return
+	}
+	// One or both of the dynamic fees jumped, re-sort the pool
+	h.basefeeJumps = basefeeJumps
+	h.blobfeeJumps = blobfeeJumps
+
+	heap.Init(h)
+}
+
+// Len implements sort.Interface as part of heap.Interface, returning the number
+// of accounts in the pool which can be considered for eviction.
+func (h *evictHeap) Len() int {
+	return len(h.addrs)
+}
+
+// Less implements sort.Interface as part of heap.Interface, returning which of
+// the two requested accounts has a cheaper bottleneck.
+func (h *evictHeap) Less(i, j int) bool {
+	txsI := (*(h.metas))[h.addrs[i]]
+	txsJ := (*(h.metas))[h.addrs[j]]
+
+	lastI := txsI[len(txsI)-1]
+	lastJ := txsJ[len(txsJ)-1]
+
+	prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
+	if prioI > 0 {
+		prioI = 0
+	}
+	prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
+	if prioJ > 0 {
+		prioJ = 0
+	}
+	if prioI == prioJ {
+		return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
+	}
+	return prioI < prioJ
+}
+
+// Swap implements sort.Interface as part of heap.Interface, maintaining both the
+// order of the accounts according to the heap, and the account->item slot mapping
+// for replacements.
+func (h *evictHeap) Swap(i, j int) {
+	h.index[h.addrs[i]], h.index[h.addrs[j]] = h.index[h.addrs[j]], h.index[h.addrs[i]]
+	h.addrs[i], h.addrs[j] = h.addrs[j], h.addrs[i]
+}
+
+// Push implements heap.Interface, appending an item to the end of the account
+// ordering as well as the address to item slot mapping.
+func (h *evictHeap) Push(x any) {
+	h.index[x.(common.Address)] = len(h.addrs)
+	h.addrs = append(h.addrs, x.(common.Address))
+}
+
+// Pop implements heap.Interface, removing and returning the last element of the
+// heap.
+//
+// Note, use `heap.Pop`, not `evictHeap.Pop`. This method is called by Go's heap
+// package to provide the functionality; it is not meant to be invoked directly.
+func (h *evictHeap) Pop() any {
+	// Remove the last element from the heap
+	size := len(h.addrs)
+	addr := h.addrs[size-1]
+	h.addrs = h.addrs[:size-1]
+
+	// Unindex the removed element and return
+	delete(h.index, addr)
+	return addr
+}
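These four methods plus the position index are the standard container/heap recipe for a heap that can be repaired in place. A minimal stand-alone analog (ordering by a plain price map rather than the real eviction priority):

    package main

    import (
        "container/heap"
        "fmt"
    )

    // priceHeap is a stripped-down analog of evictHeap: keys ordered by an
    // external map, with an index so entries can be fixed up after a change.
    type priceHeap struct {
        price map[string]int
        keys  []string
        index map[string]int
    }

    func (h *priceHeap) Len() int           { return len(h.keys) }
    func (h *priceHeap) Less(i, j int) bool { return h.price[h.keys[i]] < h.price[h.keys[j]] }
    func (h *priceHeap) Swap(i, j int) {
        h.index[h.keys[i]], h.index[h.keys[j]] = j, i
        h.keys[i], h.keys[j] = h.keys[j], h.keys[i]
    }
    func (h *priceHeap) Push(x any) {
        h.index[x.(string)] = len(h.keys)
        h.keys = append(h.keys, x.(string))
    }
    func (h *priceHeap) Pop() any {
        last := h.keys[len(h.keys)-1]
        h.keys = h.keys[:len(h.keys)-1]
        delete(h.index, last)
        return last
    }

    func main() {
        h := &priceHeap{
            price: map[string]int{"alice": 3, "bob": 1, "carol": 2},
            index: make(map[string]int),
        }
        for addr := range h.price {
            heap.Push(h, addr)
        }
        h.price["alice"] = 0
        heap.Fix(h, h.index["alice"]) // O(log n) repair via the index map

        for h.Len() > 0 {
            fmt.Println(heap.Pop(h)) // alice, bob, carol
        }
    }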
diff --git a/core/txpool/blobpool/evictheap_test.go b/core/txpool/blobpool/evictheap_test.go
new file mode 100644
index 000000000..01b136551
--- /dev/null
+++ b/core/txpool/blobpool/evictheap_test.go
@@ -0,0 +1,320 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"container/heap"
+	mrand "math/rand"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/holiman/uint256"
+)
+
+var rand = mrand.New(mrand.NewSource(1))
+
+// verifyHeapInternals verifies that all accounts present in the index are also
+// present in the heap and internals are consistent across various indices.
+func verifyHeapInternals(t *testing.T, evict *evictHeap) {
+	t.Helper()
+
+	// Ensure that all accounts are present in the heap and no extras
+	seen := make(map[common.Address]struct{})
+	for i, addr := range evict.addrs {
+		seen[addr] = struct{}{}
+		if _, ok := (*evict.metas)[addr]; !ok {
+			t.Errorf("heap contains unexpected address at slot %d: %v", i, addr)
+		}
+	}
+	for addr := range *evict.metas {
+		if _, ok := seen[addr]; !ok {
+			t.Errorf("heap is missing required address %v", addr)
+		}
+	}
+	if len(evict.addrs) != len(*evict.metas) {
+		t.Errorf("heap size %d mismatches metadata size %d", len(evict.addrs), len(*evict.metas))
+	}
+	// Ensure that all accounts are present in the heap order index and no extras
+	have := make([]common.Address, len(evict.index))
+	for addr, i := range evict.index {
+		have[i] = addr
+	}
+	if len(have) != len(evict.addrs) {
+		t.Errorf("heap index size %d mismatches heap size %d", len(have), len(evict.addrs))
+	}
+	for i := 0; i < len(have) && i < len(evict.addrs); i++ {
+		if have[i] != evict.addrs[i] {
+			t.Errorf("heap index for slot %d mismatches: have %v, want %v", i, have[i], evict.addrs[i])
+		}
+	}
+}
+
+// Tests that the price heap can correctly sort its set of transactions based on
+// an input base- and blob fee.
+func TestPriceHeapSorting(t *testing.T) {
+	tests := []struct {
+		execTips []uint64
+		execFees []uint64
+		blobFees []uint64
+
+		basefee uint64
+		blobfee uint64
+
+		order []int
+	}{
+		// If everything is above the basefee and blobfee, order by miner tip
+		{
+			execTips: []uint64{1, 0, 2},
+			execFees: []uint64{1, 2, 3},
+			blobFees: []uint64{3, 2, 1},
+			basefee:  0,
+			blobfee:  0,
+			order:    []int{1, 0, 2},
+		},
+		// If only basefees are used (blob fee matches with network), return the
+		// ones the furthest below the current basefee, splitting same ones with
+		// the tip. Anything above the basefee should be split by tip.
+		{
+			execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+			execFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+			blobFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+			basefee:  1999,
+			blobfee:  0,
+			order:    []int{3, 2, 1, 0, 4, 5, 6},
+		},
+		// If only blobfees are used (base fee matches with network), return the
+		// ones the furthest below the current blobfee, splitting same ones with
+		// the tip. Anything above the blobfee should be split by tip.
+		{
+			execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
+			execFees: []uint64{0, 0, 0, 0, 0, 0, 0},
+			blobFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
+			basefee:  0,
+			blobfee:  1999,
+			order:    []int{3, 2, 1, 0, 4, 5, 6},
+		},
+		// If both basefee and blobfee is specified, sort by the larger distance
+		// of the two from the current network conditions, splitting same (loglog)
+		// ones via the tip.
+		//
+		// Basefee: 1000
+		// Blobfee: 100
+		//
+		// Tx #0: (800, 80) - 2 jumps below both => priority -1
+		// Tx #1: (630, 63) - 4 jumps below both => priority -2
+		// Tx #2: (800, 63) - 2 jumps below basefee, 4 jumps below blobfee => priority -2 (blob penalty dominates)
+		// Tx #3: (630, 80) - 4 jumps below basefee, 2 jumps below blobfee => priority -2 (base penalty dominates)
+		//
+		// Txs 1, 2, 3 share the same priority, split via tip, prefer 0 as the best
+		{
+			execTips: []uint64{1, 2, 3, 4},
+			execFees: []uint64{800, 630, 800, 630},
+			blobFees: []uint64{80, 63, 63, 80},
+			basefee:  1000,
+			blobfee:  100,
+			order:    []int{1, 2, 3, 0},
+		},
+	}
+	for i, tt := range tests {
+		// Create an index of the transactions
+		index := make(map[common.Address][]*blobTxMeta)
+		for j := byte(0); j < byte(len(tt.execTips)); j++ {
+			addr := common.Address{j}
+
+			var (
+				execTip = uint256.NewInt(tt.execTips[j])
+				execFee = uint256.NewInt(tt.execFees[j])
+				blobFee = uint256.NewInt(tt.blobFees[j])
+
+				basefeeJumps = dynamicFeeJumps(execFee)
+				blobfeeJumps = dynamicFeeJumps(blobFee)
+			)
+			index[addr] = []*blobTxMeta{{
+				id:                   uint64(j),
+				size:                 128 * 1024,
+				nonce:                0,
+				execTipCap:           execTip,
+				execFeeCap:           execFee,
+				blobFeeCap:           blobFee,
+				basefeeJumps:         basefeeJumps,
+				blobfeeJumps:         blobfeeJumps,
+				evictionExecTip:      execTip,
+				evictionExecFeeJumps: basefeeJumps,
+				evictionBlobFeeJumps: blobfeeJumps,
+			}}
+		}
+		// Create a price heap and check the pop order
+		priceheap := newPriceHeap(uint256.NewInt(tt.basefee), uint256.NewInt(tt.blobfee), &index)
+		verifyHeapInternals(t, priceheap)
+
+		for j := 0; j < len(tt.order); j++ {
+			if next := heap.Pop(priceheap); int(next.(common.Address)[0]) != tt.order[j] {
+				t.Errorf("test %d, item %d: order mismatch: have %d, want %d", i, j, next.(common.Address)[0], tt.order[j])
+			} else {
+				delete(index, next.(common.Address)) // remove to simulate a correct pool for the test
+			}
+			verifyHeapInternals(t, priceheap)
+		}
+	}
+}
+
+// Benchmarks reheaping the entire set of accounts in the blob pool.
+func BenchmarkPriceHeapReinit1MB(b *testing.B)   { benchmarkPriceHeapReinit(b, 1024*1024) }
+func BenchmarkPriceHeapReinit10MB(b *testing.B)  { benchmarkPriceHeapReinit(b, 10*1024*1024) }
+func BenchmarkPriceHeapReinit100MB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024) }
+func BenchmarkPriceHeapReinit1GB(b *testing.B)   { benchmarkPriceHeapReinit(b, 1024*1024*1024) }
+func BenchmarkPriceHeapReinit10GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapReinit25GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapReinit50GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapReinit100GB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
+	// Calculate how many unique transactions we can fit into the provided disk
+	// data cap
+	blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+	// Create a random set of transactions with random fees. Use a separate account
+	// for each transaction to make it a worst case.
+	index := make(map[common.Address][]*blobTxMeta)
+	for i := 0; i < int(blobs); i++ {
+		var addr common.Address
+		rand.Read(addr[:])
+
+		var (
+			execTip = uint256.NewInt(rand.Uint64())
+			execFee = uint256.NewInt(rand.Uint64())
+			blobFee = uint256.NewInt(rand.Uint64())
+
+			basefeeJumps = dynamicFeeJumps(execFee)
+			blobfeeJumps = dynamicFeeJumps(blobFee)
+		)
+		index[addr] = []*blobTxMeta{{
+			id:                   uint64(i),
+			size:                 128 * 1024,
+			nonce:                0,
+			execTipCap:           execTip,
+			execFeeCap:           execFee,
+			blobFeeCap:           blobFee,
+			basefeeJumps:         basefeeJumps,
+			blobfeeJumps:         blobfeeJumps,
+			evictionExecTip:      execTip,
+			evictionExecFeeJumps: basefeeJumps,
+			evictionBlobFeeJumps: blobfeeJumps,
+		}}
+	}
+	// Create a price heap and reinit it over and over
+	heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+
+	basefees := make([]*uint256.Int, b.N)
+	blobfees := make([]*uint256.Int, b.N)
+	for i := 0; i < b.N; i++ {
+		basefees[i] = uint256.NewInt(rand.Uint64())
+		blobfees[i] = uint256.NewInt(rand.Uint64())
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		heap.reinit(basefees[i], blobfees[i], true)
+	}
+}
+
+// Benchmarks overflowing the heap over and over (add and then drop).
+func BenchmarkPriceHeapOverflow1MB(b *testing.B)   { benchmarkPriceHeapOverflow(b, 1024*1024) }
+func BenchmarkPriceHeapOverflow10MB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 10*1024*1024) }
+func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
+func BenchmarkPriceHeapOverflow1GB(b *testing.B)   { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
+func BenchmarkPriceHeapOverflow10GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow25GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow50GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
+func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
+
+func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
+	// Calculate how many unique transactions we can fit into the provided disk
+	// data cap
+	blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
+
+	// Create a random set of transactions with random fees. Use a separate account
+	// for each transaction to make it a worst case.
+	index := make(map[common.Address][]*blobTxMeta)
+	for i := 0; i < int(blobs); i++ {
+		var addr common.Address
+		rand.Read(addr[:])
+
+		var (
+			execTip = uint256.NewInt(rand.Uint64())
+			execFee = uint256.NewInt(rand.Uint64())
+			blobFee = uint256.NewInt(rand.Uint64())
+
+			basefeeJumps = dynamicFeeJumps(execFee)
+			blobfeeJumps = dynamicFeeJumps(blobFee)
+		)
+		index[addr] = []*blobTxMeta{{
+			id:                   uint64(i),
+			size:                 128 * 1024,
+			nonce:                0,
+			execTipCap:           execTip,
+			execFeeCap:           execFee,
+			blobFeeCap:           blobFee,
+			basefeeJumps:         basefeeJumps,
+			blobfeeJumps:         blobfeeJumps,
+			evictionExecTip:      execTip,
+			evictionExecFeeJumps: basefeeJumps,
+			evictionBlobFeeJumps: blobfeeJumps,
+		}}
+	}
+	// Create a price heap and overflow it over and over
+	evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
+	var (
+		addrs = make([]common.Address, b.N)
+		metas = make([]*blobTxMeta, b.N)
+	)
+	for i := 0; i < b.N; i++ {
+		rand.Read(addrs[i][:])
+
+		var (
+			execTip = uint256.NewInt(rand.Uint64())
+			execFee = uint256.NewInt(rand.Uint64())
+			blobFee = uint256.NewInt(rand.Uint64())
+
+			basefeeJumps = dynamicFeeJumps(execFee)
+			blobfeeJumps = dynamicFeeJumps(blobFee)
+		)
+		metas[i] = &blobTxMeta{
+			id:                   uint64(int(blobs) + i),
+			size:                 128 * 1024,
+			nonce:                0,
+			execTipCap:           execTip,
+			execFeeCap:           execFee,
+			blobFeeCap:           blobFee,
+			basefeeJumps:         basefeeJumps,
+			blobfeeJumps:         blobfeeJumps,
+			evictionExecTip:      execTip,
+			evictionExecFeeJumps: basefeeJumps,
+			evictionBlobFeeJumps: blobfeeJumps,
+		}
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		index[addrs[i]] = []*blobTxMeta{metas[i]}
+		heap.Push(evict, addrs[i])
+
+		drop := heap.Pop(evict)
+		delete(index, drop.(common.Address))
+	}
+}
diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go
new file mode 100644
index 000000000..6f296a54b
--- /dev/null
+++ b/core/txpool/blobpool/interface.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// BlockChain defines the minimal set of methods needed to back a blob pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+	// Config retrieves the chain's fork configuration.
+	Config() *params.ChainConfig
+
+	// CurrentBlock returns the current head of the chain.
+	CurrentBlock() *types.Header
+
+	// CurrentFinalBlock returns the current block below which blobs should not
+	// be maintained anymore for reorg purposes.
+	CurrentFinalBlock() *types.Header
+
+	// GetBlock retrieves a specific block, used during pool resets.
+	GetBlock(hash common.Hash, number uint64) *types.Block
+
+	// StateAt returns a state database for a given root hash (generally the head).
+	StateAt(root common.Hash) (*state.StateDB, error)
+}
diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go
new file mode 100644
index 000000000..2d62593de
--- /dev/null
+++ b/core/txpool/blobpool/limbo.go
@@ -0,0 +1,253 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"errors"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/holiman/billy"
+)
+
+// limboBlob is a wrapper around an opaque blobset that also contains the tx hash
+// to which it belongs as well as the block number in which it was included for
+// finality eviction.
+type limboBlob struct {
+	TxHash common.Hash // Owner transaction's hash to support resurrecting reorged txs
+	Block  uint64      // Block in which the blob transaction was included
+	Tx     *types.Transaction
+}
+
+// limbo is a light, indexed database to temporarily store recently included
+// blobs until they are finalized. The purpose is to support small reorgs, which
+// would require pulling back up old blobs (which aren't part of the chain).
+//
+// TODO(karalabe): Currently updating the inclusion block of a blob needs a full db rewrite. Can we do without?
+type limbo struct {
+	store billy.Database // Persistent data store for limboed blobs
+
+	index  map[common.Hash]uint64            // Mappings from tx hashes to datastore ids
+	groups map[uint64]map[uint64]common.Hash // Set of txs included in past blocks
+}
+
+// newLimbo opens and indexes a set of limboed blob transactions.
+func newLimbo(datadir string) (*limbo, error) {
+	l := &limbo{
+		index:  make(map[common.Hash]uint64),
+		groups: make(map[uint64]map[uint64]common.Hash),
+	}
+	// Index all limboed blobs on disk and delete anything unprocessable
+	var fails []uint64
+	index := func(id uint64, size uint32, data []byte) {
+		if l.parseBlob(id, data) != nil {
+			fails = append(fails, id)
+		}
+	}
+	store, err := billy.Open(billy.Options{Path: datadir}, newSlotter(), index)
+	if err != nil {
+		return nil, err
+	}
+	l.store = store
+
+	if len(fails) > 0 {
+		log.Warn("Dropping invalidated limboed blobs", "ids", fails)
+		for _, id := range fails {
+			if err := l.store.Delete(id); err != nil {
+				l.Close()
+				return nil, err
+			}
+		}
+	}
+	return l, nil
+}
+
+// Close closes down the underlying persistent store.
+func (l *limbo) Close() error {
+	return l.store.Close()
+}
+
+// parseBlob is a callback method on limbo creation that gets called for each
+// limboed blob on disk to create the in-memory metadata index.
+func (l *limbo) parseBlob(id uint64, data []byte) error {
+	item := new(limboBlob)
+	if err := rlp.DecodeBytes(data, item); err != nil {
+		// This path is impossible unless the disk data representation changes
+		// across restarts. For that ever-improbable case, recover gracefully
+		// by ignoring this data entry.
+		log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
+		return err
+	}
+	if _, ok := l.index[item.TxHash]; ok {
+		// This path is impossible, unless due to a programming error a blob gets
+		// inserted into the limbo which was already part of it. Recover gracefully
+		// by ignoring this data entry.
+		log.Error("Dropping duplicate blob limbo entry", "owner", item.TxHash, "id", id)
+		return errors.New("duplicate blob")
+	}
+	l.index[item.TxHash] = id
+
+	if _, ok := l.groups[item.Block]; !ok {
+		l.groups[item.Block] = make(map[uint64]common.Hash)
+	}
+	l.groups[item.Block][id] = item.TxHash
+
+	return nil
+}
+
+// finalize evicts all blobs belonging to a recently finalized block or older.
+func (l *limbo) finalize(final *types.Header) {
+	// Just in case there's no final block yet (network not yet merged, weird
+	// restart, sethead, etc), fail gracefully.
+	if final == nil {
+		log.Error("Nil finalized block cannot evict old blobs")
+		return
+	}
+	for block, ids := range l.groups {
+		if block > final.Number.Uint64() {
+			continue
+		}
+		for id, owner := range ids {
+			if err := l.store.Delete(id); err != nil {
+				log.Error("Failed to drop finalized blob", "block", block, "id", id, "err", err)
+			}
+			delete(l.index, owner)
+		}
+		delete(l.groups, block)
+	}
+}
+
+// push stores a new blob transaction into the limbo, waiting until finality for
+// it to be automatically evicted.
+func (l *limbo) push(tx *types.Transaction, block uint64) error {
+	// If the blobs are already tracked by the limbo, consider it a programming
+	// error. There's not much to do against it, but be loud.
+	if _, ok := l.index[tx.Hash()]; ok {
+		log.Error("Limbo cannot push already tracked blobs", "tx", tx)
+		return errors.New("already tracked blob transaction")
+	}
+	if err := l.setAndIndex(tx, block); err != nil {
+		log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
+		return err
+	}
+	return nil
+}
+
+// pull retrieves a previously pushed set of blobs back from the limbo, removing
+// it at the same time. This method should be used when a previously included blob
+// transaction gets reorged out.
+func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
+	// If the blobs are not tracked by the limbo, there's not much to do. This
+	// can happen for example if a blob transaction is mined without pushing it
+	// into the network first.
+	id, ok := l.index[tx]
+	if !ok {
+		log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
+		return nil, errors.New("unseen blob transaction")
+	}
+	item, err := l.getAndDrop(id)
+	if err != nil {
+		log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
+		return nil, err
+	}
+	return item.Tx, nil
+}
+
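The lifecycle is push on inclusion, pull on a reorg, finalize on finality, with the two maps always mirroring each other. A miniature of that bookkeeping in isolation (persistence omitted; the real methods also mutate the billy datastore at each step):

    package main

    import "fmt"

    // miniLimbo keeps only limbo's two in-memory indices (sketch).
    type miniLimbo struct {
        index  map[string]uint64            // tx hash -> datastore id
        groups map[uint64]map[uint64]string // block -> id -> tx hash
    }

    func (l *miniLimbo) push(tx string, id, block uint64) {
        l.index[tx] = id
        if _, ok := l.groups[block]; !ok {
            l.groups[block] = make(map[uint64]string)
        }
        l.groups[block][id] = tx
    }

    // finalize drops everything included at or below the finalized block,
    // the same sweep the real limbo runs on every finality update.
    func (l *miniLimbo) finalize(final uint64) {
        for block, ids := range l.groups {
            if block > final {
                continue
            }
            for _, tx := range ids {
                delete(l.index, tx) // the real code also deletes from disk
            }
            delete(l.groups, block)
        }
    }

    func main() {
        l := &miniLimbo{
            index:  make(map[string]uint64),
            groups: make(map[uint64]map[uint64]string),
        }
        l.push("tx-a", 1, 100) // included in block 100
        l.push("tx-b", 2, 102) // included in block 102
        l.finalize(100)        // 100 is final: tx-a evicted, tx-b kept
        fmt.Println(len(l.index), len(l.groups)) // 1 1
    }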
+// update changes the block number under which a blob transaction is tracked. This
+// method should be used when a reorg changes a transaction's inclusion block.
+//
+// The method may log errors for various unexpected scenarios but will not return
+// any of them since there's no clear error case. Some errors may be due to coding
+// issues, others caused by signers mining MEV stuff or swapping transactions. In
+// all cases, the pool needs to continue operating.
+func (l *limbo) update(txhash common.Hash, block uint64) {
+	// If the blobs are not tracked by the limbo, there's not much to do. This
+	// can happen for example if a blob transaction is mined without pushing it
+	// into the network first.
+	id, ok := l.index[txhash]
+	if !ok {
+		log.Trace("Limbo cannot update non-tracked blobs", "tx", txhash)
+		return
+	}
+	// If there was no change in the blob's inclusion block, don't mess around
+	// with heavy database operations.
+	if _, ok := l.groups[block][id]; ok {
+		log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block)
+		return
+	}
+	// Retrieve the old blobs from the data store and write them back with a new
+	// block number. If anything fails, there's not much to do, go on.
+	item, err := l.getAndDrop(id)
+	if err != nil {
+		log.Error("Failed to get and drop limboed blobs", "tx", txhash, "id", id, "err", err)
+		return
+	}
+	if err := l.setAndIndex(item.Tx, block); err != nil {
+		log.Error("Failed to set and index limboed blobs", "tx", txhash, "err", err)
+		return
+	}
+	log.Trace("Blob transaction updated in limbo", "tx", txhash, "old-block", item.Block, "new-block", block)
+}
+
+// getAndDrop retrieves a blob item from the limbo store and deletes it both from
+// the store and indices.
+func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
+	data, err := l.store.Get(id)
+	if err != nil {
+		return nil, err
+	}
+	item := new(limboBlob)
+	if err = rlp.DecodeBytes(data, item); err != nil {
+		return nil, err
+	}
+	delete(l.index, item.TxHash)
+	delete(l.groups[item.Block], id)
+	if len(l.groups[item.Block]) == 0 {
+		delete(l.groups, item.Block)
+	}
+	if err := l.store.Delete(id); err != nil {
+		return nil, err
+	}
+	return item, nil
+}
+
+// setAndIndex assembles a limbo blob database entry and stores it, also updating
+// the in-memory indices.
+func (l *limbo) setAndIndex(tx *types.Transaction, block uint64) error {
+	txhash := tx.Hash()
+	item := &limboBlob{
+		TxHash: txhash,
+		Block:  block,
+		Tx:     tx,
+	}
+	data, err := rlp.EncodeToBytes(item)
+	if err != nil {
+		panic(err) // cannot happen at runtime, dev error
+	}
+	id, err := l.store.Put(data)
+	if err != nil {
+		return err
+	}
+	l.index[txhash] = id
+	if _, ok := l.groups[block]; !ok {
+		l.groups[block] = make(map[uint64]common.Hash)
+	}
+	l.groups[block][id] = txhash
+	return nil
+}
diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go
new file mode 100644
index 000000000..070cc5ca4
--- /dev/null
+++ b/core/txpool/blobpool/metrics.go
@@ -0,0 +1,78 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+	// datacapGauge tracks the user's configured capacity for the blob pool. It
+	// is mostly a way to expose/debug issues.
+	datacapGauge = metrics.NewRegisteredGauge("blobpool/datacap", nil)
+
+	// The below metrics track the per-datastore metrics for the primary blob
+	// store and the temporary limbo store.
+	datausedGauge = metrics.NewRegisteredGauge("blobpool/dataused", nil)
+	datarealGauge = metrics.NewRegisteredGauge("blobpool/datareal", nil)
+	slotusedGauge = metrics.NewRegisteredGauge("blobpool/slotused", nil)
+
+	limboDatausedGauge = metrics.NewRegisteredGauge("blobpool/limbo/dataused", nil)
+	limboDatarealGauge = metrics.NewRegisteredGauge("blobpool/limbo/datareal", nil)
+	limboSlotusedGauge = metrics.NewRegisteredGauge("blobpool/limbo/slotused", nil)
+
+	// The below metrics track the per-shelf metrics for the primary blob store
+	// and the temporary limbo store.
+	shelfDatausedGaugeName = "blobpool/shelf_%d/dataused"
+	shelfDatagapsGaugeName = "blobpool/shelf_%d/datagaps"
+	shelfSlotusedGaugeName = "blobpool/shelf_%d/slotused"
+	shelfSlotgapsGaugeName = "blobpool/shelf_%d/slotgaps"
+
+	limboShelfDatausedGaugeName = "blobpool/limbo/shelf_%d/dataused"
+	limboShelfDatagapsGaugeName = "blobpool/limbo/shelf_%d/datagaps"
+	limboShelfSlotusedGaugeName = "blobpool/limbo/shelf_%d/slotused"
+	limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf_%d/slotgaps"
+
+	// The oversized metrics aggregate the shelf stats above the max blob count
+	// limits to track transactions that are just huge, but don't contain blobs.
+	//
+	// There is no oversized data in the limbo, it only contains blobs and some
+	// constant metadata.
+	oversizedDatausedGauge = metrics.NewRegisteredGauge("blobpool/oversized/dataused", nil)
+	oversizedDatagapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/datagaps", nil)
+	oversizedSlotusedGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotused", nil)
+	oversizedSlotgapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotgaps", nil)
+
+	// basefeeGauge and blobfeeGauge track the current network 1559 base fee and
+	// 4844 blob fee respectively.
+	basefeeGauge = metrics.NewRegisteredGauge("blobpool/basefee", nil)
+	blobfeeGauge = metrics.NewRegisteredGauge("blobpool/blobfee", nil)
+
+	// pooltipGauge is the configurable miner tip to permit a transaction into
+	// the pool.
+	pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
+
+	// addwait/time, resetwait/time and getwait/time track the rough health of
+	// the pool and whether or not it's capable of keeping up with the load from
+	// the network.
+	addwaitHist   = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+	addtimeHist   = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+	getwaitHist   = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+	gettimeHist   = metrics.NewRegisteredHistogram("blobpool/gettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+	pendwaitHist  = metrics.NewRegisteredHistogram("blobpool/pendwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+	pendtimeHist  = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
+	resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
+	resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+)
diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go
new file mode 100644
index 000000000..18e545c2a
--- /dev/null
+++ b/core/txpool/blobpool/priority.go
@@ -0,0 +1,90 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"math"
+	"math/bits"
+
+	"github.com/holiman/uint256"
+)
+
+// log2_1_125 is used in the eviction priority calculation.
+var log2_1_125 = math.Log2(1.125)
+
+// evictionPriority calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for both fee components.
+//
+// This method takes about 8ns on a very recent laptop CPU, recalculating about
+// 125 million transaction priority values per second.
+func evictionPriority(basefeeJumps float64, txBasefeeJumps, blobfeeJumps, txBlobfeeJumps float64) int {
+	var (
+		basefeePriority = evictionPriority1D(basefeeJumps, txBasefeeJumps)
+		blobfeePriority = evictionPriority1D(blobfeeJumps, txBlobfeeJumps)
+	)
+	if basefeePriority < blobfeePriority {
+		return basefeePriority
+	}
+	return blobfeePriority
+}
+
+// evictionPriority1D calculates the eviction priority based on the algorithm
+// described in the BlobPool docs for a single fee component.
+func evictionPriority1D(basefeeJumps float64, txfeeJumps float64) int {
+	jumps := txfeeJumps - basefeeJumps
+	if int(jumps) == 0 {
+		return 0 // can't log2 0
+	}
+	if jumps < 0 {
+		return -intLog2(uint(-math.Floor(jumps)))
+	}
+	return intLog2(uint(math.Ceil(jumps)))
+}
+
+// dynamicFeeJumps calculates the log1.125(fee), namely the number of fee jumps
+// needed to reach the requested fee. We only use it when calculating the jumps
+// between 2 fees, so it doesn't matter from what exact number it returns.
+//
+// This method is very expensive, taking about 75ns on a very recent laptop CPU,
+// but the result does not change with the lifetime of a transaction, so it can
+// be cached.
+func dynamicFeeJumps(fee *uint256.Int) float64 {
+	if fee.IsZero() {
+		return 0 // can't log2 zero, should never happen outside tests, but don't choke
+	}
+	return math.Log2(fee.Float64()) / log2_1_125
+}
+
+// intLog2 is a helper to calculate the integral part of a log2 of an unsigned
+// integer. It is a very specific calculation that's not particularly useful in
+// general, but it's what we need here (it's fast).
+func intLog2(n uint) int {
+	switch {
+	case n == 0:
+		panic("log2(0) is undefined")
+
+	case n < 2048:
+		return bits.UintSize - bits.LeadingZeros(n) - 1
+
+	default:
+		// The input is log1.125(uint256) = log2(uint256) / log2(1.125). At the
+		// most extreme, log2(uint256) will be a bit below 257, and the constant
+		// log2(1.125) ~= 0.17. The largest input thus is ~257 / ~0.17 ~= ~1511.
+		panic("dynamic fee jump diffs cannot reach this")
+	}
+}
diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go
new file mode 100644
index 000000000..4aad91992
--- /dev/null
+++ b/core/txpool/blobpool/priority_test.go
@@ -0,0 +1,87 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import (
+	"testing"
+
+	"github.com/holiman/uint256"
+)
+
+// Tests that the priority fees are calculated correctly as the log2 of the fee
+// jumps needed to go from the base fee to the tx's fee cap.
+func TestPriorityCalculation(t *testing.T) {
+	tests := []struct {
+		basefee uint64
+		txfee   uint64
+		result  int
+	}{
+		{basefee: 7, txfee: 10, result: 2},                          // 3.02 jumps, 4 ceil, 2 log2
+		{basefee: 17_200_000_000, txfee: 17_200_000_000, result: 0}, // 0 jumps, special case 0 log2
+		{basefee: 9_853_941_692, txfee: 11_085_092_510, result: 0},  // 0.99 jumps, 1 ceil, 0 log2
+		{basefee: 11_544_106_391, txfee: 10_356_781_100, result: 0}, // -0.92 jumps, -1 floor, 0 log2
+		{basefee: 17_200_000_000, txfee: 7, result: -7},             // -183.57 jumps, -184 floor, -7 log2
+		{basefee: 7, txfee: 17_200_000_000, result: 7},              // 183.57 jumps, 184 ceil, 7 log2
+	}
+	for i, tt := range tests {
+		var (
+			baseJumps = dynamicFeeJumps(uint256.NewInt(tt.basefee))
+			feeJumps  = dynamicFeeJumps(uint256.NewInt(tt.txfee))
+		)
+		if prio := evictionPriority1D(baseJumps, feeJumps); prio != tt.result {
+			t.Errorf("test %d priority mismatch: have %d, want %d", i, prio, tt.result)
+		}
+	}
+}
+
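The first vector is easy to re-derive by hand: going from a basefee of 7 to a fee cap of 10 takes log1.125(10/7) ≈ 3.02 fee jumps, which ceils to 4, and floor(log2(4)) = 2. A standalone check (standard library only):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Re-derive the {basefee: 7, txfee: 10, result: 2} vector above.
        jumps := func(fee float64) float64 { return math.Log2(fee) / math.Log2(1.125) }

        delta := jumps(10) - jumps(7)                   // ~3.02 jumps apart
        prio := math.Floor(math.Log2(math.Ceil(delta))) // intLog2 is a floored log2

        fmt.Printf("jumps=%.2f priority=%d\n", delta, int(prio)) // jumps=3.02 priority=2
    }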
+func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
+	fees := make([]*uint256.Int, b.N)
+	for i := 0; i < b.N; i++ {
+		fees[i] = uint256.NewInt(rand.Uint64())
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		dynamicFeeJumps(fees[i])
+	}
+}
+
+// Benchmarks how many priority recalculations can be done.
+func BenchmarkPriorityCalculation(b *testing.B) {
+	// The basefee and blob fee are constant for all transactions across a block,
+	// so we can assume their absolute jump counts can be pre-computed.
+	basefee := uint256.NewInt(17_200_000_000)  // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
+	blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
+
+	basefeeJumps := dynamicFeeJumps(basefee)
+	blobfeeJumps := dynamicFeeJumps(blobfee)
+
+	// The transaction's fee cap and blob fee cap are constant across the life
+	// of the transaction, so we can pre-calculate and cache them.
+	txBasefeeJumps := make([]float64, b.N)
+	txBlobfeeJumps := make([]float64, b.N)
+	for i := 0; i < b.N; i++ {
+		txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+		txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
+	}
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		evictionPriority(basefeeJumps, txBasefeeJumps[i], blobfeeJumps, txBlobfeeJumps[i])
+	}
+}
diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go
new file mode 100644
index 000000000..35349c344
--- /dev/null
+++ b/core/txpool/blobpool/slotter.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+// newSlotter creates a helper method for the Billy datastore that returns the
+// individual shelf sizes used to store transactions in.
+//
+// The slotter will create shelves for each possible blob count + some tx metadata
+// wiggle room, up to the max permitted limits.
+//
+// The slotter also creates a shelf for 0-blob transactions. Whilst those are not
+// allowed in the current protocol, having an empty shelf is not a relevant use
+// of resources, but it makes stress testing with junk transactions simpler.
+func newSlotter() func() (uint32, bool) {
+	slotsize := uint32(txAvgSize)
+	slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
+
+	return func() (size uint32, done bool) {
+		slotsize += blobSize
+		finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize
+
+		return slotsize, finished
+	}
+}
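+
+// A minimal sketch of how the slotter is consumed (hypothetical caller; the
+// blobSize, txAvgSize and related constants are defined elsewhere in this
+// package):
+//
+//	slotter := newSlotter()
+//	for {
+//		size, done := slotter()
+//		// size == n*blobSize + txAvgSize for n = 0, 1, 2, ...
+//		if done {
+//			break
+//		}
+//	}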
diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go
new file mode 100644
index 000000000..a7b43b4d2
--- /dev/null
+++ b/core/txpool/blobpool/slotter_test.go
@@ -0,0 +1,60 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blobpool
+
+import "testing"
+
+// Tests that the slotter creates the expected database shelves.
+func TestNewSlotter(t *testing.T) {
+	// Generate the database shelve sizes
+	slotter := newSlotter()
+
+	var shelves []uint32
+	for {
+		shelf, done := slotter()
+		shelves = append(shelves, shelf)
+		if done {
+			break
+		}
+	}
+	// Compare the database shelves to the expected ones
+	want := []uint32{
+		0*blobSize + txAvgSize,  // 0 blobs + some expected tx infos
+		1*blobSize + txAvgSize,  // 1 blob + some expected tx infos
+		2*blobSize + txAvgSize,  // 2 blobs + some expected tx infos (could be fewer blobs and more tx data)
+		3*blobSize + txAvgSize,  // 3 blobs + some expected tx infos (could be fewer blobs and more tx data)
+		4*blobSize + txAvgSize,  // 4 blobs + some expected tx infos (could be fewer blobs and more tx data)
+		5*blobSize + txAvgSize,  // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		6*blobSize + txAvgSize,  // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		7*blobSize + txAvgSize,  // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		8*blobSize + txAvgSize,  // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		9*blobSize + txAvgSize,  // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		10*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		11*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		12*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		13*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+		14*blobSize + txAvgSize, // 1-6 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size
+	}
+	if len(shelves) != len(want) {
+		t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want))
+	}
+	for i := 0; i < len(shelves) && i < len(want); i++ {
+		if shelves[i] != want[i] {
+			t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i])
+		}
+	}
+}
diff --git a/core/txpool/errors.go b/core/txpool/errors.go
new file mode 100644
index 000000000..bc26550f7
--- /dev/null
+++ b/core/txpool/errors.go
@@ -0,0 +1,57 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import "errors"
+
+var (
+	// ErrAlreadyKnown is returned if the transaction is already contained
+	// within the pool.
+	ErrAlreadyKnown = errors.New("already known")
+
+	// ErrInvalidSender is returned if the transaction contains an invalid signature.
+	ErrInvalidSender = errors.New("invalid sender")
+
+	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
+	// configured for the transaction pool.
+	ErrUnderpriced = errors.New("transaction underpriced")
+
+	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
+	// with a different one without the required price bump.
+	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
+
+	// ErrAccountLimitExceeded is returned if a transaction would exceed the number
+	// allowed by a pool for a single account.
+	ErrAccountLimitExceeded = errors.New("account limit exceeded")
+
+	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
+	// maximum allowance of the current block.
+	ErrGasLimit = errors.New("exceeds block gas limit")
+
+	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
+	// transaction with a negative value.
+	ErrNegativeValue = errors.New("negative value")
+
+	// ErrOversizedData is returned if the input data of a transaction is greater
+	// than some meaningful limit a user might use. This is not a consensus error
+	// making the transaction invalid, rather a DOS protection.
+	ErrOversizedData = errors.New("oversized data")
+
+	// ErrFutureReplacePending is returned if a future transaction replaces a pending
+	// transaction. Future transactions should only be able to replace other future transactions.
+	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+)
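+
+// A minimal usage sketch (hypothetical caller): subpools hand these sentinel
+// values back to their callers, so they can be matched with errors.Is:
+//
+//	errs := pool.Add(txs, false, false)
+//	for _, err := range errs {
+//		if errors.Is(err, ErrUnderpriced) {
+//			// the sender is not paying the configured minimum tip
+//		}
+//	}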
diff --git a/core/txpool/journal.go b/core/txpool/legacypool/journal.go
similarity index 99%
rename from core/txpool/journal.go
rename to core/txpool/legacypool/journal.go
index 1b330b0c3..f04ab8fc1 100644
--- a/core/txpool/journal.go
+++ b/core/txpool/legacypool/journal.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package txpool
+package legacypool
 
 import (
 	"errors"
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
new file mode 100644
index 000000000..00e326c4b
--- /dev/null
+++ b/core/txpool/legacypool/legacypool.go
@@ -0,0 +1,1931 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package legacypool implements the normal EVM execution transaction pool.
+package legacypool
+
+import (
+	"errors"
+	"math"
+	"math/big"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/prque"
+	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/txpool"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+const (
+	// txSlotSize is used to calculate how many data slots a single transaction
+	// takes up based on its size. The slots are used as DoS protection, ensuring
+	// that validating a new transaction remains a constant operation (in reality
+	// O(maxslots), where max slots are 4 currently).
+	txSlotSize = 32 * 1024
+
+	// txMaxSize is the maximum size a single transaction can have. This field has
+	// non-trivial consequences: larger transactions are significantly harder and
+	// more expensive to propagate; larger transactions also take more resources
+	// to validate whether they fit into the pool or not.
+	txMaxSize = 4 * txSlotSize // 128KB
+)
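+
+// For example (hypothetical sizes): a 70,000 byte transaction occupies
+// ceil(70000/txSlotSize) = 3 slots, while a minimal transfer occupies a single
+// slot, so the slot-based pool limits below bound memory usage rather than raw
+// transaction counts.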
+
+var (
+	// ErrAlreadyKnown is returned if the transaction is already contained
+	// within the pool.
+	ErrAlreadyKnown = errors.New("already known")
+
+	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
+	// another remote transaction.
+	ErrTxPoolOverflow = errors.New("txpool is full")
+)
+
+var (
+	evictionInterval    = time.Minute     // Time interval to check for evictable transactions
+	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
+)
+
+var (
+	// Metrics for the pending pool
+	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
+	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
+	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
+	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
+
+	// Metrics for the queued pool
+	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
+	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
+	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
+	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
+	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
+
+	// General tx metrics
+	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
+	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
+	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
+	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
+	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+
+	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
+	// txpool reorgs.
+	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+	// reorgDurationTimer measures how long a txpool reorg takes.
+	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+	// that this number is pretty low, since txpool reorgs happen very frequently.
+	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
+	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
+	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
+	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
+
+	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
+)
+
+// BlockChain defines the minimal set of methods needed to back a tx pool with
+// a chain. Exists to allow mocking the live chain out of tests.
+type BlockChain interface {
+	// Config retrieves the chain's fork configuration.
+	Config() *params.ChainConfig
+
+	// CurrentBlock returns the current head of the chain.
+	CurrentBlock() *types.Header
+
+	// GetBlock retrieves a specific block, used during pool resets.
+	GetBlock(hash common.Hash, number uint64) *types.Block
+
+	// StateAt returns a state database for a given root hash (generally the head).
+	StateAt(root common.Hash) (*state.StateDB, error)
+}
+
+// Config are the configuration parameters of the transaction pool.
+type Config struct {
+	Locals    []common.Address // Addresses that should be treated by default as local
+	NoLocals  bool             // Whether local transaction handling should be disabled
+	Journal   string           // Journal of local transactions to survive node restarts
+	Rejournal time.Duration    // Time interval to regenerate the local transaction journal
+
+	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
+	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
+
+	AccountSlots uint64 // Number of executable transaction slots guaranteed per account
+	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts
+	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
+	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts
+
+	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
+}
+
+// DefaultConfig contains the default configurations for the transaction pool.
+var DefaultConfig = Config{
+	Journal:   "transactions.rlp",
+	Rejournal: time.Hour,
+
+	PriceLimit: 1,
+	PriceBump:  10,
+
+	AccountSlots: 16,
+	GlobalSlots:  4096 + 1024, // urgent + floating queue capacity with 4:1 ratio
+	AccountQueue: 64,
+	GlobalQueue:  1024,
+
+	Lifetime: 3 * time.Hour,
+}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
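+//
+// For instance (hypothetical values):
+//
+//	cfg := DefaultConfig
+//	cfg.PriceBump = 0       // would allow fee-free replacement spam
+//	cfg = (&cfg).sanitize() // PriceBump is restored to DefaultConfig.PriceBump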
+func (config *Config) sanitize() Config {
+	conf := *config
+	if conf.Rejournal < time.Second {
+		log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
+		conf.Rejournal = time.Second
+	}
+	if conf.PriceLimit < 1 {
+		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
+		conf.PriceLimit = DefaultConfig.PriceLimit
+	}
+	if conf.PriceBump < 1 {
+		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
+		conf.PriceBump = DefaultConfig.PriceBump
+	}
+	if conf.AccountSlots < 1 {
+		log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots)
+		conf.AccountSlots = DefaultConfig.AccountSlots
+	}
+	if conf.GlobalSlots < 1 {
+		log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots)
+		conf.GlobalSlots = DefaultConfig.GlobalSlots
+	}
+	if conf.AccountQueue < 1 {
+		log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue)
+		conf.AccountQueue = DefaultConfig.AccountQueue
+	}
+	if conf.GlobalQueue < 1 {
+		log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue)
+		conf.GlobalQueue = DefaultConfig.GlobalQueue
+	}
+	if conf.Lifetime < 1 {
+		log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime)
+		conf.Lifetime = DefaultConfig.Lifetime
+	}
+	return conf
+}
+
+// LegacyPool contains all currently known transactions. Transactions
+// enter the pool when they are received from the network or submitted
+// locally. They exit the pool when they are included in the blockchain.
+//
+// The pool separates processable transactions (which can be applied to the
+// current state) and future transactions. Transactions move between those
+// two states over time as they are received and processed.
+type LegacyPool struct {
+	config      Config
+	chainconfig *params.ChainConfig
+	chain       BlockChain
+	gasTip      atomic.Pointer[big.Int]
+	txFeed      event.Feed
+	scope       event.SubscriptionScope
+	signer      types.Signer
+	mu          sync.RWMutex
+
+	currentHead   atomic.Pointer[types.Header] // Current head of the blockchain
+	currentState  *state.StateDB               // Current state in the blockchain head
+	pendingNonces *noncer                      // Pending state tracking virtual nonces
+
+	locals  *accountSet // Set of local transactions to exempt from eviction rules
+	journal *journal    // Journal of local transactions to back up to disk
+
+	reserve txpool.AddressReserver       // Address reserver to ensure exclusivity across subpools
+	pending map[common.Address]*list     // All currently processable transactions
+	queue   map[common.Address]*list     // Queued but non-processable transactions
+	beats   map[common.Address]time.Time // Last heartbeat from each known account
+	all     *lookup                      // All transactions to allow lookups
+	priced  *pricedList                  // All transactions sorted by price
+
+	reqResetCh      chan *txpoolResetRequest
+	reqPromoteCh    chan *accountSet
+	queueTxEventCh  chan *types.Transaction
+	reorgDoneCh     chan chan struct{}
+	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
+	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
+	initDoneCh      chan struct{}  // is closed once the pool is initialized (for tests)
+
+	changesSinceReorg int // A counter of how many drops we've performed in-between reorgs.
+} + +type txpoolResetRequest struct { + oldHead, newHead *types.Header +} + +// New creates a new transaction pool to gather, sort and filter inbound +// transactions from the network. +func New(config Config, chain BlockChain) *LegacyPool { + // Sanitize the input to ensure no vulnerable gas prices are set + config = (&config).sanitize() + + // Create the transaction pool with its initial settings + pool := &LegacyPool{ + config: config, + chain: chain, + chainconfig: chain.Config(), + signer: types.LatestSigner(chain.Config()), + pending: make(map[common.Address]*list), + queue: make(map[common.Address]*list), + beats: make(map[common.Address]time.Time), + all: newLookup(), + reqResetCh: make(chan *txpoolResetRequest), + reqPromoteCh: make(chan *accountSet), + queueTxEventCh: make(chan *types.Transaction), + reorgDoneCh: make(chan chan struct{}), + reorgShutdownCh: make(chan struct{}), + initDoneCh: make(chan struct{}), + } + pool.locals = newAccountSet(pool.signer) + for _, addr := range config.Locals { + log.Info("Setting new local account", "address", addr) + pool.locals.add(addr) + } + pool.priced = newPricedList(pool.all) + + if !config.NoLocals && config.Journal != "" { + pool.journal = newTxJournal(config.Journal) + } + return pool +} + +// Filter returns whether the given transaction can be consumed by the legacy +// pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction. +func (pool *LegacyPool) Filter(tx *types.Transaction) bool { + switch tx.Type() { + case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType: + return true + default: + return false + } +} + +// Init sets the gas price needed to keep a transaction in the pool and the chain +// head to allow balance / nonce checks. The transaction journal will be loaded +// from disk and filtered based on the provided starting settings. The internal +// goroutines will be spun up and the pool deemed operational afterwards. +func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error { + // Set the address reserver to request exclusive access to pooled accounts + pool.reserve = reserve + + // Set the basic pool parameters + pool.gasTip.Store(gasTip) + pool.reset(nil, head) + + // Start the reorg loop early, so it can handle requests generated during + // journal loading. + pool.wg.Add(1) + go pool.scheduleReorgLoop() + + // If local transactions and journaling is enabled, load from disk + if pool.journal != nil { + if err := pool.journal.load(pool.addLocals); err != nil { + log.Warn("Failed to load transaction journal", "err", err) + } + if err := pool.journal.rotate(pool.local()); err != nil { + log.Warn("Failed to rotate transaction journal", "err", err) + } + } + pool.wg.Add(1) + go pool.loop() + return nil +} + +// loop is the transaction pool's main event loop, waiting for and reacting to +// outside blockchain events as well as for various reporting and transaction +// eviction events. 
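+// Three tickers drive it: stats reporting every statsReportInterval, queued
+// transaction eviction checks every evictionInterval, and local journal
+// rotation every Config.Rejournal.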
+func (pool *LegacyPool) loop() {
+	defer pool.wg.Done()
+
+	var (
+		prevPending, prevQueued, prevStales int
+
+		// Start the stats reporting and transaction eviction tickers
+		report  = time.NewTicker(statsReportInterval)
+		evict   = time.NewTicker(evictionInterval)
+		journal = time.NewTicker(pool.config.Rejournal)
+	)
+	defer report.Stop()
+	defer evict.Stop()
+	defer journal.Stop()
+
+	// Notify tests that the init phase is done
+	close(pool.initDoneCh)
+	for {
+		select {
+		// Handle pool shutdown
+		case <-pool.reorgShutdownCh:
+			return
+
+		// Handle stats reporting ticks
+		case <-report.C:
+			pool.mu.RLock()
+			pending, queued := pool.stats()
+			pool.mu.RUnlock()
+			stales := int(pool.priced.stales.Load())
+
+			if pending != prevPending || queued != prevQueued || stales != prevStales {
+				log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
+				prevPending, prevQueued, prevStales = pending, queued, stales
+			}
+
+		// Handle inactive account transaction eviction
+		case <-evict.C:
+			pool.mu.Lock()
+			for addr := range pool.queue {
+				// Skip local transactions from the eviction mechanism
+				if pool.locals.contains(addr) {
+					continue
+				}
+				// Any non-locals old enough should be removed
+				if time.Since(pool.beats[addr]) > pool.config.Lifetime {
+					list := pool.queue[addr].Flatten()
+					for _, tx := range list {
+						pool.removeTx(tx.Hash(), true, true)
+					}
+					queuedEvictionMeter.Mark(int64(len(list)))
+				}
+			}
+			pool.mu.Unlock()
+
+		// Handle local transaction journal rotation
+		case <-journal.C:
+			if pool.journal != nil {
+				pool.mu.Lock()
+				if err := pool.journal.rotate(pool.local()); err != nil {
+					log.Warn("Failed to rotate local tx journal", "err", err)
+				}
+				pool.mu.Unlock()
+			}
+		}
+	}
+}
+
+// Close terminates the transaction pool.
+func (pool *LegacyPool) Close() error {
+	// Unsubscribe all subscriptions registered from txpool
+	pool.scope.Close()
+
+	// Terminate the pool reorger and return
+	close(pool.reorgShutdownCh)
+	pool.wg.Wait()
+
+	if pool.journal != nil {
+		pool.journal.close()
+	}
+	log.Info("Transaction pool stopped")
+	return nil
+}
+
+// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
+// kept in sync with the main transaction pool's internal state.
+func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
+	wait := pool.requestReset(oldHead, newHead)
+	<-wait
+}
+
+// SubscribeTransactions registers a subscription of NewTxsEvent and
+// starts sending events to the given channel.
+func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
+	return pool.scope.Track(pool.txFeed.Subscribe(ch))
+}
+
+// SetGasTip updates the minimum gas tip required by the transaction pool for a
+// new transaction, and drops all transactions below this threshold.
+func (pool *LegacyPool) SetGasTip(tip *big.Int) {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	old := pool.gasTip.Load()
+	pool.gasTip.Store(new(big.Int).Set(tip))
+
+	// If the min miner fee increased, remove transactions below the new threshold
+	if tip.Cmp(old) > 0 {
+		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
+		drop := pool.all.RemotesBelowTip(tip)
+		for _, tx := range drop {
+			pool.removeTx(tx.Hash(), false, true)
+		}
+		pool.priced.Removed(len(drop))
+	}
+	log.Info("Legacy pool tip threshold updated", "tip", tip)
+}
+
+// Nonce returns the next nonce of an account, with all transactions executable
+// by the pool already applied on top.
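+//
+// For example (hypothetical state): if an account's on-chain nonce is 5 and
+// the pool holds its executable transactions with nonces 5 and 6, Nonce
+// returns 7.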
+func (pool *LegacyPool) Nonce(addr common.Address) uint64 { + pool.mu.RLock() + defer pool.mu.RUnlock() + + return pool.pendingNonces.get(addr) +} + +// Stats retrieves the current pool stats, namely the number of pending and the +// number of queued (non-executable) transactions. +func (pool *LegacyPool) Stats() (int, int) { + pool.mu.RLock() + defer pool.mu.RUnlock() + + return pool.stats() +} + +// stats retrieves the current pool stats, namely the number of pending and the +// number of queued (non-executable) transactions. +func (pool *LegacyPool) stats() (int, int) { + pending := 0 + for _, list := range pool.pending { + pending += list.Len() + } + queued := 0 + for _, list := range pool.queue { + queued += list.Len() + } + return pending, queued +} + +// Content retrieves the data content of the transaction pool, returning all the +// pending as well as queued transactions, grouped by account and sorted by nonce. +func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { + pool.mu.Lock() + defer pool.mu.Unlock() + + pending := make(map[common.Address][]*types.Transaction, len(pool.pending)) + for addr, list := range pool.pending { + pending[addr] = list.Flatten() + } + queued := make(map[common.Address][]*types.Transaction, len(pool.queue)) + for addr, list := range pool.queue { + queued[addr] = list.Flatten() + } + return pending, queued +} + +// ContentFrom retrieves the data content of the transaction pool, returning the +// pending as well as queued transactions of this address, grouped by nonce. +func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { + pool.mu.RLock() + defer pool.mu.RUnlock() + + var pending []*types.Transaction + if list, ok := pool.pending[addr]; ok { + pending = list.Flatten() + } + var queued []*types.Transaction + if list, ok := pool.queue[addr]; ok { + queued = list.Flatten() + } + return pending, queued +} + +// Pending retrieves all currently processable transactions, grouped by origin +// account and sorted by nonce. The returned transaction set is a copy and can be +// freely modified by calling code. +// +// The enforceTips parameter can be used to do an extra filtering on the pending +// transactions and only return those whose **effective** tip is large enough in +// the next pending execution environment. +func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction { + pool.mu.Lock() + defer pool.mu.Unlock() + + pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending)) + for addr, list := range pool.pending { + txs := list.Flatten() + + // If the miner requests tip enforcement, cap the lists now + if enforceTips && !pool.locals.contains(addr) { + for i, tx := range txs { + if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), pool.priced.urgent.baseFee) < 0 { + txs = txs[:i] + break + } + } + } + if len(txs) > 0 { + lazies := make([]*txpool.LazyTransaction, len(txs)) + for i := 0; i < len(txs); i++ { + lazies[i] = &txpool.LazyTransaction{ + Pool: pool, + Hash: txs[i].Hash(), + Tx: txs[i], + Time: txs[i].Time(), + GasFeeCap: txs[i].GasFeeCap(), + GasTipCap: txs[i].GasTipCap(), + } + } + pending[addr] = lazies + } + } + return pending +} + +// Locals retrieves the accounts currently considered local by the pool. 
+func (pool *LegacyPool) Locals() []common.Address {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	return pool.locals.flatten()
+}
+
+// local retrieves all currently known local transactions, grouped by origin
+// account and sorted by nonce. The returned transaction set is a copy and can be
+// freely modified by calling code.
+func (pool *LegacyPool) local() map[common.Address]types.Transactions {
+	txs := make(map[common.Address]types.Transactions)
+	for addr := range pool.locals.accounts {
+		if pending := pool.pending[addr]; pending != nil {
+			txs[addr] = append(txs[addr], pending.Flatten()...)
+		}
+		if queued := pool.queue[addr]; queued != nil {
+			txs[addr] = append(txs[addr], queued.Flatten()...)
+		}
+	}
+	return txs
+}
+
+// validateTxBasics checks whether a transaction is valid according to the consensus
+// rules, but does not check state-dependent validation such as sufficient balance.
+// This check is meant as an early check which only needs to be performed once,
+// and does not require the pool mutex to be held.
+func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error {
+	opts := &txpool.ValidationOptions{
+		Config: pool.chainconfig,
+		Accept: 0 |
+			1<<types.LegacyTxType |
+			1<<types.AccessListTxType |
+			1<<types.DynamicFeeTxType,
+		MaxSize: txMaxSize,
+		MinTip:  pool.gasTip.Load(),
+	}
+	if local {
+		opts.MinTip = new(big.Int)
+	}
+	if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error {
+	opts := &txpool.ValidationOptionsWithState{
+		State: pool.currentState,
+
+		FirstNonceGap: nil, // Pool allows arbitrary arrival order, don't invalidate nonce gaps
+		ExistingExpenditure: func(addr common.Address) *big.Int {
+			if list := pool.pending[addr]; list != nil {
+				return list.totalcost
+			}
+			return new(big.Int)
+		},
+		ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
+			if list := pool.pending[addr]; list != nil {
+				if tx := list.txs.Get(nonce); tx != nil {
+					return tx.Cost()
+				}
+			}
+			return nil
+		},
+	}
+	if err := txpool.ValidateTransactionWithState(tx, pool.signer, opts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// add validates a transaction and inserts it into the non-executable queue for later
+// pending promotion and execution. If the transaction is a replacement for an already
+// pending or queued one, it overwrites the previous transaction if its price is higher.
+//
+// If a newly added transaction is marked as local, its sending account will be
+// added to the allowlist, preventing any associated transaction from being dropped
+// out of the pool due to pricing constraints.
+func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
+	// If the transaction is already known, discard it
+	hash := tx.Hash()
+	if pool.all.Get(hash) != nil {
+		log.Trace("Discarding already known transaction", "hash", hash)
+		knownTxMeter.Mark(1)
+		return false, ErrAlreadyKnown
+	}
+	// Make the local flag. If it's from local source or it's from the network but
+	// the sender is marked as local previously, treat it as the local transaction.
+	isLocal := local || pool.locals.containsTx(tx)
+
+	// If the transaction fails basic validation, discard it
+	if err := pool.validateTx(tx, isLocal); err != nil {
+		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
+		invalidTxMeter.Mark(1)
+		return false, err
+	}
+	// already validated by this point
+	from, _ := types.Sender(pool.signer, tx)
+
+	// If the transaction pool is full, discard underpriced transactions
+	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
+		// If the new transaction is underpriced, don't accept it
+		if !isLocal && pool.priced.Underpriced(tx) {
+			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			underpricedTxMeter.Mark(1)
+			return false, txpool.ErrUnderpriced
+		}
+
+		// We're about to replace a transaction. The reorg does a more thorough
+		// analysis of what to remove and how, but it runs async. We don't want to
+		// do too many replacements between reorg-runs, so we cap the number of
+		// replacements to 25% of the slots
+		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+			throttleTxMeter.Mark(1)
+			return false, ErrTxPoolOverflow
+		}
+
+		// New transaction is better than our worse ones, make room for it.
+		// If it's a local transaction, forcibly discard all available transactions.
+		// Otherwise if we can't make enough room for new one, abort the operation.
+		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
+
+		// Special case, we still can't make the room for the new remote one.
+		if !isLocal && !success {
+			log.Trace("Discarding overflown transaction", "hash", hash)
+			overflowedTxMeter.Mark(1)
+			return false, ErrTxPoolOverflow
+		}
+
+		// If the new transaction is a future transaction it should never churn pending transactions
+		if !isLocal && pool.isGapped(from, tx) {
+			var replacesPending bool
+			for _, dropTx := range drop {
+				dropSender, _ := types.Sender(pool.signer, dropTx)
+				if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
+					replacesPending = true
+					break
+				}
+			}
+			// Add all transactions back to the priced queue
+			if replacesPending {
+				for _, dropTx := range drop {
+					pool.priced.Put(dropTx, false)
+				}
+				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
+				return false, txpool.ErrFutureReplacePending
+			}
+		}
+
+		// Kick out the underpriced remote transactions.
+		for _, tx := range drop {
+			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			underpricedTxMeter.Mark(1)
+
+			sender, _ := types.Sender(pool.signer, tx)
+			dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if last from the acc
+
+			pool.changesSinceReorg += dropped
+		}
+	}
+
+	// Try to replace an existing transaction in the pending pool
+	if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
+		// Nonce already pending, check if required price bump is met
+		inserted, old := list.Add(tx, pool.config.PriceBump)
+		if !inserted {
+			pendingDiscardMeter.Mark(1)
+			return false, txpool.ErrReplaceUnderpriced
+		}
+		// New transaction is better, replace old one
+		if old != nil {
+			pool.all.Remove(old.Hash())
+			pool.priced.Removed(1)
+			pendingReplaceMeter.Mark(1)
+		}
+		pool.all.Add(tx, isLocal)
+		pool.priced.Put(tx, isLocal)
+		pool.journalTx(from, tx)
+		pool.queueTxEvent(tx)
+		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+
+		// Successful promotion, bump the heartbeat
+		pool.beats[from] = time.Now()
+		return old != nil, nil
+	}
+	// New transaction isn't replacing a pending one, push into queue
+	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
+	if err != nil {
+		return false, err
+	}
+	// Mark local addresses and journal local transactions
+	if local && !pool.locals.contains(from) {
+		log.Info("Setting new local account", "address", from)
+		pool.locals.add(from)
+		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
+	}
+	if isLocal {
+		localGauge.Inc(1)
+	}
+	pool.journalTx(from, tx)
+
+	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+	return replaced, nil
+}
+
+// isGapped reports whether the given transaction has a nonce gap in front of
+// it that the already-queued transactions cannot fill, i.e. whether it cannot
+// become immediately executable.
+func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
+	// Short circuit if transaction falls within the scope of the pending list
+	// or matches the next pending nonce which can be promoted as an executable
+	// transaction afterwards. Note, the tx staleness is already checked in
+	// 'validateTx' function previously.
+	next := pool.pendingNonces.get(from)
+	if tx.Nonce() <= next {
+		return false
+	}
+	// The transaction has a nonce gap with pending list, it's only considered
+	// as executable if transactions in queue can fill up the nonce gap.
+	queue, ok := pool.queue[from]
+	if !ok {
+		return true
+	}
+	for nonce := next; nonce < tx.Nonce(); nonce++ {
+		if !queue.Contains(nonce) {
+			return true // txs in queue can't fill up the nonce gap
+		}
+	}
+	return false
+}
+
+// enqueueTx inserts a new transaction into the non-executable transaction queue.
+//
+// Note, this method assumes the pool lock is held!
+func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
+	// Try to insert the transaction into the future queue
+	from, _ := types.Sender(pool.signer, tx) // already validated
+	if pool.queue[from] == nil {
+		pool.queue[from] = newList(false)
+	}
+	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
+	if !inserted {
+		// An older transaction was better, discard this
+		queuedDiscardMeter.Mark(1)
+		return false, txpool.ErrReplaceUnderpriced
+	}
+	// Discard any previous transaction and mark this
+	if old != nil {
+		pool.all.Remove(old.Hash())
+		pool.priced.Removed(1)
+		queuedReplaceMeter.Mark(1)
+	} else {
+		// Nothing was replaced, bump the queued counter
+		queuedGauge.Inc(1)
+	}
+	// If the transaction isn't in the lookup set but it's expected to be there,
+	// log an error.
+	if pool.all.Get(hash) == nil && !addAll {
+		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
+	}
+	if addAll {
+		pool.all.Add(tx, local)
+		pool.priced.Put(tx, local)
+	}
+	// If we've never recorded a heartbeat for this account, do it right now.
+	if _, exist := pool.beats[from]; !exist {
+		pool.beats[from] = time.Now()
+	}
+	return old != nil, nil
+}
+
+// journalTx adds the specified transaction to the local disk journal if it is
+// deemed to have been sent from a local account.
+func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
+	// Only journal if it's enabled and the transaction is local
+	if pool.journal == nil || !pool.locals.contains(from) {
+		return
+	}
+	if err := pool.journal.insert(tx); err != nil {
+		log.Warn("Failed to journal local transaction", "err", err)
+	}
+}
+
+// promoteTx adds a transaction to the pending (processable) list of transactions
+// and returns whether it was inserted or an older one was better.
+//
+// Note, this method assumes the pool lock is held!
+func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
+	// Try to insert the transaction into the pending queue
+	if pool.pending[addr] == nil {
+		pool.pending[addr] = newList(true)
+	}
+	list := pool.pending[addr]
+
+	inserted, old := list.Add(tx, pool.config.PriceBump)
+	if !inserted {
+		// An older transaction was better, discard this
+		pool.all.Remove(hash)
+		pool.priced.Removed(1)
+		pendingDiscardMeter.Mark(1)
+		return false
+	}
+	// Otherwise discard any previous transaction and mark this
+	if old != nil {
+		pool.all.Remove(old.Hash())
+		pool.priced.Removed(1)
+		pendingReplaceMeter.Mark(1)
+	} else {
+		// Nothing was replaced, bump the pending counter
+		pendingGauge.Inc(1)
+	}
+	// Set the potentially new pending nonce and notify any subsystems of the new tx
+	pool.pendingNonces.set(addr, tx.Nonce()+1)
+
+	// Successful promotion, bump the heartbeat
+	pool.beats[addr] = time.Now()
+	return true
+}
+
+// addLocals enqueues a batch of transactions into the pool if they are valid, marking the
+// senders as local ones, ensuring they go around the local pricing constraints.
+//
+// This method is used to add transactions from the RPC API and performs synchronous pool
+// reorganization and event propagation.
+func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
+	return pool.Add(txs, !pool.config.NoLocals, true)
+}
+
+// addLocal enqueues a single local transaction into the pool if it is valid. This is
+// a convenience wrapper around addLocals.
+func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
+	errs := pool.addLocals([]*types.Transaction{tx})
+	return errs[0]
+}
+
+// addRemotes enqueues a batch of transactions into the pool if they are valid. If the
+// senders are not among the locally tracked ones, full pricing constraints will apply.
+//
+// This method is used to add transactions from the p2p network and does not wait for pool
+// reorganization and internal event propagation.
+func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
+	return pool.Add(txs, false, false)
+}
+
+// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
+// wrapper around addRemotes.
+func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
+	errs := pool.addRemotes([]*types.Transaction{tx})
+	return errs[0]
+}
+
+// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
+func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
+	return pool.Add(txs, false, true)
+}
+
+// addRemoteSync is like addRemotes with a single transaction, but waits for pool
+// reorganization. Tests use this method.
+func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
+	return pool.Add([]*types.Transaction{tx}, false, true)[0]
+}
+
+// Add enqueues a batch of transactions into the pool if they are valid. Depending
+// on the local flag, full pricing constraints will or will not be applied.
+//
+// If sync is set, the method will block until all internal maintenance related
+// to the add is finished. Only use this during tests for determinism!
+func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error {
+	// Filter out known ones without obtaining the pool lock or recovering signatures
+	var (
+		errs = make([]error, len(txs))
+		news = make([]*types.Transaction, 0, len(txs))
+	)
+	for i, tx := range txs {
+		// If the transaction is known, pre-set the error slot
+		if pool.all.Get(tx.Hash()) != nil {
+			errs[i] = ErrAlreadyKnown
+			knownTxMeter.Mark(1)
+			continue
+		}
+		// Exclude transactions with basic errors, e.g. invalid signatures and
+		// insufficient intrinsic gas as soon as possible and cache senders
+		// in transactions before obtaining lock
+		if err := pool.validateTxBasics(tx, local); err != nil {
+			errs[i] = err
+			invalidTxMeter.Mark(1)
+			continue
+		}
+		// Accumulate all unknown transactions for deeper processing
+		news = append(news, tx)
+	}
+	if len(news) == 0 {
+		return errs
+	}
+
+	// Process all the new transactions and merge any errors into the original slice
+	pool.mu.Lock()
+	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
+	pool.mu.Unlock()
+
+	var nilSlot = 0
+	for _, err := range newErrs {
+		for errs[nilSlot] != nil {
+			nilSlot++
+		}
+		errs[nilSlot] = err
+		nilSlot++
+	}
+	// Reorg the pool internals if needed and return
+	done := pool.requestPromoteExecutables(dirtyAddrs)
+	if sync {
+		<-done
+	}
+	return errs
+}
+
+// addTxsLocked attempts to queue a batch of transactions if they are valid.
+// The transaction pool lock must be held.
+func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
+	dirty := newAccountSet(pool.signer)
+	errs := make([]error, len(txs))
+	for i, tx := range txs {
+		replaced, err := pool.add(tx, local)
+		errs[i] = err
+		if err == nil && !replaced {
+			dirty.addTx(tx)
+		}
+	}
+	validTxMeter.Mark(int64(len(dirty.accounts)))
+	return errs, dirty
+}
+
+// Status returns the status (unknown/pending/queued) of a batch of transactions
+// identified by their hashes.
+func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
+	tx := pool.get(hash)
+	if tx == nil {
+		return txpool.TxStatusUnknown
+	}
+	from, _ := types.Sender(pool.signer, tx) // already validated
+
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
+		return txpool.TxStatusPending
+	} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
+		return txpool.TxStatusQueued
+	}
+	return txpool.TxStatusUnknown
+}
+
+// Get returns a transaction if it is contained in the pool and nil otherwise.
+func (pool *LegacyPool) Get(hash common.Hash) *types.Transaction {
+	tx := pool.get(hash)
+	if tx == nil {
+		return nil
+	}
+	return tx
+}
+
+// get returns a transaction if it is contained in the pool and nil otherwise.
+func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
+	return pool.all.Get(hash)
+}
+
+// Has returns an indicator whether txpool has a transaction cached with the
+// given hash.
+func (pool *LegacyPool) Has(hash common.Hash) bool {
+	return pool.all.Get(hash) != nil
+}
+
+// removeTx removes a single transaction from the queue, moving all subsequent
+// transactions back to the future queue.
+//
+// If unreserve is false, the account will not be relinquished to the main txpool
+// even if there are no more references to it. This is used to handle a race when
+// a tx is being added and it evicts a previously scheduled tx from the same
+// account, which could lead to a premature release of the lock.
+//
+// Returns the number of transactions removed from the pending queue.
+func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
+	// Fetch the transaction we wish to delete
+	tx := pool.all.Get(hash)
+	if tx == nil {
+		return 0
+	}
+	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
+
+	// If after deletion there are no more transactions belonging to this account,
+	// relinquish the address reservation. It's a bit convoluted to do this via a
+	// defer, but it's safer vs. the many return pathways.
+	if unreserve {
+		defer func() {
+			var (
+				_, hasPending = pool.pending[addr]
+				_, hasQueued  = pool.queue[addr]
+			)
+			if !hasPending && !hasQueued {
+				pool.reserve(addr, false)
+			}
+		}()
+	}
+	// Remove it from the list of known transactions
+	pool.all.Remove(hash)
+	if outofbound {
+		pool.priced.Removed(1)
+	}
+	if pool.locals.contains(addr) {
+		localGauge.Dec(1)
+	}
+	// Remove the transaction from the pending lists and reset the account nonce
+	if pending := pool.pending[addr]; pending != nil {
+		if removed, invalids := pending.Remove(tx); removed {
+			// If no more pending transactions are left, remove the list
+			if pending.Empty() {
+				delete(pool.pending, addr)
+			}
+			// Postpone any invalidated transactions
+			for _, tx := range invalids {
+				// Internal shuffle shouldn't touch the lookup set.
+ pool.enqueueTx(tx.Hash(), tx, false, false) + } + // Update the account nonce if needed + pool.pendingNonces.setIfLower(addr, tx.Nonce()) + // Reduce the pending counter + pendingGauge.Dec(int64(1 + len(invalids))) + return 1 + len(invalids) + } + } + // Transaction is in the future queue + if future := pool.queue[addr]; future != nil { + if removed, _ := future.Remove(tx); removed { + // Reduce the queued counter + queuedGauge.Dec(1) + } + if future.Empty() { + delete(pool.queue, addr) + delete(pool.beats, addr) + } + } + return 0 +} + +// requestReset requests a pool reset to the new head block. +// The returned channel is closed when the reset has occurred. +func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { + select { + case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: + return <-pool.reorgDoneCh + case <-pool.reorgShutdownCh: + return pool.reorgShutdownCh + } +} + +// requestPromoteExecutables requests transaction promotion checks for the given addresses. +// The returned channel is closed when the promotion checks have occurred. +func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} { + select { + case pool.reqPromoteCh <- set: + return <-pool.reorgDoneCh + case <-pool.reorgShutdownCh: + return pool.reorgShutdownCh + } +} + +// queueTxEvent enqueues a transaction event to be sent in the next reorg run. +func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) { + select { + case pool.queueTxEventCh <- tx: + case <-pool.reorgShutdownCh: + } +} + +// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not +// call those methods directly, but request them being run using requestReset and +// requestPromoteExecutables instead. +func (pool *LegacyPool) scheduleReorgLoop() { + defer pool.wg.Done() + + var ( + curDone chan struct{} // non-nil while runReorg is active + nextDone = make(chan struct{}) + launchNextRun bool + reset *txpoolResetRequest + dirtyAccounts *accountSet + queuedEvents = make(map[common.Address]*sortedMap) + ) + for { + // Launch next background reorg if needed + if curDone == nil && launchNextRun { + // Run the background reorg and announcements + go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) + + // Prepare everything for the next round of reorg + curDone, nextDone = nextDone, make(chan struct{}) + launchNextRun = false + + reset, dirtyAccounts = nil, nil + queuedEvents = make(map[common.Address]*sortedMap) + } + + select { + case req := <-pool.reqResetCh: + // Reset request: update head if request is already pending. + if reset == nil { + reset = req + } else { + reset.newHead = req.newHead + } + launchNextRun = true + pool.reorgDoneCh <- nextDone + + case req := <-pool.reqPromoteCh: + // Promote request: update address set if request is already pending. + if dirtyAccounts == nil { + dirtyAccounts = req + } else { + dirtyAccounts.merge(req) + } + launchNextRun = true + pool.reorgDoneCh <- nextDone + + case tx := <-pool.queueTxEventCh: + // Queue up the event, but don't schedule a reorg. It's up to the caller to + // request one later if they want the events sent. + addr, _ := types.Sender(pool.signer, tx) + if _, ok := queuedEvents[addr]; !ok { + queuedEvents[addr] = newSortedMap() + } + queuedEvents[addr].Put(tx) + + case <-curDone: + curDone = nil + + case <-pool.reorgShutdownCh: + // Wait for current run to finish. 
+ if curDone != nil { + <-curDone + } + close(nextDone) + return + } + } +} + +// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. +func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { + defer func(t0 time.Time) { + reorgDurationTimer.Update(time.Since(t0)) + }(time.Now()) + defer close(done) + + var promoteAddrs []common.Address + if dirtyAccounts != nil && reset == nil { + // Only dirty accounts need to be promoted, unless we're resetting. + // For resets, all addresses in the tx queue will be promoted and + // the flatten operation can be avoided. + promoteAddrs = dirtyAccounts.flatten() + } + pool.mu.Lock() + if reset != nil { + // Reset from the old head to the new, rescheduling any reorged transactions + pool.reset(reset.oldHead, reset.newHead) + + // Nonces were reset, discard any events that became stale + for addr := range events { + events[addr].Forward(pool.pendingNonces.get(addr)) + if events[addr].Len() == 0 { + delete(events, addr) + } + } + // Reset needs promote for all addresses + promoteAddrs = make([]common.Address, 0, len(pool.queue)) + for addr := range pool.queue { + promoteAddrs = append(promoteAddrs, addr) + } + } + // Check for pending transactions for every account that sent new ones + promoted := pool.promoteExecutables(promoteAddrs) + + // If a new block appeared, validate the pool of pending transactions. This will + // remove any transaction that has been included in the block or was invalidated + // because of another transaction (e.g. higher gas price). + if reset != nil { + pool.demoteUnexecutables() + if reset.newHead != nil { + if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { + pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) + pool.priced.SetBaseFee(pendingBaseFee) + } else { + pool.priced.Reheap() + } + } + // Update all accounts to the latest known pending nonce + nonces := make(map[common.Address]uint64, len(pool.pending)) + for addr, list := range pool.pending { + highestPending := list.LastElement() + nonces[addr] = highestPending.Nonce() + 1 + } + pool.pendingNonces.setAll(nonces) + } + // Ensure pool.queue and pool.pending sizes stay within the configured limits. + pool.truncatePending() + pool.truncateQueue() + + dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) + pool.changesSinceReorg = 0 // Reset change counter + pool.mu.Unlock() + + // Notify subsystems for newly added transactions + for _, tx := range promoted { + addr, _ := types.Sender(pool.signer, tx) + if _, ok := events[addr]; !ok { + events[addr] = newSortedMap() + } + events[addr].Put(tx) + } + if len(events) > 0 { + var txs []*types.Transaction + for _, set := range events { + txs = append(txs, set.Flatten()...) + } + pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) + } +} + +// reset retrieves the current state of the blockchain and ensures the content +// of the transaction pool is valid with regard to the chain state. 
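+//
+// Illustration (hypothetical reorg): if the head moves from block B2 to a
+// sibling B2' sharing the common ancestor B1, the transactions of B2 become
+// the discarded set, those of B2' the included set, and only their difference
+// is reinjected into the pool.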
+func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
+	// If we're reorging an old state, reinject all dropped transactions
+	var reinject types.Transactions
+
+	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
+		// If the reorg is too deep, avoid doing it (will happen during fast sync)
+		oldNum := oldHead.Number.Uint64()
+		newNum := newHead.Number.Uint64()
+
+		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+			log.Debug("Skipping deep transaction reorg", "depth", depth)
+		} else {
+			// Reorg seems shallow enough to pull in all transactions into memory
+			var (
+				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+			)
+			if rem == nil {
+				// This can happen if a setHead is performed, where we simply discard the old
+				// head from the chain.
+				// If that is the case, we don't have the lost transactions anymore, and
+				// there's nothing to add
+				if newNum >= oldNum {
+					// If we reorged to a same or higher number, then it's not a case of setHead
+					log.Warn("Transaction pool reset with missing old head",
+						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+					return
+				}
+				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
+				log.Debug("Skipping transaction reset caused by setHead",
+					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+				// We still need to update the current state so that the lost transactions
+				// can be re-added by the user
+			} else {
+				if add == nil {
+					// if the new head is nil, it means that something happened between
+					// the firing of newhead-event and _now_: most likely a
+					// reorg caused by sync-reversion or explicit sethead back to an
+					// earlier block.
+					log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
+					return
+				}
+				var discarded, included types.Transactions
+				for rem.NumberU64() > add.NumberU64() {
+					discarded = append(discarded, rem.Transactions()...)
+					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+						return
+					}
+				}
+				for add.NumberU64() > rem.NumberU64() {
+					included = append(included, add.Transactions()...)
+					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+						return
+					}
+				}
+				for rem.Hash() != add.Hash() {
+					discarded = append(discarded, rem.Transactions()...)
+					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+						return
+					}
+					included = append(included, add.Transactions()...)
+					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+						return
+					}
+				}
+				lost := make([]*types.Transaction, 0, len(discarded))
+				for _, tx := range types.TxDifference(discarded, included) {
+					if pool.Filter(tx) {
+						lost = append(lost, tx)
+					}
+				}
+				reinject = lost
+			}
+		}
+	}
+	// Initialize the internal state to the current head
+	if newHead == nil {
+		newHead = pool.chain.CurrentBlock() // Special case during testing
+	}
+	statedb, err := pool.chain.StateAt(newHead.Root)
+	if err != nil {
+		log.Error("Failed to reset txpool state", "err", err)
+		return
+	}
+	pool.currentHead.Store(newHead)
+	pool.currentState = statedb
+	pool.pendingNonces = newNoncer(statedb)
+
+	// Inject any transactions discarded due to reorgs
+	log.Debug("Reinjecting stale transactions", "count", len(reinject))
+	core.SenderCacher.Recover(pool.signer, reinject)
+	pool.addTxsLocked(reinject, false)
+}
+
+// promoteExecutables moves transactions that have become processable from the
+// future queue to the set of pending transactions. During this process, all
+// invalidated transactions (low nonce, low balance) are deleted.
+func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
+	// Track the promoted transactions to broadcast them at once
+	var promoted []*types.Transaction
+
+	// Iterate over all accounts and promote any executable transactions
+	gasLimit := pool.currentHead.Load().GasLimit
+	for _, addr := range accounts {
+		list := pool.queue[addr]
+		if list == nil {
+			continue // Just in case someone calls with a non-existing account
+		}
+		// Drop all transactions that are deemed too old (low nonce)
+		forwards := list.Forward(pool.currentState.GetNonce(addr))
+		for _, tx := range forwards {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+		}
+		log.Trace("Removed old queued transactions", "count", len(forwards))
+		// Drop all transactions that are too costly (low balance or out of gas)
+		drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
+		for _, tx := range drops {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+		}
+		log.Trace("Removed unpayable queued transactions", "count", len(drops))
+		queuedNofundsMeter.Mark(int64(len(drops)))
+
+		// Gather all executable transactions and promote them
+		readies := list.Ready(pool.pendingNonces.get(addr))
+		for _, tx := range readies {
+			hash := tx.Hash()
+			if pool.promoteTx(addr, hash, tx) {
+				promoted = append(promoted, tx)
+			}
+		}
+		log.Trace("Promoted queued transactions", "count", len(promoted))
+		queuedGauge.Dec(int64(len(readies)))
+
+		// Drop all transactions over the allowed limit
+		var caps types.Transactions
+		if !pool.locals.contains(addr) {
+			caps = list.Cap(int(pool.config.AccountQueue))
+			for _, tx := range caps {
+				hash := tx.Hash()
+				pool.all.Remove(hash)
+				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
+			}
+			queuedRateLimitMeter.Mark(int64(len(caps)))
+		}
+		// Mark all the items dropped as removed
+		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
+		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		if pool.locals.contains(addr) {
+			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		}
+		// Delete the entire queue entry if it became empty.
+		if list.Empty() {
+			delete(pool.queue, addr)
+			delete(pool.beats, addr)
+			if _, ok := pool.pending[addr]; !ok {
+				pool.reserve(addr, false)
+			}
+		}
+	}
+	return promoted
+}
+
+// truncatePending removes transactions from the pending queue if the pool is above the
+// pending limit. The algorithm tries to reduce transaction counts by an approximately
+// equal number for all accounts with many pending transactions.
+func (pool *LegacyPool) truncatePending() {
+	pending := uint64(0)
+	for _, list := range pool.pending {
+		pending += uint64(list.Len())
+	}
+	if pending <= pool.config.GlobalSlots {
+		return
+	}
+
+	pendingBeforeCap := pending
+	// Assemble a spam order to penalize large transactors first
+	spammers := prque.New[int64, common.Address](nil)
+	for addr, list := range pool.pending {
+		// Only evict transactions from high rollers
+		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
+			spammers.Push(addr, int64(list.Len()))
+		}
+	}
+	// Gradually drop transactions from offenders
+	offenders := []common.Address{}
+	for pending > pool.config.GlobalSlots && !spammers.Empty() {
+		// Retrieve the next offender (local addresses were never added to the heap)
+		offender, _ := spammers.Pop()
+		offenders = append(offenders, offender)
+
+		// Equalize balances until all are the same or below the threshold
+		if len(offenders) > 1 {
+			// Calculate the equalization threshold for all current offenders
+			threshold := pool.pending[offender].Len()
+
+			// Iteratively reduce all offenders until below limit or threshold reached
+			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+				for i := 0; i < len(offenders)-1; i++ {
+					list := pool.pending[offenders[i]]
+
+					caps := list.Cap(list.Len() - 1)
+					for _, tx := range caps {
+						// Drop the transaction from the global pools too
+						hash := tx.Hash()
+						pool.all.Remove(hash)
+
+						// Update the account nonce to the dropped transaction
+						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
+						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+					}
+					pool.priced.Removed(len(caps))
+					pendingGauge.Dec(int64(len(caps)))
+					if pool.locals.contains(offenders[i]) {
+						localGauge.Dec(int64(len(caps)))
+					}
+					pending--
+				}
+			}
+		}
+	}
+
+	// If still above threshold, reduce to the limit or the minimum allowance
+	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
+			for _, addr := range offenders {
+				list := pool.pending[addr]
+
+				caps := list.Cap(list.Len() - 1)
+				for _, tx := range caps {
+					// Drop the transaction from the global pools too
+					hash := tx.Hash()
+					pool.all.Remove(hash)
+
+					// Update the account nonce to the dropped transaction
+					pool.pendingNonces.setIfLower(addr, tx.Nonce())
+					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+				}
+				pool.priced.Removed(len(caps))
+				pendingGauge.Dec(int64(len(caps)))
+				if pool.locals.contains(addr) {
+					localGauge.Dec(int64(len(caps)))
+				}
+				pending--
+			}
+		}
+	}
+	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
+}
+
+// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
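+// Local transactions are never dropped here; among the remaining accounts,
+// eviction starts with the one that has been inactive the longest (oldest
+// heartbeat).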
+func (pool *LegacyPool) truncateQueue() {
+	queued := uint64(0)
+	for _, list := range pool.queue {
+		queued += uint64(list.Len())
+	}
+	if queued <= pool.config.GlobalQueue {
+		return
+	}
+
+	// Sort all accounts with queued transactions by heartbeat
+	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
+	for addr := range pool.queue {
+		if !pool.locals.contains(addr) { // don't drop locals
+			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
+		}
+	}
+	sort.Sort(sort.Reverse(addresses))
+
+	// Drop transactions until the total is below the limit or only locals remain
+	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
+		addr := addresses[len(addresses)-1]
+		list := pool.queue[addr.address]
+
+		addresses = addresses[:len(addresses)-1]
+
+		// Drop all of the account's transactions if there are fewer of them than the overflow
+		if size := uint64(list.Len()); size <= drop {
+			for _, tx := range list.Flatten() {
+				pool.removeTx(tx.Hash(), true, true)
+			}
+			drop -= size
+			queuedRateLimitMeter.Mark(int64(size))
+			continue
+		}
+		// Otherwise drop only the last few transactions
+		txs := list.Flatten()
+		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
+			pool.removeTx(txs[i].Hash(), true, true)
+			drop--
+			queuedRateLimitMeter.Mark(1)
+		}
+	}
+}
+
+// demoteUnexecutables removes invalid and processed transactions from the pool's
+// executable/pending queue; any subsequent transactions that become unexecutable
+// are moved back into the future queue.
+//
+// Note: transactions are not marked as removed in the priced list because re-heaping
+// is always explicitly triggered by SetBaseFee, and it would be unnecessary and wasteful
+// to trigger a re-heap in this function.
+func (pool *LegacyPool) demoteUnexecutables() {
+	// Iterate over all accounts and demote any non-executable transactions
+	gasLimit := pool.currentHead.Load().GasLimit
+	for addr, list := range pool.pending {
+		nonce := pool.currentState.GetNonce(addr)
+
+		// Drop all transactions that are deemed too old (low nonce)
+		olds := list.Forward(nonce)
+		for _, tx := range olds {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+			log.Trace("Removed old pending transaction", "hash", hash)
+		}
+		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
+		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
+		for _, tx := range drops {
+			hash := tx.Hash()
+			log.Trace("Removed unpayable pending transaction", "hash", hash)
+			pool.all.Remove(hash)
+		}
+		pendingNofundsMeter.Mark(int64(len(drops)))
+
+		for _, tx := range invalids {
+			hash := tx.Hash()
+			log.Trace("Demoting pending transaction", "hash", hash)
+
+			// Internal shuffle shouldn't touch the lookup set.
+			pool.enqueueTx(hash, tx, false, false)
+		}
+		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		if pool.locals.contains(addr) {
+			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		}
+		// If there's a gap in front, alert (should never happen) and postpone all transactions
+		if list.Len() > 0 && list.txs.Get(nonce) == nil {
+			gapped := list.Cap(0)
+			for _, tx := range gapped {
+				hash := tx.Hash()
+				log.Error("Demoting invalidated transaction", "hash", hash)
+
+				// Internal shuffle shouldn't touch the lookup set.
+				pool.enqueueTx(hash, tx, false, false)
+			}
+			pendingGauge.Dec(int64(len(gapped)))
+		}
+		// Delete the entire pending entry if it became empty.
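+		// Likewise, the address reservation is only released once the account
+		// has no entries left in either the pending or the queued index.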
+		if list.Empty() {
+			delete(pool.pending, addr)
+			if _, ok := pool.queue[addr]; !ok {
+				pool.reserve(addr, false)
+			}
+		}
+	}
+}
+
+// addressByHeartbeat is an account address tagged with its last activity timestamp.
+type addressByHeartbeat struct {
+	address   common.Address
+	heartbeat time.Time
+}
+
+type addressesByHeartbeat []addressByHeartbeat
+
+func (a addressesByHeartbeat) Len() int           { return len(a) }
+func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
+func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// accountSet is simply a set of addresses to check for existence, and a signer
+// capable of deriving addresses from transactions.
+type accountSet struct {
+	accounts map[common.Address]struct{}
+	signer   types.Signer
+	cache    *[]common.Address
+}
+
+// newAccountSet creates a new address set with an associated signer for sender
+// derivations.
+func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
+	as := &accountSet{
+		accounts: make(map[common.Address]struct{}, len(addrs)),
+		signer:   signer,
+	}
+	for _, addr := range addrs {
+		as.add(addr)
+	}
+	return as
+}
+
+// contains checks if a given address is contained within the set.
+func (as *accountSet) contains(addr common.Address) bool {
+	_, exist := as.accounts[addr]
+	return exist
+}
+
+// containsTx checks if the sender of a given tx is within the set. If the sender
+// cannot be derived, this method returns false.
+func (as *accountSet) containsTx(tx *types.Transaction) bool {
+	if addr, err := types.Sender(as.signer, tx); err == nil {
+		return as.contains(addr)
+	}
+	return false
+}
+
+// add inserts a new address into the set to track.
+func (as *accountSet) add(addr common.Address) {
+	as.accounts[addr] = struct{}{}
+	as.cache = nil
+}
+
+// addTx adds the sender of tx into the set.
+func (as *accountSet) addTx(tx *types.Transaction) {
+	if addr, err := types.Sender(as.signer, tx); err == nil {
+		as.add(addr)
+	}
+}
+
+// flatten returns the list of addresses within this set, also caching it for later
+// reuse. The returned slice should not be changed!
+func (as *accountSet) flatten() []common.Address {
+	if as.cache == nil {
+		accounts := make([]common.Address, 0, len(as.accounts))
+		for account := range as.accounts {
+			accounts = append(accounts, account)
+		}
+		as.cache = &accounts
+	}
+	return *as.cache
+}
+
+// merge adds all addresses from the 'other' set into 'as'.
+func (as *accountSet) merge(other *accountSet) {
+	for addr := range other.accounts {
+		as.accounts[addr] = struct{}{}
+	}
+	as.cache = nil
+}
+
+// lookup is used internally by LegacyPool to track transactions while allowing
+// lookup without mutex contention.
+//
+// Note, although this type is properly protected against concurrent access, it
+// is **not** a type that should ever be mutated or even exposed outside of the
+// transaction pool, since its internal state is tightly coupled with the pool's
+// internal mechanisms. The sole purpose of the type is to permit out-of-bound
+// peeking into the pool in LegacyPool.Get without having to acquire the widely
+// scoped LegacyPool.mu mutex.
+//
+// This lookup set also distinguishes "local transactions", which is useful when
+// building higher-level structures on top of the pool.
+type lookup struct {
+	slots   int
+	lock    sync.RWMutex
+	locals  map[common.Hash]*types.Transaction
+	remotes map[common.Hash]*types.Transaction
+}
+
+// newLookup returns a new lookup structure.
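+// Local and remote transactions are kept in separate maps so that remote-only
+// operations (e.g. RemotesBelowTip) can iterate without touching local entries.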
+func newLookup() *lookup {
+	return &lookup{
+		locals:  make(map[common.Hash]*types.Transaction),
+		remotes: make(map[common.Hash]*types.Transaction),
+	}
+}
+
+// Range calls f on each key and value present in the map. The callback should
+// return whether the iteration is to be continued. Callers must specify which
+// set (or both) to iterate over.
+func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if local {
+		for key, value := range t.locals {
+			if !f(key, value, true) {
+				return
+			}
+		}
+	}
+	if remote {
+		for key, value := range t.remotes {
+			if !f(key, value, false) {
+				return
+			}
+		}
+	}
+}
+
+// Get returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) Get(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if tx := t.locals[hash]; tx != nil {
+		return tx
+	}
+	return t.remotes[hash]
+}
+
+// GetLocal returns a transaction if it exists in the local set, or nil if not found.
+func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.locals[hash]
+}
+
+// GetRemote returns a transaction if it exists in the remote set, or nil if not found.
+func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.remotes[hash]
+}
+
+// Count returns the current number of transactions in the lookup.
+func (t *lookup) Count() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.locals) + len(t.remotes)
+}
+
+// LocalCount returns the current number of local transactions in the lookup.
+func (t *lookup) LocalCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.locals)
+}
+
+// RemoteCount returns the current number of remote transactions in the lookup.
+func (t *lookup) RemoteCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.remotes)
+}
+
+// Slots returns the current number of slots used in the lookup.
+func (t *lookup) Slots() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.slots
+}
+
+// Add adds a transaction to the lookup.
+func (t *lookup) Add(tx *types.Transaction, local bool) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.slots += numSlots(tx)
+	slotsGauge.Update(int64(t.slots))
+
+	if local {
+		t.locals[tx.Hash()] = tx
+	} else {
+		t.remotes[tx.Hash()] = tx
+	}
+}
+
+// Remove removes a transaction from the lookup.
+func (t *lookup) Remove(hash common.Hash) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	tx, ok := t.locals[hash]
+	if !ok {
+		tx, ok = t.remotes[hash]
+	}
+	if !ok {
+		log.Error("No transaction found to be deleted", "hash", hash)
+		return
+	}
+	t.slots -= numSlots(tx)
+	slotsGauge.Update(int64(t.slots))
+
+	delete(t.locals, hash)
+	delete(t.remotes, hash)
+}
+
+// RemoteToLocals migrates the transactions belonging to the given locals into
+// the locals set. The locals set is assumed to be thread-safe.
+func (t *lookup) RemoteToLocals(locals *accountSet) int {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	var migrated int
+	for hash, tx := range t.remotes {
+		if locals.containsTx(tx) {
+			t.locals[hash] = tx
+			delete(t.remotes, hash)
+			migrated++
+		}
+	}
+	return migrated
+}
+
+// RemotesBelowTip finds all remote transactions below the given tip threshold.
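+// It is intended for collecting the previously accepted remote transactions
+// that have to be dropped when the pool's minimum gas tip is raised.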
+func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { + found := make(types.Transactions, 0, 128) + t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { + if tx.GasTipCapIntCmp(threshold) < 0 { + found = append(found, tx) + } + return true + }, false, true) // Only iterate remotes + return found +} + +// numSlots calculates the number of slots needed for a single transaction. +func numSlots(tx *types.Transaction) int { + return int((tx.Size() + txSlotSize - 1) / txSlotSize) +} diff --git a/core/txpool/txpool2_test.go b/core/txpool/legacypool/legacypool2_test.go similarity index 84% rename from core/txpool/txpool2_test.go rename to core/txpool/legacypool/legacypool2_test.go index d7ae3bdb1..a73c1bb8a 100644 --- a/core/txpool/txpool2_test.go +++ b/core/txpool/legacypool/legacypool2_test.go @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package txpool +package legacypool import ( "crypto/ecdsa" @@ -33,7 +33,7 @@ func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gaspric return tx } -func count(t *testing.T, pool *TxPool) (pending int, queued int) { +func count(t *testing.T, pool *LegacyPool) (pending int, queued int) { t.Helper() pending, queued = pool.stats() if err := validatePoolInternals(pool); err != nil { @@ -42,7 +42,7 @@ func count(t *testing.T, pool *TxPool) (pending int, queued int) { return pending, queued } -func fillPool(t testing.TB, pool *TxPool) { +func fillPool(t testing.TB, pool *LegacyPool) { t.Helper() // Create a number of test accounts, fund them and make transactions executableTxs := types.Transactions{} @@ -56,8 +56,8 @@ func fillPool(t testing.TB, pool *TxPool) { } } // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(executableTxs) - pool.AddRemotesSync(nonExecutableTxs) + pool.addRemotesSync(executableTxs) + pool.addRemotesSync(nonExecutableTxs) pending, queued := pool.Stats() slots := pool.all.Slots() // sanity-check that the test prerequisites are ok (pending full) @@ -79,12 +79,13 @@ func TestTransactionFutureAttack(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := NewTxPool(config, eip1559Config, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() fillPool(t, pool) pending, _ := pool.Stats() // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops @@ -96,7 +97,7 @@ func TestTransactionFutureAttack(t *testing.T) { futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key)) } for i := 0; i < 5; i++ { - pool.AddRemotesSync(futureTxs) + pool.addRemotesSync(futureTxs) newPending, newQueued := count(t, pool) t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) } @@ -115,9 +116,10 @@ func TestTransactionFuture1559(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, 
state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) - defer pool.Stop() + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of test accounts, fund them and make transactions fillPool(t, pool) @@ -131,7 +133,7 @@ func TestTransactionFuture1559(t *testing.T) { for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key)) } - pool.AddRemotesSync(futureTxs) + pool.addRemotesSync(futureTxs) } newPending, _ := pool.Stats() // Pending should not have been touched @@ -147,9 +149,10 @@ func TestTransactionZAttack(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) - defer pool.Stop() + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of test accounts, fund them and make transactions fillPool(t, pool) @@ -181,7 +184,7 @@ func TestTransactionZAttack(t *testing.T) { key, _ := crypto.GenerateKey() pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key)) - pool.AddRemotesSync(futureTxs) + pool.addRemotesSync(futureTxs) } overDraftTxs := types.Transactions{} @@ -192,11 +195,11 @@ func TestTransactionZAttack(t *testing.T) { overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key)) } } - pool.AddRemotesSync(overDraftTxs) - pool.AddRemotesSync(overDraftTxs) - pool.AddRemotesSync(overDraftTxs) - pool.AddRemotesSync(overDraftTxs) - pool.AddRemotesSync(overDraftTxs) + pool.addRemotesSync(overDraftTxs) + pool.addRemotesSync(overDraftTxs) + pool.addRemotesSync(overDraftTxs) + pool.addRemotesSync(overDraftTxs) + pool.addRemotesSync(overDraftTxs) newPending, newQueued := count(t, pool) newIvPending := countInvalidPending() @@ -214,12 +217,13 @@ func TestTransactionZAttack(t *testing.T) { func BenchmarkFutureAttack(b *testing.B) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := NewTxPool(config, eip1559Config, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() fillPool(b, pool) key, _ := crypto.GenerateKey() @@ -231,6 +235,6 @@ func BenchmarkFutureAttack(b *testing.B) { } b.ResetTimer() 
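+	// Measure only the repeated import of the oversized future-transaction batch.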
for i := 0; i < 5; i++ { - pool.AddRemotesSync(futureTxs) + pool.addRemotesSync(futureTxs) } } diff --git a/core/txpool/txpool_test.go b/core/txpool/legacypool/legacypool_test.go similarity index 85% rename from core/txpool/txpool_test.go rename to core/txpool/legacypool/legacypool_test.go index 22e106aaf..a8f3dd7d8 100644 --- a/core/txpool/txpool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package txpool +package legacypool import ( "crypto/ecdsa" @@ -24,6 +24,7 @@ import ( "math/big" "math/rand" "os" + "sync" "sync/atomic" "testing" "time" @@ -32,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" @@ -59,17 +61,22 @@ func init() { } type testBlockChain struct { + config *params.ChainConfig gasLimit atomic.Uint64 statedb *state.StateDB chainHeadFeed *event.Feed } -func newTestBlockChain(gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { - bc := testBlockChain{statedb: statedb, chainHeadFeed: new(event.Feed)} +func newTestBlockChain(config *params.ChainConfig, gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { + bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: new(event.Feed)} bc.gasLimit.Store(gasLimit) return &bc } +func (bc *testBlockChain) Config() *params.ChainConfig { + return bc.config +} + func (bc *testBlockChain) CurrentBlock() *types.Header { return &types.Header{ Number: new(big.Int), @@ -121,24 +128,51 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int, return tx } -func setupPool() (*TxPool, *ecdsa.PrivateKey) { +func makeAddressReserver() txpool.AddressReserver { + var ( + reserved = make(map[common.Address]struct{}) + lock sync.Mutex + ) + return func(addr common.Address, reserve bool) error { + lock.Lock() + defer lock.Unlock() + + _, exists := reserved[addr] + if reserve { + if exists { + panic("already reserved") + } + reserved[addr] = struct{}{} + return nil + } + if !exists { + panic("not reserved") + } + delete(reserved, addr) + return nil + } +} + +func setupPool() (*LegacyPool, *ecdsa.PrivateKey) { return setupPoolWithConfig(params.TestChainConfig) } -func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { +func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(10000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed)) key, _ := crypto.GenerateKey() - pool := NewTxPool(testTxPoolConfig, config, blockchain) - + pool := New(testTxPoolConfig, blockchain) + if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil { + panic(err) + } // wait for the pool to initialize <-pool.initDoneCh return pool, key } // validatePoolInternals checks various consistency invariants within the pool. 
-func validatePoolInternals(pool *TxPool) error { +func validatePoolInternals(pool *LegacyPool) error { pool.mu.RLock() defer pool.mu.RUnlock() @@ -242,20 +276,21 @@ func TestStateChangeDuringReset(t *testing.T) { // setup pool with 2 transaction in it statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether)) - blockchain := &testChain{newTestBlockChain(1000000000, statedb, new(event.Feed)), address, &trigger} + blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger} tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) - pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() nonce := pool.Nonce(address) if nonce != 0 { t.Fatalf("Invalid nonce, want 0, got %d", nonce) } - pool.AddRemotesSync([]*types.Transaction{tx0, tx1}) + pool.addRemotesSync([]*types.Transaction{tx0, tx1}) nonce = pool.Nonce(address) if nonce != 2 { @@ -272,13 +307,13 @@ func TestStateChangeDuringReset(t *testing.T) { } } -func testAddBalance(pool *TxPool, addr common.Address, amount *big.Int) { +func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) { pool.mu.Lock() pool.currentState.AddBalance(addr, amount) pool.mu.Unlock() } -func testSetNonce(pool *TxPool, addr common.Address, nonce uint64) { +func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) { pool.mu.Lock() pool.currentState.SetNonce(addr, nonce) pool.mu.Unlock() @@ -288,36 +323,36 @@ func TestInvalidTransactions(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() tx := transaction(0, 100, key) from, _ := deriveSender(tx) // Intrinsic gas too low testAddBalance(pool, from, big.NewInt(1)) - if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { + if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } // Insufficient funds tx = transaction(0, 100000, key) - if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { + if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } testSetNonce(pool, from, 1) testAddBalance(pool, from, big.NewInt(0xffffffffffffff)) tx = transaction(0, 100000, key) - if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { + if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } tx = transaction(1, 100000, key) - pool.gasPrice = big.NewInt(1000) - if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) { + pool.gasTip.Store(big.NewInt(1000)) + if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } - if err := pool.AddLocal(tx); err != nil { + if err := pool.addLocal(tx); err != nil { t.Error("expected", nil, "got", err) } } @@ -326,7 +361,7 @@ func TestQueue(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() tx := transaction(0, 100, key) from, _ := deriveSender(tx) @@ -357,7 +392,7 @@ func TestQueue2(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() tx1 := transaction(0, 100, key) tx2 := 
transaction(10, 100, key) @@ -383,13 +418,13 @@ func TestNegativeValue(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) from, _ := deriveSender(tx) testAddBalance(pool, from, big.NewInt(1)) - if err := pool.AddRemote(tx); err != ErrNegativeValue { - t.Error("expected", ErrNegativeValue, "got", err) + if err := pool.addRemote(tx); err != txpool.ErrNegativeValue { + t.Error("expected", txpool.ErrNegativeValue, "got", err) } } @@ -397,11 +432,11 @@ func TestTipAboveFeeCap(t *testing.T) { t.Parallel() pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Stop() + defer pool.Close() tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) - if err := pool.AddRemote(tx); err != core.ErrTipAboveFeeCap { + if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap { t.Error("expected", core.ErrTipAboveFeeCap, "got", err) } } @@ -410,18 +445,18 @@ func TestVeryHighValues(t *testing.T) { t.Parallel() pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Stop() + defer pool.Close() veryBigNumber := big.NewInt(1) veryBigNumber.Lsh(veryBigNumber, 300) tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) - if err := pool.AddRemote(tx); err != core.ErrTipVeryHigh { + if err := pool.addRemote(tx); err != core.ErrTipVeryHigh { t.Error("expected", core.ErrTipVeryHigh, "got", err) } tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) - if err := pool.AddRemote(tx2); err != core.ErrFeeCapVeryHigh { + if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh { t.Error("expected", core.ErrFeeCapVeryHigh, "got", err) } } @@ -430,14 +465,14 @@ func TestChainFork(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = newTestBlockChain(1000000, statedb, new(event.Feed)) + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) <-pool.requestReset(nil, nil) } resetState() @@ -446,7 +481,7 @@ func TestChainFork(t *testing.T) { if _, err := pool.add(tx, false); err != nil { t.Error("didn't expect error", err) } - pool.removeTx(tx.Hash(), true) + pool.removeTx(tx.Hash(), true, true) // reset the pool's internal state resetState() @@ -459,14 +494,14 @@ func TestDoubleNonce(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = newTestBlockChain(1000000, statedb, new(event.Feed)) + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) <-pool.requestReset(nil, nil) } resetState() @@ -510,7 +545,7 @@ func TestMissingNonce(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Stop() + defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, addr, big.NewInt(100000000000000)) @@ -534,7 +569,7 @@ func TestNonceRecovery(t *testing.T) { const n = 10 pool, key := setupPool() - defer pool.Stop() + defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) 
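+	// Start the account at nonce n, so the first executable transaction is the one with nonce n.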
testSetNonce(pool, addr, n) @@ -542,7 +577,7 @@ func TestNonceRecovery(t *testing.T) { <-pool.requestReset(nil, nil) tx := transaction(n, 100000, key) - if err := pool.AddRemote(tx); err != nil { + if err := pool.addRemote(tx); err != nil { t.Error(err) } // simulate some weird re-order of transactions and missing nonce(s) @@ -560,7 +595,7 @@ func TestDropping(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000)) @@ -664,10 +699,11 @@ func TestPostponing(t *testing.T) { // Create the pool to test the postponing with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create two test accounts to produce different gap profiles with keys := make([]*ecdsa.PrivateKey, 2) @@ -692,7 +728,7 @@ func TestPostponing(t *testing.T) { txs = append(txs, tx) } } - for i, err := range pool.AddRemotesSync(txs) { + for i, err := range pool.addRemotesSync(txs) { if err != nil { t.Fatalf("tx %d: failed to add transactions: %v", i, err) } @@ -777,7 +813,7 @@ func TestGapFilling(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -788,7 +824,7 @@ func TestGapFilling(t *testing.T) { defer sub.Unsubscribe() // Create a pending and a queued transaction with a nonce-gap in between - pool.AddRemotesSync([]*types.Transaction{ + pool.addRemotesSync([]*types.Transaction{ transaction(0, 100000, key), transaction(2, 100000, key), }) @@ -831,7 +867,7 @@ func TestQueueAccountLimiting(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -876,14 +912,15 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of test accounts and fund them (last one will be the local) keys := make([]*ecdsa.PrivateKey, 5) @@ -905,7 +942,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { nonces[addr]++ } // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(txs) + 
pool.addRemotesSync(txs) queued := 0 for addr, list := range pool.queue { @@ -922,7 +959,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { for i := uint64(0); i < 3*config.GlobalQueue; i++ { txs = append(txs, transaction(i+1, 100000, local)) } - pool.AddLocals(txs) + pool.addLocals(txs) // If locals are disabled, the previous eviction algorithm should apply here too if nolocals { @@ -968,14 +1005,15 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // Create the pool to test the non-expiration enforcement statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.Lifetime = time.Second config.NoLocals = nolocals - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create two test accounts to ensure remotes expire but locals do not local, _ := crypto.GenerateKey() @@ -985,10 +1023,10 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) // Add the two transactions and ensure they both are queued up - if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } pending, queued := pool.Stats() @@ -1055,7 +1093,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { } // Queue gapped transactions - if err := pool.AddLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { + if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { @@ -1064,7 +1102,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { time.Sleep(5 * evictionInterval) // A half lifetime pass // Queue executable transactions, the life cycle should be restarted. 
- if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { @@ -1078,7 +1116,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) @@ -1112,7 +1150,7 @@ func TestPendingLimiting(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000000000)) @@ -1153,13 +1191,14 @@ func TestPendingGlobalLimiting(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of test accounts and fund them keys := make([]*ecdsa.PrivateKey, 5) @@ -1179,7 +1218,7 @@ func TestPendingGlobalLimiting(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(txs) + pool.addRemotesSync(txs) pending := 0 for _, list := range pool.pending { @@ -1201,7 +1240,7 @@ func TestAllowedTxSize(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000000)) @@ -1210,7 +1249,7 @@ func TestAllowedTxSize(t *testing.T) { // // It is assumed the fields in the transaction (except of the data) are: // - nonce <= 32 bytes - // - gasPrice <= 32 bytes + // - gasTip <= 32 bytes // - gasLimit <= 32 bytes // - recipient == 20 bytes // - value <= 32 bytes @@ -1218,22 +1257,21 @@ func TestAllowedTxSize(t *testing.T) { // All those fields are summed up to at most 213 bytes. 
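+	// (nonce, gasTip, gasLimit and value at 32 bytes each, the recipient at 20
+	// bytes, plus a 65-byte signature: 4*32 + 20 + 65 = 213)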
baseSize := uint64(213) dataSize := txMaxSize - baseSize - maxGas := pool.currentMaxGas.Load() // Try adding a transaction with maximal allowed size - tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize) + tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize) if err := pool.addRemoteSync(tx); err != nil { t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err) } // Try adding a transaction with random allowed size - if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { + if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { t.Fatalf("failed to add transaction of random allowed size: %v", err) } // Try adding a transaction of minimal not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil { t.Fatalf("expected rejection on slightly oversize transaction") } // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { t.Fatalf("expected rejection on oversize transaction") } // Run some sanity checks on the pool internals @@ -1255,15 +1293,16 @@ func TestCapClearsFromAll(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.AccountSlots = 2 config.AccountQueue = 2 config.GlobalSlots = 8 - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of test accounts and fund them key, _ := crypto.GenerateKey() @@ -1275,7 +1314,7 @@ func TestCapClearsFromAll(t *testing.T) { txs = append(txs, transaction(uint64(j), 100000, key)) } // Import the batch and verify that limits have been enforced - pool.AddRemotes(txs) + pool.addRemotes(txs) if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1289,13 +1328,14 @@ func TestPendingMinimumAllowance(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 1 - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of 
test accounts and fund them keys := make([]*ecdsa.PrivateKey, 5) @@ -1315,7 +1355,7 @@ func TestPendingMinimumAllowance(t *testing.T) { } } // Import the batch and verify that limits have been enforced - pool.AddRemotesSync(txs) + pool.addRemotesSync(txs) for addr, list := range pool.pending { if list.Len() != int(config.AccountSlots) { @@ -1337,10 +1377,11 @@ func TestRepricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1371,8 +1412,8 @@ func TestRepricing(t *testing.T) { ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3]) // Import the batch and that both pending and queued transactions match up - pool.AddRemotesSync(txs) - pool.AddLocal(ltx) + pool.addRemotesSync(txs) + pool.addLocal(ltx) pending, queued := pool.Stats() if pending != 7 { @@ -1388,7 +1429,7 @@ func TestRepricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped - pool.SetGasPrice(big.NewInt(2)) + pool.SetGasTip(big.NewInt(2)) pending, queued = pool.Stats() if pending != 2 { @@ -1404,14 +1445,14 @@ func TestRepricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Check that we can't add the old transactions back - if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); err != ErrUnderpriced { - t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) @@ -1421,7 +1462,7 @@ func TestRepricing(t *testing.T) { } // However we can add local underpriced transactions tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3]) - if err := 
pool.AddLocal(tx); err != nil { + if err := pool.addLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } if pending, _ = pool.Stats(); pending != 3 { @@ -1434,13 +1475,13 @@ func TestRepricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // And we can fill gaps with properly priced transactions - if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { + if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } if err := validateEvents(events, 5); err != nil { @@ -1461,7 +1502,7 @@ func TestRepricingDynamicFee(t *testing.T) { // Create the pool to test the pricing enforcement with pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Stop() + defer pool.Close() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1492,8 +1533,8 @@ func TestRepricingDynamicFee(t *testing.T) { ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3]) // Import the batch and that both pending and queued transactions match up - pool.AddRemotesSync(txs) - pool.AddLocal(ltx) + pool.addRemotesSync(txs) + pool.addLocal(ltx) pending, queued := pool.Stats() if pending != 7 { @@ -1509,7 +1550,7 @@ func TestRepricingDynamicFee(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped - pool.SetGasPrice(big.NewInt(2)) + pool.SetGasTip(big.NewInt(2)) pending, queued = pool.Stats() if pending != 2 { @@ -1526,16 +1567,16 @@ func TestRepricingDynamicFee(t *testing.T) { } // Check that we can't add the old transactions back tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { - t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } if err := validateEvents(events, 0); 
err != nil { t.Fatalf("post-reprice event firing failed: %v", err) @@ -1545,7 +1586,7 @@ func TestRepricingDynamicFee(t *testing.T) { } // However we can add local underpriced transactions tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3]) - if err := pool.AddLocal(tx); err != nil { + if err := pool.addLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } if pending, _ = pool.Stats(); pending != 3 { @@ -1559,15 +1600,15 @@ func TestRepricingDynamicFee(t *testing.T) { } // And we can fill gaps with properly priced transactions tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) - if err := pool.AddRemote(tx); err != nil { + if err := pool.addRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) - if err := pool.AddRemote(tx); err != nil { + if err := pool.addRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2]) - if err := pool.AddRemote(tx); err != nil { + if err := pool.addRemoteSync(tx); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } if err := validateEvents(events, 5); err != nil { @@ -1585,10 +1626,11 @@ func TestRepricingKeepsLocals(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a number of test accounts and fund them keys := make([]*ecdsa.PrivateKey, 3) @@ -1600,23 +1642,23 @@ func TestRepricingKeepsLocals(t *testing.T) { for i := uint64(0); i < 500; i++ { // Add pending transaction. pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.AddLocal(pendingTx); err != nil { + if err := pool.addLocal(pendingTx); err != nil { t.Fatal(err) } // Add queued transaction. queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.AddLocal(queuedTx); err != nil { + if err := pool.addLocal(queuedTx); err != nil { t.Fatal(err) } // Add pending dynamic fee transaction. pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.AddLocal(pendingTx); err != nil { + if err := pool.addLocal(pendingTx); err != nil { t.Fatal(err) } // Add queued dynamic fee transaction. 
queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.AddLocal(queuedTx); err != nil { + if err := pool.addLocal(queuedTx); err != nil { t.Fatal(err) } } @@ -1638,13 +1680,13 @@ func TestRepricingKeepsLocals(t *testing.T) { validate() // Reprice the pool and check that nothing is dropped - pool.SetGasPrice(big.NewInt(2)) + pool.SetGasTip(big.NewInt(2)) validate() - pool.SetGasPrice(big.NewInt(2)) - pool.SetGasPrice(big.NewInt(4)) - pool.SetGasPrice(big.NewInt(8)) - pool.SetGasPrice(big.NewInt(100)) + pool.SetGasTip(big.NewInt(2)) + pool.SetGasTip(big.NewInt(4)) + pool.SetGasTip(big.NewInt(8)) + pool.SetGasTip(big.NewInt(100)) validate() } @@ -1658,14 +1700,15 @@ func TestUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 2 config.GlobalQueue = 2 - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1689,8 +1732,8 @@ func TestUnderpricing(t *testing.T) { ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2]) // Import the batch and that both pending and queued transactions match up - pool.AddRemotes(txs) - pool.AddLocal(ltx) + pool.addRemotes(txs) + pool.addLocal(ltx) pending, queued := pool.Stats() if pending != 3 { @@ -1706,26 +1749,26 @@ func TestUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction on block limit fails - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } // Replace a future transaction with a future transaction - if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 t.Fatalf("failed to add well priced transaction: %v", err) } // Ensure that adding high priced transactions drops cheap ones, but not own - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 + if err := pool.addRemoteSync(pricedTransaction(2, 
100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 + if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } // Ensure that replacing a pending transaction with a future transaction fails - if err := pool.AddRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != ErrFutureReplacePending { - t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending) + if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { + t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) } pending, queued = pool.Stats() if pending != 2 { @@ -1742,11 +1785,11 @@ func TestUnderpricing(t *testing.T) { } // Ensure that adding local transactions can push out even higher priced ones ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) - if err := pool.AddLocal(ltx); err != nil { + if err := pool.addLocal(ltx); err != nil { t.Fatalf("failed to append underpriced local transaction: %v", err) } ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3]) - if err := pool.AddLocal(ltx); err != nil { + if err := pool.addLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() @@ -1772,14 +1815,15 @@ func TestStableUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 128 config.GlobalQueue = 0 - pool := NewTxPool(config, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1797,7 +1841,7 @@ func TestStableUnderpricing(t *testing.T) { for i := uint64(0); i < config.GlobalSlots; i++ { txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) } - pool.AddRemotesSync(txs) + pool.addRemotesSync(txs) pending, queued := pool.Stats() if pending != int(config.GlobalSlots) { @@ -1840,7 +1884,7 @@ func TestUnderpricingDynamicFee(t *testing.T) { t.Parallel() pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Stop() + defer pool.Close() pool.config.GlobalSlots = 2 pool.config.GlobalQueue = 2 @@ -1867,8 +1911,8 @@ func TestUnderpricingDynamicFee(t *testing.T) { ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) // Import the batch and that both pending and queued transactions match up - pool.AddRemotes(txs) // Pend K0:0, K0:1; Que K1:1 - pool.AddLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 + pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 + pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 pending, queued := 
pool.Stats() if pending != 3 { @@ -1886,22 +1930,22 @@ func TestUnderpricingDynamicFee(t *testing.T) { // Ensure that adding an underpriced transaction fails tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.AddRemote(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1 - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) - if err := pool.AddRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) } tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) - if err := pool.AddRemote(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 + if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) - if err := pool.AddRemote(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 + if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } pending, queued = pool.Stats() @@ -1919,11 +1963,11 @@ func TestUnderpricingDynamicFee(t *testing.T) { } // Ensure that adding local transactions can push out even higher priced ones ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) - if err := pool.AddLocal(ltx); err != nil { + if err := pool.addLocal(ltx); err != nil { t.Fatalf("failed to append underpriced local transaction: %v", err) } ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3]) - if err := pool.AddLocal(ltx); err != nil { + if err := pool.addLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() @@ -1947,7 +1991,7 @@ func TestDualHeapEviction(t *testing.T) { t.Parallel() pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Stop() + defer pool.Close() pool.config.GlobalSlots = 10 pool.config.GlobalQueue = 10 @@ -1976,7 +2020,7 @@ func TestDualHeapEviction(t *testing.T) { tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key) highCap = tx } - pool.AddRemotesSync([]*types.Transaction{tx}) + pool.addRemotesSync([]*types.Transaction{tx}) } pending, queued := pool.Stats() if pending+queued != 20 { @@ -2004,10 +2048,11 @@ func TestDeduplication(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create a 
test account to add transactions with key, _ := crypto.GenerateKey() @@ -2022,7 +2067,7 @@ func TestDeduplication(t *testing.T) { for i := 0; i < len(txs); i += 2 { firsts = append(firsts, txs[i]) } - errs := pool.AddRemotesSync(firsts) + errs := pool.addRemotesSync(firsts) if len(errs) != len(firsts) { t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) } @@ -2039,7 +2084,7 @@ func TestDeduplication(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) } // Try to add all of them now and ensure previous ones error out as knowns - errs = pool.AddRemotesSync(txs) + errs = pool.addRemotesSync(txs) if len(errs) != len(txs) { t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) } @@ -2070,10 +2115,11 @@ func TestReplacement(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -2091,10 +2137,10 @@ func TestReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) + if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap pending transaction: %v", err) } if err := validateEvents(events, 2); err != nil { @@ -2104,10 +2150,10 @@ func TestReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) + if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) } - if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper 
pending transaction: %v", err) } if err := validateEvents(events, 2); err != nil { @@ -2115,23 +2161,23 @@ func TestReplacement(t *testing.T) { } // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) + if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper queued transaction: %v", err) } - if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { - t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) + if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) } - if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper queued transaction: %v", err) } @@ -2150,7 +2196,7 @@ func TestReplacementDynamicFee(t *testing.T) { // Create the pool to test the pricing enforcement with pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Stop() + defer pool.Close() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) // Keep track of transaction events to ensure all executables get announced @@ -2192,12 +2238,12 @@ func TestReplacementDynamicFee(t *testing.T) { } // 2. Don't bump tip or feecap => discard tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { - t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) } // 3. Bump both more than min => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key) - if err := pool.AddRemote(tx); err != nil { + if err := pool.addRemote(tx); err != nil { t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) } // 4. 
Check events match expected (2 new executable txs during pending, 0 during queue) @@ -2215,27 +2261,27 @@ func TestReplacementDynamicFee(t *testing.T) { } // 6. Bump tip max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) } // 7. Bump fee cap max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) } // 8. Bump tip min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) } // 9. Bump fee cap min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key) - if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) } // 10. Check events match expected (3 new executable txs during pending, 0 during queue) tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key) - if err := pool.AddRemote(tx); err != nil { + if err := pool.addRemote(tx); err != nil { t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) } // 11. 
Check events match expected (3 new executable txs during pending, 0 during queue) @@ -2275,14 +2321,15 @@ func testJournaling(t *testing.T, nolocals bool) { // Create the original pool to inject transaction into the journal statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals config.Journal = journal config.Rejournal = time.Second - pool := NewTxPool(config, params.TestChainConfig, blockchain) + pool := New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) // Create two test accounts to ensure remotes expire but locals do not local, _ := crypto.GenerateKey() @@ -2292,13 +2339,13 @@ func testJournaling(t *testing.T, nolocals bool) { testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) // Add three local and a remote transactions and ensure they are queued up - if err := pool.AddLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { + if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { @@ -2315,11 +2362,12 @@ func testJournaling(t *testing.T, nolocals bool) { t.Fatalf("pool internal state corrupted: %v", err) } // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive - pool.Stop() + pool.Close() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = NewTxPool(config, params.TestChainConfig, blockchain) + pool = New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pending, queued = pool.Stats() if queued != 0 { @@ -2341,11 +2389,12 @@ func testJournaling(t *testing.T, nolocals bool) { statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) <-pool.requestReset(nil, nil) time.Sleep(2 * config.Rejournal) - pool.Stop() + pool.Close() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(1000000, statedb, new(event.Feed)) - pool = NewTxPool(config, params.TestChainConfig, blockchain) + blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + pool = New(config, blockchain) + pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) pending, queued = pool.Stats() if pending != 0 { @@ -2363,7 +2412,7 @@ func testJournaling(t *testing.T, nolocals bool) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", 
err) } - pool.Stop() + pool.Close() } // TestStatusCheck tests that the pool can correctly retrieve the @@ -2373,10 +2422,11 @@ func TestStatusCheck(t *testing.T) { // Create the pool to test the status retrievals with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) - defer pool.Stop() + pool := New(testTxPoolConfig, blockchain) + pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() // Create the test accounts to check various transaction statuses with keys := make([]*ecdsa.PrivateKey, 3) @@ -2393,7 +2443,7 @@ func TestStatusCheck(t *testing.T) { txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only // Import the transaction and ensure they are correctly added - pool.AddRemotesSync(txs) + pool.addRemotesSync(txs) pending, queued := pool.Stats() if pending != 2 { @@ -2411,13 +2461,11 @@ func TestStatusCheck(t *testing.T) { hashes[i] = tx.Hash() } hashes = append(hashes, common.Hash{}) + expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown} - statuses := pool.Status(hashes) - expect := []TxStatus{TxStatusPending, TxStatusPending, TxStatusQueued, TxStatusQueued, TxStatusUnknown} - - for i := 0; i < len(statuses); i++ { - if statuses[i] != expect[i] { - t.Errorf("transaction %d: status mismatch: have %v, want %v", i, statuses[i], expect[i]) + for i := 0; i < len(hashes); i++ { + if status := pool.Status(hashes[i]); status != expect[i] { + t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i]) } } } @@ -2449,7 +2497,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1 func benchmarkPendingDemotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -2474,7 +2522,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1 func benchmarkFuturePromotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -2502,7 +2550,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000 func benchmarkBatchInsert(b *testing.B, size int, local bool) { // Generate a batch of transactions to enqueue into the pool pool, key := setupPool() - defer pool.Stop() + defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000000000000000)) @@ -2518,9 +2566,9 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) { b.ResetTimer() for _, batch := range batches { if local { - pool.AddLocals(batch) + pool.addLocals(batch) } else { - pool.AddRemotes(batch) + pool.addRemotes(batch) } } } @@ -2548,15 +2596,15 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { pool, _ := setupPool() testAddBalance(pool, account, big.NewInt(100000000)) for _, local := range locals { - 
pool.AddLocal(local) + pool.addLocal(local) } b.StartTimer() // Assign a high enough balance for testing testAddBalance(pool, remoteAddr, big.NewInt(100000000)) for i := 0; i < len(remotes); i++ { - pool.AddRemotes([]*types.Transaction{remotes[i]}) + pool.addRemotes([]*types.Transaction{remotes[i]}) } - pool.Stop() + pool.Close() } } @@ -2564,7 +2612,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool pool, _ := setupPool() - defer pool.Stop() + defer pool.Close() b.ReportAllocs() batches := make(types.Transactions, b.N) for i := 0; i < b.N; i++ { @@ -2577,6 +2625,6 @@ func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Benchmark importing the transactions into the queue b.ResetTimer() for _, tx := range batches { - pool.AddRemotesSync([]*types.Transaction{tx}) + pool.addRemotesSync([]*types.Transaction{tx}) } } diff --git a/core/txpool/list.go b/core/txpool/legacypool/list.go similarity index 96% rename from core/txpool/list.go rename to core/txpool/legacypool/list.go index fae7c2fca..384fa7b61 100644 --- a/core/txpool/list.go +++ b/core/txpool/legacypool/list.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package txpool +package legacypool import ( "container/heap" @@ -53,9 +53,10 @@ func (h *nonceHeap) Pop() interface{} { // sortedMap is a nonce->transaction hash map with a heap based index to allow // iterating over the contents in a nonce-incrementing way. type sortedMap struct { - items map[uint64]*types.Transaction // Hash map storing the transaction data - index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) - cache types.Transactions // Cache of the transactions already sorted + items map[uint64]*types.Transaction // Hash map storing the transaction data + index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) + cache types.Transactions // Cache of the transactions already sorted + cacheMu sync.Mutex // Mutex covering the cache } // newSortedMap creates a new nonce-sorted transaction map. @@ -78,7 +79,9 @@ func (m *sortedMap) Put(tx *types.Transaction) { if m.items[nonce] == nil { heap.Push(m.index, nonce) } + m.cacheMu.Lock() m.items[nonce], m.cache = tx, nil + m.cacheMu.Unlock() } // Forward removes all transactions from the map with a nonce lower than the @@ -94,9 +97,11 @@ func (m *sortedMap) Forward(threshold uint64) types.Transactions { delete(m.items, nonce) } // If we had a cached order, shift the front + m.cacheMu.Lock() if m.cache != nil { m.cache = m.cache[len(removed):] } + m.cacheMu.Unlock() return removed } @@ -120,7 +125,9 @@ func (m *sortedMap) reheap() { *m.index = append(*m.index, nonce) } heap.Init(m.index) + m.cacheMu.Lock() m.cache = nil + m.cacheMu.Unlock() } // filter is identical to Filter, but **does not** regenerate the heap. 
This method @@ -136,7 +143,9 @@ func (m *sortedMap) filter(filter func(*types.Transaction) bool) types.Transacti } } if len(removed) > 0 { + m.cacheMu.Lock() m.cache = nil + m.cacheMu.Unlock() } return removed } @@ -160,9 +169,11 @@ func (m *sortedMap) Cap(threshold int) types.Transactions { heap.Init(m.index) // If we had a cache, shift the back + m.cacheMu.Lock() if m.cache != nil { m.cache = m.cache[:len(m.cache)-len(drops)] } + m.cacheMu.Unlock() return drops } @@ -182,7 +193,9 @@ func (m *sortedMap) Remove(nonce uint64) bool { } } delete(m.items, nonce) + m.cacheMu.Lock() m.cache = nil + m.cacheMu.Unlock() return true } @@ -206,7 +219,9 @@ func (m *sortedMap) Ready(start uint64) types.Transactions { delete(m.items, next) heap.Pop(m.index) } + m.cacheMu.Lock() m.cache = nil + m.cacheMu.Unlock() return ready } @@ -217,6 +232,8 @@ func (m *sortedMap) Len() int { } func (m *sortedMap) flatten() types.Transactions { + m.cacheMu.Lock() + defer m.cacheMu.Unlock() // If the sorting was not cached yet, create and cache it if m.cache == nil { m.cache = make(types.Transactions, 0, len(m.items)) @@ -232,8 +249,8 @@ func (m *sortedMap) flatten() types.Transactions { // sorted internal representation. The result of the sorting is cached in case // it's requested again before any modifications are made to the contents. func (m *sortedMap) Flatten() types.Transactions { - // Copy the cache to prevent accidental modifications cache := m.flatten() + // Copy the cache to prevent accidental modification txs := make(types.Transactions, len(cache)) copy(txs, cache) return txs @@ -590,7 +607,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool { func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop for slots > 0 { - if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio || floatingRatio == 0 { + if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio { // Discard stale transactions if found during cleanup tx := heap.Pop(&l.urgent).(*types.Transaction) if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated diff --git a/core/txpool/list_test.go b/core/txpool/legacypool/list_test.go similarity index 99% rename from core/txpool/list_test.go rename to core/txpool/legacypool/list_test.go index 4e1a5d5e8..b5cd34b23 100644 --- a/core/txpool/list_test.go +++ b/core/txpool/legacypool/list_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package txpool +package legacypool import ( "math/big" diff --git a/core/txpool/noncer.go b/core/txpool/legacypool/noncer.go similarity index 99% rename from core/txpool/noncer.go rename to core/txpool/legacypool/noncer.go index ba7fbedad..2c65dd2ca 100644 --- a/core/txpool/noncer.go +++ b/core/txpool/legacypool/noncer.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package txpool +package legacypool import ( "sync" diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go new file mode 100644 index 000000000..85312c431 --- /dev/null +++ b/core/txpool/subpool.go @@ -0,0 +1,127 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package txpool + +import ( + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// LazyTransaction contains a small subset of the transaction properties that is +// enough for the miner and other APIs to handle large batches of transactions; +// and supports pulling up the entire transaction when really needed. +type LazyTransaction struct { + Pool SubPool // Transaction subpool to pull the real transaction up + Hash common.Hash // Transaction hash to pull up if needed + Tx *types.Transaction // Transaction if already resolved + + Time time.Time // Time when the transaction was first seen + GasFeeCap *big.Int // Maximum fee per gas the transaction may consume + GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay +} + +// Resolve retrieves the full transaction belonging to a lazy handle if it is still +// maintained by the transaction pool. +func (ltx *LazyTransaction) Resolve() *types.Transaction { + if ltx.Tx == nil { + ltx.Tx = ltx.Pool.Get(ltx.Hash) + } + return ltx.Tx +} + +// AddressReserver is passed by the main transaction pool to subpools, so they +// may request (and relinquish) exclusive access to certain addresses. +type AddressReserver func(addr common.Address, reserve bool) error + +// SubPool represents a specialized transaction pool that lives on its own (e.g. +// blob pool). Since independent of how many specialized pools we have, they do +// need to be updated in lockstep and assemble into one coherent view for block +// production, this interface defines the common methods that allow the primary +// transaction pool to manage the subpools. +type SubPool interface { + // Filter is a selector used to decide whether a transaction would be added + // to this particular subpool. + Filter(tx *types.Transaction) bool + + // Init sets the base parameters of the subpool, allowing it to load any saved + // transactions from disk and also permitting internal maintenance routines to + // start up. + // + // These should not be passed as a constructor argument - nor should the pools + // start by themselves - in order to keep multiple subpools in lockstep with + // one another. + Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error + + // Close terminates any background processing threads and releases any held + // resources. + Close() error + + // Reset retrieves the current state of the blockchain and ensures the content + // of the transaction pool is valid with regard to the chain state. + Reset(oldHead, newHead *types.Header) + + // SetGasTip updates the minimum price required by the subpool for a new + // transaction, and drops all transactions below this threshold.
+ SetGasTip(tip *big.Int) + + // Has returns an indicator whether the subpool has a transaction cached with the + // given hash. + Has(hash common.Hash) bool + + // Get returns a transaction if it is contained in the pool, or nil otherwise. + Get(hash common.Hash) *types.Transaction + + // Add enqueues a batch of transactions into the pool if they are valid. Due + // to the large transaction churn, add may postpone fully integrating the tx + // to a later point to batch multiple ones together. + Add(txs []*types.Transaction, local bool, sync bool) []error + + // Pending retrieves all currently processable transactions, grouped by origin + // account and sorted by nonce. + Pending(enforceTips bool) map[common.Address][]*LazyTransaction + + // SubscribeTransactions subscribes to new transaction events. + SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription + + // Nonce returns the next nonce of an account, with all transactions executable + // by the pool already applied on top. + Nonce(addr common.Address) uint64 + + // Stats retrieves the current pool stats, namely the number of pending and the + // number of queued (non-executable) transactions. + Stats() (int, int) + + // Content retrieves the data content of the transaction pool, returning all the + // pending as well as queued transactions, grouped by account and sorted by nonce. + Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) + + // ContentFrom retrieves the data content of the transaction pool, returning the + // pending as well as queued transactions of this address, grouped by nonce. + ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) + + // Locals retrieves the accounts currently considered local by the pool. + Locals() []common.Address + + // Status returns the known status (unknown/pending/queued) of a transaction + // identified by its hash. + Status(hash common.Hash) TxStatus +} diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index cbb8cc287..e40b41405 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -1,4 +1,4 @@ -// Copyright 2014 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -19,125 +19,15 @@ package txpool import ( "errors" "fmt" - "math" "math/big" - "sort" "sync" - "sync/atomic" - "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" - "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/params" -) - -const ( - // chainHeadChanSize is the size of channel listening to ChainHeadEvent. - chainHeadChanSize = 10 - - // txSlotSize is used to calculate how many data slots a single transaction - // takes up based on its size. The slots are used as DoS protection, ensuring - // that validating a new transaction remains a constant operation (in reality - // O(maxslots), where max slots are 4 currently). - txSlotSize = 32 * 1024 - - // txMaxSize is the maximum size a single transaction can have.
This field has - // non-trivial consequences: larger transactions are significantly harder and - // more expensive to propagate; larger transactions also take more resources - // to validate whether they fit into the pool or not. - txMaxSize = 4 * txSlotSize // 128KB -) - -var ( - // ErrAlreadyKnown is returned if the transactions is already contained - // within the pool. - ErrAlreadyKnown = errors.New("already known") - - // ErrInvalidSender is returned if the transaction contains an invalid signature. - ErrInvalidSender = errors.New("invalid sender") - - // ErrUnderpriced is returned if a transaction's gas price is below the minimum - // configured for the transaction pool. - ErrUnderpriced = errors.New("transaction underpriced") - - // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept - // another remote transaction. - ErrTxPoolOverflow = errors.New("txpool is full") - - // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced - // with a different one without the required price bump. - ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") - - // ErrGasLimit is returned if a transaction's requested gas limit exceeds the - // maximum allowance of the current block. - ErrGasLimit = errors.New("exceeds block gas limit") - - // ErrNegativeValue is a sanity error to ensure no one is able to specify a - // transaction with a negative value. - ErrNegativeValue = errors.New("negative value") - - // ErrOversizedData is returned if the input data of a transaction is greater - // than some meaningful limit a user might use. This is not a consensus error - // making the transaction invalid, rather a DOS protection. - ErrOversizedData = errors.New("oversized data") - - // ErrFutureReplacePending is returned if a future transaction replaces a pending - // transaction. Future transactions should only be able to replace other future transactions. - ErrFutureReplacePending = errors.New("future transaction tries to replace pending") - - // ErrOverdraft is returned if a transaction would cause the senders balance to go negative - // thus invalidating a potential large number of transactions. 
- ErrOverdraft = errors.New("transaction would cause overdraft") -) - -var ( - evictionInterval = time.Minute // Time interval to check for evictable transactions - statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats -) - -var ( - // Metrics for the pending pool - pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) - pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) - pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting - pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds - - // Metrics for the queued pool - queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) - queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) - queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting - queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds - queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime - - // General tx metrics - knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) - validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) - invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) - underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) - overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) - - // throttleTxMeter counts how many transactions are rejected due to too-many-changes between - // txpool reorgs. - throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) - // reorgDurationTimer measures how long time a txpool reorg takes. - reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil) - // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected - // that this number is pretty low, since txpool reorgs happen very frequently. - dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015)) - - pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) - queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) - localGauge = metrics.NewRegisteredGauge("txpool/local", nil) - slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) - - reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) ) // TxStatus is the current status of a transaction as seen by the pool. @@ -150,1768 +40,376 @@ const ( TxStatusIncluded ) -// blockChain provides the state of blockchain and current gas limit to do -// some pre checks in tx pool and event subscribers. -type blockChain interface { - CurrentBlock() *types.Header - GetBlock(hash common.Hash, number uint64) *types.Block - StateAt(root common.Hash) (*state.StateDB, error) +var ( + // reservationsGaugeName is the prefix of a per-subpool address reservation + // metric. + // + // This is mostly a sanity metric to ensure there's no bug that would make + // some subpool hog all the reservations due to mis-accounting. + reservationsGaugeName = "txpool/reservations" +) +// BlockChain defines the minimal set of methods needed to back a tx pool with +// a chain. Exists to allow mocking the live chain out of tests. +type BlockChain interface { + // CurrentBlock returns the current head of the chain. 
+ CurrentBlock() *types.Header + + // SubscribeChainHeadEvent subscribes to new blocks being added to the chain. SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription } -// Config are the configuration parameters of the transaction pool. -type Config struct { - Locals []common.Address // Addresses that should be treated by default as local - NoLocals bool // Whether local transaction handling should be disabled - Journal string // Journal of local transactions to survive node restarts - Rejournal time.Duration // Time interval to regenerate the local transaction journal - - PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool - PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) - - AccountSlots uint64 // Number of executable transaction slots guaranteed per account - GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts - AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account - GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts - - Lifetime time.Duration // Maximum amount of time non-executable transaction are queued -} - -// DefaultConfig contains the default configurations for the transaction -// pool. -var DefaultConfig = Config{ - Journal: "transactions.rlp", - Rejournal: time.Hour, - - PriceLimit: 1, - PriceBump: 10, - - AccountSlots: 16, - GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio - AccountQueue: 64, - GlobalQueue: 1024, - - Lifetime: 3 * time.Hour, -} - -// sanitize checks the provided user configurations and changes anything that's -// unreasonable or unworkable. -func (config *Config) sanitize() Config { - conf := *config - if conf.Rejournal < time.Second { - log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) - conf.Rejournal = time.Second - } - if conf.PriceLimit < 1 { - log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) - conf.PriceLimit = DefaultConfig.PriceLimit - } - if conf.PriceBump < 1 { - log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump) - conf.PriceBump = DefaultConfig.PriceBump - } - if conf.AccountSlots < 1 { - log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots) - conf.AccountSlots = DefaultConfig.AccountSlots - } - if conf.GlobalSlots < 1 { - log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots) - conf.GlobalSlots = DefaultConfig.GlobalSlots - } - if conf.AccountQueue < 1 { - log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue) - conf.AccountQueue = DefaultConfig.AccountQueue - } - if conf.GlobalQueue < 1 { - log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue) - conf.GlobalQueue = DefaultConfig.GlobalQueue - } - if conf.Lifetime < 1 { - log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime) - conf.Lifetime = DefaultConfig.Lifetime - } - return conf -} - -// TxPool contains all currently known transactions. Transactions -// enter the pool when they are received from the network or submitted -// locally. 
They exit the pool when they are included in the blockchain. -// -// The pool separates processable transactions (which can be applied to the -// current state) and future transactions. Transactions move between those -// two states over time as they are received and processed. +// TxPool is an aggregator for various transaction-specific pools, collectively +// tracking all the transactions deemed interesting by the node. Transactions +// enter the pool when they are received from the network or submitted locally. +// They exit the pool when they are included in the blockchain or evicted due to +// resource constraints. type TxPool struct { - config Config - chainconfig *params.ChainConfig - chain blockChain - gasPrice *big.Int - txFeed event.Feed - scope event.SubscriptionScope - signer types.Signer - mu sync.RWMutex + subpools []SubPool // List of subpools for specialized transaction handling - istanbul atomic.Bool // Fork indicator whether we are in the istanbul stage. - eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions. - eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions. - shanghai atomic.Bool // Fork indicator whether we are in the Shanghai stage. + reservations map[common.Address]SubPool // Map with the account to pool reservations + reserveLock sync.Mutex // Lock protecting the account reservations - currentState *state.StateDB // Current state in the blockchain head - pendingNonces *noncer // Pending state tracking virtual nonces - currentMaxGas atomic.Uint64 // Current gas limit for transaction caps - - locals *accountSet // Set of local transaction to exempt from eviction rules - journal *journal // Journal of local transaction to back up to disk - - pending map[common.Address]*list // All currently processable transactions - queue map[common.Address]*list // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *lookup // All transactions to allow lookups - priced *pricedList // All transactions sorted by price - - chainHeadCh chan core.ChainHeadEvent - chainHeadSub event.Subscription - reqResetCh chan *txpoolResetRequest - reqPromoteCh chan *accountSet - queueTxEventCh chan *types.Transaction - reorgDoneCh chan chan struct{} - reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop - wg sync.WaitGroup // tracks loop, scheduleReorgLoop - initDoneCh chan struct{} // is closed once the pool is initialized (for tests) - - changesSinceReorg int // A counter for how many drops we've performed in-between reorg. + subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown + quit chan chan error // Quit channel to tear down the head updater } -type txpoolResetRequest struct { - oldHead, newHead *types.Header -} - -// NewTxPool creates a new transaction pool to gather, sort and filter inbound +// New creates a new transaction pool to gather, sort and filter inbound // transactions from the network. -func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool { - // Sanitize the input to ensure no vulnerable gas prices are set - config = (&config).sanitize() +func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) { + // Retrieve the current head so that all subpools and this main coordinator + // pool will have the same starting state, even if the chain moves forward + // during initialization.
+ head := chain.CurrentBlock() - // Create the transaction pool with its initial settings pool := &TxPool{ - config: config, - chainconfig: chainconfig, - chain: chain, - signer: types.LatestSigner(chainconfig), - pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), - beats: make(map[common.Address]time.Time), - all: newLookup(), - chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), - reqResetCh: make(chan *txpoolResetRequest), - reqPromoteCh: make(chan *accountSet), - queueTxEventCh: make(chan *types.Transaction), - reorgDoneCh: make(chan chan struct{}), - reorgShutdownCh: make(chan struct{}), - initDoneCh: make(chan struct{}), - gasPrice: new(big.Int).SetUint64(config.PriceLimit), + subpools: subpools, + reservations: make(map[common.Address]SubPool), + quit: make(chan chan error), } - pool.locals = newAccountSet(pool.signer) - for _, addr := range config.Locals { - log.Info("Setting new local account", "address", addr) - pool.locals.add(addr) - } - pool.priced = newPricedList(pool.all) - pool.reset(nil, chain.CurrentBlock()) - - // Start the reorg loop early so it can handle requests generated during journal loading. - pool.wg.Add(1) - go pool.scheduleReorgLoop() - - // If local transactions and journaling is enabled, load from disk - if !config.NoLocals && config.Journal != "" { - pool.journal = newTxJournal(config.Journal) - - if err := pool.journal.load(pool.AddLocals); err != nil { - log.Warn("Failed to load transaction journal", "err", err) - } - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate transaction journal", "err", err) + for i, subpool := range subpools { + if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil { + for j := i - 1; j >= 0; j-- { + subpools[j].Close() + } + return nil, err } } + go pool.loop(head, chain) + return pool, nil +} - // Subscribe events from blockchain and start the main event loop. - pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) - pool.wg.Add(1) - go pool.loop() +// reserver is a method to create an address reservation callback to exclusively +// assign/deassign addresses to/from subpools. This can ensure that at any point +// in time, only a single subpool is able to manage an account, avoiding cross +// subpool eviction issues and nonce conflicts. +func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver { + return func(addr common.Address, reserve bool) error { + p.reserveLock.Lock() + defer p.reserveLock.Unlock() - return pool + owner, exists := p.reservations[addr] + if reserve { + // Double reservations are forbidden even from the same pool to + // avoid subtle bugs in the long term. + if exists { + if owner == subpool { + log.Error("pool attempted to reserve already-owned address", "address", addr) + return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed + } + return errors.New("address already reserved") + } + p.reservations[addr] = subpool + if metrics.Enabled { + m := fmt.Sprintf("%s/%d", reservationsGaugeName, id) + metrics.GetOrRegisterGauge(m, nil).Inc(1) + } + return nil + } + // Ensure subpools only attempt to unreserve their own owned addresses, + // otherwise flag as a programming error. 
+ if !exists { + log.Error("pool attempted to unreserve non-reserved address", "address", addr) + return errors.New("address not reserved") + } + if subpool != owner { + log.Error("pool attempted to unreserve non-owned address", "address", addr) + return errors.New("address not owned") + } + delete(p.reservations, addr) + if metrics.Enabled { + m := fmt.Sprintf("%s/%d", reservationsGaugeName, id) + metrics.GetOrRegisterGauge(m, nil).Dec(1) + } + return nil + } +} + +// Close terminates the transaction pool and all its subpools. +func (p *TxPool) Close() error { + var errs []error + + // Terminate the reset loop and wait for it to finish + errc := make(chan error) + p.quit <- errc + if err := <-errc; err != nil { + errs = append(errs, err) + } + + // Terminate each subpool + for _, subpool := range p.subpools { + if err := subpool.Close(); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return fmt.Errorf("subpool close errors: %v", errs) + } + return nil } // loop is the transaction pool's main event loop, waiting for and reacting to // outside blockchain events as well as for various reporting and transaction // eviction events. -func (pool *TxPool) loop() { - defer pool.wg.Done() - +func (p *TxPool) loop(head *types.Header, chain BlockChain) { + // Subscribe to chain head events to trigger subpool resets var ( - prevPending, prevQueued, prevStales int - // Start the stats reporting and transaction eviction tickers - report = time.NewTicker(statsReportInterval) - evict = time.NewTicker(evictionInterval) - journal = time.NewTicker(pool.config.Rejournal) - // Track the previous head headers for transaction reorgs - head = pool.chain.CurrentBlock() + newHeadCh = make(chan core.ChainHeadEvent) + newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) ) - defer report.Stop() - defer evict.Stop() - defer journal.Stop() + defer newHeadSub.Unsubscribe() - // Notify tests that the init phase is done - close(pool.initDoneCh) - for { - select { - // Handle ChainHeadEvent - case ev := <-pool.chainHeadCh: - if ev.Block != nil { - pool.requestReset(head, ev.Block.Header()) - head = ev.Block.Header() - } - - // System shutdown. 
- case <-pool.chainHeadSub.Err(): - close(pool.reorgShutdownCh) - return - - // Handle stats reporting ticks - case <-report.C: - pool.mu.RLock() - pending, queued := pool.stats() - pool.mu.RUnlock() - stales := int(pool.priced.stales.Load()) - - if pending != prevPending || queued != prevQueued || stales != prevStales { - log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) - prevPending, prevQueued, prevStales = pending, queued, stales - } - - // Handle inactive account transaction eviction - case <-evict.C: - pool.mu.Lock() - for addr := range pool.queue { - // Skip local transactions from the eviction mechanism - if pool.locals.contains(addr) { - continue - } - // Any non-locals old enough should be removed - if time.Since(pool.beats[addr]) > pool.config.Lifetime { - list := pool.queue[addr].Flatten() - for _, tx := range list { - pool.removeTx(tx.Hash(), true) + // Track the previous and current head to feed to an idle reset + var ( + oldHead = head + newHead = oldHead + ) + // Consume chain head events and start resets when none is running + var ( + resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently + resetDone = make(chan *types.Header) + ) + var errc chan error + for errc == nil { + // Something interesting might have happened, run a reset if there is + // one needed but none is running. The resetter will run on its own + // goroutine to allow chain head events to be consumed contiguously. + if newHead != oldHead { + // Try to inject a busy marker and start a reset if successful + select { + case resetBusy <- struct{}{}: + // Busy marker injected, start a new subpool reset + go func(oldHead, newHead *types.Header) { + for _, subpool := range p.subpools { + subpool.Reset(oldHead, newHead) } - queuedEvictionMeter.Mark(int64(len(list))) - } - } - pool.mu.Unlock() + resetDone <- newHead + }(oldHead, newHead) - // Handle local transaction journal rotation - case <-journal.C: - if pool.journal != nil { - pool.mu.Lock() - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate local tx journal", "err", err) - } - pool.mu.Unlock() + default: + // Reset already running, wait until it finishes } } + // Wait for the next chain head event or a previous reset finish + select { + case event := <-newHeadCh: + // Chain moved forward, store the head for later consumption + newHead = event.Block.Header() + + case head := <-resetDone: + // Previous reset finished, update the old head and allow a new reset + oldHead = head + <-resetBusy + + case errc = <-p.quit: + // Termination requested, break out on the next loop round + } } + // Notify the closer of termination (no error possible for now) + errc <- nil } -// Stop terminates the transaction pool. -func (pool *TxPool) Stop() { - // Unsubscribe all subscriptions registered from txpool - pool.scope.Close() - - // Unsubscribe subscriptions registered from blockchain - pool.chainHeadSub.Unsubscribe() - pool.wg.Wait() - - if pool.journal != nil { - pool.journal.close() - } - log.Info("Transaction pool stopped") -} - -// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return pool.scope.Track(pool.txFeed.Subscribe(ch)) -} - -// GasPrice returns the current gas price enforced by the transaction pool. 
-func (pool *TxPool) GasPrice() *big.Int { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return new(big.Int).Set(pool.gasPrice) -} - -// SetGasPrice updates the minimum price required by the transaction pool for a +// SetGasTip updates the minimum gas tip required by the transaction pool for a // new transaction, and drops all transactions below this threshold. -func (pool *TxPool) SetGasPrice(price *big.Int) { - pool.mu.Lock() - defer pool.mu.Unlock() +func (p *TxPool) SetGasTip(tip *big.Int) { + for _, subpool := range p.subpools { + subpool.SetGasTip(tip) + } +} - old := pool.gasPrice - pool.gasPrice = price - // if the min miner fee increased, remove transactions below the new threshold - if price.Cmp(old) > 0 { - // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead - drop := pool.all.RemotesBelowTip(price) - for _, tx := range drop { - pool.removeTx(tx.Hash(), false) +// Has returns an indicator whether the pool has a transaction cached with the +// given hash. +func (p *TxPool) Has(hash common.Hash) bool { + for _, subpool := range p.subpools { + if subpool.Has(hash) { + return true } - pool.priced.Removed(len(drop)) } - - log.Info("Transaction pool price threshold updated", "price", price) + return false } -// Nonce returns the next nonce of an account, with all transactions executable -// by the pool already applied on top. -func (pool *TxPool) Nonce(addr common.Address) uint64 { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return pool.pendingNonces.get(addr) +// Get returns a transaction if it is contained in the pool, or nil otherwise. +func (p *TxPool) Get(hash common.Hash) *types.Transaction { + for _, subpool := range p.subpools { + if tx := subpool.Get(hash); tx != nil { + return tx + } + } + return nil } -// Stats retrieves the current pool stats, namely the number of pending and the -// number of queued (non-executable) transactions. -func (pool *TxPool) Stats() (int, int) { - pool.mu.RLock() - defer pool.mu.RUnlock() +// Add enqueues a batch of transactions into the pool if they are valid. Due +// to the large transaction churn, add may postpone fully integrating the tx +// to a later point to batch multiple ones together. +func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { + // Split the input transactions between the subpools. It shouldn't really + // happen that we receive merged batches, but better graceful than strange + // errors. + // + // We also need to track how the transactions were split across the subpools, + // so we can piece back the returned errors into the original order. + txsets := make([][]*types.Transaction, len(p.subpools)) + splits := make([]int, len(txs)) - return pool.stats() -} + for i, tx := range txs { + // Mark this transaction belonging to no-subpool + splits[i] = -1 -// stats retrieves the current pool stats, namely the number of pending and the -// number of queued (non-executable) transactions. -func (pool *TxPool) stats() (int, int) { - pending := 0 - for _, list := range pool.pending { - pending += list.Len() + // Try to find a subpool that accepts the transaction + for j, subpool := range p.subpools { + if subpool.Filter(tx) { + txsets[j] = append(txsets[j], tx) + splits[i] = j + break + } + } } - queued := 0 - for _, list := range pool.queue { - queued += list.Len() + // Add the transactions split apart to the individual subpools and piece + // back the errors into the original sort order. 
+ errsets := make([][]error, len(p.subpools)) + for i := 0; i < len(p.subpools); i++ { + errsets[i] = p.subpools[i].Add(txsets[i], local, sync) } - return pending, queued -} - -// Content retrieves the data content of the transaction pool, returning all the -// pending as well as queued transactions, grouped by account and sorted by nonce. -func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { - pool.mu.Lock() - defer pool.mu.Unlock() - - pending := make(map[common.Address]types.Transactions, len(pool.pending)) - for addr, list := range pool.pending { - pending[addr] = list.Flatten() + errs := make([]error, len(txs)) + for i, split := range splits { + // If the transaction was rejected by all subpools, mark it unsupported + if split == -1 { + errs[i] = core.ErrTxTypeNotSupported + continue + } + // Find which subpool handled it and pull in the corresponding error + errs[i] = errsets[split][0] + errsets[split] = errsets[split][1:] } - queued := make(map[common.Address]types.Transactions, len(pool.queue)) - for addr, list := range pool.queue { - queued[addr] = list.Flatten() - } - return pending, queued -} - -// ContentFrom retrieves the data content of the transaction pool, returning the -// pending as well as queued transactions of this address, grouped by nonce. -func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { - pool.mu.RLock() - defer pool.mu.RUnlock() - - var pending types.Transactions - if list, ok := pool.pending[addr]; ok { - pending = list.Flatten() - } - var queued types.Transactions - if list, ok := pool.queue[addr]; ok { - queued = list.Flatten() - } - return pending, queued + return errs } // Pending retrieves all currently processable transactions, grouped by origin -// account and sorted by nonce. The returned transaction set is a copy and can be -// freely modified by calling code. -// -// The enforceTips parameter can be used to do an extra filtering on the pending -// transactions and only return those whose **effective** tip is large enough in -// the next pending execution environment. -func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { - pool.mu.Lock() - defer pool.mu.Unlock() - - pending := make(map[common.Address]types.Transactions, len(pool.pending)) - for addr, list := range pool.pending { - txs := list.Flatten() - - // If the miner requests tip enforcement, cap the lists now - if enforceTips && !pool.locals.contains(addr) { - for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { - txs = txs[:i] - break - } - } - } - if len(txs) > 0 { - pending[addr] = txs - } - } - return pending -} - -// Locals retrieves the accounts currently considered local by the pool. -func (pool *TxPool) Locals() []common.Address { - pool.mu.Lock() - defer pool.mu.Unlock() - - return pool.locals.flatten() -} - -// local retrieves all currently known local transactions, grouped by origin -// account and sorted by nonce. The returned transaction set is a copy and can be -// freely modified by calling code. -func (pool *TxPool) local() map[common.Address]types.Transactions { - txs := make(map[common.Address]types.Transactions) - for addr := range pool.locals.accounts { - if pending := pool.pending[addr]; pending != nil { - txs[addr] = append(txs[addr], pending.Flatten()...) - } - if queued := pool.queue[addr]; queued != nil { - txs[addr] = append(txs[addr], queued.Flatten()...) 
+// account and sorted by nonce. +func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction { + txs := make(map[common.Address][]*LazyTransaction) + for _, subpool := range p.subpools { + for addr, set := range subpool.Pending(enforceTips) { + txs[addr] = set } } return txs } -// validateTxBasics checks whether a transaction is valid according to the consensus -// rules, but does not check state-dependent validation such as sufficient balance. -// This check is meant as an early check which only needs to be performed once, -// and does not require the pool mutex to be held. -func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error { - // Accept only legacy transactions until EIP-2718/2930 activates. - if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType { - return core.ErrTxTypeNotSupported +// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending +// events to the given channel. +func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { + subs := make([]event.Subscription, len(p.subpools)) + for i, subpool := range p.subpools { + subs[i] = subpool.SubscribeTransactions(ch) } - // Reject dynamic fee transactions until EIP-1559 activates. - if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType { - return core.ErrTxTypeNotSupported - } - // Reject blob transactions forever, those will have their own pool. - if tx.Type() == types.BlobTxType { - return core.ErrTxTypeNotSupported - } - // Reject transactions over defined size to prevent DOS attacks - if tx.Size() > txMaxSize { - return ErrOversizedData - } - // Check whether the init code size has been exceeded. - if pool.shanghai.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { - return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) - } - // Transactions can't be negative. This may never happen using RLP decoded - // transactions but may occur if you create a transaction using the RPC. - if tx.Value().Sign() < 0 { - return ErrNegativeValue - } - // Ensure the transaction doesn't exceed the current block limit gas. - if pool.currentMaxGas.Load() < tx.Gas() { - return ErrGasLimit - } - // Sanity check for extremely large numbers - if tx.GasFeeCap().BitLen() > 256 { - return core.ErrFeeCapVeryHigh - } - if tx.GasTipCap().BitLen() > 256 { - return core.ErrTipVeryHigh - } - // Ensure gasFeeCap is greater than or equal to gasTipCap. - if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - return core.ErrTipAboveFeeCap - } - // Make sure the transaction is signed properly. - if _, err := types.Sender(pool.signer, tx); err != nil { - return ErrInvalidSender - } - // Drop non-local transactions under our own minimal accepted gas price or tip - if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { - return ErrUnderpriced - } - // Ensure the transaction has more gas than the basic tx fee. - intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul.Load(), pool.shanghai.Load()) - if err != nil { - return err - } - if tx.Gas() < intrGas { - return core.ErrIntrinsicGas - } - return nil + return p.subs.Track(event.JoinSubscriptions(subs...)) } -// validateTx checks whether a transaction is valid according to the consensus -// rules and adheres to some heuristic limits of the local node (price and size). 
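TxPool.Add above routes each transaction to the first subpool whose Filter accepts it, then stitches the per-subpool error slices back into the caller's original submission order via the `splits` index. A runnable toy sketch of that bookkeeping (ints stand in for transactions; not geth code):

```go
package main

import "fmt"

func main() {
	txs := []int{10, 3, 42, 7} // stand-ins for transactions
	filters := []func(int) bool{
		func(v int) bool { return v%2 == 0 }, // "subpool" 0 accepts evens
		func(v int) bool { return v%2 == 1 }, // "subpool" 1 accepts odds
	}
	batches := make([][]int, len(filters))
	splits := make([]int, len(txs))
	for i, tx := range txs {
		splits[i] = -1 // not accepted by any subpool
		for j, accept := range filters {
			if accept(tx) {
				batches[j] = append(batches[j], tx)
				splits[i] = j
				break
			}
		}
	}
	// Each "subpool" returns one result per item, in batch order
	results := make([][]string, len(batches))
	for j, batch := range batches {
		for _, tx := range batch {
			results[j] = append(results[j], fmt.Sprintf("pool%d:%d", j, tx))
		}
	}
	// Stitch the results back into the original submission order
	merged := make([]string, len(txs))
	for i, split := range splits {
		if split == -1 {
			merged[i] = "unsupported"
			continue
		}
		merged[i] = results[split][0]
		results[split] = results[split][1:]
	}
	fmt.Println(merged) // [pool0:10 pool1:3 pool0:42 pool1:7]
}
```

Because each subpool returns exactly one error per transaction it was handed, popping the head of the matching result slice reassembles the errors in the caller's order without any extra indexing.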
-func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { - // Signature has been checked already, this cannot error. - from, _ := types.Sender(pool.signer, tx) - // Ensure the transaction adheres to nonce ordering - if pool.currentState.GetNonce(from) > tx.Nonce() { - return core.ErrNonceTooLow - } - // Transactor should have enough funds to cover the costs - // cost == V + GP * GL - balance := pool.currentState.GetBalance(from) - if balance.Cmp(tx.Cost()) < 0 { - return core.ErrInsufficientFunds - } - - // Verify that replacing transactions will not result in overdraft - list := pool.pending[from] - if list != nil { // Sender already has pending txs - sum := new(big.Int).Add(tx.Cost(), list.totalcost) - if repl := list.txs.Get(tx.Nonce()); repl != nil { - // Deduct the cost of a transaction replaced by this - sum.Sub(sum, repl.Cost()) - } - if balance.Cmp(sum) < 0 { - log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum) - return ErrOverdraft +// Nonce returns the next nonce of an account, with all transactions executable +// by the pool already applied on top. +func (p *TxPool) Nonce(addr common.Address) uint64 { + // Since (for now) accounts are unique to subpools, only one pool will have + // (at max) a non-state nonce. To avoid stateful lookups, just return the + // highest nonce for now. + var nonce uint64 + for _, subpool := range p.subpools { + if next := subpool.Nonce(addr); nonce < next { + nonce = next } } - return nil + return nonce } -// add validates a transaction and inserts it into the non-executable queue for later -// pending promotion and execution. If the transaction is a replacement for an already -// pending or queued one, it overwrites the previous transaction if its price is higher. -// -// If a newly added transaction is marked as local, its sending account will be -// be added to the allowlist, preventing any associated transaction from being dropped -// out of the pool due to pricing constraints. -func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) { - // If the transaction is already known, discard it - hash := tx.Hash() - if pool.all.Get(hash) != nil { - log.Trace("Discarding already known transaction", "hash", hash) - knownTxMeter.Mark(1) - return false, ErrAlreadyKnown +// Stats retrieves the current pool stats, namely the number of pending and the +// number of queued (non-executable) transactions. +func (p *TxPool) Stats() (int, int) { + var runnable, blocked int + for _, subpool := range p.subpools { + run, block := subpool.Stats() + + runnable += run + blocked += block } - // Make the local flag. If it's from local source or it's from the network but - // the sender is marked as local previously, treat it as the local transaction. 
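The SubscribeNewTxsEvent implementation a little further up relies on `event.JoinSubscriptions` (used directly in this diff) to merge the per-subpool subscriptions into one handle whose Unsubscribe tears all of them down. A small usage sketch, with two feeds standing in for two subpools:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feedA, feedB event.Feed // two feeds standing in for two subpools
	ch := make(chan int, 4)

	// One joined handle: unsubscribing it tears down both member subscriptions
	sub := event.JoinSubscriptions(feedA.Subscribe(ch), feedB.Subscribe(ch))
	defer sub.Unsubscribe()

	feedA.Send(1) // Send blocks until every subscriber has received the value
	feedB.Send(2)
	fmt.Println(<-ch, <-ch) // 1 2
}
```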
- isLocal := local || pool.locals.containsTx(tx) - - // If the transaction fails basic validation, discard it - if err := pool.validateTx(tx, isLocal); err != nil { - log.Trace("Discarding invalid transaction", "hash", hash, "err", err) - invalidTxMeter.Mark(1) - return false, err - } - - // already validated by this point - from, _ := types.Sender(pool.signer, tx) - - // If the transaction pool is full, discard underpriced transactions - if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { - // If the new transaction is underpriced, don't accept it - if !isLocal && pool.priced.Underpriced(tx) { - log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) - underpricedTxMeter.Mark(1) - return false, ErrUnderpriced - } - - // We're about to replace a transaction. The reorg does a more thorough - // analysis of what to remove and how, but it runs async. We don't want to - // do too many replacements between reorg-runs, so we cap the number of - // replacements to 25% of the slots - if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) { - throttleTxMeter.Mark(1) - return false, ErrTxPoolOverflow - } - - // New transaction is better than our worse ones, make room for it. - // If it's a local transaction, forcibly discard all available transactions. - // Otherwise if we can't make enough room for new one, abort the operation. - drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) - - // Special case, we still can't make the room for the new remote one. - if !isLocal && !success { - log.Trace("Discarding overflown transaction", "hash", hash) - overflowedTxMeter.Mark(1) - return false, ErrTxPoolOverflow - } - - // If the new transaction is a future transaction it should never churn pending transactions - if !isLocal && pool.isGapped(from, tx) { - var replacesPending bool - for _, dropTx := range drop { - dropSender, _ := types.Sender(pool.signer, dropTx) - if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) { - replacesPending = true - break - } - } - // Add all transactions back to the priced queue - if replacesPending { - for _, dropTx := range drop { - pool.priced.Put(dropTx, false) - } - log.Trace("Discarding future transaction replacing pending tx", "hash", hash) - return false, ErrFutureReplacePending - } - } - - // Kick out the underpriced remote transactions. 
- for _, tx := range drop { - log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) - underpricedTxMeter.Mark(1) - dropped := pool.removeTx(tx.Hash(), false) - pool.changesSinceReorg += dropped - } - } - - // Try to replace an existing transaction in the pending pool - if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) { - // Nonce already pending, check if required price bump is met - inserted, old := list.Add(tx, pool.config.PriceBump) - if !inserted { - pendingDiscardMeter.Mark(1) - return false, ErrReplaceUnderpriced - } - // New transaction is better, replace old one - if old != nil { - pool.all.Remove(old.Hash()) - pool.priced.Removed(1) - pendingReplaceMeter.Mark(1) - } - pool.all.Add(tx, isLocal) - pool.priced.Put(tx, isLocal) - pool.journalTx(from, tx) - pool.queueTxEvent(tx) - log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) - - // Successful promotion, bump the heartbeat - pool.beats[from] = time.Now() - return old != nil, nil - } - // New transaction isn't replacing a pending one, push into queue - replaced, err = pool.enqueueTx(hash, tx, isLocal, true) - if err != nil { - return false, err - } - // Mark local addresses and journal local transactions - if local && !pool.locals.contains(from) { - log.Info("Setting new local account", "address", from) - pool.locals.add(from) - pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. - } - if isLocal { - localGauge.Inc(1) - } - pool.journalTx(from, tx) - - log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) - return replaced, nil + return runnable, blocked } -// isGapped reports whether the given transaction is immediately executable. -func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool { - // Short circuit if transaction matches pending nonce and can be promoted - // to pending list as an executable transaction. - next := pool.pendingNonces.get(from) - if tx.Nonce() == next { - return false - } - // The transaction has a nonce gap with pending list, it's only considered - // as executable if transactions in queue can fill up the nonce gap. - queue, ok := pool.queue[from] - if !ok { - return true - } - for nonce := next; nonce < tx.Nonce(); nonce++ { - if !queue.Contains(nonce) { - return true // txs in queue can't fill up the nonce gap - } - } - return false -} - -// enqueueTx inserts a new transaction into the non-executable transaction queue. -// -// Note, this method assumes the pool lock is held! -func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { - // Try to insert the transaction into the future queue - from, _ := types.Sender(pool.signer, tx) // already validated - if pool.queue[from] == nil { - pool.queue[from] = newList(false) - } - inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) - if !inserted { - // An older transaction was better, discard this - queuedDiscardMeter.Mark(1) - return false, ErrReplaceUnderpriced - } - // Discard any previous transaction and mark this - if old != nil { - pool.all.Remove(old.Hash()) - pool.priced.Removed(1) - queuedReplaceMeter.Mark(1) - } else { - // Nothing was replaced, bump the queued counter - queuedGauge.Inc(1) - } - // If the transaction isn't in lookup set but it's expected to be there, - // show the error log. 
- if pool.all.Get(hash) == nil && !addAll { - log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) - } - if addAll { - pool.all.Add(tx, local) - pool.priced.Put(tx, local) - } - // If we never record the heartbeat, do it right now. - if _, exist := pool.beats[from]; !exist { - pool.beats[from] = time.Now() - } - return old != nil, nil -} - -// journalTx adds the specified transaction to the local disk journal if it is -// deemed to have been sent from a local account. -func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { - // Only journal if it's enabled and the transaction is local - if pool.journal == nil || !pool.locals.contains(from) { - return - } - if err := pool.journal.insert(tx); err != nil { - log.Warn("Failed to journal local transaction", "err", err) - } -} - -// promoteTx adds a transaction to the pending (processable) list of transactions -// and returns whether it was inserted or an older was better. -// -// Note, this method assumes the pool lock is held! -func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { - // Try to insert the transaction into the pending queue - if pool.pending[addr] == nil { - pool.pending[addr] = newList(true) - } - list := pool.pending[addr] - - inserted, old := list.Add(tx, pool.config.PriceBump) - if !inserted { - // An older transaction was better, discard this - pool.all.Remove(hash) - pool.priced.Removed(1) - pendingDiscardMeter.Mark(1) - return false - } - // Otherwise discard any previous transaction and mark this - if old != nil { - pool.all.Remove(old.Hash()) - pool.priced.Removed(1) - pendingReplaceMeter.Mark(1) - } else { - // Nothing was replaced, bump the pending counter - pendingGauge.Inc(1) - } - // Set the potentially new pending nonce and notify any subsystems of the new tx - pool.pendingNonces.set(addr, tx.Nonce()+1) - - // Successful promotion, bump the heartbeat - pool.beats[addr] = time.Now() - return true -} - -// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the -// senders as a local ones, ensuring they go around the local pricing constraints. -// -// This method is used to add transactions from the RPC API and performs synchronous pool -// reorganization and event propagation. -func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { - return pool.addTxs(txs, !pool.config.NoLocals, true) -} - -// AddLocal enqueues a single local transaction into the pool if it is valid. This is -// a convenience wrapper around AddLocals. -func (pool *TxPool) AddLocal(tx *types.Transaction) error { - errs := pool.AddLocals([]*types.Transaction{tx}) - return errs[0] -} - -// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the -// senders are not among the locally tracked ones, full pricing constraints will apply. -// -// This method is used to add transactions from the p2p network and does not wait for pool -// reorganization and internal event propagation. -func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { - return pool.addTxs(txs, false, false) -} - -// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method. -func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { - return pool.addTxs(txs, false, true) -} - -// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method. 
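The AddLocals/AddRemotes family removed here collapses into the unified `Add(txs, local, sync)` shown earlier. A hypothetical caller-side migration, assuming the common case where `Config.NoLocals` is unset:

```go
package txmigrate

import (
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
)

// addLocals mirrors the removed AddLocals (assuming Config.NoLocals is unset):
// senders are marked local and the call waits for full pool integration.
func addLocals(pool *txpool.TxPool, txs []*types.Transaction) []error {
	return pool.Add(txs, true, true)
}

// addRemotes mirrors the removed AddRemotes: untrusted p2p transactions,
// added without waiting for the pool's internal integration.
func addRemotes(pool *txpool.TxPool, txs []*types.Transaction) []error {
	return pool.Add(txs, false, false)
}

// addRemotesSync mirrors the removed AddRemotesSync (used by tests).
func addRemotesSync(pool *txpool.TxPool, txs []*types.Transaction) []error {
	return pool.Add(txs, false, true)
}
```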
-func (pool *TxPool) addRemoteSync(tx *types.Transaction) error { - errs := pool.AddRemotesSync([]*types.Transaction{tx}) - return errs[0] -} - -// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience -// wrapper around AddRemotes. -// -// Deprecated: use AddRemotes -func (pool *TxPool) AddRemote(tx *types.Transaction) error { - errs := pool.AddRemotes([]*types.Transaction{tx}) - return errs[0] -} - -// addTxs attempts to queue a batch of transactions if they are valid. -func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { - // Filter out known ones without obtaining the pool lock or recovering signatures +// Content retrieves the data content of the transaction pool, returning all the +// pending as well as queued transactions, grouped by account and sorted by nonce. +func (p *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { var ( - errs = make([]error, len(txs)) - news = make([]*types.Transaction, 0, len(txs)) + runnable = make(map[common.Address][]*types.Transaction) + blocked = make(map[common.Address][]*types.Transaction) ) - for i, tx := range txs { - // If the transaction is known, pre-set the error slot - if pool.all.Get(tx.Hash()) != nil { - errs[i] = ErrAlreadyKnown - knownTxMeter.Mark(1) - continue - } - // Exclude transactions with basic errors, e.g invalid signatures and - // insufficient intrinsic gas as soon as possible and cache senders - // in transactions before obtaining lock + for _, subpool := range p.subpools { + run, block := subpool.Content() - if err := pool.validateTxBasics(tx, local); err != nil { - errs[i] = err - invalidTxMeter.Mark(1) - continue + for addr, txs := range run { + runnable[addr] = txs } - // Accumulate all unknown transactions for deeper processing - news = append(news, tx) - } - if len(news) == 0 { - return errs - } - - // Process all the new transaction and merge any errors into the original slice - pool.mu.Lock() - newErrs, dirtyAddrs := pool.addTxsLocked(news, local) - pool.mu.Unlock() - - var nilSlot = 0 - for _, err := range newErrs { - for errs[nilSlot] != nil { - nilSlot++ + for addr, txs := range block { + blocked[addr] = txs } - errs[nilSlot] = err - nilSlot++ } - // Reorg the pool internals if needed and return - done := pool.requestPromoteExecutables(dirtyAddrs) - if sync { - <-done - } - return errs + return runnable, blocked } -// addTxsLocked attempts to queue a batch of transactions if they are valid. -// The transaction pool lock must be held. -func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { - dirty := newAccountSet(pool.signer) - errs := make([]error, len(txs)) - for i, tx := range txs { - replaced, err := pool.add(tx, local) - errs[i] = err - if err == nil && !replaced { - dirty.addTx(tx) +// ContentFrom retrieves the data content of the transaction pool, returning the +// pending as well as queued transactions of this address, grouped by nonce. +func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { + for _, subpool := range p.subpools { + run, block := subpool.ContentFrom(addr) + if len(run) != 0 || len(block) != 0 { + return run, block } } - validTxMeter.Mark(int64(len(dirty.accounts))) - return errs, dirty + return []*types.Transaction{}, []*types.Transaction{} } -// Status returns the status (unknown/pending/queued) of a batch of transactions +// Locals retrieves the accounts currently considered local by the pool. 
+func (p *TxPool) Locals() []common.Address { + // Retrieve the locals from each subpool and deduplicate them + locals := make(map[common.Address]struct{}) + for _, subpool := range p.subpools { + for _, local := range subpool.Locals() { + locals[local] = struct{}{} + } + } + // Flatten and return the deduplicated local set + flat := make([]common.Address, 0, len(locals)) + for local := range locals { + flat = append(flat, local) + } + return flat +} + +// Status returns the known status (unknown/pending/queued) of a transaction // identified by their hashes. -func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { - status := make([]TxStatus, len(hashes)) - for i, hash := range hashes { - tx := pool.Get(hash) - if tx == nil { - continue - } - from, _ := types.Sender(pool.signer, tx) // already validated - pool.mu.RLock() - if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { - status[i] = TxStatusPending - } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { - status[i] = TxStatusQueued - } - // implicit else: the tx may have been included into a block between - // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct - pool.mu.RUnlock() - } - return status -} - -// Get returns a transaction if it is contained in the pool and nil otherwise. -func (pool *TxPool) Get(hash common.Hash) *types.Transaction { - return pool.all.Get(hash) -} - -// Has returns an indicator whether txpool has a transaction cached with the -// given hash. -func (pool *TxPool) Has(hash common.Hash) bool { - return pool.all.Get(hash) != nil -} - -// removeTx removes a single transaction from the queue, moving all subsequent -// transactions back to the future queue. -// Returns the number of transactions removed from the pending queue. -func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int { - // Fetch the transaction we wish to delete - tx := pool.all.Get(hash) - if tx == nil { - return 0 - } - addr, _ := types.Sender(pool.signer, tx) // already validated during insertion - - // Remove it from the list of known transactions - pool.all.Remove(hash) - if outofbound { - pool.priced.Removed(1) - } - if pool.locals.contains(addr) { - localGauge.Dec(1) - } - // Remove the transaction from the pending lists and reset the account nonce - if pending := pool.pending[addr]; pending != nil { - if removed, invalids := pending.Remove(tx); removed { - // If no more pending transactions are left, remove the list - if pending.Empty() { - delete(pool.pending, addr) - } - // Postpone any invalidated transactions - for _, tx := range invalids { - // Internal shuffle shouldn't touch the lookup set. - pool.enqueueTx(tx.Hash(), tx, false, false) - } - // Update the account nonce if needed - pool.pendingNonces.setIfLower(addr, tx.Nonce()) - // Reduce the pending counter - pendingGauge.Dec(int64(1 + len(invalids))) - return 1 + len(invalids) +func (p *TxPool) Status(hash common.Hash) TxStatus { + for _, subpool := range p.subpools { + if status := subpool.Status(hash); status != TxStatusUnknown { + return status } } - // Transaction is in the future queue - if future := pool.queue[addr]; future != nil { - if removed, _ := future.Remove(tx); removed { - // Reduce the queued counter - queuedGauge.Dec(1) - } - if future.Empty() { - delete(pool.queue, addr) - delete(pool.beats, addr) - } - } - return 0 -} - -// requestReset requests a pool reset to the new head block. 
-// The returned channel is closed when the reset has occurred. -func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { - select { - case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: - return <-pool.reorgDoneCh - case <-pool.reorgShutdownCh: - return pool.reorgShutdownCh - } -} - -// requestPromoteExecutables requests transaction promotion checks for the given addresses. -// The returned channel is closed when the promotion checks have occurred. -func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { - select { - case pool.reqPromoteCh <- set: - return <-pool.reorgDoneCh - case <-pool.reorgShutdownCh: - return pool.reorgShutdownCh - } -} - -// queueTxEvent enqueues a transaction event to be sent in the next reorg run. -func (pool *TxPool) queueTxEvent(tx *types.Transaction) { - select { - case pool.queueTxEventCh <- tx: - case <-pool.reorgShutdownCh: - } -} - -// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not -// call those methods directly, but request them being run using requestReset and -// requestPromoteExecutables instead. -func (pool *TxPool) scheduleReorgLoop() { - defer pool.wg.Done() - - var ( - curDone chan struct{} // non-nil while runReorg is active - nextDone = make(chan struct{}) - launchNextRun bool - reset *txpoolResetRequest - dirtyAccounts *accountSet - queuedEvents = make(map[common.Address]*sortedMap) - ) - for { - // Launch next background reorg if needed - if curDone == nil && launchNextRun { - // Run the background reorg and announcements - go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) - - // Prepare everything for the next round of reorg - curDone, nextDone = nextDone, make(chan struct{}) - launchNextRun = false - - reset, dirtyAccounts = nil, nil - queuedEvents = make(map[common.Address]*sortedMap) - } - - select { - case req := <-pool.reqResetCh: - // Reset request: update head if request is already pending. - if reset == nil { - reset = req - } else { - reset.newHead = req.newHead - } - launchNextRun = true - pool.reorgDoneCh <- nextDone - - case req := <-pool.reqPromoteCh: - // Promote request: update address set if request is already pending. - if dirtyAccounts == nil { - dirtyAccounts = req - } else { - dirtyAccounts.merge(req) - } - launchNextRun = true - pool.reorgDoneCh <- nextDone - - case tx := <-pool.queueTxEventCh: - // Queue up the event, but don't schedule a reorg. It's up to the caller to - // request one later if they want the events sent. - addr, _ := types.Sender(pool.signer, tx) - if _, ok := queuedEvents[addr]; !ok { - queuedEvents[addr] = newSortedMap() - } - queuedEvents[addr].Put(tx) - - case <-curDone: - curDone = nil - - case <-pool.reorgShutdownCh: - // Wait for current run to finish. - if curDone != nil { - <-curDone - } - close(nextDone) - return - } - } -} - -// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. -func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { - defer func(t0 time.Time) { - reorgDurationTimer.Update(time.Since(t0)) - }(time.Now()) - defer close(done) - - var promoteAddrs []common.Address - if dirtyAccounts != nil && reset == nil { - // Only dirty accounts need to be promoted, unless we're resetting. - // For resets, all addresses in the tx queue will be promoted and - // the flatten operation can be avoided. 
- promoteAddrs = dirtyAccounts.flatten() - } - pool.mu.Lock() - if reset != nil { - // Reset from the old head to the new, rescheduling any reorged transactions - pool.reset(reset.oldHead, reset.newHead) - - // Nonces were reset, discard any events that became stale - for addr := range events { - events[addr].Forward(pool.pendingNonces.get(addr)) - if events[addr].Len() == 0 { - delete(events, addr) - } - } - // Reset needs promote for all addresses - promoteAddrs = make([]common.Address, 0, len(pool.queue)) - for addr := range pool.queue { - promoteAddrs = append(promoteAddrs, addr) - } - } - // Check for pending transactions for every account that sent new ones - promoted := pool.promoteExecutables(promoteAddrs) - - // If a new block appeared, validate the pool of pending transactions. This will - // remove any transaction that has been included in the block or was invalidated - // because of another transaction (e.g. higher gas price). - if reset != nil { - pool.demoteUnexecutables() - if reset.newHead != nil && pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { - pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead) - pool.priced.SetBaseFee(pendingBaseFee) - } - // Update all accounts to the latest known pending nonce - nonces := make(map[common.Address]uint64, len(pool.pending)) - for addr, list := range pool.pending { - highestPending := list.LastElement() - nonces[addr] = highestPending.Nonce() + 1 - } - pool.pendingNonces.setAll(nonces) - } - // Ensure pool.queue and pool.pending sizes stay within the configured limits. - pool.truncatePending() - pool.truncateQueue() - - dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) - pool.changesSinceReorg = 0 // Reset change counter - pool.mu.Unlock() - - // Notify subsystems for newly added transactions - for _, tx := range promoted { - addr, _ := types.Sender(pool.signer, tx) - if _, ok := events[addr]; !ok { - events[addr] = newSortedMap() - } - events[addr].Put(tx) - } - if len(events) > 0 { - var txs []*types.Transaction - for _, set := range events { - txs = append(txs, set.Flatten()...) - } - pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) - } -} - -// reset retrieves the current state of the blockchain and ensures the content -// of the transaction pool is valid with regard to the chain state. -func (pool *TxPool) reset(oldHead, newHead *types.Header) { - // If we're reorging an old state, reinject all dropped transactions - var reinject types.Transactions - - if oldHead != nil && oldHead.Hash() != newHead.ParentHash { - // If the reorg is too deep, avoid doing it (will happen during fast sync) - oldNum := oldHead.Number.Uint64() - newNum := newHead.Number.Uint64() - - if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { - log.Debug("Skipping deep transaction reorg", "depth", depth) - } else { - // Reorg seems shallow enough to pull in all transactions into memory - var discarded, included types.Transactions - var ( - rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) - add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) - ) - if rem == nil { - // This can happen if a setHead is performed, where we simply discard the old - // head from the chain. 
- // If that is the case, we don't have the lost transactions anymore, and - // there's nothing to add - if newNum >= oldNum { - // If we reorged to a same or higher number, then it's not a case of setHead - log.Warn("Transaction pool reset with missing oldhead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - return - } - // If the reorg ended up on a lower number, it's indicative of setHead being the cause - log.Debug("Skipping transaction reset caused by setHead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - // We still need to update the current state s.th. the lost transactions can be readded by the user - } else { - for rem.NumberU64() > add.NumberU64() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return - } - } - for add.NumberU64() > rem.NumberU64() { - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return - } - } - for rem.Hash() != add.Hash() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return - } - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return - } - } - reinject = types.TxDifference(discarded, included) - } - } - } - // Initialize the internal state to the current head - if newHead == nil { - newHead = pool.chain.CurrentBlock() // Special case during testing - } - statedb, err := pool.chain.StateAt(newHead.Root) - if err != nil { - log.Error("Failed to reset txpool state", "err", err) - return - } - pool.currentState = statedb - pool.pendingNonces = newNoncer(statedb) - pool.currentMaxGas.Store(newHead.GasLimit) - - // Inject any transactions discarded due to reorgs - log.Debug("Reinjecting stale transactions", "count", len(reinject)) - core.SenderCacher.Recover(pool.signer, reinject) - pool.addTxsLocked(reinject, false) - - // Update all fork indicator by next pending block number. - next := new(big.Int).Add(newHead.Number, big.NewInt(1)) - pool.istanbul.Store(pool.chainconfig.IsIstanbul(next)) - pool.eip2718.Store(pool.chainconfig.IsBerlin(next)) - pool.eip1559.Store(pool.chainconfig.IsLondon(next)) - pool.shanghai.Store(pool.chainconfig.IsShanghai(next, uint64(time.Now().Unix()))) -} - -// promoteExecutables moves transactions that have become processable from the -// future queue to the set of pending transactions. During this process, all -// invalidated transactions (low nonce, low balance) are deleted. 
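The removed reset() above performs the classic reorg walk: rewind whichever chain is longer, then step both chains in lockstep to the common ancestor, collecting transactions from each side, and finally reinject whatever was dropped but not re-included. A toy, runnable sketch of that walk (simplified block type, not geth's):

```go
package main

import "fmt"

type block struct {
	num    int
	hash   string
	parent *block
	txs    []string
}

func main() {
	// ancestor -> a1 -> a2 (old chain) ; ancestor -> b1 (new chain)
	ancestor := &block{num: 1, hash: "c"}
	a1 := &block{num: 2, hash: "a1", parent: ancestor, txs: []string{"t1", "t2"}}
	a2 := &block{num: 3, hash: "a2", parent: a1, txs: []string{"t3"}}
	b1 := &block{num: 2, hash: "b1", parent: ancestor, txs: []string{"t2"}}

	rem, add := a2, b1
	var discarded, included []string
	for rem.num > add.num { // rewind the longer old chain first
		discarded = append(discarded, rem.txs...)
		rem = rem.parent
	}
	for add.num > rem.num { // or the longer new chain
		included = append(included, add.txs...)
		add = add.parent
	}
	for rem.hash != add.hash { // step both sides to the common ancestor
		discarded = append(discarded, rem.txs...)
		rem = rem.parent
		included = append(included, add.txs...)
		add = add.parent
	}
	// Reinject transactions that were dropped and not re-included
	inNew := map[string]bool{}
	for _, tx := range included {
		inNew[tx] = true
	}
	var reinject []string
	for _, tx := range discarded {
		if !inNew[tx] {
			reinject = append(reinject, tx)
		}
	}
	fmt.Println(reinject) // [t3 t1]: t2 made it into the new chain
}
```

This is the same set difference geth computes with types.TxDifference(discarded, included) before feeding the survivors back into the pool.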
-func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction { - // Track the promoted transactions to broadcast them at once - var promoted []*types.Transaction - - // Iterate over all accounts and promote any executable transactions - for _, addr := range accounts { - list := pool.queue[addr] - if list == nil { - continue // Just in case someone calls with a non existing account - } - // Drop all transactions that are deemed too old (low nonce) - forwards := list.Forward(pool.currentState.GetNonce(addr)) - for _, tx := range forwards { - hash := tx.Hash() - pool.all.Remove(hash) - } - log.Trace("Removed old queued transactions", "count", len(forwards)) - // Drop all transactions that are too costly (low balance or out of gas) - drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load()) - for _, tx := range drops { - hash := tx.Hash() - pool.all.Remove(hash) - } - log.Trace("Removed unpayable queued transactions", "count", len(drops)) - queuedNofundsMeter.Mark(int64(len(drops))) - - // Gather all executable transactions and promote them - readies := list.Ready(pool.pendingNonces.get(addr)) - for _, tx := range readies { - hash := tx.Hash() - if pool.promoteTx(addr, hash, tx) { - promoted = append(promoted, tx) - } - } - log.Trace("Promoted queued transactions", "count", len(promoted)) - queuedGauge.Dec(int64(len(readies))) - - // Drop all transactions over the allowed limit - var caps types.Transactions - if !pool.locals.contains(addr) { - caps = list.Cap(int(pool.config.AccountQueue)) - for _, tx := range caps { - hash := tx.Hash() - pool.all.Remove(hash) - log.Trace("Removed cap-exceeding queued transaction", "hash", hash) - } - queuedRateLimitMeter.Mark(int64(len(caps))) - } - // Mark all the items dropped as removed - pool.priced.Removed(len(forwards) + len(drops) + len(caps)) - queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) - } - // Delete the entire queue entry if it became empty. - if list.Empty() { - delete(pool.queue, addr) - delete(pool.beats, addr) - } - } - return promoted -} - -// truncatePending removes transactions from the pending queue if the pool is above the -// pending limit. The algorithm tries to reduce transaction counts by an approximately -// equal number for all for accounts with many pending transactions. 
-func (pool *TxPool) truncatePending() { - pending := uint64(0) - for _, list := range pool.pending { - pending += uint64(list.Len()) - } - if pending <= pool.config.GlobalSlots { - return - } - - pendingBeforeCap := pending - // Assemble a spam order to penalize large transactors first - spammers := prque.New[int64, common.Address](nil) - for addr, list := range pool.pending { - // Only evict transactions from high rollers - if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, int64(list.Len())) - } - } - // Gradually drop transactions from offenders - offenders := []common.Address{} - for pending > pool.config.GlobalSlots && !spammers.Empty() { - // Retrieve the next offender if not local address - offender, _ := spammers.Pop() - offenders = append(offenders, offender) - - // Equalize balances until all the same or below threshold - if len(offenders) > 1 { - // Calculate the equalization threshold for all current offenders - threshold := pool.pending[offender].Len() - - // Iteratively reduce all offenders until below limit or threshold reached - for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { - for i := 0; i < len(offenders)-1; i++ { - list := pool.pending[offenders[i]] - - caps := list.Cap(list.Len() - 1) - for _, tx := range caps { - // Drop the transaction from the global pools too - hash := tx.Hash() - pool.all.Remove(hash) - - // Update the account nonce to the dropped transaction - pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) - } - pool.priced.Removed(len(caps)) - pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(offenders[i]) { - localGauge.Dec(int64(len(caps))) - } - pending-- - } - } - } - } - - // If still above threshold, reduce to limit or min allowance - if pending > pool.config.GlobalSlots && len(offenders) > 0 { - for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { - for _, addr := range offenders { - list := pool.pending[addr] - - caps := list.Cap(list.Len() - 1) - for _, tx := range caps { - // Drop the transaction from the global pools too - hash := tx.Hash() - pool.all.Remove(hash) - - // Update the account nonce to the dropped transaction - pool.pendingNonces.setIfLower(addr, tx.Nonce()) - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) - } - pool.priced.Removed(len(caps)) - pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(caps))) - } - pending-- - } - } - } - pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) -} - -// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit. 
-func (pool *TxPool) truncateQueue() { - queued := uint64(0) - for _, list := range pool.queue { - queued += uint64(list.Len()) - } - if queued <= pool.config.GlobalQueue { - return - } - - // Sort all accounts with queued transactions by heartbeat - addresses := make(addressesByHeartbeat, 0, len(pool.queue)) - for addr := range pool.queue { - if !pool.locals.contains(addr) { // don't drop locals - addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) - } - } - sort.Sort(sort.Reverse(addresses)) - - // Drop transactions until the total is below the limit or only locals remain - for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { - addr := addresses[len(addresses)-1] - list := pool.queue[addr.address] - - addresses = addresses[:len(addresses)-1] - - // Drop all transactions if they are less than the overflow - if size := uint64(list.Len()); size <= drop { - for _, tx := range list.Flatten() { - pool.removeTx(tx.Hash(), true) - } - drop -= size - queuedRateLimitMeter.Mark(int64(size)) - continue - } - // Otherwise drop only last few transactions - txs := list.Flatten() - for i := len(txs) - 1; i >= 0 && drop > 0; i-- { - pool.removeTx(txs[i].Hash(), true) - drop-- - queuedRateLimitMeter.Mark(1) - } - } -} - -// demoteUnexecutables removes invalid and processed transactions from the pools -// executable/pending queue and any subsequent transactions that become unexecutable -// are moved back into the future queue. -// -// Note: transactions are not marked as removed in the priced list because re-heaping -// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful -// to trigger a re-heap is this function -func (pool *TxPool) demoteUnexecutables() { - // Iterate over all accounts and demote any non-executable transactions - for addr, list := range pool.pending { - nonce := pool.currentState.GetNonce(addr) - - // Drop all transactions that are deemed too old (low nonce) - olds := list.Forward(nonce) - for _, tx := range olds { - hash := tx.Hash() - pool.all.Remove(hash) - log.Trace("Removed old pending transaction", "hash", hash) - } - // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load()) - for _, tx := range drops { - hash := tx.Hash() - log.Trace("Removed unpayable pending transaction", "hash", hash) - pool.all.Remove(hash) - } - pendingNofundsMeter.Mark(int64(len(drops))) - - for _, tx := range invalids { - hash := tx.Hash() - log.Trace("Demoting pending transaction", "hash", hash) - - // Internal shuffle shouldn't touch the lookup set. - pool.enqueueTx(hash, tx, false, false) - } - pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) - } - // If there's a gap in front, alert (should never happen) and postpone all transactions - if list.Len() > 0 && list.txs.Get(nonce) == nil { - gapped := list.Cap(0) - for _, tx := range gapped { - hash := tx.Hash() - log.Error("Demoting invalidated transaction", "hash", hash) - - // Internal shuffle shouldn't touch the lookup set. - pool.enqueueTx(hash, tx, false, false) - } - pendingGauge.Dec(int64(len(gapped))) - } - // Delete the entire pending entry if it became empty. - if list.Empty() { - delete(pool.pending, addr) - } - } -} - -// addressByHeartbeat is an account address tagged with its last activity timestamp. 
-type addressByHeartbeat struct { - address common.Address - heartbeat time.Time -} - -type addressesByHeartbeat []addressByHeartbeat - -func (a addressesByHeartbeat) Len() int { return len(a) } -func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } -func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// accountSet is simply a set of addresses to check for existence, and a signer -// capable of deriving addresses from transactions. -type accountSet struct { - accounts map[common.Address]struct{} - signer types.Signer - cache *[]common.Address -} - -// newAccountSet creates a new address set with an associated signer for sender -// derivations. -func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { - as := &accountSet{ - accounts: make(map[common.Address]struct{}, len(addrs)), - signer: signer, - } - for _, addr := range addrs { - as.add(addr) - } - return as -} - -// contains checks if a given address is contained within the set. -func (as *accountSet) contains(addr common.Address) bool { - _, exist := as.accounts[addr] - return exist -} - -// containsTx checks if the sender of a given tx is within the set. If the sender -// cannot be derived, this method returns false. -func (as *accountSet) containsTx(tx *types.Transaction) bool { - if addr, err := types.Sender(as.signer, tx); err == nil { - return as.contains(addr) - } - return false -} - -// add inserts a new address into the set to track. -func (as *accountSet) add(addr common.Address) { - as.accounts[addr] = struct{}{} - as.cache = nil -} - -// addTx adds the sender of tx into the set. -func (as *accountSet) addTx(tx *types.Transaction) { - if addr, err := types.Sender(as.signer, tx); err == nil { - as.add(addr) - } -} - -// flatten returns the list of addresses within this set, also caching it for later -// reuse. The returned slice should not be changed! -func (as *accountSet) flatten() []common.Address { - if as.cache == nil { - accounts := make([]common.Address, 0, len(as.accounts)) - for account := range as.accounts { - accounts = append(accounts, account) - } - as.cache = &accounts - } - return *as.cache -} - -// merge adds all addresses from the 'other' set into 'as'. -func (as *accountSet) merge(other *accountSet) { - for addr := range other.accounts { - as.accounts[addr] = struct{}{} - } - as.cache = nil -} - -// lookup is used internally by TxPool to track transactions while allowing -// lookup without mutex contention. -// -// Note, although this type is properly protected against concurrent access, it -// is **not** a type that should ever be mutated or even exposed outside of the -// transaction pool, since its internal state is tightly coupled with the pools -// internal mechanisms. The sole purpose of the type is to permit out-of-bound -// peeking into the pool in TxPool.Get without having to acquire the widely scoped -// TxPool.mu mutex. -// -// This lookup set combines the notion of "local transactions", which is useful -// to build upper-level structure. -type lookup struct { - slots int - lock sync.RWMutex - locals map[common.Hash]*types.Transaction - remotes map[common.Hash]*types.Transaction -} - -// newLookup returns a new lookup structure. -func newLookup() *lookup { - return &lookup{ - locals: make(map[common.Hash]*types.Transaction), - remotes: make(map[common.Hash]*types.Transaction), - } -} - -// Range calls f on each key and value present in the map. 
The callback passed -// should return the indicator whether the iteration needs to be continued. -// Callers need to specify which set (or both) to be iterated. -func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { - t.lock.RLock() - defer t.lock.RUnlock() - - if local { - for key, value := range t.locals { - if !f(key, value, true) { - return - } - } - } - if remote { - for key, value := range t.remotes { - if !f(key, value, false) { - return - } - } - } -} - -// Get returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) Get(hash common.Hash) *types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - if tx := t.locals[hash]; tx != nil { - return tx - } - return t.remotes[hash] -} - -// GetLocal returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) GetLocal(hash common.Hash) *types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.locals[hash] -} - -// GetRemote returns a transaction if it exists in the lookup, or nil if not found. -func (t *lookup) GetRemote(hash common.Hash) *types.Transaction { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.remotes[hash] -} - -// Count returns the current number of transactions in the lookup. -func (t *lookup) Count() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.locals) + len(t.remotes) -} - -// LocalCount returns the current number of local transactions in the lookup. -func (t *lookup) LocalCount() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.locals) -} - -// RemoteCount returns the current number of remote transactions in the lookup. -func (t *lookup) RemoteCount() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return len(t.remotes) -} - -// Slots returns the current number of slots used in the lookup. -func (t *lookup) Slots() int { - t.lock.RLock() - defer t.lock.RUnlock() - - return t.slots -} - -// Add adds a transaction to the lookup. -func (t *lookup) Add(tx *types.Transaction, local bool) { - t.lock.Lock() - defer t.lock.Unlock() - - t.slots += numSlots(tx) - slotsGauge.Update(int64(t.slots)) - - if local { - t.locals[tx.Hash()] = tx - } else { - t.remotes[tx.Hash()] = tx - } -} - -// Remove removes a transaction from the lookup. -func (t *lookup) Remove(hash common.Hash) { - t.lock.Lock() - defer t.lock.Unlock() - - tx, ok := t.locals[hash] - if !ok { - tx, ok = t.remotes[hash] - } - if !ok { - log.Error("No transaction found to be deleted", "hash", hash) - return - } - t.slots -= numSlots(tx) - slotsGauge.Update(int64(t.slots)) - - delete(t.locals, hash) - delete(t.remotes, hash) -} - -// RemoteToLocals migrates the transactions belongs to the given locals to locals -// set. The assumption is held the locals set is thread-safe to be used. -func (t *lookup) RemoteToLocals(locals *accountSet) int { - t.lock.Lock() - defer t.lock.Unlock() - - var migrated int - for hash, tx := range t.remotes { - if locals.containsTx(tx) { - t.locals[hash] = tx - delete(t.remotes, hash) - migrated += 1 - } - } - return migrated -} - -// RemotesBelowTip finds all remote transactions below the given tip threshold. 
-func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
-	found := make(types.Transactions, 0, 128)
-	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
-		if tx.GasTipCapIntCmp(threshold) < 0 {
-			found = append(found, tx)
-		}
-		return true
-	}, false, true) // Only iterate remotes
-	return found
-}
-
-// numSlots calculates the number of slots needed for a single transaction.
-func numSlots(tx *types.Transaction) int {
-	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
+	return TxStatusUnknown
 }
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
new file mode 100644
index 000000000..630d5340c
--- /dev/null
+++ b/core/txpool/validation.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package txpool
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto/kzg4844"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// ValidationOptions define certain differences between transaction validation
+// across the different pools without having to duplicate those checks.
+type ValidationOptions struct {
+	Config *params.ChainConfig // Chain configuration to selectively validate based on current fork rules
+
+	Accept  uint8    // Bitmap of transaction types that should be accepted for the calling pool
+	MaxSize uint64   // Maximum size of a transaction that the caller can meaningfully handle
+	MinTip  *big.Int // Minimum gas tip needed to allow a transaction into the caller pool
+}
+
+// ValidateTransaction is a helper method to check whether a transaction is valid
+// according to the consensus rules, but does not check state-dependent validation
+// (balance, nonce, etc).
+//
+// This check is public to allow different transaction pools to check the basic
+// rules without duplicating code and running the risk of missed updates.
+// ValidateTransaction is a helper method to check whether a transaction is valid
+// according to the consensus rules, but does not check state-dependent validation
+// (balance, nonce, etc).
+//
+// This check is public to allow different transaction pools to check the basic
+// rules without duplicating code and running the risk of missed updates.
+func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types.Signer, opts *ValidationOptions) error {
+	// Ensure transactions not implemented by the calling pool are rejected
+	if opts.Accept&(1<<tx.Type()) == 0 {
+		return fmt.Errorf("%w: tx type %v not supported by this pool", core.ErrTxTypeNotSupported, tx.Type())
+	}
+	// Before performing any expensive validations, sanity check that the tx is
+	// smaller than the maximum size the pool can meaningfully handle
+	if tx.Size() > opts.MaxSize {
+		return fmt.Errorf("%w: transaction size %v, limit %v", ErrOversizedData, tx.Size(), opts.MaxSize)
+	}
+	// Ensure only transactions that have been enabled are accepted
+	if !opts.Config.IsBerlin(head.Number) && tx.Type() != types.LegacyTxType {
+		return fmt.Errorf("%w: type %d rejected, pool not yet in Berlin", core.ErrTxTypeNotSupported, tx.Type())
+	}
+	if !opts.Config.IsLondon(head.Number) && tx.Type() == types.DynamicFeeTxType {
+		return fmt.Errorf("%w: type %d rejected, pool not yet in London", core.ErrTxTypeNotSupported, tx.Type())
+	}
+	if !opts.Config.IsCancun(head.Number, head.Time) && tx.Type() == types.BlobTxType {
+		return fmt.Errorf("%w: type %d rejected, pool not yet in Cancun", core.ErrTxTypeNotSupported, tx.Type())
+	}
+	// Check whether the init code size has been exceeded
+	if opts.Config.IsShanghai(head.Number, head.Time) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
+		return fmt.Errorf("%w: code size %v, limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
+	}
+	// Transactions can't be negative. This may never happen using RLP decoded
+	// transactions but may occur for transactions created using the RPC.
+	if tx.Value().Sign() < 0 {
+		return ErrNegativeValue
+	}
+	// Ensure the transaction doesn't exceed the current block limit gas
+	if head.GasLimit < tx.Gas() {
+		return ErrGasLimit
+	}
+	// Sanity check for extremely large numbers (supported by RLP or RPC)
+	if tx.GasFeeCap().BitLen() > 256 {
+		return core.ErrFeeCapVeryHigh
+	}
+	if tx.GasTipCap().BitLen() > 256 {
+		return core.ErrTipVeryHigh
+	}
+	// Ensure gasFeeCap is greater than or equal to gasTipCap
+	if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 {
+		return core.ErrTipAboveFeeCap
+	}
+	// Make sure the transaction is signed properly
+	if _, err := types.Sender(signer, tx); err != nil {
+		return ErrInvalidSender
+	}
+	// Ensure the transaction has more gas than the bare minimum needed to cover
+	// the transaction metadata
+	intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, opts.Config.IsIstanbul(head.Number), opts.Config.IsShanghai(head.Number, head.Time))
+	if err != nil {
+		return err
+	}
+	if tx.Gas() < intrGas {
+		return fmt.Errorf("%w: needed %v, allowed %v", core.ErrIntrinsicGas, intrGas, tx.Gas())
+	}
+	// Ensure the gasprice is high enough to cover the requirement of the calling
+	// pool and/or block producer
+	if tx.GasTipCapIntCmp(opts.MinTip) < 0 {
+		return fmt.Errorf("%w: tip needed %v, tip permitted %v", ErrUnderpriced, opts.MinTip, tx.GasTipCap())
+	}
+	// Ensure blob transactions have valid commitments
+	if tx.Type() == types.BlobTxType {
+		sidecar := tx.BlobTxSidecar()
+		if sidecar == nil {
+			return fmt.Errorf("missing sidecar in blob transaction")
+		}
+		// Ensure the number of items in the blob transaction and various side
+		// data match up before doing any expensive validations
+		hashes := tx.BlobHashes()
+		if len(hashes) == 0 {
+			return fmt.Errorf("blobless blob transaction")
+		}
+		if len(hashes) > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
+			return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)
+		}
+		if err := validateBlobSidecar(hashes, sidecar); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func validateBlobSidecar(hashes []common.Hash, sidecar *types.BlobTxSidecar) error {
+	if len(sidecar.Blobs) != len(hashes) {
+		return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(sidecar.Blobs), len(hashes))
+	}
+	if len(sidecar.Commitments) != len(hashes) {
+		return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(sidecar.Commitments), len(hashes))
+	}
+	if len(sidecar.Proofs) != len(hashes) {
+		return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(sidecar.Proofs), len(hashes))
+	}
+	// Blob quantities match up, validate that the provers match with the
+	// transaction hash before getting to the cryptography
+	hasher := sha256.New()
+	for i, want := range hashes {
+		hasher.Write(sidecar.Commitments[i][:])
+		hash := hasher.Sum(nil)
+		hasher.Reset()
+
+		var vhash common.Hash
+		vhash[0] = params.BlobTxHashVersion
+		copy(vhash[1:], hash[1:])
+
+		if vhash != want {
+			return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, vhash, want)
+		}
+	}
+	// Blob commitments match with the hashes in the transaction, verify the
+	// blobs themselves via KZG
+	for i := range sidecar.Blobs {
+		if err := kzg4844.VerifyBlobProof(sidecar.Blobs[i], sidecar.Commitments[i], sidecar.Proofs[i]); err != nil {
+			return fmt.Errorf("invalid blob %d: %v", i, err)
+		}
+	}
+	return nil
+}
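The versioned-hash check above follows EIP-4844: each blob hash in the transaction must equal the version byte followed by sha256(commitment)[1:]. A standalone sketch of that rule for a single commitment (the versionedHash helper name is ours, not part of the diff):

    package example

    import (
        "crypto/sha256"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/crypto/kzg4844"
        "github.com/ethereum/go-ethereum/params"
    )

    // versionedHash mirrors the loop body of validateBlobSidecar for one
    // KZG commitment.
    func versionedHash(commitment kzg4844.Commitment) common.Hash {
        sum := sha256.Sum256(commitment[:])

        var vhash common.Hash
        vhash[0] = params.BlobTxHashVersion // 0x01 version prefix
        copy(vhash[1:], sum[1:])
        return vhash
    }

Checking the cheap hash linkage first means the expensive KZG proof verification only runs on sidecars that are at least self-consistent.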
+
+// ValidationOptionsWithState define certain differences between stateful transaction
+// validation across the different pools without having to duplicate those checks.
+type ValidationOptionsWithState struct {
+	State *state.StateDB // State database to check nonces and balances against
+
+	// FirstNonceGap is an optional callback to retrieve the first nonce gap in
+	// the list of pooled transactions of a specific account. If this method is
+	// set, nonce gaps will be checked and forbidden. If this method is not set,
+	// nonce gaps will be ignored and permitted.
+	FirstNonceGap func(addr common.Address) uint64
+
+	// UsedAndLeftSlots is a mandatory callback to retrieve the number of tx slots
+	// used and the number still permitted for an account. New transactions will
+	// be rejected once the number of remaining slots reaches zero.
+	UsedAndLeftSlots func(addr common.Address) (int, int)
+
+	// ExistingExpenditure is a mandatory callback to retrieve the cumulative
+	// cost of the already pooled transactions to check for overdrafts.
+	ExistingExpenditure func(addr common.Address) *big.Int
+
+	// ExistingCost is a mandatory callback to retrieve an already pooled
+	// transaction's cost with the given nonce to check for overdrafts.
+	ExistingCost func(addr common.Address, nonce uint64) *big.Int
+}
+
+// ValidateTransactionWithState is a helper method to check whether a transaction
+// is valid according to the pool's internal state checks (balance, nonce, gaps).
+//
+// This check is public to allow different transaction pools to check the stateful
+// rules without duplicating code and running the risk of missed updates.
+func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, opts *ValidationOptionsWithState) error {
+	// Ensure the transaction adheres to nonce ordering
+	from, err := signer.Sender(tx) // already validated (and cached), but cleaner to check
+	if err != nil {
+		log.Error("Transaction sender recovery failed", "err", err)
+		return err
+	}
+	next := opts.State.GetNonce(from)
+	if next > tx.Nonce() {
+		return fmt.Errorf("%w: next nonce %v, tx nonce %v", core.ErrNonceTooLow, next, tx.Nonce())
+	}
+	// Ensure the transaction doesn't produce a nonce gap in pools that do not
+	// support arbitrary orderings
+	if opts.FirstNonceGap != nil {
+		if gap := opts.FirstNonceGap(from); gap < tx.Nonce() {
+			return fmt.Errorf("%w: tx nonce %v, gapped nonce %v", core.ErrNonceTooHigh, tx.Nonce(), gap)
+		}
+	}
+	// Ensure the transactor has enough funds to cover the transaction costs
+	var (
+		balance = opts.State.GetBalance(from)
+		cost    = tx.Cost()
+	)
+	if balance.Cmp(cost) < 0 {
+		return fmt.Errorf("%w: balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, cost, new(big.Int).Sub(cost, balance))
+	}
+	// Ensure the transactor has enough funds to cover for replacements or nonce
+	// expansions without overdrafts
+	spent := opts.ExistingExpenditure(from)
+	if prev := opts.ExistingCost(from, tx.Nonce()); prev != nil {
+		bump := new(big.Int).Sub(cost, prev)
+		need := new(big.Int).Add(spent, bump)
+		if balance.Cmp(need) < 0 {
+			return fmt.Errorf("%w: balance %v, queued cost %v, tx bumped %v, overshot %v", core.ErrInsufficientFunds, balance, spent, bump, new(big.Int).Sub(need, balance))
+		}
+	} else {
+		need := new(big.Int).Add(spent, cost)
+		if balance.Cmp(need) < 0 {
+			return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance))
+		}
+		// Transaction takes a new nonce value out of the pool. Ensure it doesn't
+		// overflow the number of permitted transactions from a single account
+		// (i.e. max cancellable via out-of-bound transaction).
+		if used, left := opts.UsedAndLeftSlots(from); left <= 0 {
+			return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used)
+		}
+	}
+	return nil
+}
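Because the stateful options are plain callbacks, a pool can bridge its own internal indexes without exposing them. A hypothetical wiring sketch (the pending map and the 16-slot cap are assumptions for illustration only):

    package example

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/core/txpool"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // checkStateful shows one way a pool holding per-account queues could
    // satisfy the callback contract.
    func checkStateful(tx *types.Transaction, signer types.Signer, db *state.StateDB, pending map[common.Address]types.Transactions) error {
        opts := &txpool.ValidationOptionsWithState{
            State:         db,
            FirstNonceGap: nil, // nil: nonce gaps are permitted, as documented above
            UsedAndLeftSlots: func(addr common.Address) (int, int) {
                return len(pending[addr]), 16 - len(pending[addr]) // assumed cap of 16 slots
            },
            ExistingExpenditure: func(addr common.Address) *big.Int {
                total := new(big.Int)
                for _, ptx := range pending[addr] {
                    total.Add(total, ptx.Cost())
                }
                return total
            },
            ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
                for _, ptx := range pending[addr] {
                    if ptx.Nonce() == nonce {
                        return ptx.Cost()
                    }
                }
                return nil
            },
        }
        return txpool.ValidateTransactionWithState(tx, signer, opts)
    }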
diff --git a/core/types/block.go b/core/types/block.go
index a1a14f5b1..1a357baa3 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -85,26 +85,28 @@ type Header struct {
 	// WithdrawalsHash was added by EIP-4895 and is ignored in legacy headers.
 	WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
 
-	// ExcessDataGas was added by EIP-4844 and is ignored in legacy headers.
-	ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
+	// BlobGasUsed was added by EIP-4844 and is ignored in legacy headers.
+	BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
 
-	/*
-		TODO (MariusVanDerWijden) Add this field once needed
-		// Random was added during the merge and contains the BeaconState randomness
-		Random common.Hash `json:"random" rlp:"optional"`
-	*/
+	// ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers.
+	ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
+
+	// ParentBeaconRoot was added by EIP-4788 and is ignored in legacy headers.
+	ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
 }
 
 // field type overrides for gencodec
 type headerMarshaling struct {
-	Difficulty *hexutil.Big
-	Number     *hexutil.Big
-	GasLimit   hexutil.Uint64
-	GasUsed    hexutil.Uint64
-	Time       hexutil.Uint64
-	Extra      hexutil.Bytes
-	BaseFee    *hexutil.Big
-	Hash       common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
+	Difficulty    *hexutil.Big
+	Number        *hexutil.Big
+	GasLimit      hexutil.Uint64
+	GasUsed       hexutil.Uint64
+	Time          hexutil.Uint64
+	Extra         hexutil.Bytes
+	BaseFee       *hexutil.Big
+	Hash          common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON
+	BlobGasUsed   *hexutil.Uint64
+	ExcessBlobGas *hexutil.Uint64
 }
 
 // Hash returns the block hash of the header, which is simply the keccak256 hash of its
@@ -152,10 +154,10 @@ func (h *Header) SanityCheck() error {
 // EmptyBody returns true if there is no additional 'body' to complete the header
 // that is: no transactions, no uncles and no withdrawals.
 func (h *Header) EmptyBody() bool {
-	if h.WithdrawalsHash == nil {
-		return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash
+	if h.WithdrawalsHash != nil {
+		return h.TxHash == EmptyTxsHash && *h.WithdrawalsHash == EmptyWithdrawalsHash
 	}
-	return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && *h.WithdrawalsHash == EmptyWithdrawalsHash
+	return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash
 }
 
 // EmptyReceipts returns true if there are no receipts for this header/block.
@@ -171,7 +173,23 @@ type Body struct {
 	Withdrawals []*Withdrawal `rlp:"optional"`
 }
 
-// Block represents an entire block in the Ethereum blockchain.
+// Block represents an Ethereum block.
+//
+// Note the Block type tries to be 'immutable', and contains certain caches that rely
+// on that. The rules around block immutability are as follows:
+//
+//   - We copy all data when the block is constructed. This makes references held inside
+//     the block independent of whatever value was passed in.
+//
+//   - We copy all header data on access. This is because any change to the header would mess
+//     up the cached hash and size values in the block. Calling code is expected to take
+//     advantage of this to avoid over-allocating!
+//
+//   - When new body data is attached to the block, a shallow copy of the block is returned.
+//     This ensures block modifications are race-free.
+//
+//   - We do not copy body data on access because it does not affect the caches, and also
+//     because it would be too expensive.
 type Block struct {
 	header       *Header
 	uncles       []*Header
@@ -196,9 +214,8 @@ type extblock struct {
 	Withdrawals []*Withdrawal `rlp:"optional"`
 }
 
-// NewBlock creates a new block. The input data is copied,
-// changes to header and to the field values will not affect the
-// block.
+// NewBlock creates a new block. The input data is copied, changes to header and to the
+// field values will not affect the block.
 //
 // The values of TxHash, UncleHash, ReceiptHash and Bloom in header
 // are ignored and set to values derived from the given txs, uncles
@@ -235,13 +252,11 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 	return b
 }
 
-// NewBlockWithWithdrawals creates a new block with withdrawals. The input data
-// is copied, changes to header and to the field values will not
-// affect the block.
+// NewBlockWithWithdrawals creates a new block with withdrawals. The input data is copied,
+// changes to header and to the field values will not affect the block.
// -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header -// are ignored and set to values derived from the given txs, uncles -// and receipts. +// The values of TxHash, UncleHash, ReceiptHash and Bloom in header are ignored and set to +// values derived from the given txs, uncles and receipts. func NewBlockWithWithdrawals(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal, hasher TrieHasher) *Block { b := NewBlock(header, txs, uncles, receipts, hasher) @@ -257,15 +272,7 @@ func NewBlockWithWithdrawals(header *Header, txs []*Transaction, uncles []*Heade return b.WithWithdrawals(withdrawals) } -// NewBlockWithHeader creates a block with the given header data. The -// header data is copied, changes to header and to the field values -// will not affect the block. -func NewBlockWithHeader(header *Header) *Block { - return &Block{header: CopyHeader(header)} -} - -// CopyHeader creates a deep copy of a block header to prevent side effects from -// modifying a header variable. +// CopyHeader creates a deep copy of a block header. func CopyHeader(h *Header) *Header { cpy := *h if cpy.Difficulty = new(big.Int); h.Difficulty != nil { @@ -285,10 +292,22 @@ func CopyHeader(h *Header) *Header { cpy.WithdrawalsHash = new(common.Hash) *cpy.WithdrawalsHash = *h.WithdrawalsHash } + if h.ExcessBlobGas != nil { + cpy.ExcessBlobGas = new(uint64) + *cpy.ExcessBlobGas = *h.ExcessBlobGas + } + if h.BlobGasUsed != nil { + cpy.BlobGasUsed = new(uint64) + *cpy.BlobGasUsed = *h.BlobGasUsed + } + if h.ParentBeaconRoot != nil { + cpy.ParentBeaconRoot = new(common.Hash) + *cpy.ParentBeaconRoot = *h.ParentBeaconRoot + } return &cpy } -// DecodeRLP decodes the Ethereum +// DecodeRLP decodes a block from RLP. func (b *Block) DecodeRLP(s *rlp.Stream) error { var eb extblock _, size, _ := s.Kind() @@ -300,9 +319,9 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error { return nil } -// EncodeRLP serializes b into the Ethereum RLP block format. +// EncodeRLP serializes a block as RLP. func (b *Block) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, extblock{ + return rlp.Encode(w, &extblock{ Header: b.header, Txs: b.transactions, Uncles: b.uncles, @@ -310,10 +329,18 @@ func (b *Block) EncodeRLP(w io.Writer) error { }) } -// TODO: copies +// Body returns the non-header content of the block. +// Note the returned data is not an independent copy. +func (b *Block) Body() *Body { + return &Body{b.transactions, b.uncles, b.withdrawals} +} + +// Accessors for body data. These do not return a copy because the content +// of the body slices does not affect the cached hash/size in block. func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Transactions() Transactions { return b.transactions } +func (b *Block) Withdrawals() Withdrawals { return b.withdrawals } func (b *Block) Transaction(hash common.Hash) *Transaction { for _, transaction := range b.transactions { @@ -324,6 +351,13 @@ func (b *Block) Transaction(hash common.Hash) *Transaction { return nil } +// Header returns the block header (as a copy). +func (b *Block) Header() *Header { + return CopyHeader(b.header) +} + +// Header value accessors. These do copy! 
+ func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) } func (b *Block) GasLimit() uint64 { return b.header.GasLimit } func (b *Block) GasUsed() uint64 { return b.header.GasUsed } @@ -349,14 +383,25 @@ func (b *Block) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) } -func (b *Block) Withdrawals() Withdrawals { - return b.withdrawals +func (b *Block) BeaconRoot() *common.Hash { return b.header.ParentBeaconRoot } + +func (b *Block) ExcessBlobGas() *uint64 { + var excessBlobGas *uint64 + if b.header.ExcessBlobGas != nil { + excessBlobGas = new(uint64) + *excessBlobGas = *b.header.ExcessBlobGas + } + return excessBlobGas } -func (b *Block) Header() *Header { return CopyHeader(b.header) } - -// Body returns the non-header content of the block. -func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.withdrawals} } +func (b *Block) BlobGasUsed() *uint64 { + var blobGasUsed *uint64 + if b.header.BlobGasUsed != nil { + blobGasUsed = new(uint64) + *blobGasUsed = *b.header.BlobGasUsed + } + return blobGasUsed +} // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previously cached value. @@ -390,25 +435,31 @@ func CalcUncleHash(uncles []*Header) common.Hash { return rlpHash(uncles) } +// NewBlockWithHeader creates a block with the given header data. The +// header data is copied, changes to header and to the field values +// will not affect the block. +func NewBlockWithHeader(header *Header) *Block { + return &Block{header: CopyHeader(header)} +} + // WithSeal returns a new block with the data from b but the header replaced with // the sealed one. func (b *Block) WithSeal(header *Header) *Block { - cpy := *header - return &Block{ - header: &cpy, + header: CopyHeader(header), transactions: b.transactions, uncles: b.uncles, withdrawals: b.withdrawals, } } -// WithBody returns a new block with the given transaction and uncle contents. +// WithBody returns a copy of the block with the given transaction and uncle contents. func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { block := &Block{ - header: CopyHeader(b.header), + header: b.header, transactions: make([]*Transaction, len(transactions)), uncles: make([]*Header, len(uncles)), + withdrawals: b.withdrawals, } copy(block.transactions, transactions) for i := range uncles { @@ -417,13 +468,18 @@ func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { return block } -// WithWithdrawals sets the withdrawal contents of a block, does not return a new block. +// WithWithdrawals returns a copy of the block containing the given withdrawals. func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block { - if withdrawals != nil { - b.withdrawals = make([]*Withdrawal, len(withdrawals)) - copy(b.withdrawals, withdrawals) + block := &Block{ + header: b.header, + transactions: b.transactions, + uncles: b.uncles, } - return b + if withdrawals != nil { + block.withdrawals = make([]*Withdrawal, len(withdrawals)) + copy(block.withdrawals, withdrawals) + } + return block } // Hash returns the keccak256 hash of b's header. 
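The immutability rules spelled out above have an observable effect: body attachment returns a fresh *Block and Header() hands out a defensive copy. A minimal sketch (not part of the diff) of both behaviours:

    package example

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core/types"
    )

    func demo() {
        base := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(1)})

        // Body attachment returns a shallow copy instead of mutating in place.
        extended := base.WithWithdrawals(types.Withdrawals{{Index: 7}})
        fmt.Println(len(base.Withdrawals()), len(extended.Withdrawals())) // 0 1

        // Headers are copied on access, so mutating the returned header
        // cannot corrupt the block's cached hash.
        h := base.Header()
        h.Number.SetUint64(99)
        fmt.Println(base.Number()) // 1
    }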
diff --git a/core/types/block_test.go b/core/types/block_test.go index 966015eb0..cf0b1dd85 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -18,7 +18,6 @@ package types import ( "bytes" - "hash" "math/big" "reflect" "testing" @@ -26,9 +25,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/blocktest" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) // from bcValidBlockTest.json, "SimpleTx" @@ -217,31 +216,6 @@ func BenchmarkEncodeBlock(b *testing.B) { } } -// testHasher is the helper tool for transaction/receipt list hashing. -// The original hasher is trie, in order to get rid of import cycle, -// use the testing hasher instead. -type testHasher struct { - hasher hash.Hash -} - -func newHasher() *testHasher { - return &testHasher{hasher: sha3.NewLegacyKeccak256()} -} - -func (h *testHasher) Reset() { - h.hasher.Reset() -} - -func (h *testHasher) Update(key, val []byte) error { - h.hasher.Write(key) - h.hasher.Write(val) - return nil -} - -func (h *testHasher) Hash() common.Hash { - return common.BytesToHash(h.hasher.Sum(nil)) -} - func makeBenchBlock() *Block { var ( key, _ = crypto.GenerateKey() @@ -280,7 +254,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, newHasher()) + return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher()) } func TestRlpDecodeParentHash(t *testing.T) { diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go index 73287257b..fb1f915d0 100644 --- a/core/types/gen_header_json.go +++ b/core/types/gen_header_json.go @@ -16,25 +16,27 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (h Header) MarshalJSON() ([]byte, error) { type Header struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase common.Address `json:"miner"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash"` - Nonce BlockNonce `json:"nonce"` - BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` - ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` - Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase common.Address `json:"miner"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` + Number *hexutil.Big `json:"number" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData" gencodec:"required"` + MixDigest common.Hash `json:"mixHash"` + Nonce BlockNonce `json:"nonce"` + BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` + Hash common.Hash `json:"hash"` } var enc Header enc.ParentHash = h.ParentHash @@ -54,7 +56,9 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.Nonce = h.Nonce enc.BaseFee = (*hexutil.Big)(h.BaseFee) enc.WithdrawalsHash = h.WithdrawalsHash - enc.ExcessDataGas = h.ExcessDataGas + enc.BlobGasUsed = (*hexutil.Uint64)(h.BlobGasUsed) + enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas) + enc.ParentBeaconRoot = h.ParentBeaconRoot enc.Hash = h.Hash() return json.Marshal(&enc) } @@ -62,24 +66,26 @@ func (h Header) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. 
func (h *Header) UnmarshalJSON(input []byte) error { type Header struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase *common.Address `json:"miner"` - Root *common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom *Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest *common.Hash `json:"mixHash"` - Nonce *BlockNonce `json:"nonce"` - BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` - ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase *common.Address `json:"miner"` + Root *common.Hash `json:"stateRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom *Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` + Number *hexutil.Big `json:"number" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *BlockNonce `json:"nonce"` + BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` + WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` + ParentBeaconRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"` } var dec Header if err := json.Unmarshal(input, &dec); err != nil { @@ -148,8 +154,14 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.WithdrawalsHash != nil { h.WithdrawalsHash = dec.WithdrawalsHash } - if dec.ExcessDataGas != nil { - h.ExcessDataGas = dec.ExcessDataGas + if dec.BlobGasUsed != nil { + h.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.ExcessBlobGas != nil { + h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.ParentBeaconRoot != nil { + h.ParentBeaconRoot = dec.ParentBeaconRoot } return nil } diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go index 005f6c2ad..b91a255a5 100644 --- a/core/types/gen_header_rlp.go +++ b/core/types/gen_header_rlp.go @@ -42,8 +42,10 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBytes(obj.Nonce[:]) _tmp1 := obj.BaseFee != nil _tmp2 := obj.WithdrawalsHash != nil - _tmp3 := obj.ExcessDataGas != nil - if _tmp1 || _tmp2 || _tmp3 { + _tmp3 := obj.BlobGasUsed != nil + _tmp4 := obj.ExcessBlobGas != nil + _tmp5 := obj.ParentBeaconRoot != nil + if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 { if obj.BaseFee == nil { w.Write(rlp.EmptyString) } else { @@ -53,21 +55,32 @@ 
func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BaseFee) } } - if _tmp2 || _tmp3 { + if _tmp2 || _tmp3 || _tmp4 || _tmp5 { if obj.WithdrawalsHash == nil { w.Write([]byte{0x80}) } else { w.WriteBytes(obj.WithdrawalsHash[:]) } } - if _tmp3 { - if obj.ExcessDataGas == nil { - w.Write(rlp.EmptyString) + if _tmp3 || _tmp4 || _tmp5 { + if obj.BlobGasUsed == nil { + w.Write([]byte{0x80}) } else { - if obj.ExcessDataGas.Sign() == -1 { - return rlp.ErrNegativeBigInt - } - w.WriteBigInt(obj.ExcessDataGas) + w.WriteUint64((*obj.BlobGasUsed)) + } + } + if _tmp4 || _tmp5 { + if obj.ExcessBlobGas == nil { + w.Write([]byte{0x80}) + } else { + w.WriteUint64((*obj.ExcessBlobGas)) + } + } + if _tmp5 { + if obj.ParentBeaconRoot == nil { + w.Write([]byte{0x80}) + } else { + w.WriteBytes(obj.ParentBeaconRoot[:]) } } w.ListEnd(_tmp0) diff --git a/core/types/gen_log_json.go b/core/types/gen_log_json.go index 90e1c14d9..3ffa9c2fe 100644 --- a/core/types/gen_log_json.go +++ b/core/types/gen_log_json.go @@ -18,12 +18,12 @@ func (l Log) MarshalJSON() ([]byte, error) { Address common.Address `json:"address" gencodec:"required"` Topics []common.Hash `json:"topics" gencodec:"required"` Data hexutil.Bytes `json:"data" gencodec:"required"` - BlockNumber hexutil.Uint64 `json:"blockNumber"` - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - TxIndex hexutil.Uint `json:"transactionIndex"` - BlockHash common.Hash `json:"blockHash"` - Index hexutil.Uint `json:"logIndex"` - Removed bool `json:"removed"` + BlockNumber hexutil.Uint64 `json:"blockNumber" rlp:"-"` + TxHash common.Hash `json:"transactionHash" gencodec:"required" rlp:"-"` + TxIndex hexutil.Uint `json:"transactionIndex" rlp:"-"` + BlockHash common.Hash `json:"blockHash" rlp:"-"` + Index hexutil.Uint `json:"logIndex" rlp:"-"` + Removed bool `json:"removed" rlp:"-"` } var enc Log enc.Address = l.Address @@ -44,12 +44,12 @@ func (l *Log) UnmarshalJSON(input []byte) error { Address *common.Address `json:"address" gencodec:"required"` Topics []common.Hash `json:"topics" gencodec:"required"` Data *hexutil.Bytes `json:"data" gencodec:"required"` - BlockNumber *hexutil.Uint64 `json:"blockNumber"` - TxHash *common.Hash `json:"transactionHash" gencodec:"required"` - TxIndex *hexutil.Uint `json:"transactionIndex"` - BlockHash *common.Hash `json:"blockHash"` - Index *hexutil.Uint `json:"logIndex"` - Removed *bool `json:"removed"` + BlockNumber *hexutil.Uint64 `json:"blockNumber" rlp:"-"` + TxHash *common.Hash `json:"transactionHash" gencodec:"required" rlp:"-"` + TxIndex *hexutil.Uint `json:"transactionIndex" rlp:"-"` + BlockHash *common.Hash `json:"blockHash" rlp:"-"` + Index *hexutil.Uint `json:"logIndex" rlp:"-"` + Removed *bool `json:"removed" rlp:"-"` } var dec Log if err := json.Unmarshal(input, &dec); err != nil { diff --git a/core/types/gen_log_rlp.go b/core/types/gen_log_rlp.go index 4a6c6b009..cbdb6736e 100644 --- a/core/types/gen_log_rlp.go +++ b/core/types/gen_log_rlp.go @@ -8,7 +8,7 @@ package types import "github.com/ethereum/go-ethereum/rlp" import "io" -func (obj *rlpLog) EncodeRLP(_w io.Writer) error { +func (obj *Log) EncodeRLP(_w io.Writer) error { w := rlp.NewEncoderBuffer(_w) _tmp0 := w.List() w.WriteBytes(obj.Address[:]) diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go index d83be1447..4c641a972 100644 --- a/core/types/gen_receipt_json.go +++ b/core/types/gen_receipt_json.go @@ -26,6 +26,8 @@ func (r Receipt) MarshalJSON() ([]byte, error) { ContractAddress common.Address 
`json:"contractAddress"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` + BlobGasUsed hexutil.Uint64 `json:"blobGasUsed,omitempty"` + BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` BlockHash common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex hexutil.Uint `json:"transactionIndex"` @@ -41,6 +43,8 @@ func (r Receipt) MarshalJSON() ([]byte, error) { enc.ContractAddress = r.ContractAddress enc.GasUsed = hexutil.Uint64(r.GasUsed) enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice) + enc.BlobGasUsed = hexutil.Uint64(r.BlobGasUsed) + enc.BlobGasPrice = (*hexutil.Big)(r.BlobGasPrice) enc.BlockHash = r.BlockHash enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) @@ -60,6 +64,8 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { ContractAddress *common.Address `json:"contractAddress"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"` + BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` BlockHash *common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex *hexutil.Uint `json:"transactionIndex"` @@ -103,6 +109,12 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { if dec.EffectiveGasPrice != nil { r.EffectiveGasPrice = (*big.Int)(dec.EffectiveGasPrice) } + if dec.BlobGasUsed != nil { + r.BlobGasUsed = uint64(*dec.BlobGasUsed) + } + if dec.BlobGasPrice != nil { + r.BlobGasPrice = (*big.Int)(dec.BlobGasPrice) + } if dec.BlockHash != nil { r.BlockHash = *dec.BlockHash } diff --git a/core/types/hashes.go b/core/types/hashes.go index 3bad430be..3a787aa13 100644 --- a/core/types/hashes.go +++ b/core/types/hashes.go @@ -19,6 +19,7 @@ package types import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) var ( @@ -40,3 +41,13 @@ var ( // EmptyWithdrawalsHash is the known hash of the empty withdrawal set. EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") ) + +// TrieRootHash returns the hash itself if it's non-empty or the predefined +// emptyHash one instead. +func TrieRootHash(hash common.Hash) common.Hash { + if hash == (common.Hash{}) { + log.Error("Zero trie root hash!") + return EmptyRootHash + } + return hash +} diff --git a/core/types/hashing.go b/core/types/hashing.go index fbdeaf0d0..9a6a80ac5 100644 --- a/core/types/hashing.go +++ b/core/types/hashing.go @@ -18,6 +18,8 @@ package types import ( "bytes" + "fmt" + "math" "sync" "github.com/ethereum/go-ethereum/common" @@ -36,6 +38,22 @@ var encodeBufferPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } +// getPooledBuffer retrieves a buffer from the pool and creates a byte slice of the +// requested size from it. +// +// The caller should return the *bytes.Buffer object back into encodeBufferPool after use! +// The returned byte slice must not be used after returning the buffer. 
+func getPooledBuffer(size uint64) ([]byte, *bytes.Buffer, error) { + if size > math.MaxInt { + return nil, nil, fmt.Errorf("can't get buffer of size %d", size) + } + buf := encodeBufferPool.Get().(*bytes.Buffer) + buf.Reset() + buf.Grow(int(size)) + b := buf.Bytes()[:int(size)] + return b, buf, nil +} + // rlpHash encodes x and hashes the encoded bytes. func rlpHash(x interface{}) (h common.Hash) { sha := hasherPool.Get().(crypto.KeccakState) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index c5b9f690d..d2a98ed7b 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -39,7 +39,7 @@ func TestDeriveSha(t *testing.T) { t.Fatal(err) } for len(txs) < 1000 { - exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) got := types.DeriveSha(txs, trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) @@ -86,7 +86,7 @@ func BenchmarkDeriveSha200(b *testing.B) { b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) } }) @@ -107,7 +107,7 @@ func TestFuzzDeriveSha(t *testing.T) { rndSeed := mrand.Int() for i := 0; i < 10; i++ { seed := rndSeed + i - exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { printList(newDummy(seed)) @@ -135,7 +135,7 @@ func TestDerivableList(t *testing.T) { }, } for i, tc := range tcs[1:] { - exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("case %d: got %x exp %x", i, got, exp) diff --git a/core/types/log.go b/core/types/log.go index e48919136..54c7ff637 100644 --- a/core/types/log.go +++ b/core/types/log.go @@ -17,13 +17,11 @@ package types import ( - "io" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/rlp" ) +//go:generate go run ../../rlp/rlpgen -type Log -out gen_log_rlp.go //go:generate go run github.com/fjl/gencodec -type Log -field-override logMarshaling -out gen_log_json.go // Log represents a contract log event. These events are generated by the LOG opcode and @@ -40,19 +38,19 @@ type Log struct { // Derived fields. These fields are filled in by the node // but not secured by consensus. 
// block in which the transaction was included - BlockNumber uint64 `json:"blockNumber"` + BlockNumber uint64 `json:"blockNumber" rlp:"-"` // hash of the transaction - TxHash common.Hash `json:"transactionHash" gencodec:"required"` + TxHash common.Hash `json:"transactionHash" gencodec:"required" rlp:"-"` // index of the transaction in the block - TxIndex uint `json:"transactionIndex"` + TxIndex uint `json:"transactionIndex" rlp:"-"` // hash of the block in which the transaction was included - BlockHash common.Hash `json:"blockHash"` + BlockHash common.Hash `json:"blockHash" rlp:"-"` // index of the log in the block - Index uint `json:"logIndex"` + Index uint `json:"logIndex" rlp:"-"` // The Removed field is true if this log was reverted due to a chain reorganisation. // You must pay attention to this field if you receive logs through a filter query. - Removed bool `json:"removed"` + Removed bool `json:"removed" rlp:"-"` } type logMarshaling struct { @@ -61,28 +59,3 @@ type logMarshaling struct { TxIndex hexutil.Uint Index hexutil.Uint } - -//go:generate go run ../../rlp/rlpgen -type rlpLog -out gen_log_rlp.go - -// rlpLog is used to RLP-encode both the consensus and storage formats. -type rlpLog struct { - Address common.Address - Topics []common.Hash - Data []byte -} - -// EncodeRLP implements rlp.Encoder. -func (l *Log) EncodeRLP(w io.Writer) error { - rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data} - return rlp.Encode(w, &rl) -} - -// DecodeRLP implements rlp.Decoder. -func (l *Log) DecodeRLP(s *rlp.Stream) error { - var dec rlpLog - err := s.Decode(&dec) - if err == nil { - l.Address, l.Topics, l.Data = dec.Address, dec.Topics, dec.Data - } - return err -} diff --git a/core/types/log_test.go b/core/types/log_test.go index 0e56acfe4..02eef3ecd 100644 --- a/core/types/log_test.go +++ b/core/types/log_test.go @@ -18,7 +18,7 @@ package types import ( "encoding/json" - "fmt" + "errors" "reflect" "testing" @@ -97,7 +97,7 @@ var unmarshalLogTests = map[string]struct { }, "missing data": { input: `{"address":"0xecf8f87f810ecf450940c9f60066b4a7a501d6a7","blockHash":"0x656c34545f90a730a19008c0e7a7cd4fb3895064b48d6d69761bd5abad681056","blockNumber":"0x1ecfa4","logIndex":"0x2","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x00000000000000000000000080b2c9d7cbbf30a1b0fc8983c647d754c6525615","0x000000000000000000000000f9dff387dcb5cc4cca5b91adb07a95f54e9f1bb6"],"transactionHash":"0x3b198bfd5d2907285af009e9ae84a0ecd63677110d89d7e030251acb87f6487e","transactionIndex":"0x3"}`, - wantError: fmt.Errorf("missing required field 'data' for Log"), + wantError: errors.New("missing required field 'data' for Log"), }, } diff --git a/core/types/receipt.go b/core/types/receipt.go index 8fc9ec989..4f96fde59 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -63,6 +63,8 @@ type Receipt struct { ContractAddress common.Address `json:"contractAddress"` GasUsed uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility + BlobGasUsed uint64 `json:"blobGasUsed,omitempty"` + BlobGasPrice *big.Int `json:"blobGasPrice,omitempty"` // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. 
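With the two receipt fields above tagged omitempty, blob data only shows up in the JSON encoding of type-3 receipts. A quick sketch of the result (the values mirror the updated receipt tests; the snippet itself is not part of the diff):

    package example

    import (
        "encoding/json"
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core/types"
    )

    func printReceipt() {
        r := &types.Receipt{
            Type:              types.BlobTxType,
            Status:            types.ReceiptStatusSuccessful,
            CumulativeGasUsed: 21000,
            GasUsed:           21000,
            BlobGasUsed:       131072,          // one blob's worth of blob gas
            BlobGasPrice:      big.NewInt(920), // as in the updated receipt tests
        }
        out, _ := json.Marshal(r)
        fmt.Println(string(out)) // ...,"blobGasUsed":"0x20000","blobGasPrice":"0x398",...
    }

For legacy and dynamic-fee receipts both fields stay at their zero values and are omitted, keeping old clients' JSON unchanged.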
@@ -78,6 +80,8 @@ type receiptMarshaling struct { CumulativeGasUsed hexutil.Uint64 GasUsed hexutil.Uint64 EffectiveGasPrice *hexutil.Big + BlobGasUsed hexutil.Uint64 + BlobGasPrice *hexutil.Big BlockNumber *hexutil.Big TransactionIndex hexutil.Uint } @@ -149,7 +153,7 @@ func (r *Receipt) MarshalBinary() ([]byte, error) { // DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt // from an RLP stream. func (r *Receipt) DecodeRLP(s *rlp.Stream) error { - kind, _, err := s.Kind() + kind, size, err := s.Kind() switch { case err != nil: return err @@ -161,12 +165,18 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error { } r.Type = LegacyTxType return r.setFromRLP(dec) + case kind == rlp.Byte: + return errShortTypedReceipt default: // It's an EIP-2718 typed tx receipt. - b, err := s.Bytes() + b, buf, err := getPooledBuffer(size) if err != nil { return err } + defer encodeBufferPool.Put(buf) + if err := s.ReadBytes(b); err != nil { + return err + } return r.decodeTyped(b) } } @@ -194,7 +204,7 @@ func (r *Receipt) decodeTyped(b []byte) error { return errShortTypedReceipt } switch b[0] { - case DynamicFeeTxType, AccessListTxType: + case DynamicFeeTxType, AccessListTxType, BlobTxType: var data receiptRLP err := rlp.DecodeBytes(b[1:], &data) if err != nil { @@ -260,7 +270,7 @@ func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error { w.WriteUint64(r.CumulativeGasUsed) logList := w.List() for _, log := range r.Logs { - if err := rlp.Encode(w, log); err != nil { + if err := log.EncodeRLP(w); err != nil { return err } } @@ -296,14 +306,13 @@ func (rs Receipts) Len() int { return len(rs) } func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { r := rs[i] data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} + if r.Type == LegacyTxType { + rlp.Encode(w, data) + return + } + w.WriteByte(r.Type) switch r.Type { - case LegacyTxType: - rlp.Encode(w, data) - case AccessListTxType: - w.WriteByte(AccessListTxType) - rlp.Encode(w, data) - case DynamicFeeTxType: - w.WriteByte(DynamicFeeTxType) + case AccessListTxType, DynamicFeeTxType, BlobTxType: rlp.Encode(w, data) default: // For unsupported types, write nothing. Since this is for @@ -314,7 +323,7 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { // DeriveFields fills the receipts with their computed fields based on consensus // data and contextual infos like containing block and transactions. 
-func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, txs []*Transaction) error { +func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs []*Transaction) error { signer := MakeSigner(config, new(big.Int).SetUint64(number), time) logIndex := uint(0) @@ -325,9 +334,14 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu // The transaction type and hash can be retrieved from the transaction itself rs[i].Type = txs[i].Type() rs[i].TxHash = txs[i].Hash() - rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee) + // EIP-4844 blob transaction fields + if txs[i].Type() == BlobTxType { + rs[i].BlobGasUsed = txs[i].BlobGas() + rs[i].BlobGasPrice = blobGasPrice + } + // block location fields rs[i].BlockHash = hash rs[i].BlockNumber = new(big.Int).SetUint64(number) diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 168f36208..a7b264447 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -130,22 +130,24 @@ var ( }), // EIP-4844 transactions. NewTx(&BlobTx{ - To: &to6, + To: to6, Nonce: 6, Value: uint256.NewInt(6), Gas: 6, GasTipCap: uint256.NewInt(66), GasFeeCap: uint256.NewInt(1066), BlobFeeCap: uint256.NewInt(100066), + BlobHashes: []common.Hash{{}}, }), NewTx(&BlobTx{ - To: &to7, + To: to7, Nonce: 7, Value: uint256.NewInt(7), Gas: 7, GasTipCap: uint256.NewInt(77), GasFeeCap: uint256.NewInt(1077), BlobFeeCap: uint256.NewInt(100077), + BlobHashes: []common.Hash{{}, {}, {}}, }), } @@ -270,6 +272,8 @@ var ( TxHash: txs[5].Hash(), GasUsed: 6, EffectiveGasPrice: big.NewInt(1066), + BlobGasUsed: params.BlobTxBlobGasPerBlob, + BlobGasPrice: big.NewInt(920), BlockHash: blockHash, BlockNumber: blockNumber, TransactionIndex: 5, @@ -283,6 +287,8 @@ var ( TxHash: txs[6].Hash(), GasUsed: 7, EffectiveGasPrice: big.NewInt(1077), + BlobGasUsed: 3 * params.BlobTxBlobGasPerBlob, + BlobGasPrice: big.NewInt(920), BlockHash: blockHash, BlockNumber: blockNumber, TransactionIndex: 6, @@ -303,8 +309,9 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) { func TestDeriveFields(t *testing.T) { // Re-derive receipts. basefee := big.NewInt(1000) + blobGasPrice := big.NewInt(920) derivedReceipts := clearComputedFieldsOnReceipts(receipts) - err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, txs) + err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs) if err != nil { t.Fatalf("DeriveFields(...) 
= %v, want <nil>", err)
 	}
@@ -501,6 +508,9 @@ func clearComputedFieldsOnReceipt(receipt *Receipt) *Receipt {
 	cpy.ContractAddress = common.Address{0xff, 0xff, 0x33}
 	cpy.GasUsed = 0xffffffff
 	cpy.Logs = clearComputedFieldsOnLogs(receipt.Logs)
+	cpy.EffectiveGasPrice = big.NewInt(0)
+	cpy.BlobGasUsed = 0
+	cpy.BlobGasPrice = nil
 	return &cpy
 }
diff --git a/core/types/state_account.go b/core/types/state_account.go
index 3b01be451..314f4943e 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -17,9 +17,11 @@
 package types
 
 import (
+	"bytes"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 //go:generate go run ../../rlp/rlpgen -type StateAccount -out gen_account_rlp.go
@@ -32,3 +34,88 @@ type StateAccount struct {
 	Root     common.Hash // merkle root of the storage trie
 	CodeHash []byte
 }
+
+// NewEmptyStateAccount constructs an empty state account.
+func NewEmptyStateAccount() *StateAccount {
+	return &StateAccount{
+		Balance:  new(big.Int),
+		Root:     EmptyRootHash,
+		CodeHash: EmptyCodeHash.Bytes(),
+	}
+}
+
+// Copy returns a deep-copied state account object.
+func (acct *StateAccount) Copy() *StateAccount {
+	var balance *big.Int
+	if acct.Balance != nil {
+		balance = new(big.Int).Set(acct.Balance)
+	}
+	return &StateAccount{
+		Nonce:    acct.Nonce,
+		Balance:  balance,
+		Root:     acct.Root,
+		CodeHash: common.CopyBytes(acct.CodeHash),
+	}
+}
+
+// SlimAccount is a modified version of an Account, where the root is replaced
+// with a byte slice. This format can be used to represent full-consensus format
+// or slim format which replaces the empty root and code hash as nil byte slice.
+type SlimAccount struct {
+	Nonce    uint64
+	Balance  *big.Int
+	Root     []byte // Nil if root equals to types.EmptyRootHash
+	CodeHash []byte // Nil if hash equals to types.EmptyCodeHash
+}
+
+// SlimAccountRLP encodes the state account in 'slim RLP' format.
+func SlimAccountRLP(account StateAccount) []byte {
+	slim := SlimAccount{
+		Nonce:   account.Nonce,
+		Balance: account.Balance,
+	}
+	if account.Root != EmptyRootHash {
+		slim.Root = account.Root[:]
+	}
+	if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) {
+		slim.CodeHash = account.CodeHash
+	}
+	data, err := rlp.EncodeToBytes(slim)
+	if err != nil {
+		panic(err)
+	}
+	return data
+}
+
+// FullAccount decodes the data on the 'slim RLP' format and returns
+// the consensus format account.
+func FullAccount(data []byte) (*StateAccount, error) {
+	var slim SlimAccount
+	if err := rlp.DecodeBytes(data, &slim); err != nil {
+		return nil, err
+	}
+	var account StateAccount
+	account.Nonce, account.Balance = slim.Nonce, slim.Balance
+
+	// Interpret the storage root and code hash in slim format.
+	if len(slim.Root) == 0 {
+		account.Root = EmptyRootHash
+	} else {
+		account.Root = common.BytesToHash(slim.Root)
+	}
+	if len(slim.CodeHash) == 0 {
+		account.CodeHash = EmptyCodeHash[:]
+	} else {
+		account.CodeHash = slim.CodeHash
+	}
+	return &account, nil
+}
+
+// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
+func FullAccountRLP(data []byte) ([]byte, error) { + account, err := FullAccount(data) + if err != nil { + return nil, err + } + return rlp.EncodeToBytes(account) +} diff --git a/core/types/transaction.go b/core/types/transaction.go index b7cb36b60..78a1b9ba6 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -18,7 +18,6 @@ package types import ( "bytes" - "container/heap" "errors" "io" "math/big" @@ -83,9 +82,6 @@ type TxData interface { value() *big.Int nonce() uint64 to() *common.Address - blobGas() uint64 - blobGasFeeCap() *big.Int - blobHashes() []common.Hash rawSignatureValues() (v, r, s *big.Int) setSignatureValues(chainID, v, r, s *big.Int) @@ -97,6 +93,9 @@ type TxData interface { // copy of the computed value, i.e. callers are allowed to mutate the result. // Method implementations can use 'dst' to store the result. effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int + + encode(*bytes.Buffer) error + decode([]byte) error } // EncodeRLP implements rlp.Encoder @@ -117,7 +116,7 @@ func (tx *Transaction) EncodeRLP(w io.Writer) error { // encodeTyped writes the canonical encoding of a typed transaction to w. func (tx *Transaction) encodeTyped(w *bytes.Buffer) error { w.WriteByte(tx.Type()) - return rlp.Encode(w, tx.inner) + return tx.inner.encode(w) } // MarshalBinary returns the canonical encoding of the transaction. @@ -146,15 +145,23 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { tx.setDecoded(&inner, rlp.ListSize(size)) } return err + case kind == rlp.Byte: + return errShortTypedTx default: // It's an EIP-2718 typed TX envelope. - var b []byte - if b, err = s.Bytes(); err != nil { + // First read the tx payload bytes into a temporary buffer. + b, buf, err := getPooledBuffer(size) + if err != nil { return err } + defer encodeBufferPool.Put(buf) + if err := s.ReadBytes(b); err != nil { + return err + } + // Now decode the inner transaction. inner, err := tx.decodeTyped(b) if err == nil { - tx.setDecoded(inner, uint64(len(b))) + tx.setDecoded(inner, size) } return err } @@ -187,22 +194,19 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { if len(b) <= 1 { return nil, errShortTypedTx } + var inner TxData switch b[0] { case AccessListTxType: - var inner AccessListTx - err := rlp.DecodeBytes(b[1:], &inner) - return &inner, err + inner = new(AccessListTx) case DynamicFeeTxType: - var inner DynamicFeeTx - err := rlp.DecodeBytes(b[1:], &inner) - return &inner, err + inner = new(DynamicFeeTx) case BlobTxType: - var inner BlobTx - err := rlp.DecodeBytes(b[1:], &inner) - return &inner, err + inner = new(BlobTx) default: return nil, ErrTxTypeNotSupported } + err := inner.decode(b[1:]) + return inner, err } // setDecoded sets the inner transaction and size after decoding. @@ -289,15 +293,6 @@ func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.g // GasFeeCap returns the fee cap per gas of the transaction. func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } -// BlobGas returns the data gas limit of the transaction for blob transactions, 0 otherwise. -func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() } - -// BlobGasFeeCap returns the data gas fee cap per data gas of the transaction for blob transactions, nil otherwise. -func (tx *Transaction) BlobGasFeeCap() *big.Int { return tx.inner.blobGasFeeCap() } - -// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise. 
-func (tx *Transaction) BlobHashes() []common.Hash { return tx.inner.blobHashes() }
-
 // Value returns the ether amount of the transaction.
 func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
 
@@ -384,14 +379,79 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i
 	return tx.EffectiveGasTipValue(baseFee).Cmp(other)
 }
 
+// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
+func (tx *Transaction) BlobGas() uint64 {
+	if blobtx, ok := tx.inner.(*BlobTx); ok {
+		return blobtx.blobGas()
+	}
+	return 0
+}
+
+// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
+func (tx *Transaction) BlobGasFeeCap() *big.Int {
+	if blobtx, ok := tx.inner.(*BlobTx); ok {
+		return blobtx.BlobFeeCap.ToBig()
+	}
+	return nil
+}
+
+// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
+func (tx *Transaction) BlobHashes() []common.Hash {
+	if blobtx, ok := tx.inner.(*BlobTx); ok {
+		return blobtx.BlobHashes
+	}
+	return nil
+}
+
+// BlobTxSidecar returns the sidecar of a blob transaction, nil otherwise.
+func (tx *Transaction) BlobTxSidecar() *BlobTxSidecar {
+	if blobtx, ok := tx.inner.(*BlobTx); ok {
+		return blobtx.Sidecar
+	}
+	return nil
+}
+
 // BlobGasFeeCapCmp compares the blob fee cap of two transactions.
 func (tx *Transaction) BlobGasFeeCapCmp(other *Transaction) int {
-	return tx.inner.blobGasFeeCap().Cmp(other.inner.blobGasFeeCap())
+	return tx.BlobGasFeeCap().Cmp(other.BlobGasFeeCap())
 }
 
 // BlobGasFeeCapIntCmp compares the blob fee cap of the transaction against the given blob fee cap.
 func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int {
-	return tx.inner.blobGasFeeCap().Cmp(other)
+	return tx.BlobGasFeeCap().Cmp(other)
+}
+
+// WithoutBlobTxSidecar returns a copy of tx with the blob sidecar removed.
+func (tx *Transaction) WithoutBlobTxSidecar() *Transaction {
+	blobtx, ok := tx.inner.(*BlobTx)
+	if !ok {
+		return tx
+	}
+	cpy := &Transaction{
+		inner: blobtx.withoutSidecar(),
+		time:  tx.time,
+	}
+	// Note: tx.size cache not carried over because the sidecar is included in size!
+	if h := tx.hash.Load(); h != nil {
+		cpy.hash.Store(h)
+	}
+	if f := tx.from.Load(); f != nil {
+		cpy.from.Store(f)
+	}
+	return cpy
+}
+
+// SetTime sets the decoding time of a transaction. This is used by tests to set
+// arbitrary times and by persistent transaction pools when loading old txs from
+// disk.
+func (tx *Transaction) SetTime(t time.Time) {
+	tx.time = t
+}
+
+// Time returns the time when the transaction was first seen on the network. It
+// is a heuristic to prefer mining older txs vs new all other things equal.
+func (tx *Transaction) Time() time.Time {
+	return tx.time
 }
 
 // Hash returns the transaction hash.
@@ -416,13 +476,24 @@ func (tx *Transaction) Size() uint64 {
 	if size := tx.size.Load(); size != nil {
 		return size.(uint64)
 	}
+
+	// Cache miss, encode and cache.
+	// Note we rely on the assumption that all tx.inner values are RLP-encoded!
 	c := writeCounter(0)
 	rlp.Encode(&c, &tx.inner)
-
 	size := uint64(c)
-	if tx.Type() != LegacyTxType {
-		size += 1 // type byte
+
+	// For blob transactions, add the size of the blob content and the outer list of the
+	// tx + sidecar encoding.
+	if sc := tx.BlobTxSidecar(); sc != nil {
+		size += rlp.ListSize(sc.encodedSize())
 	}
+
+	// For typed transactions, the encoding also includes the leading type byte.
+ if tx.Type() != LegacyTxType { + size += 1 + } + tx.size.Store(size) return size } @@ -502,123 +573,6 @@ func (s TxByNonce) Len() int { return len(s) } func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() } func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap -type TxWithMinerFee struct { - tx *Transaction - minerFee *big.Int -} - -// NewTxWithMinerFee creates a wrapped transaction, calculating the effective -// miner gasTipCap if a base fee is provided. -// Returns error in case of a negative effective miner gasTipCap. -func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { - minerFee, err := tx.EffectiveGasTip(baseFee) - if err != nil { - return nil, err - } - return &TxWithMinerFee{ - tx: tx, - minerFee: minerFee, - }, nil -} - -// TxByPriceAndTime implements both the sort and the heap interface, making it useful -// for all at once sorting as well as individually adding and removing elements. -type TxByPriceAndTime []*TxWithMinerFee - -func (s TxByPriceAndTime) Len() int { return len(s) } -func (s TxByPriceAndTime) Less(i, j int) bool { - // If the prices are equal, use the time the transaction was first seen for - // deterministic sorting - cmp := s[i].minerFee.Cmp(s[j].minerFee) - if cmp == 0 { - return s[i].tx.time.Before(s[j].tx.time) - } - return cmp > 0 -} -func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s *TxByPriceAndTime) Push(x interface{}) { - *s = append(*s, x.(*TxWithMinerFee)) -} - -func (s *TxByPriceAndTime) Pop() interface{} { - old := *s - n := len(old) - x := old[n-1] - old[n-1] = nil - *s = old[0 : n-1] - return x -} - -// TransactionsByPriceAndNonce represents a set of transactions that can return -// transactions in a profit-maximizing sorted order, while supporting removing -// entire batches of transactions for non-executable accounts. -type TransactionsByPriceAndNonce struct { - txs map[common.Address]Transactions // Per account nonce-sorted list of transactions - heads TxByPriceAndTime // Next transaction for each unique account (price heap) - signer Signer // Signer for the set of transactions - baseFee *big.Int // Current base fee -} - -// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve -// price sorted transactions in a nonce-honouring way. -// -// Note, the input map is reowned so the caller should not interact any more with -// if after providing it to the constructor. -func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { - // Initialize a price and received time based heap with the head transactions - heads := make(TxByPriceAndTime, 0, len(txs)) - for from, accTxs := range txs { - acc, _ := Sender(signer, accTxs[0]) - wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) - // Remove transaction if sender doesn't match from, or if wrapping fails. - if acc != from || err != nil { - delete(txs, from) - continue - } - heads = append(heads, wrapped) - txs[from] = accTxs[1:] - } - heap.Init(&heads) - - // Assemble and return the transaction set - return &TransactionsByPriceAndNonce{ - txs: txs, - heads: heads, - signer: signer, - baseFee: baseFee, - } -} - -// Peek returns the next transaction by price. 
-func (t *TransactionsByPriceAndNonce) Peek() *Transaction { - if len(t.heads) == 0 { - return nil - } - return t.heads[0].tx -} - -// Shift replaces the current best head with the next one from the same account. -func (t *TransactionsByPriceAndNonce) Shift() { - acc, _ := Sender(t.signer, t.heads[0].tx) - if txs, ok := t.txs[acc]; ok && len(txs) > 0 { - if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil { - t.heads[0], t.txs[acc] = wrapped, txs[1:] - heap.Fix(&t.heads, 0) - return - } - } - heap.Pop(&t.heads) -} - -// Pop removes the best transaction, *not* replacing it with the next one from -// the same account. This should be used when a transaction cannot be executed -// and hence all subsequent ones should be discarded from the same account. -func (t *TransactionsByPriceAndNonce) Pop() { - heap.Pop(&t.heads) -} - // copyAddressPtr copies an address. func copyAddressPtr(a *common.Address) *common.Address { if a == nil { diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index 6c6c50d49..e5d71a85d 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -37,7 +37,7 @@ type txJSON struct { GasPrice *hexutil.Big `json:"gasPrice"` MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` - MaxFeePerDataGas *hexutil.Big `json:"maxFeePerDataGas,omitempty"` + MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"` Value *hexutil.Big `json:"value"` Input *hexutil.Bytes `json:"input"` AccessList *AccessList `json:"accessList,omitempty"` @@ -45,11 +45,32 @@ type txJSON struct { V *hexutil.Big `json:"v"` R *hexutil.Big `json:"r"` S *hexutil.Big `json:"s"` + YParity *hexutil.Uint64 `json:"yParity,omitempty"` // Only used for encoding: Hash common.Hash `json:"hash"` } +// yParityValue returns the YParity value from JSON. For backwards-compatibility reasons, +// this can be given in the 'v' field or the 'yParity' field. If both exist, they must match. +func (tx *txJSON) yParityValue() (*big.Int, error) { + if tx.YParity != nil { + val := uint64(*tx.YParity) + if val != 0 && val != 1 { + return nil, errors.New("'yParity' field must be 0 or 1") + } + bigval := new(big.Int).SetUint64(val) + if tx.V != nil && tx.V.ToInt().Cmp(bigval) != 0 { + return nil, errors.New("'v' and 'yParity' fields do not match") + } + return bigval, nil + } + if tx.V != nil { + return tx.V.ToInt(), nil + } + return nil, errors.New("missing 'yParity' or 'v' field in transaction") +} + // MarshalJSON marshals as JSON with a hash. 
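The yParityValue helper above gives typed transactions two interchangeable carriers for the signature parity, and the marshalling code that follows emits both. A self-contained sketch of the round trip (throwaway key, chain ID 1 assumed):

    package main

    import (
        "encoding/json"
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        key, _ := crypto.GenerateKey()
        signer := types.LatestSignerForChainID(big.NewInt(1))
        to := common.HexToAddress("0x0000000000000000000000000000000000000001")
        tx := types.MustSignNewTx(key, signer, &types.DynamicFeeTx{
            ChainID:   big.NewInt(1),
            Nonce:     0,
            To:        &to,
            Gas:       21000,
            GasTipCap: big.NewInt(1),
            GasFeeCap: big.NewInt(2),
            Value:     big.NewInt(0),
        })
        enc, _ := json.Marshal(tx)
        fmt.Println(string(enc)) // carries both "v" and a matching "yParity"

        // Decoding accepts either field; if both are present they must agree.
        var dec types.Transaction
        if err := json.Unmarshal(enc, &dec); err != nil {
            panic(err)
        }
    }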
func (tx *Transaction) MarshalJSON() ([]byte, error) { var enc txJSON @@ -69,6 +90,9 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V) enc.R = (*hexutil.Big)(itx.R) enc.S = (*hexutil.Big)(itx.S) + if tx.Protected() { + enc.ChainID = (*hexutil.Big)(tx.ChainId()) + } case *AccessListTx: enc.ChainID = (*hexutil.Big)(itx.ChainID) @@ -82,6 +106,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V) enc.R = (*hexutil.Big)(itx.R) enc.S = (*hexutil.Big)(itx.S) + yparity := itx.V.Uint64() + enc.YParity = (*hexutil.Uint64)(&yparity) case *DynamicFeeTx: enc.ChainID = (*hexutil.Big)(itx.ChainID) @@ -96,6 +122,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V) enc.R = (*hexutil.Big)(itx.R) enc.S = (*hexutil.Big)(itx.S) + yparity := itx.V.Uint64() + enc.YParity = (*hexutil.Uint64)(&yparity) case *BlobTx: enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig()) @@ -103,7 +131,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.Gas = (*hexutil.Uint64)(&itx.Gas) enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap.ToBig()) enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap.ToBig()) - enc.MaxFeePerDataGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig()) + enc.MaxFeePerBlobGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig()) enc.Value = (*hexutil.Big)(itx.Value.ToBig()) enc.Input = (*hexutil.Bytes)(&itx.Data) enc.AccessList = &itx.AccessList @@ -112,6 +140,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V.ToBig()) enc.R = (*hexutil.Big)(itx.R.ToBig()) enc.S = (*hexutil.Big)(itx.S.ToBig()) + yparity := itx.V.Uint64() + enc.YParity = (*hexutil.Uint64)(&yparity) } return json.Marshal(&enc) } @@ -119,7 +149,8 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. 
func (tx *Transaction) UnmarshalJSON(input []byte) error { var dec txJSON - if err := json.Unmarshal(input, &dec); err != nil { + err := json.Unmarshal(input, &dec) + if err != nil { return err } @@ -152,20 +183,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'input' in transaction") } itx.Data = *dec.Input - if dec.V == nil { - return errors.New("missing required field 'v' in transaction") - } - itx.V = (*big.Int)(dec.V) + + // signature R if dec.R == nil { return errors.New("missing required field 'r' in transaction") } itx.R = (*big.Int)(dec.R) + // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } itx.S = (*big.Int)(dec.S) - withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 - if withSignature { + // signature V + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } + itx.V = (*big.Int)(dec.V) + if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { if err := sanityCheckSignature(itx.V, itx.R, itx.S, true); err != nil { return err } @@ -201,23 +235,26 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'input' in transaction") } itx.Data = *dec.Input - if dec.V == nil { - return errors.New("missing required field 'v' in transaction") - } if dec.AccessList != nil { itx.AccessList = *dec.AccessList } - itx.V = (*big.Int)(dec.V) + + // signature R if dec.R == nil { return errors.New("missing required field 'r' in transaction") } itx.R = (*big.Int)(dec.R) + // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } itx.S = (*big.Int)(dec.S) - withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 - if withSignature { + // signature V + itx.V, err = dec.yParityValue() + if err != nil { + return err + } + if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil { return err } @@ -263,17 +300,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { if dec.AccessList != nil { itx.AccessList = *dec.AccessList } - itx.V = (*big.Int)(dec.V) + + // signature R if dec.R == nil { return errors.New("missing required field 'r' in transaction") } itx.R = (*big.Int)(dec.R) + // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } itx.S = (*big.Int)(dec.S) - withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 - if withSignature { + // signature V + itx.V, err = dec.yParityValue() + if err != nil { + return err + } + if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil { return err } @@ -290,9 +333,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'nonce' in transaction") } itx.Nonce = uint64(*dec.Nonce) - if dec.To != nil { - itx.To = dec.To + if dec.To == nil { + return errors.New("missing required field 'to' in transaction") } + itx.To = *dec.To if dec.Gas == nil { return errors.New("missing required field 'gas' for txdata") } @@ -305,10 +349,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'maxFeePerGas' for txdata") } itx.GasFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerGas)) - if dec.MaxFeePerDataGas == nil { - return errors.New("missing required field 
'maxFeePerDataGas' for txdata") + if dec.MaxFeePerBlobGas == nil { + return errors.New("missing required field 'maxFeePerBlobGas' for txdata") } - itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerDataGas)) + itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerBlobGas)) if dec.Value == nil { return errors.New("missing required field 'value' in transaction") } @@ -327,18 +371,35 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'blobVersionedHashes' in transaction") } itx.BlobHashes = dec.BlobVersionedHashes - itx.V = uint256.MustFromBig((*big.Int)(dec.V)) + + // signature R + var overflow bool if dec.R == nil { return errors.New("missing required field 'r' in transaction") } - itx.R = uint256.MustFromBig((*big.Int)(dec.R)) + itx.R, overflow = uint256.FromBig((*big.Int)(dec.R)) + if overflow { + return errors.New("'r' value overflows uint256") + } + // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } - itx.S = uint256.MustFromBig((*big.Int)(dec.S)) - withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 - if withSignature { - if err := sanityCheckSignature(itx.V.ToBig(), itx.R.ToBig(), itx.S.ToBig(), false); err != nil { + itx.S, overflow = uint256.FromBig((*big.Int)(dec.S)) + if overflow { + return errors.New("'s' value overflows uint256") + } + // signature V + vbig, err := dec.yParityValue() + if err != nil { + return err + } + itx.V, overflow = uint256.FromBig(vbig) + if overflow { + return errors.New("'v' value overflows uint256") + } + if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { + if err := sanityCheckSignature(vbig, itx.R.ToBig(), itx.S.ToBig(), false); err != nil { return err } } diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 59dd2e76e..cd57effcb 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -57,9 +57,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint } // LatestSigner returns the 'most permissive' Signer available for the given chain -// configuration. Specifically, this enables support of EIP-155 replay protection and -// EIP-2930 access list transactions when their respective forks are scheduled to occur at -// any block number in the chain config. +// configuration. Specifically, this enables support of all types of transacrions +// when their respective forks are scheduled to occur at any block number (or time) +// in the chain config. // // Use this in transaction-handling code where the current block number is unknown. If you // have the current block number available, use MakeSigner instead. @@ -331,11 +331,7 @@ func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { V, R, S := tx.RawSignatureValues() switch tx.Type() { case LegacyTxType: - if !tx.Protected() { - return HomesteadSigner{}.Sender(tx) - } - V = new(big.Int).Sub(V, s.chainIdMul) - V.Sub(V, big8) + return s.EIP155Signer.Sender(tx) case AccessListTxType: // AL txs are defined to use 0 and 1 as their recovery // id, add 27 to become equivalent to unprotected Homestead signatures. 
@@ -372,15 +368,7 @@ func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *bi func (s eip2930Signer) Hash(tx *Transaction) common.Hash { switch tx.Type() { case LegacyTxType: - return rlpHash([]interface{}{ - tx.Nonce(), - tx.GasPrice(), - tx.Gas(), - tx.To(), - tx.Value(), - tx.Data(), - s.chainId, uint(0), uint(0), - }) + return s.EIP155Signer.Hash(tx) case AccessListTxType: return prefixedRlpHash( tx.Type(), diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 4b96c6b91..25ced0841 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -23,10 +23,8 @@ import ( "errors" "fmt" "math/big" - "math/rand" "reflect" "testing" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -216,7 +214,7 @@ func TestEIP2718TransactionEncode(t *testing.T) { func decodeTx(data []byte) (*Transaction, error) { var tx Transaction - t, err := &tx, rlp.Decode(bytes.NewReader(data), &tx) + t, err := &tx, rlp.DecodeBytes(data, &tx) return t, err } @@ -259,152 +257,6 @@ func TestRecipientNormal(t *testing.T) { } } -func TestTransactionPriceNonceSortLegacy(t *testing.T) { - testTransactionPriceNonceSort(t, nil) -} - -func TestTransactionPriceNonceSort1559(t *testing.T) { - testTransactionPriceNonceSort(t, big.NewInt(0)) - testTransactionPriceNonceSort(t, big.NewInt(5)) - testTransactionPriceNonceSort(t, big.NewInt(50)) -} - -// Tests that transactions can be correctly sorted according to their price in -// decreasing order, but at the same time with increasing nonces when issued by -// the same account. -func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { - // Generate a batch of accounts to start with - keys := make([]*ecdsa.PrivateKey, 25) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - signer := LatestSignerForChainID(common.Big1) - - // Generate a batch of transactions with overlapping values, but shifted nonces - groups := map[common.Address]Transactions{} - expectedCount := 0 - for start, key := range keys { - addr := crypto.PubkeyToAddress(key.PublicKey) - count := 25 - for i := 0; i < 25; i++ { - var tx *Transaction - gasFeeCap := rand.Intn(50) - if baseFee == nil { - tx = NewTx(&LegacyTx{ - Nonce: uint64(start + i), - To: &common.Address{}, - Value: big.NewInt(100), - Gas: 100, - GasPrice: big.NewInt(int64(gasFeeCap)), - Data: nil, - }) - } else { - tx = NewTx(&DynamicFeeTx{ - Nonce: uint64(start + i), - To: &common.Address{}, - Value: big.NewInt(100), - Gas: 100, - GasFeeCap: big.NewInt(int64(gasFeeCap)), - GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), - Data: nil, - }) - if count == 25 && int64(gasFeeCap) < baseFee.Int64() { - count = i - } - } - tx, err := SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to sign tx: %s", err) - } - groups[addr] = append(groups[addr], tx) - } - expectedCount += count - } - // Sort the transactions and cross check the nonce ordering - txset := NewTransactionsByPriceAndNonce(signer, groups, baseFee) - - txs := Transactions{} - for tx := txset.Peek(); tx != nil; tx = txset.Peek() { - txs = append(txs, tx) - txset.Shift() - } - if len(txs) != expectedCount { - t.Errorf("expected %d transactions, found %d", expectedCount, len(txs)) - } - for i, txi := range txs { - fromi, _ := Sender(signer, txi) - - // Make sure the nonce order is valid - for j, txj := range txs[i+1:] { - fromj, _ := Sender(signer, txj) - if fromi == fromj && txi.Nonce() > txj.Nonce() { - t.Errorf("invalid nonce ordering: 
tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) - } - } - // If the next tx has different from account, the price must be lower than the current one - if i+1 < len(txs) { - next := txs[i+1] - fromNext, _ := Sender(signer, next) - tip, err := txi.EffectiveGasTip(baseFee) - nextTip, nextErr := next.EffectiveGasTip(baseFee) - if err != nil || nextErr != nil { - t.Errorf("error calculating effective tip") - } - if fromi != fromNext && tip.Cmp(nextTip) < 0 { - t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) - } - } - } -} - -// Tests that if multiple transactions have the same price, the ones seen earlier -// are prioritized to avoid network spam attacks aiming for a specific ordering. -func TestTransactionTimeSort(t *testing.T) { - // Generate a batch of accounts to start with - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - signer := HomesteadSigner{} - - // Generate a batch of transactions with overlapping prices, but different creation times - groups := map[common.Address]Transactions{} - for start, key := range keys { - addr := crypto.PubkeyToAddress(key.PublicKey) - - tx, _ := SignTx(NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key) - tx.time = time.Unix(0, int64(len(keys)-start)) - - groups[addr] = append(groups[addr], tx) - } - // Sort the transactions and cross check the nonce ordering - txset := NewTransactionsByPriceAndNonce(signer, groups, nil) - - txs := Transactions{} - for tx := txset.Peek(); tx != nil; tx = txset.Peek() { - txs = append(txs, tx) - txset.Shift() - } - if len(txs) != len(keys) { - t.Errorf("expected %d transactions, found %d", len(keys), len(txs)) - } - for i, txi := range txs { - fromi, _ := Sender(signer, txi) - if i+1 < len(txs) { - next := txs[i+1] - fromNext, _ := Sender(signer, next) - - if txi.GasPrice().Cmp(next.GasPrice()) < 0 { - t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) - } - // Make sure time order is ascending if the txs have the same gas price - if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.time.After(next.time) { - t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time) - } - } - } -} - // TestTransactionCoding tests serializing/de-serializing to/from rlp and JSON. func TestTransactionCoding(t *testing.T) { key, err := crypto.GenerateKey() @@ -526,7 +378,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } if orig.AccessList() != nil { if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) { - return fmt.Errorf("access list wrong!") + return errors.New("access list wrong!") } } return nil diff --git a/core/types/tx_access_list.go b/core/types/tx_access_list.go index 7ce9da73e..730a77b75 100644 --- a/core/types/tx_access_list.go +++ b/core/types/tx_access_list.go @@ -17,9 +17,11 @@ package types import ( + "bytes" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" ) //go:generate go run github.com/fjl/gencodec -type AccessTuple -out gen_access_tuple.go @@ -94,20 +96,17 @@ func (tx *AccessListTx) copy() TxData { } // accessors for innerTx. 
-func (tx *AccessListTx) txType() byte { return AccessListTxType } -func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } -func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } -func (tx *AccessListTx) data() []byte { return tx.Data } -func (tx *AccessListTx) gas() uint64 { return tx.Gas } -func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) value() *big.Int { return tx.Value } -func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } -func (tx *AccessListTx) to() *common.Address { return tx.To } -func (tx *AccessListTx) blobGas() uint64 { return 0 } -func (tx *AccessListTx) blobGasFeeCap() *big.Int { return nil } -func (tx *AccessListTx) blobHashes() []common.Hash { return nil } +func (tx *AccessListTx) txType() byte { return AccessListTxType } +func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } +func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } +func (tx *AccessListTx) data() []byte { return tx.Data } +func (tx *AccessListTx) gas() uint64 { return tx.Gas } +func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) value() *big.Int { return tx.Value } +func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } +func (tx *AccessListTx) to() *common.Address { return tx.To } func (tx *AccessListTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { return dst.Set(tx.GasPrice) @@ -120,3 +119,11 @@ func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { func (tx *AccessListTx) setSignatureValues(chainID, v, r, s *big.Int) { tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s } + +func (tx *AccessListTx) encode(b *bytes.Buffer) error { + return rlp.Encode(b, tx) +} + +func (tx *AccessListTx) decode(input []byte) error { + return rlp.DecodeBytes(input, tx) +} diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go index 58065d001..da4a9b72f 100644 --- a/core/types/tx_blob.go +++ b/core/types/tx_blob.go @@ -17,10 +17,14 @@ package types import ( + "bytes" + "crypto/sha256" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" ) @@ -31,24 +35,68 @@ type BlobTx struct { GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas GasFeeCap *uint256.Int // a.k.a. maxFeePerGas Gas uint64 - To *common.Address `rlp:"nil"` // nil means contract creation + To common.Address Value *uint256.Int Data []byte AccessList AccessList - BlobFeeCap *uint256.Int // a.k.a. maxFeePerDataGas + BlobFeeCap *uint256.Int // a.k.a. maxFeePerBlobGas BlobHashes []common.Hash + // A blob transaction can optionally contain blobs. This field must be set when BlobTx + // is used to create a transaction for signing. + Sidecar *BlobTxSidecar `rlp:"-"` + // Signature values V *uint256.Int `json:"v" gencodec:"required"` R *uint256.Int `json:"r" gencodec:"required"` S *uint256.Int `json:"s" gencodec:"required"` } +// BlobTxSidecar contains the blobs of a blob transaction.
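One consequence of the To field losing its pointer type is worth spelling out: blob transactions always have a recipient, so the "nil means contract creation" convention of the other transaction types cannot even be expressed for a BlobTx. A two-line illustration:

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
    )

    var (
        _ = types.BlobTx{To: common.Address{0x01}} // recipient required by construction
        _ = types.DynamicFeeTx{To: nil}            // nil still means contract creation
    )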
+type BlobTxSidecar struct { + Blobs []kzg4844.Blob // Blobs needed by the blob pool + Commitments []kzg4844.Commitment // Commitments needed by the blob pool + Proofs []kzg4844.Proof // Proofs needed by the blob pool +} + +// BlobHashes computes the blob hashes of the given blobs. +func (sc *BlobTxSidecar) BlobHashes() []common.Hash { + h := make([]common.Hash, len(sc.Commitments)) + for i := range sc.Blobs { + h[i] = blobHash(&sc.Commitments[i]) + } + return h +} + +// encodedSize computes the RLP size of the sidecar elements. This does NOT return the +// encoded size of the BlobTxSidecar, it's just a helper for tx.Size(). +func (sc *BlobTxSidecar) encodedSize() uint64 { + var blobs, commitments, proofs uint64 + for i := range sc.Blobs { + blobs += rlp.BytesSize(sc.Blobs[i][:]) + } + for i := range sc.Commitments { + commitments += rlp.BytesSize(sc.Commitments[i][:]) + } + for i := range sc.Proofs { + proofs += rlp.BytesSize(sc.Proofs[i][:]) + } + return rlp.ListSize(blobs) + rlp.ListSize(commitments) + rlp.ListSize(proofs) +} + +// blobTxWithBlobs is used for encoding of transactions when blobs are present. +type blobTxWithBlobs struct { + BlobTx *BlobTx + Blobs []kzg4844.Blob + Commitments []kzg4844.Commitment + Proofs []kzg4844.Proof +} + // copy creates a deep copy of the transaction data and initializes all fields. func (tx *BlobTx) copy() TxData { cpy := &BlobTx{ Nonce: tx.Nonce, - To: copyAddressPtr(tx.To), + To: tx.To, Data: common.CopyBytes(tx.Data), Gas: tx.Gas, // These are copied below. @@ -90,24 +138,29 @@ func (tx *BlobTx) copy() TxData { if tx.S != nil { cpy.S.Set(tx.S) } + if tx.Sidecar != nil { + cpy.Sidecar = &BlobTxSidecar{ + Blobs: append([]kzg4844.Blob(nil), tx.Sidecar.Blobs...), + Commitments: append([]kzg4844.Commitment(nil), tx.Sidecar.Commitments...), + Proofs: append([]kzg4844.Proof(nil), tx.Sidecar.Proofs...), + } + } return cpy } // accessors for innerTx. 
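Putting the struct and sidecar together, building a signable one-blob transaction looks roughly like this (a sketch mirroring the createEmptyBlobTx test helper later in this diff; key, chain ID and fee values are placeholders):

    package main

    import (
        "crypto/ecdsa"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/crypto/kzg4844"
        "github.com/holiman/uint256"
    )

    // newBlobTx signs a one-blob transaction. The sidecar supplies the
    // versioned hashes and must be attached before signing if the blobs
    // are to travel with the transaction.
    func newBlobTx(key *ecdsa.PrivateKey, blob kzg4844.Blob) (*types.Transaction, error) {
        commit, err := kzg4844.BlobToCommitment(blob)
        if err != nil {
            return nil, err
        }
        proof, err := kzg4844.ComputeBlobProof(blob, commit)
        if err != nil {
            return nil, err
        }
        sidecar := &types.BlobTxSidecar{
            Blobs:       []kzg4844.Blob{blob},
            Commitments: []kzg4844.Commitment{commit},
            Proofs:      []kzg4844.Proof{proof},
        }
        inner := &types.BlobTx{
            ChainID:    uint256.NewInt(1),
            Nonce:      0,
            GasTipCap:  uint256.NewInt(1),
            GasFeeCap:  uint256.NewInt(2),
            Gas:        21000,
            To:         common.Address{},
            Value:      uint256.NewInt(0),
            BlobFeeCap: uint256.NewInt(1),
            BlobHashes: sidecar.BlobHashes(),
            Sidecar:    sidecar,
        }
        signer := types.NewCancunSigner(inner.ChainID.ToBig())
        return types.SignNewTx(key, signer, inner)
    }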
-func (tx *BlobTx) txType() byte { return BlobTxType } -func (tx *BlobTx) chainID() *big.Int { return tx.ChainID.ToBig() } -func (tx *BlobTx) accessList() AccessList { return tx.AccessList } -func (tx *BlobTx) data() []byte { return tx.Data } -func (tx *BlobTx) gas() uint64 { return tx.Gas } -func (tx *BlobTx) gasFeeCap() *big.Int { return tx.GasFeeCap.ToBig() } -func (tx *BlobTx) gasTipCap() *big.Int { return tx.GasTipCap.ToBig() } -func (tx *BlobTx) gasPrice() *big.Int { return tx.GasFeeCap.ToBig() } -func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() } -func (tx *BlobTx) nonce() uint64 { return tx.Nonce } -func (tx *BlobTx) to() *common.Address { return tx.To } -func (tx *BlobTx) blobGas() uint64 { return params.BlobTxDataGasPerBlob * uint64(len(tx.BlobHashes)) } -func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() } -func (tx *BlobTx) blobHashes() []common.Hash { return tx.BlobHashes } +func (tx *BlobTx) txType() byte { return BlobTxType } +func (tx *BlobTx) chainID() *big.Int { return tx.ChainID.ToBig() } +func (tx *BlobTx) accessList() AccessList { return tx.AccessList } +func (tx *BlobTx) data() []byte { return tx.Data } +func (tx *BlobTx) gas() uint64 { return tx.Gas } +func (tx *BlobTx) gasFeeCap() *big.Int { return tx.GasFeeCap.ToBig() } +func (tx *BlobTx) gasTipCap() *big.Int { return tx.GasTipCap.ToBig() } +func (tx *BlobTx) gasPrice() *big.Int { return tx.GasFeeCap.ToBig() } +func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() } +func (tx *BlobTx) nonce() uint64 { return tx.Nonce } +func (tx *BlobTx) to() *common.Address { tmp := tx.To; return &tmp } +func (tx *BlobTx) blobGas() uint64 { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) } func (tx *BlobTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { if baseFee == nil { @@ -130,3 +183,64 @@ func (tx *BlobTx) setSignatureValues(chainID, v, r, s *big.Int) { tx.R.SetFromBig(r) tx.S.SetFromBig(s) } + +func (tx *BlobTx) withoutSidecar() *BlobTx { + cpy := *tx + cpy.Sidecar = nil + return &cpy +} + +func (tx *BlobTx) encode(b *bytes.Buffer) error { + if tx.Sidecar == nil { + return rlp.Encode(b, tx) + } + inner := &blobTxWithBlobs{ + BlobTx: tx, + Blobs: tx.Sidecar.Blobs, + Commitments: tx.Sidecar.Commitments, + Proofs: tx.Sidecar.Proofs, + } + return rlp.Encode(b, inner) +} + +func (tx *BlobTx) decode(input []byte) error { + // Here we need to support two formats: the network protocol encoding of the tx (with + // blobs) or the canonical encoding without blobs. + // + // The two encodings can be distinguished by checking whether the first element of the + // input list is itself a list. + + outerList, _, err := rlp.SplitList(input) + if err != nil { + return err + } + firstElemKind, _, _, err := rlp.Split(outerList) + if err != nil { + return err + } + + if firstElemKind != rlp.List { + return rlp.DecodeBytes(input, tx) + } + // It's a tx with blobs. 
+ var inner blobTxWithBlobs + if err := rlp.DecodeBytes(input, &inner); err != nil { + return err + } + *tx = *inner.BlobTx + tx.Sidecar = &BlobTxSidecar{ + Blobs: inner.Blobs, + Commitments: inner.Commitments, + Proofs: inner.Proofs, + } + return nil +} + +func blobHash(commit *kzg4844.Commitment) common.Hash { + hasher := sha256.New() + hasher.Write(commit[:]) + var vhash common.Hash + hasher.Sum(vhash[:0]) + vhash[0] = params.BlobTxHashVersion + return vhash +} diff --git a/core/types/tx_blob_test.go b/core/types/tx_blob_test.go new file mode 100644 index 000000000..44ac48cc6 --- /dev/null +++ b/core/types/tx_blob_test.go @@ -0,0 +1,90 @@ +package types + +import ( + "crypto/ecdsa" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/holiman/uint256" +) + +// This test verifies that tx.Hash() is not affected by presence of a BlobTxSidecar. +func TestBlobTxHashing(t *testing.T) { + key, _ := crypto.GenerateKey() + withBlobs := createEmptyBlobTx(key, true) + withBlobsStripped := withBlobs.WithoutBlobTxSidecar() + withoutBlobs := createEmptyBlobTx(key, false) + + hash := withBlobs.Hash() + t.Log("tx hash:", hash) + + if h := withBlobsStripped.Hash(); h != hash { + t.Fatal("wrong tx hash after WithoutBlobTxSidecar:", h) + } + if h := withoutBlobs.Hash(); h != hash { + t.Fatal("wrong tx hash on tx created without sidecar:", h) + } +} + +// This test verifies that tx.Size() takes BlobTxSidecar into account. +func TestBlobTxSize(t *testing.T) { + key, _ := crypto.GenerateKey() + withBlobs := createEmptyBlobTx(key, true) + withBlobsStripped := withBlobs.WithoutBlobTxSidecar() + withoutBlobs := createEmptyBlobTx(key, false) + + withBlobsEnc, _ := withBlobs.MarshalBinary() + withoutBlobsEnc, _ := withoutBlobs.MarshalBinary() + + size := withBlobs.Size() + t.Log("size with blobs:", size) + + sizeNoBlobs := withoutBlobs.Size() + t.Log("size without blobs:", sizeNoBlobs) + + if size != uint64(len(withBlobsEnc)) { + t.Error("wrong size with blobs:", size, "encoded length:", len(withBlobsEnc)) + } + if sizeNoBlobs != uint64(len(withoutBlobsEnc)) { + t.Error("wrong size without blobs:", sizeNoBlobs, "encoded length:", len(withoutBlobsEnc)) + } + if sizeNoBlobs >= size { + t.Error("size without blobs >= size with blobs") + } + if sz := withBlobsStripped.Size(); sz != sizeNoBlobs { + t.Fatal("wrong size on tx after WithoutBlobTxSidecar:", sz) + } +} + +var ( + emptyBlob = kzg4844.Blob{} + emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob) + emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit) +) + +func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction { + sidecar := &BlobTxSidecar{ + Blobs: []kzg4844.Blob{emptyBlob}, + Commitments: []kzg4844.Commitment{emptyBlobCommit}, + Proofs: []kzg4844.Proof{emptyBlobProof}, + } + blobtx := &BlobTx{ + ChainID: uint256.NewInt(1), + Nonce: 5, + GasTipCap: uint256.NewInt(22), + GasFeeCap: uint256.NewInt(5), + Gas: 25000, + To: common.Address{0x03, 0x04, 0x05}, + Value: uint256.NewInt(99), + Data: make([]byte, 50), + BlobFeeCap: uint256.NewInt(15), + BlobHashes: sidecar.BlobHashes(), + } + if withSidecar { + blobtx.Sidecar = sidecar + } + signer := NewCancunSigner(blobtx.ChainID.ToBig()) + return MustSignNewTx(key, signer, blobtx) +} diff --git a/core/types/tx_dynamic_fee.go b/core/types/tx_dynamic_fee.go index 47b870abb..8b5b514fd 100644 --- a/core/types/tx_dynamic_fee.go +++ b/core/types/tx_dynamic_fee.go @@ 
-17,9 +17,11 @@ package types import ( + "bytes" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" ) // DynamicFeeTx represents an EIP-1559 transaction. @@ -83,20 +85,17 @@ func (tx *DynamicFeeTx) copy() TxData { } // accessors for innerTx. -func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType } -func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID } -func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } -func (tx *DynamicFeeTx) data() []byte { return tx.Data } -func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } -func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } -func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } -func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } -func (tx *DynamicFeeTx) to() *common.Address { return tx.To } -func (tx *DynamicFeeTx) blobGas() uint64 { return 0 } -func (tx *DynamicFeeTx) blobGasFeeCap() *big.Int { return nil } -func (tx *DynamicFeeTx) blobHashes() []common.Hash { return nil } +func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType } +func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID } +func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } +func (tx *DynamicFeeTx) data() []byte { return tx.Data } +func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } +func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } +func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } +func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } +func (tx *DynamicFeeTx) to() *common.Address { return tx.To } func (tx *DynamicFeeTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { if baseFee == nil { @@ -116,3 +115,11 @@ func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { func (tx *DynamicFeeTx) setSignatureValues(chainID, v, r, s *big.Int) { tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s } + +func (tx *DynamicFeeTx) encode(b *bytes.Buffer) error { + return rlp.Encode(b, tx) +} + +func (tx *DynamicFeeTx) decode(input []byte) error { + return rlp.DecodeBytes(input, tx) +} diff --git a/core/types/tx_legacy.go b/core/types/tx_legacy.go index 902e70cf9..71025b78f 100644 --- a/core/types/tx_legacy.go +++ b/core/types/tx_legacy.go @@ -17,6 +17,7 @@ package types import ( + "bytes" "math/big" "github.com/ethereum/go-ethereum/common" @@ -91,20 +92,17 @@ func (tx *LegacyTx) copy() TxData { } // accessors for innerTx. 
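Each payload type now owns its typed wire format through unexported encode/decode hooks; legacy transactions, which are plain RLP rather than opaque typed payloads, panic there and keep their dedicated path (visible just below). Externally the binary interface is unchanged; a round-trip sketch:

    package main

    import "github.com/ethereum/go-ethereum/core/types"

    // roundtrip re-encodes a transaction through its canonical binary form.
    // For a blob tx carrying a sidecar this is the tx+blobs wrapper; the
    // decoder distinguishes the two blob encodings by shape.
    func roundtrip(tx *types.Transaction) (*types.Transaction, error) {
        enc, err := tx.MarshalBinary()
        if err != nil {
            return nil, err
        }
        out := new(types.Transaction)
        if err := out.UnmarshalBinary(enc); err != nil {
            return nil, err
        }
        return out, nil
    }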
-func (tx *LegacyTx) txType() byte { return LegacyTxType } -func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } -func (tx *LegacyTx) accessList() AccessList { return nil } -func (tx *LegacyTx) data() []byte { return tx.Data } -func (tx *LegacyTx) gas() uint64 { return tx.Gas } -func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) value() *big.Int { return tx.Value } -func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } -func (tx *LegacyTx) to() *common.Address { return tx.To } -func (tx *LegacyTx) blobGas() uint64 { return 0 } -func (tx *LegacyTx) blobGasFeeCap() *big.Int { return nil } -func (tx *LegacyTx) blobHashes() []common.Hash { return nil } +func (tx *LegacyTx) txType() byte { return LegacyTxType } +func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } +func (tx *LegacyTx) accessList() AccessList { return nil } +func (tx *LegacyTx) data() []byte { return tx.Data } +func (tx *LegacyTx) gas() uint64 { return tx.Gas } +func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) value() *big.Int { return tx.Value } +func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } +func (tx *LegacyTx) to() *common.Address { return tx.To } func (tx *LegacyTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { return dst.Set(tx.GasPrice) @@ -117,3 +115,11 @@ func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { func (tx *LegacyTx) setSignatureValues(chainID, v, r, s *big.Int) { tx.V, tx.R, tx.S = v, r, s } + +func (tx *LegacyTx) encode(*bytes.Buffer) error { + panic("encode called on LegacyTx") +} + +func (tx *LegacyTx) decode([]byte) error { + panic("decode called on LegacyTx") +} diff --git a/core/vm/contracts.go b/core/vm/contracts.go index aa4a3f13d..574bb9bef 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -20,6 +20,7 @@ import ( "crypto/sha256" "encoding/binary" "errors" + "fmt" "math/big" "github.com/ethereum/go-ethereum/common" @@ -28,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/params" "golang.org/x/crypto/ripemd160" ) @@ -90,6 +92,21 @@ var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{9}): &blake2F{}, } +// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum +// contracts used in the Cancun release. +var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{1}): &ecrecover{}, + common.BytesToAddress([]byte{2}): &sha256hash{}, + common.BytesToAddress([]byte{3}): &ripemd160hash{}, + common.BytesToAddress([]byte{4}): &dataCopy{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true}, + common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, + common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, + common.BytesToAddress([]byte{9}): &blake2F{}, + common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, +} + // PrecompiledContractsBLS contains the set of pre-compiled Ethereum // contracts specified in EIP-2537.
These are exported for testing purposes. var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{ @@ -105,6 +122,7 @@ var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{ } var ( + PrecompiledAddressesCancun []common.Address PrecompiledAddressesBerlin []common.Address PrecompiledAddressesIstanbul []common.Address PrecompiledAddressesByzantium []common.Address @@ -124,11 +142,16 @@ func init() { for k := range PrecompiledContractsBerlin { PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k) } + for k := range PrecompiledContractsCancun { + PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k) + } } // ActivePrecompiles returns the precompiles enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { + case rules.IsCancun: + return PrecompiledAddressesCancun case rules.IsBerlin: return PrecompiledAddressesBerlin case rules.IsIstanbul: @@ -1048,3 +1071,67 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Encode the G2 point to 256 bytes return g.EncodePoint(r), nil } + +// kzgPointEvaluation implements the EIP-4844 point evaluation precompile. +type kzgPointEvaluation struct{} + +// RequiredGas estimates the gas required for running the point evaluation precompile. +func (b *kzgPointEvaluation) RequiredGas(input []byte) uint64 { + return params.BlobTxPointEvaluationPrecompileGas +} + +const ( + blobVerifyInputLength = 192 // Exact input length required by the point evaluation precompile. + blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile. + blobPrecompileReturnValue = "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001" +) + +var ( + errBlobVerifyInvalidInputLength = errors.New("invalid input length") + errBlobVerifyMismatchedVersion = errors.New("mismatched versioned hash") + errBlobVerifyKZGProof = errors.New("error verifying kzg proof") +) + +// Run executes the point evaluation precompile.
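Run, just below, insists on exactly 192 bytes of input. A sketch of how a caller would pack one (values assumed to come from a kzg4844 prover):

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/crypto/kzg4844"
    )

    // pointEvalInput packs the precompile input:
    //   [0:32]    versioned hash of the commitment
    //   [32:64]   evaluation point z
    //   [64:96]   claimed value y
    //   [96:144]  KZG commitment
    //   [144:192] KZG proof
    func pointEvalInput(vh common.Hash, z kzg4844.Point, y kzg4844.Claim, c kzg4844.Commitment, p kzg4844.Proof) []byte {
        in := make([]byte, 0, 192)
        in = append(in, vh[:]...)
        in = append(in, z[:]...)
        in = append(in, y[:]...)
        in = append(in, c[:]...)
        in = append(in, p[:]...)
        return in
    }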
+func (b *kzgPointEvaluation) Run(input []byte) ([]byte, error) { + if len(input) != blobVerifyInputLength { + return nil, errBlobVerifyInvalidInputLength + } + // versioned hash: first 32 bytes + var versionedHash common.Hash + copy(versionedHash[:], input[:]) + + var ( + point kzg4844.Point + claim kzg4844.Claim + ) + // Evaluation point: next 32 bytes + copy(point[:], input[32:]) + // Expected output: next 32 bytes + copy(claim[:], input[64:]) + + // input kzg point: next 48 bytes + var commitment kzg4844.Commitment + copy(commitment[:], input[96:]) + if kZGToVersionedHash(commitment) != versionedHash { + return nil, errBlobVerifyMismatchedVersion + } + + // Proof: next 48 bytes + var proof kzg4844.Proof + copy(proof[:], input[144:]) + + if err := kzg4844.VerifyProof(commitment, point, claim, proof); err != nil { + return nil, fmt.Errorf("%w: %v", errBlobVerifyKZGProof, err) + } + + return common.Hex2Bytes(blobPrecompileReturnValue), nil +} + +// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844 +func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { + h := sha256.Sum256(kzg[:]) + h[0] = blobCommitmentVersionKZG + + return h +} diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index b22d999e6..f40e2c8f9 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -56,15 +56,17 @@ var allPrecompiles = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, common.BytesToAddress([]byte{9}): &blake2F{}, - common.BytesToAddress([]byte{10}): &bls12381G1Add{}, - common.BytesToAddress([]byte{11}): &bls12381G1Mul{}, - common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{}, - common.BytesToAddress([]byte{13}): &bls12381G2Add{}, - common.BytesToAddress([]byte{14}): &bls12381G2Mul{}, - common.BytesToAddress([]byte{15}): &bls12381G2MultiExp{}, - common.BytesToAddress([]byte{16}): &bls12381Pairing{}, - common.BytesToAddress([]byte{17}): &bls12381MapG1{}, - common.BytesToAddress([]byte{18}): &bls12381MapG2{}, + common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, + + common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{}, + common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1Mul{}, + common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G1MultiExp{}, + common.BytesToAddress([]byte{0x0f, 0x0d}): &bls12381G2Add{}, + common.BytesToAddress([]byte{0x0f, 0x0e}): &bls12381G2Mul{}, + common.BytesToAddress([]byte{0x0f, 0x0f}): &bls12381G2MultiExp{}, + common.BytesToAddress([]byte{0x0f, 0x10}): &bls12381Pairing{}, + common.BytesToAddress([]byte{0x0f, 0x11}): &bls12381MapG1{}, + common.BytesToAddress([]byte{0x0f, 0x12}): &bls12381MapG2{}, } // EIP-152 test vectors @@ -302,36 +304,38 @@ func benchJson(name, addr string, b *testing.B) { } } -func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "0a", t) } -func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "0b", t) } -func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "0c", t) } -func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "0d", t) } -func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "0e", t) } -func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "0f", t) } -func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "10", t) } -func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "11", t) } -func 
TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "12", t) } +func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "f0a", t) } +func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "f0b", t) } +func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "f0c", t) } +func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "f0d", t) } +func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "f0e", t) } +func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "f0f", t) } +func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "f10", t) } +func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "f11", t) } +func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "f12", t) } -func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "0a", b) } -func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "0b", b) } -func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "0c", b) } -func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "0d", b) } -func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "0e", b) } -func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "0f", b) } -func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "10", b) } -func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "11", b) } -func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "12", b) } +func TestPrecompiledPointEvaluation(t *testing.T) { testJson("pointEvaluation", "0a", t) } + +func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "f0a", b) } +func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "f0b", b) } +func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "f0c", b) } +func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "f0d", b) } +func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "f0e", b) } +func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "f0f", b) } +func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "f10", b) } +func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "f11", b) } +func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "f12", b) } // Failure tests -func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "0a", t) } -func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "0b", t) } -func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "0c", t) } -func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "0d", t) } -func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "0e", t) } -func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "0f", t) } -func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "10", t) } -func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "11", t) } -func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "12", t) } +func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", 
"f0a", t) } +func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "f0b", t) } +func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "f0c", t) } +func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "f0d", t) } +func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "f0e", t) } +func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "f0f", t) } +func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "f10", t) } +func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "f11", t) } +func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "f12", t) } func loadJson(name string) ([]precompiledTest, error) { data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name)) diff --git a/core/vm/eips.go b/core/vm/eips.go index 29ff27c55..704c1ce12 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -26,6 +26,8 @@ import ( ) var activators = map[int]func(*JumpTable){ + 5656: enable5656, + 6780: enable6780, 3855: enable3855, 3860: enable3860, 3529: enable3529, @@ -235,9 +237,69 @@ func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by return nil, nil } -// ebnable3860 enables "EIP-3860: Limit and meter initcode" +// enable3860 enables "EIP-3860: Limit and meter initcode" // https://eips.ethereum.org/EIPS/eip-3860 func enable3860(jt *JumpTable) { jt[CREATE].dynamicGas = gasCreateEip3860 jt[CREATE2].dynamicGas = gasCreate2Eip3860 } + +// enable5656 enables EIP-5656 (MCOPY opcode) +// https://eips.ethereum.org/EIPS/eip-5656 +func enable5656(jt *JumpTable) { + jt[MCOPY] = &operation{ + execute: opMcopy, + constantGas: GasFastestStep, + dynamicGas: gasMcopy, + minStack: minStack(3, 0), + maxStack: maxStack(3, 0), + memorySize: memoryMcopy, + } +} + +// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656) +func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + dst = scope.Stack.pop() + src = scope.Stack.pop() + length = scope.Stack.pop() + ) + // These values are checked for overflow during memory expansion calculation + // (the memorySize function on the opcode). 
+ scope.Memory.Copy(dst.Uint64(), src.Uint64(), length.Uint64()) + return nil, nil +} + +// opBlobHash implements the BLOBHASH opcode +func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + index := scope.Stack.peek() + if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) { + blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()] + index.SetBytes32(blobHash[:]) + } else { + index.Clear() + } + return nil, nil +} + +// enable4844 applies EIP-4844 (BLOBHASH opcode) +func enable4844(jt *JumpTable) { + // New opcode + jt[BLOBHASH] = &operation{ + execute: opBlobHash, + constantGas: GasFastestStep, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } +} + +// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) +func enable6780(jt *JumpTable) { + jt[SELFDESTRUCT] = &operation{ + execute: opSelfdestruct6780, + dynamicGas: gasSelfdestructEIP3529, + constantGas: params.SelfdestructGasEIP150, + minStack: minStack(1, 0), + maxStack: maxStack(1, 0), + } +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 01017572d..40e2f3554 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -21,15 +21,12 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) -// emptyCodeHash is used by create to ensure deployment is disallowed to already -// deployed contract addresses (relevant after the account abstraction). -var emptyCodeHash = crypto.Keccak256Hash(nil) - type ( // CanTransferFunc is the signature of a transfer guard function CanTransferFunc func(StateDB, common.Address, *big.Int) bool @@ -43,6 +40,8 @@ type ( func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { + case evm.chainRules.IsCancun: + precompiles = PrecompiledContractsCancun case evm.chainRules.IsBerlin: precompiles = PrecompiledContractsBerlin case evm.chainRules.IsIstanbul: @@ -68,21 +67,23 @@ type BlockContext struct { GetHash GetHashFunc // Block information - Coinbase common.Address // Provides information for COINBASE - GasLimit uint64 // Provides information for GASLIMIT - BlockNumber *big.Int // Provides information for NUMBER - Time uint64 // Provides information for TIME - Difficulty *big.Int // Provides information for DIFFICULTY - BaseFee *big.Int // Provides information for BASEFEE - Random *common.Hash // Provides information for PREVRANDAO + Coinbase common.Address // Provides information for COINBASE + GasLimit uint64 // Provides information for GASLIMIT + BlockNumber *big.Int // Provides information for NUMBER + Time uint64 // Provides information for TIME + Difficulty *big.Int // Provides information for DIFFICULTY + BaseFee *big.Int // Provides information for BASEFEE + Random *common.Hash // Provides information for PREVRANDAO + ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the blob fee } // TxContext provides the EVM with information about a transaction. // All fields can change between transactions.
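opBlobHash above reads straight from TxContext.BlobHashes (the field added in the next hunk); an in-range index is replaced by the hash, anything else by zero, never an error. A setup sketch along the lines of the new TestBlobHash:

    package main

    import (
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/vm"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        // BLOBHASH(0) -> 0x01..., BLOBHASH(1) -> 0x02..., BLOBHASH(2) -> zero.
        txCtx := vm.TxContext{
            GasPrice:   big.NewInt(1),
            BlobHashes: []common.Hash{{0x01}, {0x02}},
        }
        evm := vm.NewEVM(vm.BlockContext{}, txCtx, nil, params.TestChainConfig, vm.Config{})
        _ = evm
    }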
type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE + BlobHashes []common.Hash // Provides information for BLOBHASH } // EVM is the Ethereum Virtual Machine base object and provides @@ -435,7 +436,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } // Ensure there's no existing contract already at the designated address contractHash := evm.StateDB.GetCodeHash(address) - if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) { + if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) { return nil, common.Address{}, 0, ErrContractAddressCollision } // Create a new account on the state diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 4f961ef4d..5153c8b7a 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -60,6 +60,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) { // as argument: // CALLDATACOPY (stack position 2) // CODECOPY (stack position 2) +// MCOPY (stack position 2) // EXTCODECOPY (stack position 3) // RETURNDATACOPY (stack position 2) func memoryCopierGas(stackpos int) gasFunc { @@ -89,6 +90,7 @@ func memoryCopierGas(stackpos int) gasFunc { var ( gasCallDataCopy = memoryCopierGas(2) gasCodeCopy = memoryCopierGas(2) + gasMcopy = memoryCopierGas(2) gasExtCodeCopy = memoryCopierGas(3) gasReturnDataCopy = memoryCopierGas(2) ) @@ -470,7 +472,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } - if !evm.StateDB.HasSuicided(contract.Address()) { + if !evm.StateDB.HasSelfDestructed(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } return gas, nil diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 505aef412..2105201fc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -408,7 +408,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // emptyCodeHash. If the precompile account is not transferred any amount on a private or // customized chain, the return value will be zero. // -// 5. Caller tries to get the code hash for an account which is marked as suicided +// 5. Caller tries to get the code hash for an account which is marked as self-destructed // in the current transaction, the code hash of this account should be returned. // // 6. 
Caller tries to get the code hash for an account which is marked as deleted, this @@ -821,7 +821,23 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) - interpreter.evm.StateDB.Suicide(scope.Contract.Address()) + interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address()) + if tracer := interpreter.evm.Config.Tracer; tracer != nil { + tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) + tracer.CaptureExit([]byte{}, 0, nil) + } + return nil, errStopToken +} + +func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, ErrWriteProtection + } + beneficiary := scope.Stack.pop() + balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) + interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance) + interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) + interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address()) if tracer := interpreter.evm.Config.Tracer; tracer != nil { tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) tracer.CaptureExit([]byte{}, 0, nil) diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 2a66f8163..807073336 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -22,9 +22,11 @@ import ( "fmt" "math/big" "os" + "strings" "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -723,7 +725,7 @@ func TestRandom(t *testing.T) { for _, tt := range []testcase{ {name: "empty hash", random: common.Hash{}}, {name: "1", random: common.Hash{0}}, - {name: "emptyCodeHash", random: emptyCodeHash}, + {name: "emptyCodeHash", random: types.EmptyCodeHash}, {name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})}, } { var ( @@ -746,3 +748,183 @@ func TestRandom(t *testing.T) { } } } + +func TestBlobHash(t *testing.T) { + type testcase struct { + name string + idx uint64 + expect common.Hash + hashes []common.Hash + } + var ( + zero = common.Hash{0} + one = common.Hash{1} + two = common.Hash{2} + three = common.Hash{3} + ) + for _, tt := range []testcase{ + {name: "[{1}]", idx: 0, expect: one, hashes: []common.Hash{one}}, + {name: "[1,{2},3]", idx: 2, expect: three, hashes: []common.Hash{one, two, three}}, + {name: "out-of-bounds (empty)", idx: 10, expect: zero, hashes: []common.Hash{}}, + {name: "out-of-bounds", idx: 25, expect: zero, hashes: []common.Hash{one, two, three}}, + {name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil}, + } { + var ( + env = NewEVM(BlockContext{}, TxContext{BlobHashes: tt.hashes}, nil, params.TestChainConfig, Config{}) + stack = newstack() + pc = uint64(0) + evmInterpreter = env.interpreter + ) + stack.push(uint256.NewInt(tt.idx)) + opBlobHash(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) + if len(stack.data) != 1 { + t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) + } + actual := stack.pop() + expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.expect.Bytes())) + if overflow { + t.Errorf("Testcase %v: 
invalid overflow", tt.name) + } + if actual.Cmp(expected) != 0 { + t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual) + } + } +} + +func TestOpMCopy(t *testing.T) { + // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases + for i, tc := range []struct { + dst, src, len string + pre string + want string + wantGas uint64 + }{ + { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0. + dst: "0x0", src: "0x20", len: "0x20", + pre: "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + want: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + wantGas: 6, + }, + + { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0. + dst: "0x0", src: "0x0", len: "0x20", + pre: "0101010101010101010101010101010101010101010101010101010101010101", + want: "0101010101010101010101010101010101010101010101010101010101010101", + wantGas: 6, + }, + { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping). + dst: "0x0", src: "0x1", len: "0x8", + pre: "000102030405060708 000000000000000000000000000000000000000000000000", + want: "010203040506070808 000000000000000000000000000000000000000000000000", + wantGas: 6, + }, + { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping). + dst: "0x1", src: "0x0", len: "0x8", + pre: "000102030405060708 000000000000000000000000000000000000000000000000", + want: "000001020304050607 000000000000000000000000000000000000000000000000", + wantGas: 6, + }, + // Tests below are not in the EIP, but maybe should be added + { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping). + dst: "0xFFFFFFFFFFFF", src: "0xFFFFFFFFFFFF", len: "0x0", + pre: "11", + want: "11", + wantGas: 3, + }, + { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds. 
+ dst: "0xFFFFFFFFFFFF", src: "0x0", len: "0x0", + pre: "11", + want: "11", + wantGas: 3, + }, + { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem + dst: "0x0", src: "0xFFFFFFFFFFFF", len: "0x0", + pre: "11", + want: "11", + wantGas: 3, + }, + { // MCOPY - copy 1 from space outside of uint64 space + dst: "0x0", src: "0x10000000000000000", len: "0x1", + pre: "0", + }, + { // MCOPY - copy 1 from 0 to space outside of uint64 + dst: "0x10000000000000000", src: "0x0", len: "0x1", + pre: "0", + }, + { // MCOPY - copy nothing from 0 to space outside of uint64 + dst: "0x10000000000000000", src: "0x0", len: "0x0", + pre: "", + want: "", + wantGas: 3, + }, + { // MCOPY - copy 1 from 0x20 to 0x10, with no prior allocated mem + dst: "0x10", src: "0x20", len: "0x1", + pre: "", + // 64 bytes + want: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + wantGas: 12, + }, + { // MCOPY - copy 1 from 0x19 to 0x10, with no prior allocated mem + dst: "0x10", src: "0x19", len: "0x1", + pre: "", + // 32 bytes + want: "0x0000000000000000000000000000000000000000000000000000000000000000", + wantGas: 9, + }, + } { + var ( + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) + stack = newstack() + pc = uint64(0) + evmInterpreter = env.interpreter + ) + data := common.FromHex(strings.ReplaceAll(tc.pre, " ", "")) + // Set pre + mem := NewMemory() + mem.Resize(uint64(len(data))) + mem.Set(0, uint64(len(data)), data) + // Push stack args + len, _ := uint256.FromHex(tc.len) + src, _ := uint256.FromHex(tc.src) + dst, _ := uint256.FromHex(tc.dst) + + stack.push(len) + stack.push(src) + stack.push(dst) + wantErr := (tc.wantGas == 0) + // Calc mem expansion + var memorySize uint64 + if memSize, overflow := memoryMcopy(stack); overflow { + if wantErr { + continue + } + t.Errorf("overflow") + } else { + var overflow bool + if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow { + t.Error(ErrGasUintOverflow) + } + } + // and the dynamic cost + var haveGas uint64 + if dynamicCost, err := gasMcopy(env, nil, stack, mem, memorySize); err != nil { + t.Error(err) + } else { + haveGas = GasFastestStep + dynamicCost + } + // Expand mem + if memorySize > 0 { + mem.Resize(memorySize) + } + // Do the copy + opMcopy(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) + want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) + if have := mem.store; !bytes.Equal(want, have) { + t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have) + } + wantGas := tc.wantGas + if haveGas != wantGas { + t.Errorf("case %d: gas wrong, want %d have %d\n", i, wantGas, haveGas) + } + } +} diff --git a/core/vm/interface.go b/core/vm/interface.go index b83f78307..26814d3d2 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -51,11 +51,13 @@ type StateDB interface { GetTransientState(addr common.Address, key common.Hash) common.Hash SetTransientState(addr common.Address, key, value common.Hash) - Suicide(common.Address) bool - HasSuicided(common.Address) bool + SelfDestruct(common.Address) + HasSelfDestructed(common.Address) bool + + Selfdestruct6780(common.Address) // Exist reports whether the given account exists in state. - // Notably this should also return true for suicided accounts. + // Notably this should also return true for self-destructed accounts. Exist(common.Address) bool // Empty returns whether the given account is empty. 
Empty
 	// is defined according to EIP161 (balance = nonce = code = 0).
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 5b2082bc9..873337850 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -56,6 +56,8 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter {
 	// If jump table was not initialised we set the default one.
 	var table *JumpTable
 	switch {
+	case evm.chainRules.IsCancun:
+		table = &cancunInstructionSet
 	case evm.chainRules.IsShanghai:
 		table = &shanghaiInstructionSet
 	case evm.chainRules.IsMerge:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index a45287de8..702b18661 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -56,6 +56,7 @@ var (
 	londonInstructionSet   = newLondonInstructionSet()
 	mergeInstructionSet    = newMergeInstructionSet()
 	shanghaiInstructionSet = newShanghaiInstructionSet()
+	cancunInstructionSet   = newCancunInstructionSet()
 )
 
 // JumpTable contains the EVM opcodes supported at a given fork.
@@ -79,10 +80,20 @@ func validate(jt JumpTable) JumpTable {
 	return jt
 }
 
+func newCancunInstructionSet() JumpTable {
+	instructionSet := newShanghaiInstructionSet()
+	enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
+	enable1153(&instructionSet) // EIP-1153 "Transient Storage"
+	enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)
+	enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction
+	return validate(instructionSet)
+}
+
 func newShanghaiInstructionSet() JumpTable {
 	instructionSet := newMergeInstructionSet()
 	enable3855(&instructionSet) // PUSH0 instruction
 	enable3860(&instructionSet) // Limit and meter initcode
+
 	return validate(instructionSet)
 }
diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go
index 0d61b00ed..6ea47d63a 100644
--- a/core/vm/jump_table_export.go
+++ b/core/vm/jump_table_export.go
@@ -26,10 +26,12 @@ import (
 // the rules.
 func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
 	switch {
+	case rules.IsVerkle:
+		return newCancunInstructionSet(), errors.New("verkle-fork not defined yet")
 	case rules.IsPrague:
-		return newShanghaiInstructionSet(), errors.New("prague-fork not defined yet")
+		return newCancunInstructionSet(), errors.New("prague-fork not defined yet")
 	case rules.IsCancun:
-		return newShanghaiInstructionSet(), errors.New("cancun-fork not defined yet")
+		return newCancunInstructionSet(), nil
 	case rules.IsShanghai:
 		return newShanghaiInstructionSet(), nil
 	case rules.IsMerge:
diff --git a/core/vm/memory.go b/core/vm/memory.go
index 35b729996..e0202fd7c 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -103,3 +103,14 @@ func (m *Memory) Len() int {
 func (m *Memory) Data() []byte {
 	return m.store
 }
+
+// Copy copies data from position src to position dst within the memory store.
+// The source and destination may overlap.
+// OBS: This operation assumes that any necessary memory expansion has already been performed,
+// and this method may panic otherwise.
+func (m *Memory) Copy(dst, src, len uint64) { + if len == 0 { + return + } + copy(m.store[dst:], m.store[src:src+len]) +} diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go index e35ca84e0..61a910a03 100644 --- a/core/vm/memory_table.go +++ b/core/vm/memory_table.go @@ -48,6 +48,14 @@ func memoryMStore(stack *Stack) (uint64, bool) { return calcMemSize64WithUint(stack.Back(0), 32) } +func memoryMcopy(stack *Stack) (uint64, bool) { + mStart := stack.Back(0) // stack[0]: dest + if stack.Back(1).Gt(mStart) { + mStart = stack.Back(1) // stack[1]: source + } + return calcMemSize64(mStart, stack.Back(2)) // stack[2]: length +} + func memoryCreate(stack *Stack) (uint64, bool) { return calcMemSize64(stack.Back(1), stack.Back(2)) } diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go new file mode 100644 index 000000000..ba36f8023 --- /dev/null +++ b/core/vm/memory_test.go @@ -0,0 +1,69 @@ +package vm + +import ( + "bytes" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +func TestMemoryCopy(t *testing.T) { + // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases + for i, tc := range []struct { + dst, src, len uint64 + pre string + want string + }{ + { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0. + 0, 32, 32, + "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + }, + + { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0. + 0, 0, 32, + "0101010101010101010101010101010101010101010101010101010101010101", + "0101010101010101010101010101010101010101010101010101010101010101", + }, + { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping). + 0, 1, 8, + "000102030405060708 000000000000000000000000000000000000000000000000", + "010203040506070808 000000000000000000000000000000000000000000000000", + }, + { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping). + 1, 0, 8, + "000102030405060708 000000000000000000000000000000000000000000000000", + "000001020304050607 000000000000000000000000000000000000000000000000", + }, + // Tests below are not in the EIP, but maybe should be added + { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping). + 0xFFFFFFFFFFFF, 0xFFFFFFFFFFFF, 0, + "11", + "11", + }, + { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds. + 0xFFFFFFFFFFFF, 0, 0, + "11", + "11", + }, + { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem + 0, 0xFFFFFFFFFFFF, 0, + "11", + "11", + }, + } { + m := NewMemory() + // Clean spaces + data := common.FromHex(strings.ReplaceAll(tc.pre, " ", "")) + // Set pre + m.Resize(uint64(len(data))) + m.Set(0, uint64(len(data)), data) + // Do the copy + m.Copy(tc.dst, tc.src, tc.len) + want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) + if have := m.store; !bytes.Equal(want, have) { + t.Errorf("case %d: want: %#x\nhave: %#x\n", i, want, have) + } + } +} diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index 910491c60..2929b8ce9 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -100,6 +100,7 @@ const ( CHAINID OpCode = 0x46 SELFBALANCE OpCode = 0x47 BASEFEE OpCode = 0x48 + BLOBHASH OpCode = 0x49 ) // 0x50 range - 'storage' and execution. 
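
The BLOBHASH constant just added (0x49) pairs with the TxContext.BlobHashes field introduced in core/vm/evm.go earlier in this diff, and TestBlobHash above pins down its semantics. As a reading aid, here is a minimal standalone Go sketch of that rule, not the geth implementation (the real opBlobHash pops its index from the stack): the opcode yields the hash at the requested index, and the zero hash for any out-of-range index, including on empty or nil hash lists.

	// Sketch only: the EIP-4844 BLOBHASH rule exercised by TestBlobHash.
	// blobHash is a hypothetical helper, not the real opBlobHash.
	package main

	import "fmt"

	type hash [32]byte

	// blobHash returns blobHashes[index], or the zero hash when the index
	// is out of range; nil and empty lists behave identically.
	func blobHash(blobHashes []hash, index uint64) hash {
		if index >= uint64(len(blobHashes)) {
			return hash{}
		}
		return blobHashes[index]
	}

	func main() {
		one, two, three := hash{1}, hash{2}, hash{3}
		fmt.Println(blobHash([]hash{one, two, three}, 2) == three) // true
		fmt.Println(blobHash(nil, 25) == hash{})                   // true
	}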
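
Staying with MCOPY: its cost is split between memoryMcopy in core/vm/memory_table.go above (how far memory must expand) and gasMcopy (the per-word copy charge). The expansion rule is worth restating: the required size is max(dst, src) + length, and a zero length never expands memory, which is why the far-out-of-bounds zero-length cases in TestOpMCopy cost only the constant 3 gas. A sketch under the stated assumption that the uint256 overflow checks are elided:

	// Sketch only: the memoryMcopy expansion rule on plain uint64s,
	// with overflow handling elided.
	package main

	import "fmt"

	// mcopyMemSize returns how far memory must reach for MCOPY(dst, src, length):
	// whichever of the two regions ends later. Zero-length copies never
	// expand memory, no matter how large the offsets are.
	func mcopyMemSize(dst, src, length uint64) uint64 {
		if length == 0 {
			return 0
		}
		start := dst
		if src > dst {
			start = src
		}
		return start + length
	}

	func main() {
		fmt.Println(mcopyMemSize(0, 32, 32))            // 64: the copy reaches byte 63
		fmt.Println(mcopyMemSize(0xFFFFFFFFFFFF, 0, 0)) // 0: no expansion
	}

On top of that size, memoryCopierGas rounds the copied length up to 32-byte words at 3 gas per word plus any expansion cost, which is how TestOpMCopy arrives at its wantGas values.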
@@ -116,6 +117,9 @@ const ( MSIZE OpCode = 0x59 GAS OpCode = 0x5a JUMPDEST OpCode = 0x5b + TLOAD OpCode = 0x5c + TSTORE OpCode = 0x5d + MCOPY OpCode = 0x5e PUSH0 OpCode = 0x5f ) @@ -204,12 +208,6 @@ const ( LOG4 ) -// 0xb0 range. -const ( - TLOAD OpCode = 0xb3 - TSTORE OpCode = 0xb4 -) - // 0xf0 range - closures. const ( CREATE OpCode = 0xf0 @@ -288,6 +286,7 @@ var opCodeToString = map[OpCode]string{ CHAINID: "CHAINID", SELFBALANCE: "SELFBALANCE", BASEFEE: "BASEFEE", + BLOBHASH: "BLOBHASH", // 0x50 range - 'storage' and execution. POP: "POP", @@ -302,6 +301,9 @@ var opCodeToString = map[OpCode]string{ MSIZE: "MSIZE", GAS: "GAS", JUMPDEST: "JUMPDEST", + TLOAD: "TLOAD", + TSTORE: "TSTORE", + MCOPY: "MCOPY", PUSH0: "PUSH0", // 0x60 range - pushes. @@ -381,10 +383,6 @@ var opCodeToString = map[OpCode]string{ LOG3: "LOG3", LOG4: "LOG4", - // 0xb0 range. - TLOAD: "TLOAD", - TSTORE: "TSTORE", - // 0xf0 range - closures. CREATE: "CREATE", CALL: "CALL", @@ -445,6 +443,7 @@ var stringToOp = map[string]OpCode{ "CALLDATACOPY": CALLDATACOPY, "CHAINID": CHAINID, "BASEFEE": BASEFEE, + "BLOBHASH": BLOBHASH, "DELEGATECALL": DELEGATECALL, "STATICCALL": STATICCALL, "CODESIZE": CODESIZE, @@ -474,6 +473,9 @@ var stringToOp = map[string]OpCode{ "MSIZE": MSIZE, "GAS": GAS, "JUMPDEST": JUMPDEST, + "TLOAD": TLOAD, + "TSTORE": TSTORE, + "MCOPY": MCOPY, "PUSH0": PUSH0, "PUSH1": PUSH1, "PUSH2": PUSH2, @@ -544,8 +546,6 @@ var stringToOp = map[string]OpCode{ "LOG2": LOG2, "LOG3": LOG3, "LOG4": LOG4, - "TLOAD": TLOAD, - "TSTORE": TSTORE, "CREATE": CREATE, "CREATE2": CREATE2, "CALL": CALL, diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 551e1f5f1..04c6409eb 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -235,7 +235,7 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { gas += params.CreateBySelfdestructGas } - if refundsEnabled && !evm.StateDB.HasSuicided(contract.Address()) { + if refundsEnabled && !evm.StateDB.HasSelfDestructed(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } return gas, nil diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index dcb097428..7e330e073 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -23,8 +23,9 @@ import ( func NewEnv(cfg *Config) *vm.EVM { txContext := vm.TxContext{ - Origin: cfg.Origin, - GasPrice: cfg.GasPrice, + Origin: cfg.Origin, + GasPrice: cfg.GasPrice, + BlobHashes: cfg.BlobHashes, } blockContext := vm.BlockContext{ CanTransfer: core.CanTransfer, @@ -36,6 +37,7 @@ func NewEnv(cfg *Config) *vm.EVM { Difficulty: cfg.Difficulty, GasLimit: cfg.GasLimit, BaseFee: cfg.BaseFee, + Random: cfg.Random, } return vm.NewEVM(blockContext, txContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig) diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 951a472e4..480e5cec6 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -44,6 +44,8 @@ type Config struct { Debug bool EVMConfig vm.Config BaseFee *big.Int + BlobHashes []common.Hash + Random *common.Hash State *state.StateDB GetHashFn func(n uint64) common.Hash diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 7b521a2ea..796d3b443 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -679,30 +679,30 @@ func TestColdAccountAccessCost(t *testing.T) { func TestRuntimeJSTracer(t *testing.T) { jsTracers := []string{ `{enters: 0, exits: 0, 
enterGas: 0, gasUsed: 0, steps:0, - step: function() { this.steps++}, - fault: function() {}, - result: function() { - return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") - }, - enter: function(frame) { - this.enters++; + step: function() { this.steps++}, + fault: function() {}, + result: function() { + return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") + }, + enter: function(frame) { + this.enters++; this.enterGas = frame.getGas(); - }, - exit: function(res) { - this.exits++; + }, + exit: function(res) { + this.exits++; this.gasUsed = res.getGasUsed(); }}`, `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, - fault: function() {}, - result: function() { - return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") - }, - enter: function(frame) { - this.enters++; + fault: function() {}, + result: function() { + return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") + }, + enter: function(frame) { + this.enters++; this.enterGas = frame.getGas(); - }, - exit: function(res) { - this.exits++; + }, + exit: function(res) { + this.exits++; this.gasUsed = res.getGasUsed(); }}`} tests := []struct { diff --git a/core/vm/testdata/precompiles/pointEvaluation.json b/core/vm/testdata/precompiles/pointEvaluation.json new file mode 100644 index 000000000..93fc66d83 --- /dev/null +++ b/core/vm/testdata/precompiles/pointEvaluation.json @@ -0,0 +1,9 @@ +[ + { + "Input": "01d18459b334ffe8e2226eef1db874fda6db2bdd9357268b39220af2d59464fb564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d3630624d25032e67a7e6a4910df5834b8fe70e6bcfeeac0352434196bdf4b2485d5a1978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806942307f266e636553e94006d11423f2688945ff3bdf515859eba1005c1a7708d620a94d91a1c0c285f9584e75ec2f82a", + "Expected": "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", + "Name": "pointEvaluation1", + "Gas": 50000, + "NoBenchmark": false + } +] diff --git a/crypto/bls12381/g2.go b/crypto/bls12381/g2.go index 4d6f1ff11..e5fe75af2 100644 --- a/crypto/bls12381/g2.go +++ b/crypto/bls12381/g2.go @@ -121,7 +121,7 @@ func (g *G2) FromBytes(in []byte) (*PointG2, error) { return p, nil } -// DecodePoint given encoded (x, y) coordinates in 256 bytes returns a valid G1 Point. +// DecodePoint given encoded (x, y) coordinates in 256 bytes returns a valid G2 Point. 
func (g *G2) DecodePoint(in []byte) (*PointG2, error) { if len(in) != 256 { return nil, errors.New("invalid g2 point length") diff --git a/crypto/bn256/cloudflare/optate.go b/crypto/bn256/cloudflare/optate.go index b71e50e3a..e8caa7a08 100644 --- a/crypto/bn256/cloudflare/optate.go +++ b/crypto/bn256/cloudflare/optate.go @@ -199,9 +199,8 @@ func miller(q *twistPoint, p *curvePoint) *gfP12 { r = newR r2.Square(&minusQ2.y) - a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2) + a, b, c, _ = lineFunctionAdd(r, minusQ2, bAffine, r2) mulLine(ret, a, b, c) - r = newR return ret } diff --git a/crypto/kzg4844/kzg4844.go b/crypto/kzg4844/kzg4844.go index 3bd814159..5969d1c2c 100644 --- a/crypto/kzg4844/kzg4844.go +++ b/crypto/kzg4844/kzg4844.go @@ -54,7 +54,7 @@ func UseCKZG(use bool) error { useCKZG.Store(use) // Initializing the library can take 2-4 seconds - and can potentially crash - // on CKZG and non-ADX CPUs - so might as well so it now and don't wait until + // on CKZG and non-ADX CPUs - so might as well do it now and don't wait until // a crypto operation is actually needed live. if use { ckzgIniter.Do(ckzgInit) diff --git a/crypto/kzg4844/kzg4844_ckzg_cgo.go b/crypto/kzg4844/kzg4844_ckzg_cgo.go index d62ca3fad..540028569 100644 --- a/crypto/kzg4844/kzg4844_ckzg_cgo.go +++ b/crypto/kzg4844/kzg4844_ckzg_cgo.go @@ -74,6 +74,8 @@ func ckzgBlobToCommitment(blob Blob) (Commitment, error) { // ckzgComputeProof computes the KZG proof at the given point for the polynomial // represented by the blob. func ckzgComputeProof(blob Blob, point Point) (Proof, Claim, error) { + ckzgIniter.Do(ckzgInit) + proof, claim, err := ckzg4844.ComputeKZGProof((ckzg4844.Blob)(blob), (ckzg4844.Bytes32)(point)) if err != nil { return Proof{}, Claim{}, err @@ -84,6 +86,8 @@ func ckzgComputeProof(blob Blob, point Point) (Proof, Claim, error) { // ckzgVerifyProof verifies the KZG proof that the polynomial represented by the blob // evaluated at the given point is the claimed value. func ckzgVerifyProof(commitment Commitment, point Point, claim Claim, proof Proof) error { + ckzgIniter.Do(ckzgInit) + valid, err := ckzg4844.VerifyKZGProof((ckzg4844.Bytes48)(commitment), (ckzg4844.Bytes32)(point), (ckzg4844.Bytes32)(claim), (ckzg4844.Bytes48)(proof)) if err != nil { return err @@ -99,6 +103,8 @@ func ckzgVerifyProof(commitment Commitment, point Point, claim Claim, proof Proo // // This method does not verify that the commitment is correct with respect to blob. func ckzgComputeBlobProof(blob Blob, commitment Commitment) (Proof, error) { + ckzgIniter.Do(ckzgInit) + proof, err := ckzg4844.ComputeBlobKZGProof((ckzg4844.Blob)(blob), (ckzg4844.Bytes48)(commitment)) if err != nil { return Proof{}, err @@ -108,6 +114,8 @@ func ckzgComputeBlobProof(blob Blob, commitment Commitment) (Proof, error) { // ckzgVerifyBlobProof verifies that the blob data corresponds to the provided commitment. 
func ckzgVerifyBlobProof(blob Blob, commitment Commitment, proof Proof) error { + ckzgIniter.Do(ckzgInit) + valid, err := ckzg4844.VerifyBlobKZGProof((ckzg4844.Blob)(blob), (ckzg4844.Bytes48)(commitment), (ckzg4844.Bytes48)(proof)) if err != nil { return err diff --git a/crypto/kzg4844/kzg4844_test.go b/crypto/kzg4844/kzg4844_test.go index 0d35667a9..fae8a7a76 100644 --- a/crypto/kzg4844/kzg4844_test.go +++ b/crypto/kzg4844/kzg4844_test.go @@ -47,7 +47,6 @@ func randBlob() Blob { func TestCKZGWithPoint(t *testing.T) { testKZGWithPoint(t, true) } func TestGoKZGWithPoint(t *testing.T) { testKZGWithPoint(t, false) } - func testKZGWithPoint(t *testing.T, ckzg bool) { if ckzg && !ckzgAvailable { t.Skip("CKZG unavailable in this test build") @@ -73,7 +72,6 @@ func testKZGWithPoint(t *testing.T, ckzg bool) { func TestCKZGWithBlob(t *testing.T) { testKZGWithBlob(t, true) } func TestGoKZGWithBlob(t *testing.T) { testKZGWithBlob(t, false) } - func testKZGWithBlob(t *testing.T, ckzg bool) { if ckzg && !ckzgAvailable { t.Skip("CKZG unavailable in this test build") @@ -106,6 +104,8 @@ func benchmarkBlobToCommitment(b *testing.B, ckzg bool) { useCKZG.Store(ckzg) blob := randBlob() + + b.ResetTimer() for i := 0; i < b.N; i++ { BlobToCommitment(blob) } @@ -124,6 +124,8 @@ func benchmarkComputeProof(b *testing.B, ckzg bool) { blob = randBlob() point = randFieldElement() ) + + b.ResetTimer() for i := 0; i < b.N; i++ { ComputeProof(blob, point) } @@ -144,6 +146,8 @@ func benchmarkVerifyProof(b *testing.B, ckzg bool) { commitment, _ = BlobToCommitment(blob) proof, claim, _ = ComputeProof(blob, point) ) + + b.ResetTimer() for i := 0; i < b.N; i++ { VerifyProof(commitment, point, claim, proof) } @@ -162,6 +166,8 @@ func benchmarkComputeBlobProof(b *testing.B, ckzg bool) { blob = randBlob() commitment, _ = BlobToCommitment(blob) ) + + b.ResetTimer() for i := 0; i < b.N; i++ { ComputeBlobProof(blob, commitment) } @@ -181,6 +187,8 @@ func benchmarkVerifyBlobProof(b *testing.B, ckzg bool) { commitment, _ = BlobToCommitment(blob) proof, _ = ComputeBlobProof(blob, commitment) ) + + b.ResetTimer() for i := 0; i < b.N; i++ { VerifyBlobProof(blob, commitment, proof) } diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go index c9c01b320..61abc1eaf 100644 --- a/crypto/secp256k1/secp256.go +++ b/crypto/secp256k1/secp256.go @@ -21,11 +21,14 @@ package secp256k1 # define USE_SCALAR_8X32 #endif +#ifndef NDEBUG +# define NDEBUG +#endif + #define USE_ENDOMORPHISM #define USE_NUM_NONE #define USE_FIELD_INV_BUILTIN #define USE_SCALAR_INV_BUILTIN -#define NDEBUG #include "./libsecp256k1/src/secp256k1.c" #include "./libsecp256k1/src/modules/recovery/main_impl.h" #include "ext.h" diff --git a/eth/api.go b/eth/api.go index 00ad48fbf..44e934fd0 100644 --- a/eth/api.go +++ b/eth/api.go @@ -17,27 +17,8 @@ package eth import ( - "compress/gzip" - "context" - "errors" - "fmt" - "io" - "math/big" - "os" - "strings" - "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/trie" ) // EthereumAPI provides an API to access Ethereum full node-related information. 
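
One small but load-bearing change above: each cgo entry point in crypto/kzg4844 now calls ckzgIniter.Do(ckzgInit) before touching the library, so the multi-second (and potentially crashing) initialization happens lazily and exactly once, however many goroutines arrive concurrently. A minimal sketch of that sync.Once pattern, with hypothetical names (loadSetup, prove) standing in for the kzg4844 internals:

	// Sketch only: the sync.Once guard behind the ckzgIniter.Do calls above.
	// loadSetup and prove are hypothetical stand-ins, not the kzg4844 API.
	package main

	import (
		"fmt"
		"sync"
	)

	var (
		setupOnce sync.Once
		setup     []byte
	)

	// loadSetup stands in for the expensive trusted-setup load; sync.Once
	// guarantees it runs exactly once, even if many goroutines race into prove.
	func loadSetup() {
		setup = []byte("trusted setup")
		fmt.Println("initialized")
	}

	// prove mirrors the shape of ckzgComputeProof: ensure initialization,
	// then do the actual work.
	func prove(input byte) byte {
		setupOnce.Do(loadSetup)
		return setup[0] ^ input
	}

	func main() {
		fmt.Println(prove(1)) // "initialized" is printed once
		fmt.Println(prove(2)) // not printed again
	}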
@@ -50,12 +31,12 @@ func NewEthereumAPI(e *Ethereum) *EthereumAPI { return &EthereumAPI{e} } -// Etherbase is the address that mining rewards will be send to. +// Etherbase is the address that mining rewards will be sent to. func (api *EthereumAPI) Etherbase() (common.Address, error) { return api.e.Etherbase() } -// Coinbase is the address that mining rewards will be send to (alias for Etherbase). +// Coinbase is the address that mining rewards will be sent to (alias for Etherbase). func (api *EthereumAPI) Coinbase() (common.Address, error) { return api.Etherbase() } @@ -69,542 +50,3 @@ func (api *EthereumAPI) Hashrate() hexutil.Uint64 { func (api *EthereumAPI) Mining() bool { return api.e.IsMining() } - -// MinerAPI provides an API to control the miner. -type MinerAPI struct { - e *Ethereum -} - -// NewMinerAPI create a new MinerAPI instance. -func NewMinerAPI(e *Ethereum) *MinerAPI { - return &MinerAPI{e} -} - -// Start starts the miner with the given number of threads. If threads is nil, -// the number of workers started is equal to the number of logical CPUs that are -// usable by this process. If mining is already running, this method adjust the -// number of threads allowed to use and updates the minimum price required by the -// transaction pool. -func (api *MinerAPI) Start() error { - return api.e.StartMining() -} - -// Stop terminates the miner, both at the consensus engine level as well as at -// the block creation level. -func (api *MinerAPI) Stop() { - api.e.StopMining() -} - -// SetExtra sets the extra data string that is included when this miner mines a block. -func (api *MinerAPI) SetExtra(extra string) (bool, error) { - if err := api.e.Miner().SetExtra([]byte(extra)); err != nil { - return false, err - } - return true, nil -} - -// SetGasPrice sets the minimum accepted gas price for the miner. -func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool { - api.e.lock.Lock() - api.e.gasPrice = (*big.Int)(&gasPrice) - api.e.lock.Unlock() - - api.e.txPool.SetGasPrice((*big.Int)(&gasPrice)) - return true -} - -// SetGasLimit sets the gaslimit to target towards during mining. -func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool { - api.e.Miner().SetGasCeil(uint64(gasLimit)) - return true -} - -// SetEtherbase sets the etherbase of the miner. -func (api *MinerAPI) SetEtherbase(etherbase common.Address) bool { - api.e.SetEtherbase(etherbase) - return true -} - -// SetRecommitInterval updates the interval for miner sealing work recommitting. -func (api *MinerAPI) SetRecommitInterval(interval int) { - api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond) -} - -// AdminAPI is the collection of Ethereum full node related APIs for node -// administration. -type AdminAPI struct { - eth *Ethereum -} - -// NewAdminAPI creates a new instance of AdminAPI. -func NewAdminAPI(eth *Ethereum) *AdminAPI { - return &AdminAPI{eth: eth} -} - -// ExportChain exports the current blockchain into a local file, -// or a range of blocks if first and last are non-nil. -func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) { - if first == nil && last != nil { - return false, errors.New("last cannot be specified without first") - } - if first != nil && last == nil { - head := api.eth.BlockChain().CurrentHeader().Number.Uint64() - last = &head - } - if _, err := os.Stat(file); err == nil { - // File already exists. Allowing overwrite could be a DoS vector, - // since the 'file' may point to arbitrary paths on the drive. 
- return false, errors.New("location would overwrite an existing file") - } - // Make sure we can create the file to export into - out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return false, err - } - defer out.Close() - - var writer io.Writer = out - if strings.HasSuffix(file, ".gz") { - writer = gzip.NewWriter(writer) - defer writer.(*gzip.Writer).Close() - } - - // Export the blockchain - if first != nil { - if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil { - return false, err - } - } else if err := api.eth.BlockChain().Export(writer); err != nil { - return false, err - } - return true, nil -} - -func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool { - for _, b := range bs { - if !chain.HasBlock(b.Hash(), b.NumberU64()) { - return false - } - } - - return true -} - -// ImportChain imports a blockchain from a local file. -func (api *AdminAPI) ImportChain(file string) (bool, error) { - // Make sure the can access the file to import - in, err := os.Open(file) - if err != nil { - return false, err - } - defer in.Close() - - var reader io.Reader = in - if strings.HasSuffix(file, ".gz") { - if reader, err = gzip.NewReader(reader); err != nil { - return false, err - } - } - - // Run actual the import in pre-configured batches - stream := rlp.NewStream(reader, 0) - - blocks, index := make([]*types.Block, 0, 2500), 0 - for batch := 0; ; batch++ { - // Load a batch of blocks from the input file - for len(blocks) < cap(blocks) { - block := new(types.Block) - if err := stream.Decode(block); err == io.EOF { - break - } else if err != nil { - return false, fmt.Errorf("block %d: failed to parse: %v", index, err) - } - blocks = append(blocks, block) - index++ - } - if len(blocks) == 0 { - break - } - - if hasAllBlocks(api.eth.BlockChain(), blocks) { - blocks = blocks[:0] - continue - } - // Import the batch and reset the buffer - if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil { - return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err) - } - blocks = blocks[:0] - } - return true, nil -} - -// DebugAPI is the collection of Ethereum full node APIs for debugging the -// protocol. -type DebugAPI struct { - eth *Ethereum -} - -// NewDebugAPI creates a new DebugAPI instance. -func NewDebugAPI(eth *Ethereum) *DebugAPI { - return &DebugAPI{eth: eth} -} - -// DumpBlock retrieves the entire state of the database at a given block. 
-func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) { - opts := &state.DumpConfig{ - OnlyWithAddresses: true, - Max: AccountRangeMaxResults, // Sanity limit over RPC - } - if blockNr == rpc.PendingBlockNumber { - // If we're dumping the pending state, we need to request - // both the pending block as well as the pending state from - // the miner and operate on those - _, stateDb := api.eth.miner.Pending() - return stateDb.RawDump(opts), nil - } - var header *types.Header - if blockNr == rpc.LatestBlockNumber { - header = api.eth.blockchain.CurrentBlock() - } else if blockNr == rpc.FinalizedBlockNumber { - header = api.eth.blockchain.CurrentFinalBlock() - } else if blockNr == rpc.SafeBlockNumber { - header = api.eth.blockchain.CurrentSafeBlock() - } else { - block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) - if block == nil { - return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) - } - header = block.Header() - } - if header == nil { - return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) - } - stateDb, err := api.eth.BlockChain().StateAt(header.Root) - if err != nil { - return state.Dump{}, err - } - return stateDb.RawDump(opts), nil -} - -// Preimage is a debug API function that returns the preimage for a sha3 hash, if known. -func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { - if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil { - return preimage, nil - } - return nil, errors.New("unknown preimage") -} - -// BadBlockArgs represents the entries in the list returned when bad blocks are queried. -type BadBlockArgs struct { - Hash common.Hash `json:"hash"` - Block map[string]interface{} `json:"block"` - RLP string `json:"rlp"` -} - -// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network -// and returns them as a JSON list of block hashes. 
-func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) { - var ( - err error - blocks = rawdb.ReadAllBadBlocks(api.eth.chainDb) - results = make([]*BadBlockArgs, 0, len(blocks)) - ) - for _, block := range blocks { - var ( - blockRlp string - blockJSON map[string]interface{} - ) - if rlpBytes, err := rlp.EncodeToBytes(block); err != nil { - blockRlp = err.Error() // Hacky, but hey, it works - } else { - blockRlp = fmt.Sprintf("%#x", rlpBytes) - } - if blockJSON, err = ethapi.RPCMarshalBlock(block, true, true, api.eth.APIBackend.ChainConfig()); err != nil { - blockJSON = map[string]interface{}{"error": err.Error()} - } - results = append(results, &BadBlockArgs{ - Hash: block.Hash(), - RLP: blockRlp, - Block: blockJSON, - }) - } - return results, nil -} - -// AccountRangeMaxResults is the maximum number of results to be returned per call -const AccountRangeMaxResults = 256 - -// AccountRange enumerates all accounts in the given block and start point in paging request -func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) { - var stateDb *state.StateDB - var err error - - if number, ok := blockNrOrHash.Number(); ok { - if number == rpc.PendingBlockNumber { - // If we're dumping the pending state, we need to request - // both the pending block as well as the pending state from - // the miner and operate on those - _, stateDb = api.eth.miner.Pending() - } else { - var header *types.Header - if number == rpc.LatestBlockNumber { - header = api.eth.blockchain.CurrentBlock() - } else if number == rpc.FinalizedBlockNumber { - header = api.eth.blockchain.CurrentFinalBlock() - } else if number == rpc.SafeBlockNumber { - header = api.eth.blockchain.CurrentSafeBlock() - } else { - block := api.eth.blockchain.GetBlockByNumber(uint64(number)) - if block == nil { - return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) - } - header = block.Header() - } - if header == nil { - return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) - } - stateDb, err = api.eth.BlockChain().StateAt(header.Root) - if err != nil { - return state.IteratorDump{}, err - } - } - } else if hash, ok := blockNrOrHash.Hash(); ok { - block := api.eth.blockchain.GetBlockByHash(hash) - if block == nil { - return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex()) - } - stateDb, err = api.eth.BlockChain().StateAt(block.Root()) - if err != nil { - return state.IteratorDump{}, err - } - } else { - return state.IteratorDump{}, errors.New("either block number or block hash must be specified") - } - - opts := &state.DumpConfig{ - SkipCode: nocode, - SkipStorage: nostorage, - OnlyWithAddresses: !incompletes, - Start: start, - Max: uint64(maxResults), - } - if maxResults > AccountRangeMaxResults || maxResults <= 0 { - opts.Max = AccountRangeMaxResults - } - return stateDb.IteratorDump(opts), nil -} - -// StorageRangeResult is the result of a debug_storageRangeAt API call. -type StorageRangeResult struct { - Storage storageMap `json:"storage"` - NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie. -} - -type storageMap map[common.Hash]storageEntry - -type storageEntry struct { - Key *common.Hash `json:"key"` - Value common.Hash `json:"value"` -} - -// StorageRangeAt returns the storage at the given block height and transaction index. 
-func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { - // Retrieve the block - block := api.eth.blockchain.GetBlockByHash(blockHash) - if block == nil { - return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) - } - _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) - if err != nil { - return StorageRangeResult{}, err - } - defer release() - - st, err := statedb.StorageTrie(contractAddress) - if err != nil { - return StorageRangeResult{}, err - } - if st == nil { - return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) - } - return storageRangeAt(st, keyStart, maxResult) -} - -func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) { - it := trie.NewIterator(st.NodeIterator(start)) - result := StorageRangeResult{Storage: storageMap{}} - for i := 0; i < maxResult && it.Next(); i++ { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return StorageRangeResult{}, err - } - e := storageEntry{Value: common.BytesToHash(content)} - if preimage := st.GetKey(it.Key); preimage != nil { - preimage := common.BytesToHash(preimage) - e.Key = &preimage - } - result.Storage[common.BytesToHash(it.Key)] = e - } - // Add the 'next key' so clients can continue downloading. - if it.Next() { - next := common.BytesToHash(it.Key) - result.NextKey = &next - } - return result, nil -} - -// GetModifiedAccountsByNumber returns all accounts that have changed between the -// two blocks specified. A change is defined as a difference in nonce, balance, -// code hash, or storage hash. -// -// With one parameter, returns the list of accounts modified in the specified block. -func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) { - var startBlock, endBlock *types.Block - - startBlock = api.eth.blockchain.GetBlockByNumber(startNum) - if startBlock == nil { - return nil, fmt.Errorf("start block %x not found", startNum) - } - - if endNum == nil { - endBlock = startBlock - startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) - if startBlock == nil { - return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) - } - } else { - endBlock = api.eth.blockchain.GetBlockByNumber(*endNum) - if endBlock == nil { - return nil, fmt.Errorf("end block %d not found", *endNum) - } - } - return api.getModifiedAccounts(startBlock, endBlock) -} - -// GetModifiedAccountsByHash returns all accounts that have changed between the -// two blocks specified. A change is defined as a difference in nonce, balance, -// code hash, or storage hash. -// -// With one parameter, returns the list of accounts modified in the specified block. 
-func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { - var startBlock, endBlock *types.Block - startBlock = api.eth.blockchain.GetBlockByHash(startHash) - if startBlock == nil { - return nil, fmt.Errorf("start block %x not found", startHash) - } - - if endHash == nil { - endBlock = startBlock - startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) - if startBlock == nil { - return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) - } - } else { - endBlock = api.eth.blockchain.GetBlockByHash(*endHash) - if endBlock == nil { - return nil, fmt.Errorf("end block %x not found", *endHash) - } - } - return api.getModifiedAccounts(startBlock, endBlock) -} - -func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) { - if startBlock.Number().Uint64() >= endBlock.Number().Uint64() { - return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64()) - } - triedb := api.eth.BlockChain().StateCache().TrieDB() - - oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb) - if err != nil { - return nil, err - } - newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb) - if err != nil { - return nil, err - } - diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{})) - iter := trie.NewIterator(diff) - - var dirty []common.Address - for iter.Next() { - key := newTrie.GetKey(iter.Key) - if key == nil { - return nil, fmt.Errorf("no preimage found for hash %x", iter.Key) - } - dirty = append(dirty, common.BytesToAddress(key)) - } - return dirty, nil -} - -// GetAccessibleState returns the first number where the node has accessible -// state on disk. Note this being the post-state of that block and the pre-state -// of the next block. 
-// The (from, to) parameters are the sequence of blocks to search, which can go -// either forwards or backwards -func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) { - db := api.eth.ChainDb() - var pivot uint64 - if p := rawdb.ReadLastPivotNumber(db); p != nil { - pivot = *p - log.Info("Found fast-sync pivot marker", "number", pivot) - } - var resolveNum = func(num rpc.BlockNumber) (uint64, error) { - // We don't have state for pending (-2), so treat it as latest - if num.Int64() < 0 { - block := api.eth.blockchain.CurrentBlock() - if block == nil { - return 0, errors.New("current block missing") - } - return block.Number.Uint64(), nil - } - return uint64(num.Int64()), nil - } - var ( - start uint64 - end uint64 - delta = int64(1) - lastLog time.Time - err error - ) - if start, err = resolveNum(from); err != nil { - return 0, err - } - if end, err = resolveNum(to); err != nil { - return 0, err - } - if start == end { - return 0, errors.New("from and to needs to be different") - } - if start > end { - delta = -1 - } - for i := int64(start); i != int64(end); i += delta { - if time.Since(lastLog) > 8*time.Second { - log.Info("Finding roots", "from", start, "to", end, "at", i) - lastLog = time.Now() - } - if i < int64(pivot) { - continue - } - h := api.eth.BlockChain().GetHeaderByNumber(uint64(i)) - if h == nil { - return 0, fmt.Errorf("missing header %d", i) - } - if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok { - return uint64(i), nil - } - } - return 0, errors.New("no state found") -} - -// SetTrieFlushInterval configures how often in-memory tries are persisted -// to disk. The value is in terms of block processing time, not wall clock. -func (api *DebugAPI) SetTrieFlushInterval(interval string) error { - t, err := time.ParseDuration(interval) - if err != nil { - return err - } - api.eth.blockchain.SetTrieFlushInterval(t) - return nil -} diff --git a/eth/api_admin.go b/eth/api_admin.go new file mode 100644 index 000000000..4a3ccb84e --- /dev/null +++ b/eth/api_admin.go @@ -0,0 +1,143 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +// AdminAPI is the collection of Ethereum full node related APIs for node +// administration. +type AdminAPI struct { + eth *Ethereum +} + +// NewAdminAPI creates a new instance of AdminAPI. +func NewAdminAPI(eth *Ethereum) *AdminAPI { + return &AdminAPI{eth: eth} +} + +// ExportChain exports the current blockchain into a local file, +// or a range of blocks if first and last are non-nil. 
+func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
+	if first == nil && last != nil {
+		return false, errors.New("last cannot be specified without first")
+	}
+	if first != nil && last == nil {
+		head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
+		last = &head
+	}
+	if _, err := os.Stat(file); err == nil {
+		// File already exists. Allowing overwrite could be a DoS vector,
+		// since the 'file' may point to arbitrary paths on the drive.
+		return false, errors.New("location would overwrite an existing file")
+	}
+	// Make sure we can create the file to export into
+	out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+	if err != nil {
+		return false, err
+	}
+	defer out.Close()
+
+	var writer io.Writer = out
+	if strings.HasSuffix(file, ".gz") {
+		writer = gzip.NewWriter(writer)
+		defer writer.(*gzip.Writer).Close()
+	}
+
+	// Export the blockchain
+	if first != nil {
+		if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
+			return false, err
+		}
+	} else if err := api.eth.BlockChain().Export(writer); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
+	for _, b := range bs {
+		if !chain.HasBlock(b.Hash(), b.NumberU64()) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ImportChain imports a blockchain from a local file.
+func (api *AdminAPI) ImportChain(file string) (bool, error) {
+	// Make sure we can access the file to import
+	in, err := os.Open(file)
+	if err != nil {
+		return false, err
+	}
+	defer in.Close()
+
+	var reader io.Reader = in
+	if strings.HasSuffix(file, ".gz") {
+		if reader, err = gzip.NewReader(reader); err != nil {
+			return false, err
+		}
+	}
+
+	// Run the actual import in pre-configured batches
+	stream := rlp.NewStream(reader, 0)
+
+	blocks, index := make([]*types.Block, 0, 2500), 0
+	for batch := 0; ; batch++ {
+		// Load a batch of blocks from the input file
+		for len(blocks) < cap(blocks) {
+			block := new(types.Block)
+			if err := stream.Decode(block); err == io.EOF {
+				break
+			} else if err != nil {
+				return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
+			}
+			// Ignore the genesis block when importing blocks
+			if block.NumberU64() == 0 {
+				continue
+			}
+			blocks = append(blocks, block)
+			index++
+		}
+		if len(blocks) == 0 {
+			break
+		}
+
+		if hasAllBlocks(api.eth.BlockChain(), blocks) {
+			blocks = blocks[:0]
+			continue
+		}
+		// Import the batch and reset the buffer
+		if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {
+			return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
+		}
+		blocks = blocks[:0]
+	}
+	return true, nil
+}
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 1789f106e..dea745382 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -42,7 +42,7 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 )
 
-// EthAPIBackend implements ethapi.Backend for full nodes
+// EthAPIBackend implements ethapi.Backend and tracers.Backend for full nodes
 type EthAPIBackend struct {
 	extRPCEnabled       bool
 	allowUnprotectedTxs bool
@@ -68,6 +68,9 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb
 	// Pending block is only known by the miner
 	if number == rpc.PendingBlockNumber {
 		block := b.eth.miner.PendingBlock()
+		if block == nil {
+			return nil, errors.New("pending block is not available")
+		}
 		return block.Header(), nil
 	}
 	// Otherwise resolve and return the block
@@ -75,24 +78,18 @@ func (b *EthAPIBackend) 
HeaderByNumber(ctx context.Context, number rpc.BlockNumb return b.eth.blockchain.CurrentBlock(), nil } if number == rpc.FinalizedBlockNumber { - if !b.eth.Merger().TDDReached() { - return nil, errors.New("'finalized' tag not supported on pre-merge network") - } block := b.eth.blockchain.CurrentFinalBlock() - if block != nil { - return block, nil + if block == nil { + return nil, errors.New("finalized block not found") } - return nil, errors.New("finalized block not found") + return block, nil } if number == rpc.SafeBlockNumber { - if !b.eth.Merger().TDDReached() { - return nil, errors.New("'safe' tag not supported on pre-merge network") - } block := b.eth.blockchain.CurrentSafeBlock() - if block != nil { - return block, nil + if block == nil { + return nil, errors.New("safe block not found") } - return nil, errors.New("safe block not found") + return block, nil } return b.eth.blockchain.GetHeaderByNumber(uint64(number)), nil } @@ -122,6 +119,9 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe // Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := b.eth.miner.PendingBlock() + if block == nil { + return nil, errors.New("pending block is not available") + } return block, nil } // Otherwise resolve and return the block @@ -130,9 +130,6 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } if number == rpc.FinalizedBlockNumber { - if !b.eth.Merger().TDDReached() { - return nil, errors.New("'finalized' tag not supported on pre-merge network") - } header := b.eth.blockchain.CurrentFinalBlock() if header == nil { return nil, errors.New("finalized block not found") @@ -140,9 +137,6 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } if number == rpc.SafeBlockNumber { - if !b.eth.Merger().TDDReached() { - return nil, errors.New("'safe' tag not supported on pre-merge network") - } header := b.eth.blockchain.CurrentSafeBlock() if header == nil { return nil, errors.New("safe block not found") @@ -196,6 +190,9 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B // Pending state is only known by the miner if number == rpc.PendingBlockNumber { block, state := b.eth.miner.Pending() + if block == nil || state == nil { + return nil, nil, errors.New("pending state is not available") + } return state, block.Header(), nil } // Otherwise resolve the block number and return its state @@ -236,7 +233,7 @@ func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (type } func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { - return rawdb.ReadLogs(b.eth.chainDb, hash, number, b.ChainConfig()), nil + return rawdb.ReadLogs(b.eth.chainDb, hash, number), nil } func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { @@ -285,14 +282,18 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri } func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - return b.eth.txPool.AddLocal(signedTx) + return b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0] } func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { pending := b.eth.txPool.Pending(false) var txs types.Transactions for _, batch := range pending { - txs = 
append(txs, batch...) + for _, lazy := range batch { + if tx := lazy.Resolve(); tx != nil { + txs = append(txs, tx) + } + } } return txs, nil } @@ -310,24 +311,24 @@ func (b *EthAPIBackend) GetPoolNonce(ctx context.Context, addr common.Address) ( return b.eth.txPool.Nonce(addr), nil } -func (b *EthAPIBackend) Stats() (pending int, queued int) { +func (b *EthAPIBackend) Stats() (runnable int, blocked int) { return b.eth.txPool.Stats() } -func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { - return b.eth.TxPool().Content() +func (b *EthAPIBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { + return b.eth.txPool.Content() } -func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { - return b.eth.TxPool().ContentFrom(addr) +func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { + return b.eth.txPool.ContentFrom(addr) } func (b *EthAPIBackend) TxPool() *txpool.TxPool { - return b.eth.TxPool() + return b.eth.txPool } func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return b.eth.TxPool().SubscribeNewTxsEvent(ch) + return b.eth.txPool.SubscribeNewTxsEvent(ch) } func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress { @@ -402,7 +403,7 @@ func (b *EthAPIBackend) StartMining() error { } func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { - return b.eth.StateAtBlock(ctx, block, reexec, base, readOnly, preferDisk) + return b.eth.stateAtBlock(ctx, block, reexec, base, readOnly, preferDisk) } func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { diff --git a/eth/api_debug.go b/eth/api_debug.go new file mode 100644 index 000000000..22dd6b0cf --- /dev/null +++ b/eth/api_debug.go @@ -0,0 +1,436 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
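
Before the new eth/api_debug.go below, note the GetPoolTransactions rewrite in eth/api_backend.go above: txPool.Pending now hands back lazy handles instead of fully materialized transactions, and Resolve can return nil when a transaction was dropped between listing and resolution, hence the nil check. A sketch of that pattern with hypothetical types (Tx, LazyTx), not the actual txpool API:

	// Sketch only: the lazy-resolution pattern behind the
	// GetPoolTransactions change; Tx and LazyTx are hypothetical.
	package main

	import "fmt"

	type Tx struct{ Nonce uint64 }

	// LazyTx defers loading the full transaction. Resolve may return nil if
	// the underlying transaction has vanished, so callers must check.
	type LazyTx struct{ load func() *Tx }

	func (l *LazyTx) Resolve() *Tx { return l.load() }

	func main() {
		pending := []*LazyTx{
			{load: func() *Tx { return &Tx{Nonce: 1} }},
			{load: func() *Tx { return nil }}, // dropped since listing
		}
		var txs []*Tx
		for _, lazy := range pending {
			if tx := lazy.Resolve(); tx != nil { // skip vanished entries
				txs = append(txs, tx)
			}
		}
		fmt.Println(len(txs)) // 1
	}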
+ +package eth + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" +) + +// DebugAPI is the collection of Ethereum full node APIs for debugging the +// protocol. +type DebugAPI struct { + eth *Ethereum +} + +// NewDebugAPI creates a new DebugAPI instance. +func NewDebugAPI(eth *Ethereum) *DebugAPI { + return &DebugAPI{eth: eth} +} + +// DumpBlock retrieves the entire state of the database at a given block. +func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) { + opts := &state.DumpConfig{ + OnlyWithAddresses: true, + Max: AccountRangeMaxResults, // Sanity limit over RPC + } + if blockNr == rpc.PendingBlockNumber { + // If we're dumping the pending state, we need to request + // both the pending block as well as the pending state from + // the miner and operate on those + _, stateDb := api.eth.miner.Pending() + if stateDb == nil { + return state.Dump{}, errors.New("pending state is not available") + } + return stateDb.RawDump(opts), nil + } + var header *types.Header + switch blockNr { + case rpc.LatestBlockNumber: + header = api.eth.blockchain.CurrentBlock() + case rpc.FinalizedBlockNumber: + header = api.eth.blockchain.CurrentFinalBlock() + case rpc.SafeBlockNumber: + header = api.eth.blockchain.CurrentSafeBlock() + default: + block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) + if block == nil { + return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) + } + header = block.Header() + } + if header == nil { + return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) + } + stateDb, err := api.eth.BlockChain().StateAt(header.Root) + if err != nil { + return state.Dump{}, err + } + return stateDb.RawDump(opts), nil +} + +// Preimage is a debug API function that returns the preimage for a sha3 hash, if known. +func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { + if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil { + return preimage, nil + } + return nil, errors.New("unknown preimage") +} + +// BadBlockArgs represents the entries in the list returned when bad blocks are queried. +type BadBlockArgs struct { + Hash common.Hash `json:"hash"` + Block map[string]interface{} `json:"block"` + RLP string `json:"rlp"` +} + +// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network +// and returns them as a JSON list of block hashes. 
+func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) { + var ( + blocks = rawdb.ReadAllBadBlocks(api.eth.chainDb) + results = make([]*BadBlockArgs, 0, len(blocks)) + ) + for _, block := range blocks { + var ( + blockRlp string + blockJSON map[string]interface{} + ) + if rlpBytes, err := rlp.EncodeToBytes(block); err != nil { + blockRlp = err.Error() // Hacky, but hey, it works + } else { + blockRlp = fmt.Sprintf("%#x", rlpBytes) + } + blockJSON = ethapi.RPCMarshalBlock(block, true, true, api.eth.APIBackend.ChainConfig()) + results = append(results, &BadBlockArgs{ + Hash: block.Hash(), + RLP: blockRlp, + Block: blockJSON, + }) + } + return results, nil +} + +// AccountRangeMaxResults is the maximum number of results to be returned per call +const AccountRangeMaxResults = 256 + +// AccountRange enumerates all accounts in the given block and start point in paging request +func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) { + var stateDb *state.StateDB + var err error + + if number, ok := blockNrOrHash.Number(); ok { + if number == rpc.PendingBlockNumber { + // If we're dumping the pending state, we need to request + // both the pending block as well as the pending state from + // the miner and operate on those + _, stateDb = api.eth.miner.Pending() + if stateDb == nil { + return state.IteratorDump{}, errors.New("pending state is not available") + } + } else { + var header *types.Header + switch number { + case rpc.LatestBlockNumber: + header = api.eth.blockchain.CurrentBlock() + case rpc.FinalizedBlockNumber: + header = api.eth.blockchain.CurrentFinalBlock() + case rpc.SafeBlockNumber: + header = api.eth.blockchain.CurrentSafeBlock() + default: + block := api.eth.blockchain.GetBlockByNumber(uint64(number)) + if block == nil { + return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) + } + header = block.Header() + } + if header == nil { + return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) + } + stateDb, err = api.eth.BlockChain().StateAt(header.Root) + if err != nil { + return state.IteratorDump{}, err + } + } + } else if hash, ok := blockNrOrHash.Hash(); ok { + block := api.eth.blockchain.GetBlockByHash(hash) + if block == nil { + return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex()) + } + stateDb, err = api.eth.BlockChain().StateAt(block.Root()) + if err != nil { + return state.IteratorDump{}, err + } + } else { + return state.IteratorDump{}, errors.New("either block number or block hash must be specified") + } + + opts := &state.DumpConfig{ + SkipCode: nocode, + SkipStorage: nostorage, + OnlyWithAddresses: !incompletes, + Start: start, + Max: uint64(maxResults), + } + if maxResults > AccountRangeMaxResults || maxResults <= 0 { + opts.Max = AccountRangeMaxResults + } + return stateDb.IteratorDump(opts), nil +} + +// StorageRangeResult is the result of a debug_storageRangeAt API call. +type StorageRangeResult struct { + Storage storageMap `json:"storage"` + NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie. +} + +type storageMap map[common.Hash]storageEntry + +type storageEntry struct { + Key *common.Hash `json:"key"` + Value common.Hash `json:"value"` +} + +// StorageRangeAt returns the storage at the given block height and transaction index. 
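+// Illustrative RPC usage (rpcClient is an assumed *rpc.Client; the argument
+// values are placeholders):
+//
+//	var res eth.StorageRangeResult
+//	err := rpcClient.CallContext(ctx, &res, "debug_storageRangeAt",
+//		blockHash, txIndex, contractAddr, keyStart, maxResult)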
+func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { + var block *types.Block + + block, err := api.eth.APIBackend.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return StorageRangeResult{}, err + } + if block == nil { + return StorageRangeResult{}, fmt.Errorf("block %v not found", blockNrOrHash) + } + _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) + if err != nil { + return StorageRangeResult{}, err + } + defer release() + + return storageRangeAt(statedb, block.Root(), contractAddress, keyStart, maxResult) +} + +func storageRangeAt(statedb *state.StateDB, root common.Hash, address common.Address, start []byte, maxResult int) (StorageRangeResult, error) { + storageRoot := statedb.GetStorageRoot(address) + if storageRoot == types.EmptyRootHash || storageRoot == (common.Hash{}) { + return StorageRangeResult{}, nil // empty storage + } + id := trie.StorageTrieID(root, crypto.Keccak256Hash(address.Bytes()), storageRoot) + tr, err := trie.NewStateTrie(id, statedb.Database().TrieDB()) + if err != nil { + return StorageRangeResult{}, err + } + trieIt, err := tr.NodeIterator(start) + if err != nil { + return StorageRangeResult{}, err + } + it := trie.NewIterator(trieIt) + result := StorageRangeResult{Storage: storageMap{}} + for i := 0; i < maxResult && it.Next(); i++ { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return StorageRangeResult{}, err + } + e := storageEntry{Value: common.BytesToHash(content)} + if preimage := tr.GetKey(it.Key); preimage != nil { + preimage := common.BytesToHash(preimage) + e.Key = &preimage + } + result.Storage[common.BytesToHash(it.Key)] = e + } + // Add the 'next key' so clients can continue downloading. + if it.Next() { + next := common.BytesToHash(it.Key) + result.NextKey = &next + } + return result, nil +} + +// GetModifiedAccountsByNumber returns all accounts that have changed between the +// two blocks specified. A change is defined as a difference in nonce, balance, +// code hash, or storage hash. +// +// With one parameter, returns the list of accounts modified in the specified block. +func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) { + var startBlock, endBlock *types.Block + + startBlock = api.eth.blockchain.GetBlockByNumber(startNum) + if startBlock == nil { + return nil, fmt.Errorf("start block %x not found", startNum) + } + + if endNum == nil { + endBlock = startBlock + startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) + if startBlock == nil { + return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) + } + } else { + endBlock = api.eth.blockchain.GetBlockByNumber(*endNum) + if endBlock == nil { + return nil, fmt.Errorf("end block %d not found", *endNum) + } + } + return api.getModifiedAccounts(startBlock, endBlock) +} + +// GetModifiedAccountsByHash returns all accounts that have changed between the +// two blocks specified. A change is defined as a difference in nonce, balance, +// code hash, or storage hash. +// +// With one parameter, returns the list of accounts modified in the specified block. 
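+// Illustrative RPC usage (rpcClient is an assumed *rpc.Client; pass nil as
+// the second parameter for the single-block form):
+//
+//	var dirty []common.Address
+//	err := rpcClient.CallContext(ctx, &dirty,
+//		"debug_getModifiedAccountsByHash", startHash, endHash)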
+func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { + var startBlock, endBlock *types.Block + startBlock = api.eth.blockchain.GetBlockByHash(startHash) + if startBlock == nil { + return nil, fmt.Errorf("start block %x not found", startHash) + } + + if endHash == nil { + endBlock = startBlock + startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) + if startBlock == nil { + return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) + } + } else { + endBlock = api.eth.blockchain.GetBlockByHash(*endHash) + if endBlock == nil { + return nil, fmt.Errorf("end block %x not found", *endHash) + } + } + return api.getModifiedAccounts(startBlock, endBlock) +} + +func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) { + if startBlock.Number().Uint64() >= endBlock.Number().Uint64() { + return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64()) + } + triedb := api.eth.BlockChain().TrieDB() + + oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb) + if err != nil { + return nil, err + } + newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb) + if err != nil { + return nil, err + } + oldIt, err := oldTrie.NodeIterator([]byte{}) + if err != nil { + return nil, err + } + newIt, err := newTrie.NodeIterator([]byte{}) + if err != nil { + return nil, err + } + diff, _ := trie.NewDifferenceIterator(oldIt, newIt) + iter := trie.NewIterator(diff) + + var dirty []common.Address + for iter.Next() { + key := newTrie.GetKey(iter.Key) + if key == nil { + return nil, fmt.Errorf("no preimage found for hash %x", iter.Key) + } + dirty = append(dirty, common.BytesToAddress(key)) + } + return dirty, nil +} + +// GetAccessibleState returns the first number where the node has accessible +// state on disk. Note this being the post-state of that block and the pre-state +// of the next block. 
+// The (from, to) parameters are the sequence of blocks to search, which can go +// either forwards or backwards +func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) { + db := api.eth.ChainDb() + var pivot uint64 + if p := rawdb.ReadLastPivotNumber(db); p != nil { + pivot = *p + log.Info("Found fast-sync pivot marker", "number", pivot) + } + var resolveNum = func(num rpc.BlockNumber) (uint64, error) { + // We don't have state for pending (-2), so treat it as latest + if num.Int64() < 0 { + block := api.eth.blockchain.CurrentBlock() + if block == nil { + return 0, errors.New("current block missing") + } + return block.Number.Uint64(), nil + } + return uint64(num.Int64()), nil + } + var ( + start uint64 + end uint64 + delta = int64(1) + lastLog time.Time + err error + ) + if start, err = resolveNum(from); err != nil { + return 0, err + } + if end, err = resolveNum(to); err != nil { + return 0, err + } + if start == end { + return 0, errors.New("from and to needs to be different") + } + if start > end { + delta = -1 + } + for i := int64(start); i != int64(end); i += delta { + if time.Since(lastLog) > 8*time.Second { + log.Info("Finding roots", "from", start, "to", end, "at", i) + lastLog = time.Now() + } + if i < int64(pivot) { + continue + } + h := api.eth.BlockChain().GetHeaderByNumber(uint64(i)) + if h == nil { + return 0, fmt.Errorf("missing header %d", i) + } + if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok { + return uint64(i), nil + } + } + return 0, errors.New("no state found") +} + +// SetTrieFlushInterval configures how often in-memory tries are persisted +// to disk. The value is in terms of block processing time, not wall clock. +// If the value is shorter than the block generation time, or even 0 or negative, +// the node will flush trie after processing each block (effectively archive mode). 
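+// Illustrative RPC usage (rpcClient is an assumed *rpc.Client; the interval
+// string is parsed with time.ParseDuration):
+//
+//	err := rpcClient.CallContext(ctx, nil, "debug_setTrieFlushInterval", "72h")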
+func (api *DebugAPI) SetTrieFlushInterval(interval string) error { + t, err := time.ParseDuration(interval) + if err != nil { + return err + } + api.eth.blockchain.SetTrieFlushInterval(t) + return nil +} + +// GetTrieFlushInterval gets the current value of in-memory trie flush interval +func (api *DebugAPI) GetTrieFlushInterval() string { + return api.eth.blockchain.GetTrieFlushInterval().String() +} diff --git a/eth/api_test.go b/eth/api_debug_test.go similarity index 79% rename from eth/api_test.go rename to eth/api_debug_test.go index dec1b34dd..3d3444a87 100644 --- a/eth/api_test.go +++ b/eth/api_debug_test.go @@ -21,7 +21,6 @@ import ( "fmt" "math/big" "reflect" - "sort" "testing" "github.com/davecgh/go-spew/spew" @@ -31,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/trie" + "golang.org/x/exp/slices" ) var dumper = spew.ConfigState{Indent: " "} @@ -58,46 +58,40 @@ func accountRangeTest(t *testing.T, trie *state.Trie, statedb *state.StateDB, st return result } -type resultHash []common.Hash - -func (h resultHash) Len() int { return len(h) } -func (h resultHash) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h resultHash) Less(i, j int) bool { return bytes.Compare(h[i].Bytes(), h[j].Bytes()) < 0 } - func TestAccountRange(t *testing.T) { t.Parallel() var ( - statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true}) - state, _ = state.New(types.EmptyRootHash, statedb, nil) - addrs = [AccountRangeMaxResults * 2]common.Address{} - m = map[common.Address]bool{} + statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true}) + sdb, _ = state.New(types.EmptyRootHash, statedb, nil) + addrs = [AccountRangeMaxResults * 2]common.Address{} + m = map[common.Address]bool{} ) for i := range addrs { hash := common.HexToHash(fmt.Sprintf("%x", i)) addr := common.BytesToAddress(crypto.Keccak256Hash(hash.Bytes()).Bytes()) addrs[i] = addr - state.SetBalance(addrs[i], big.NewInt(1)) + sdb.SetBalance(addrs[i], big.NewInt(1)) if _, ok := m[addr]; ok { t.Fatalf("bad") } else { m[addr] = true } } - state.Commit(true) - root := state.IntermediateRoot(true) + root, _ := sdb.Commit(0, true) + sdb, _ = state.New(root, statedb, nil) trie, err := statedb.OpenTrie(root) if err != nil { t.Fatal(err) } - accountRangeTest(t, &trie, state, common.Hash{}, AccountRangeMaxResults/2, AccountRangeMaxResults/2) + accountRangeTest(t, &trie, sdb, common.Hash{}, AccountRangeMaxResults/2, AccountRangeMaxResults/2) // test pagination - firstResult := accountRangeTest(t, &trie, state, common.Hash{}, AccountRangeMaxResults, AccountRangeMaxResults) - secondResult := accountRangeTest(t, &trie, state, common.BytesToHash(firstResult.Next), AccountRangeMaxResults, AccountRangeMaxResults) + firstResult := accountRangeTest(t, &trie, sdb, common.Hash{}, AccountRangeMaxResults, AccountRangeMaxResults) + secondResult := accountRangeTest(t, &trie, sdb, common.BytesToHash(firstResult.Next), AccountRangeMaxResults, AccountRangeMaxResults) - hList := make(resultHash, 0) + hList := make([]common.Hash, 0) for addr1 := range firstResult.Accounts { // If address is empty, then it makes no sense to compare // them as they might be two different accounts. @@ -111,9 +105,9 @@ func TestAccountRange(t *testing.T) { } // Test to see if it's possible to recover from the middle of the previous // set and get an even split between the first and second sets. 
- sort.Sort(hList) + slices.SortFunc(hList, common.Hash.Cmp) middleH := hList[AccountRangeMaxResults/2] - middleResult := accountRangeTest(t, &trie, state, middleH, AccountRangeMaxResults, AccountRangeMaxResults) + middleResult := accountRangeTest(t, &trie, sdb, middleH, AccountRangeMaxResults, AccountRangeMaxResults) missing, infirst, insecond := 0, 0, 0 for h := range middleResult.Accounts { if _, ok := firstResult.Accounts[h]; ok { @@ -142,8 +136,10 @@ func TestEmptyAccountRange(t *testing.T) { statedb = state.NewDatabase(rawdb.NewMemoryDatabase()) st, _ = state.New(types.EmptyRootHash, statedb, nil) ) - st.Commit(true) - st.IntermediateRoot(true) + // Commit(although nothing to flush) and re-init the statedb + st.Commit(0, true) + st, _ = state.New(types.EmptyRootHash, statedb, nil) + results := st.IteratorDump(&state.DumpConfig{ SkipCode: true, SkipStorage: true, @@ -163,9 +159,10 @@ func TestStorageRangeAt(t *testing.T) { // Create a state where account 0x010000... has a few storage entries. var ( - state, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - addr = common.Address{0x01} - keys = []common.Hash{ // hashes of Keys of storage + db = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true}) + sdb, _ = state.New(types.EmptyRootHash, db, nil) + addr = common.Address{0x01} + keys = []common.Hash{ // hashes of Keys of storage common.HexToHash("340dd630ad21bf010b4e676dbfa9ba9a02175262d1fa356232cfde6cb5b47ef2"), common.HexToHash("426fcb404ab2d5d8e61a3d918108006bbb0a9be65e92235bb10eefbdb6dcd053"), common.HexToHash("48078cfed56339ea54962e72c37c7f588fc4f8e5bc173827ba75cb10a63a96a5"), @@ -179,8 +176,10 @@ func TestStorageRangeAt(t *testing.T) { } ) for _, entry := range storage { - state.SetState(addr, *entry.Key, entry.Value) + sdb.SetState(addr, *entry.Key, entry.Value) } + root, _ := sdb.Commit(0, false) + sdb, _ = state.New(root, db, nil) // Check a few combinations of limit and start/end. tests := []struct { @@ -210,11 +209,7 @@ func TestStorageRangeAt(t *testing.T) { }, } for _, test := range tests { - tr, err := state.StorageTrie(addr) - if err != nil { - t.Error(err) - } - result, err := storageRangeAt(tr, test.start, test.limit) + result, err := storageRangeAt(sdb, root, addr, test.start, test.limit) if err != nil { t.Error(err) } diff --git a/eth/api_miner.go b/eth/api_miner.go new file mode 100644 index 000000000..477531d49 --- /dev/null +++ b/eth/api_miner.go @@ -0,0 +1,85 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// MinerAPI provides an API to control the miner. 
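+// The methods below are served under the miner namespace; an illustrative
+// call (rpcClient is an assumed *rpc.Client):
+//
+//	err := rpcClient.CallContext(ctx, nil, "miner_setGasLimit", hexutil.Uint64(30_000_000))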
+type MinerAPI struct {
+	e *Ethereum
+}
+
+// NewMinerAPI creates a new MinerAPI instance.
+func NewMinerAPI(e *Ethereum) *MinerAPI {
+	return &MinerAPI{e}
+}
+
+// Start starts the miner. The minimum gas price configured on the node is
+// propagated to the transaction pool before block production begins.
+func (api *MinerAPI) Start() error {
+	return api.e.StartMining()
+}
+
+// Stop terminates the miner, both at the consensus engine level as well as at
+// the block creation level.
+func (api *MinerAPI) Stop() {
+	api.e.StopMining()
+}
+
+// SetExtra sets the extra data string that is included when this miner mines a block.
+func (api *MinerAPI) SetExtra(extra string) (bool, error) {
+	if err := api.e.Miner().SetExtra([]byte(extra)); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// SetGasPrice sets the minimum accepted gas price for the miner.
+func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
+	api.e.lock.Lock()
+	api.e.gasPrice = (*big.Int)(&gasPrice)
+	api.e.lock.Unlock()
+
+	api.e.txPool.SetGasTip((*big.Int)(&gasPrice))
+	return true
+}
+
+// SetGasLimit sets the gas limit to target during mining.
+func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool {
+	api.e.Miner().SetGasCeil(uint64(gasLimit))
+	return true
+}
+
+// SetEtherbase sets the etherbase of the miner.
+func (api *MinerAPI) SetEtherbase(etherbase common.Address) bool {
+	api.e.SetEtherbase(etherbase)
+	return true
+}
+
+// SetRecommitInterval updates the interval for miner sealing work recommitting.
+func (api *MinerAPI) SetRecommitInterval(interval int) {
+	api.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond)
+}
diff --git a/eth/backend.go b/eth/backend.go
index 4ba8df951..38c0fa974 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -35,6 +35,8 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state/pruner"
 	"github.com/ethereum/go-ethereum/core/txpool"
+	"github.com/ethereum/go-ethereum/core/txpool/blobpool"
+	"github.com/ethereum/go-ethereum/core/txpool/legacypool"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/eth/downloader"
@@ -66,7 +68,8 @@ type Ethereum struct {
 	config *ethconfig.Config
 
 	// Handlers
-	txPool *txpool.TxPool
+	txPool *txpool.TxPool
+
 	blockchain        *core.BlockChain
 	handler           *handler
 	ethDialCandidates enode.Iterator
@@ -130,8 +133,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	if err != nil {
 		return nil, err
 	}
-	if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
-		log.Error("Failed to recover state", "error", err)
+	// Try to recover offline state pruning only in the hash-based state scheme.
+	if config.StateScheme == rawdb.HashScheme {
+		if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
+			log.Error("Failed to recover state", "error", err)
+		}
 	}
 	// Transfer mining-related config to the ethash config.
chainConfig, err := core.LoadChainConfig(chainDb, config.Genesis) @@ -158,7 +164,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { p2pServer: stack.Server(), shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb), } - bcVersion := rawdb.ReadDatabaseVersion(chainDb) var dbVer = "" if bcVersion != nil { @@ -182,14 +187,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } cacheConfig = &core.CacheConfig{ TrieCleanLimit: config.TrieCleanCache, - TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal), - TrieCleanRejournal: config.TrieCleanCacheRejournal, TrieCleanNoPrefetch: config.NoPrefetch, TrieDirtyLimit: config.TrieDirtyCache, TrieDirtyDisabled: config.NoPruning, TrieTimeLimit: config.TrieTimeout, SnapshotLimit: config.SnapshotCache, Preimages: config.Preimages, + StateHistory: config.StateHistory, + StateScheme: config.StateScheme, } ) // Override the chain config with provided settings. @@ -197,17 +202,29 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.OverrideCancun != nil { overrides.OverrideCancun = config.OverrideCancun } - eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit) + if config.OverrideVerkle != nil { + overrides.OverrideVerkle = config.OverrideVerkle + } + eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TransactionHistory) if err != nil { return nil, err } eth.bloomIndexer.Start(eth.blockchain) + if config.BlobPool.Datadir != "" { + config.BlobPool.Datadir = stack.ResolvePath(config.BlobPool.Datadir) + } + blobPool := blobpool.New(config.BlobPool, eth.blockchain) + if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } - eth.txPool = txpool.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain) + legacyPool := legacypool.New(config.TxPool, eth.blockchain) + eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + if err != nil { + return nil, err + } // Permit the downloader to use the trie cache allowance during fast sync cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit if eth.handler, err = newHandler(&handlerConfig{ @@ -399,7 +416,7 @@ func (s *Ethereum) StartMining() error { s.lock.RLock() price := s.gasPrice s.lock.RUnlock() - s.txPool.SetGasPrice(price) + s.txPool.SetGasTip(price) // Configure the local mining address eb, err := s.Etherbase() @@ -425,7 +442,7 @@ func (s *Ethereum) StartMining() error { } // If mining is started, we can disable the transaction rejection mechanism // introduced to speed sync times. 
- s.handler.acceptTxs.Store(true) + s.handler.enableSyncedFeatures() go s.miner.Start() } @@ -458,7 +475,7 @@ func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } func (s *Ethereum) Synced() bool { return s.handler.acceptTxs.Load() } -func (s *Ethereum) SetSynced() { s.handler.acceptTxs.Store(true) } +func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } func (s *Ethereum) Merger() *consensus.Merger { return s.merger } @@ -512,7 +529,7 @@ func (s *Ethereum) Stop() error { // Then stop everything else. s.bloomIndexer.Close() close(s.closeBloomHandler) - s.txPool.Stop() + s.txPool.Close() s.miner.Close() s.blockchain.Stop() s.engine.Close() diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 456bd7741..9690d4330 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -78,11 +78,14 @@ const ( var caps = []string{ "engine_forkchoiceUpdatedV1", "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", "engine_exchangeTransitionConfigurationV1", "engine_getPayloadV1", "engine_getPayloadV2", + "engine_getPayloadV3", "engine_newPayloadV1", "engine_newPayloadV2", + "engine_newPayloadV3", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", } @@ -133,6 +136,13 @@ type ConsensusAPI struct { // NewConsensusAPI creates a new consensus api for the given backend. // The underlying blockchain needs to have a valid terminal total difficulty set. func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI { + api := newConsensusAPIWithoutHeartbeat(eth) + go api.heartbeat() + return api +} + +// newConsensusAPIWithoutHeartbeat creates a new consensus api for the SimulatedBeacon Node. +func newConsensusAPIWithoutHeartbeat(eth *eth.Ethereum) *ConsensusAPI { if eth.BlockChain().Config().TerminalTotalDifficulty == nil { log.Warn("Engine API started but chain not configured for merge yet") } @@ -144,8 +154,6 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI { invalidTipsets: make(map[common.Hash]*types.Header), } eth.Downloader().SetBadBlockCallback(api.setInvalidAncestor) - go api.heartbeat() - return api } @@ -185,17 +193,36 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, pa return api.forkchoiceUpdated(update, payloadAttributes) } +// ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root in the payload attributes. 
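+// A sketch of the payload attributes a consensus client would send once Cancun
+// is active (field names per the beacon/engine package; prevRandao, feeRecipient
+// and parentBeaconBlockRoot are illustrative variables):
+//
+//	attrs := &engine.PayloadAttributes{
+//		Timestamp:             parent.Time + 12,
+//		Random:                prevRandao,
+//		SuggestedFeeRecipient: feeRecipient,
+//		Withdrawals:           []*types.Withdrawal{},
+//		BeaconRoot:            &parentBeaconBlockRoot,
+//	}
+//	resp, err := api.ForkchoiceUpdatedV3(fcState, attrs)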
+func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + if payloadAttributes != nil { + if err := api.verifyPayloadAttributes(payloadAttributes); err != nil { + return engine.STATUS_INVALID, engine.InvalidParams.With(err) + } + } + return api.forkchoiceUpdated(update, payloadAttributes) +} + func (api *ConsensusAPI) verifyPayloadAttributes(attr *engine.PayloadAttributes) error { - if !api.eth.BlockChain().Config().IsShanghai(api.eth.BlockChain().Config().LondonBlock, attr.Timestamp) { - // Reject payload attributes with withdrawals before shanghai - if attr.Withdrawals != nil { - return errors.New("withdrawals before shanghai") - } - } else { - // Reject payload attributes with nil withdrawals after shanghai - if attr.Withdrawals == nil { - return errors.New("missing withdrawals list") - } + c := api.eth.BlockChain().Config() + + // Verify withdrawals attribute for Shanghai. + if err := checkAttribute(c.IsShanghai, attr.Withdrawals != nil, attr.Timestamp); err != nil { + return fmt.Errorf("invalid withdrawals: %w", err) + } + // Verify beacon root attribute for Cancun. + if err := checkAttribute(c.IsCancun, attr.BeaconRoot != nil, attr.Timestamp); err != nil { + return fmt.Errorf("invalid parent beacon block root: %w", err) + } + return nil +} + +func checkAttribute(active func(*big.Int, uint64) bool, exists bool, time uint64) error { + if active(common.Big0, time) && !exists { + return errors.New("fork active, missing expected attribute") + } + if !active(common.Big0, time) && exists { + return errors.New("fork inactive, unexpected attribute set") } return nil } @@ -343,6 +370,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl FeeRecipient: payloadAttributes.SuggestedFeeRecipient, Random: payloadAttributes.Random, Withdrawals: payloadAttributes.Withdrawals, + BeaconRoot: payloadAttributes.BeaconRoot, } id := args.Id() // If we already are busy generating this work, then we do not need @@ -393,7 +421,7 @@ func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config engine.Transit // GetPayloadV1 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.ExecutableData, error) { - data, err := api.getPayload(payloadID) + data, err := api.getPayload(payloadID, false) if err != nil { return nil, err } @@ -402,12 +430,17 @@ func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.Execu // GetPayloadV2 returns a cached payload by id. func (api *ConsensusAPI) GetPayloadV2(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { - return api.getPayload(payloadID) + return api.getPayload(payloadID, false) } -func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { +// GetPayloadV3 returns a cached payload by id. 
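+// Post-Cancun the envelope also carries the blobs bundle; an illustrative
+// consumer:
+//
+//	envelope, err := api.GetPayloadV3(payloadID)
+//	if err == nil && envelope.BlobsBundle != nil {
+//		blobs := envelope.BlobsBundle.Blobs // served to the CL beside the payload
+//		_ = blobs
+//	}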
+func (api *ConsensusAPI) GetPayloadV3(payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + return api.getPayload(payloadID, false) +} + +func (api *ConsensusAPI) getPayload(payloadID engine.PayloadID, full bool) (*engine.ExecutionPayloadEnvelope, error) { log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID) - data := api.localBlocks.get(payloadID) + data := api.localBlocks.get(payloadID, full) if data == nil { return nil, engine.UnknownPayload } @@ -419,7 +452,7 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl if params.Withdrawals != nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1")) } - return api.newPayload(params) + return api.newPayload(params, nil, nil) } // NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. @@ -431,10 +464,35 @@ func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.Payl } else if params.Withdrawals != nil { return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) } - return api.newPayload(params) + if api.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(params.Number), params.Timestamp) { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("newPayloadV2 called post-cancun")) + } + return api.newPayload(params, nil, nil) } -func (api *ConsensusAPI) newPayload(params engine.ExecutableData) (engine.PayloadStatusV1, error) { +// NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. +func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { + if params.ExcessBlobGas == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun")) + } + if params.BlobGasUsed == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil params.BlobGasUsed post-cancun")) + } + if versionedHashes == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun")) + } + if beaconRoot == nil { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil parentBeaconBlockRoot post-cancun")) + } + + if !api.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(params.Number), params.Timestamp) { + return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 called pre-cancun")) + } + + return api.newPayload(params, versionedHashes, beaconRoot) +} + +func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { // The locking here is, strictly, not required. Without these locks, this can happen: // // 1. NewPayload( execdata-N ) is invoked from the CL. 
It goes all the way down to @@ -452,9 +510,9 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData) (engine.Payloa defer api.newPayloadLock.Unlock() log.Trace("Engine API request received", "method", "NewPayload", "number", params.Number, "hash", params.BlockHash) - block, err := engine.ExecutableDataToBlock(params) + block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot) if err != nil { - log.Debug("Invalid NewPayload params", "params", params, "error", err) + log.Warn("Invalid NewPayload params", "params", params, "error", err) return engine.PayloadStatusV1{Status: engine.INVALID}, nil } // Stash away the last update to warn the user if the beacon client goes offline @@ -715,7 +773,7 @@ func (api *ConsensusAPI) ExchangeCapabilities([]string) []string { return caps } -// GetPayloadBodiesV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list +// GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list // of block bodies by the engine api. func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 { var bodies = make([]*engine.ExecutionPayloadBodyV1, len(hashes)) diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 5bab7ba18..59f44fafe 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -37,15 +37,18 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" + "github.com/mattn/go-colorable" ) var ( @@ -67,8 +70,11 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block) { engine = beaconConsensus.NewFaker() } genesis := &core.Genesis{ - Config: &config, - Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}}, + Config: &config, + Alloc: core.GenesisAlloc{ + testAddr: {Balance: testBalance}, + params.BeaconRootsStorageAddress: {Balance: common.Big0, Code: common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500")}, + }, ExtraData: []byte("test genesis"), Timestamp: 9000, BaseFee: big.NewInt(params.InitialBaseFee), @@ -106,7 +112,7 @@ func TestEth2AssembleBlock(t *testing.T) { if err != nil { t.Fatalf("error signing transaction, err=%v", err) } - ethservice.TxPool().AddLocal(tx) + ethservice.TxPool().Add([]*types.Transaction{tx}, true, false) blockParams := engine.PayloadAttributes{ Timestamp: blocks[9].Time() + 5, } @@ -142,7 +148,8 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) { api := NewConsensusAPI(ethservice) // Put the 10th block's tx in the pool and produce a new block - api.eth.TxPool().AddRemotesSync(blocks[9].Transactions()) + txs := blocks[9].Transactions() + api.eth.TxPool().Add(txs, false, true) blockParams := engine.PayloadAttributes{ Timestamp: blocks[8].Time() + 5, } @@ -181,7 +188,8 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { api := NewConsensusAPI(ethservice) 
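+	// Note: txpool.TxPool.Add replaces the former AddLocal/AddRemotesSync
+	// helpers; the two booleans mark the transactions as local and request a
+	// synchronous pool reorganisation, respectively (per the call sites
+	// updated throughout this change).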
// Put the 10th block's tx in the pool and produce a new block - ethservice.TxPool().AddLocals(blocks[9].Transactions()) + txs := blocks[9].Transactions() + ethservice.TxPool().Add(txs, true, false) blockParams := engine.PayloadAttributes{ Timestamp: blocks[8].Time() + 5, } @@ -201,6 +209,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) { Timestamp: blockParams.Timestamp, FeeRecipient: blockParams.SuggestedFeeRecipient, Random: blockParams.Random, + BeaconRoot: blockParams.BeaconRoot, }).Id() execData, err := api.GetPayloadV1(payloadID) if err != nil { @@ -303,7 +312,7 @@ func TestEth2NewBlock(t *testing.T) { statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) nonce := statedb.GetNonce(testAddr) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) - ethservice.TxPool().AddLocal(tx) + ethservice.TxPool().Add([]*types.Transaction{tx}, true, false) execData, err := assembleWithTransactions(api, parent.Hash(), &engine.PayloadAttributes{ Timestamp: parent.Time() + 5, @@ -311,7 +320,7 @@ func TestEth2NewBlock(t *testing.T) { if err != nil { t.Fatalf("Failed to create the executable data %v", err) } - block, err := engine.ExecutableDataToBlock(*execData) + block, err := engine.ExecutableDataToBlock(*execData, nil, nil) if err != nil { t.Fatalf("Failed to convert executable data to block %v", err) } @@ -353,7 +362,7 @@ func TestEth2NewBlock(t *testing.T) { if err != nil { t.Fatalf("Failed to create the executable data %v", err) } - block, err := engine.ExecutableDataToBlock(*execData) + block, err := engine.ExecutableDataToBlock(*execData, nil, nil) if err != nil { t.Fatalf("Failed to convert executable data to block %v", err) } @@ -472,7 +481,7 @@ func TestFullAPI(t *testing.T) { statedb, _ := ethservice.BlockChain().StateAt(parent.Root) nonce := statedb.GetNonce(testAddr) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) - ethservice.TxPool().AddLocal(tx) + ethservice.TxPool().Add([]*types.Transaction{tx}, true, false) } setupBlocks(t, ethservice, 10, parent, callback, nil) @@ -598,7 +607,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) { GasPrice: big.NewInt(2 * params.InitialBaseFee), Data: logCode, }) - ethservice.TxPool().AddRemotesSync([]*types.Transaction{tx}) + ethservice.TxPool().Add([]*types.Transaction{tx}, false, true) var ( params = engine.PayloadAttributes{ Timestamp: parent.Time + 1, @@ -664,6 +673,7 @@ func assembleBlock(api *ConsensusAPI, parentHash common.Hash, params *engine.Pay FeeRecipient: params.SuggestedFeeRecipient, Random: params.Random, Withdrawals: params.Withdrawals, + BeaconRoot: params.BeaconRoot, } payload, err := api.eth.Miner().BuildPayload(args) if err != nil { @@ -985,7 +995,7 @@ func TestSimultaneousNewBlock(t *testing.T) { t.Fatal(testErr) } } - block, err := engine.ExecutableDataToBlock(*execData) + block, err := engine.ExecutableDataToBlock(*execData, nil, nil) if err != nil { t.Fatalf("Failed to convert executable data to block %v", err) } @@ -1065,6 +1075,7 @@ func TestWithdrawals(t *testing.T) { FeeRecipient: blockParams.SuggestedFeeRecipient, Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, + BeaconRoot: blockParams.BeaconRoot, }).Id() execData, err := api.GetPayloadV2(payloadID) if err != nil { @@ -1112,6 +1123,7 @@ func TestWithdrawals(t *testing.T) 
{ FeeRecipient: blockParams.SuggestedFeeRecipient, Random: blockParams.Random, Withdrawals: blockParams.Withdrawals, + BeaconRoot: blockParams.BeaconRoot, }).Id() execData, err = api.GetPayloadV2(payloadID) if err != nil { @@ -1242,6 +1254,7 @@ func TestNilWithdrawals(t *testing.T) { Timestamp: test.blockParams.Timestamp, FeeRecipient: test.blockParams.SuggestedFeeRecipient, Random: test.blockParams.Random, + BeaconRoot: test.blockParams.BeaconRoot, }).Id() execData, err := api.GetPayloadV2(payloadID) if err != nil { @@ -1272,7 +1285,7 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) { statedb, _ := ethservice.BlockChain().StateAt(parent.Root) nonce := statedb.GetNonce(testAddr) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) - ethservice.TxPool().AddLocal(tx) + ethservice.TxPool().Add([]*types.Transaction{tx}, false, false) } withdrawals := make([][]*types.Withdrawal, 10) @@ -1508,3 +1521,124 @@ func equalBody(a *types.Body, b *engine.ExecutionPayloadBodyV1) bool { } return reflect.DeepEqual(a.Withdrawals, b.Withdrawals) } + +func TestBlockToPayloadWithBlobs(t *testing.T) { + header := types.Header{} + var txs []*types.Transaction + + inner := types.BlobTx{ + BlobHashes: make([]common.Hash, 1), + } + + txs = append(txs, types.NewTx(&inner)) + sidecars := []*types.BlobTxSidecar{ + { + Blobs: make([]kzg4844.Blob, 1), + Commitments: make([]kzg4844.Commitment, 1), + Proofs: make([]kzg4844.Proof, 1), + }, + } + + block := types.NewBlock(&header, txs, nil, nil, trie.NewStackTrie(nil)) + envelope := engine.BlockToExecutableData(block, nil, sidecars) + var want int + for _, tx := range txs { + want += len(tx.BlobHashes()) + } + if got := len(envelope.BlobsBundle.Commitments); got != want { + t.Fatalf("invalid number of commitments: got %v, want %v", got, want) + } + if got := len(envelope.BlobsBundle.Proofs); got != want { + t.Fatalf("invalid number of proofs: got %v, want %v", got, want) + } + if got := len(envelope.BlobsBundle.Blobs); got != want { + t.Fatalf("invalid number of blobs: got %v, want %v", got, want) + } + _, err := engine.ExecutableDataToBlock(*envelope.ExecutionPayload, make([]common.Hash, 1), nil) + if err != nil { + t.Error(err) + } +} + +// This checks that beaconRoot is applied to the state from the engine API. +func TestParentBeaconBlockRoot(t *testing.T) { + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) + + genesis, blocks := generateMergeChain(10, true) + + // Set cancun time to last block + 5 seconds + time := blocks[len(blocks)-1].Time() + 5 + genesis.Config.ShanghaiTime = &time + genesis.Config.CancunTime = &time + + n, ethservice := startEthService(t, genesis, blocks) + ethservice.Merger().ReachTTD() + defer n.Close() + + api := NewConsensusAPI(ethservice) + + // 11: Build Shanghai block with no withdrawals. 
+ parent := ethservice.BlockChain().CurrentHeader() + blockParams := engine.PayloadAttributes{ + Timestamp: parent.Time + 5, + Withdrawals: make([]*types.Withdrawal, 0), + BeaconRoot: &common.Hash{42}, + } + fcState := engine.ForkchoiceStateV1{ + HeadBlockHash: parent.Hash(), + } + resp, err := api.ForkchoiceUpdatedV2(fcState, &blockParams) + if err != nil { + t.Fatalf("error preparing payload, err=%v", err.(*engine.EngineAPIError).ErrorData()) + } + if resp.PayloadStatus.Status != engine.VALID { + t.Fatalf("unexpected status (got: %s, want: %s)", resp.PayloadStatus.Status, engine.VALID) + } + + // 11: verify state root is the same as parent + payloadID := (&miner.BuildPayloadArgs{ + Parent: fcState.HeadBlockHash, + Timestamp: blockParams.Timestamp, + FeeRecipient: blockParams.SuggestedFeeRecipient, + Random: blockParams.Random, + Withdrawals: blockParams.Withdrawals, + BeaconRoot: blockParams.BeaconRoot, + }).Id() + execData, err := api.GetPayloadV3(payloadID) + if err != nil { + t.Fatalf("error getting payload, err=%v", err) + } + + // 11: verify locally built block + if status, err := api.NewPayloadV3(*execData.ExecutionPayload, []common.Hash{}, &common.Hash{42}); err != nil { + t.Fatalf("error validating payload: %v", err) + } else if status.Status != engine.VALID { + t.Fatalf("invalid payload") + } + + fcState.HeadBlockHash = execData.ExecutionPayload.BlockHash + resp, err = api.ForkchoiceUpdatedV3(fcState, nil) + if err != nil { + t.Fatalf("error preparing payload, err=%v", err.(*engine.EngineAPIError).ErrorData()) + } + if resp.PayloadStatus.Status != engine.VALID { + t.Fatalf("unexpected status (got: %s, want: %s)", resp.PayloadStatus.Status, engine.VALID) + } + + // 11: verify beacon root was processed. + db, _, err := ethservice.APIBackend.StateAndHeaderByNumber(context.Background(), rpc.BlockNumber(execData.ExecutionPayload.Number)) + if err != nil { + t.Fatalf("unable to load db: %v", err) + } + var ( + timeIdx = common.BigToHash(big.NewInt(int64(execData.ExecutionPayload.Timestamp % 98304))) + rootIdx = common.BigToHash(big.NewInt(int64((execData.ExecutionPayload.Timestamp % 98304) + 98304))) + ) + + if num := db.GetState(params.BeaconRootsStorageAddress, timeIdx); num != timeIdx { + t.Fatalf("incorrect number stored: want %s, got %s", timeIdx, num) + } + if root := db.GetState(params.BeaconRootsStorageAddress, rootIdx); root != *blockParams.BeaconRoot { + t.Fatalf("incorrect root stored: want %s, got %s", *blockParams.BeaconRoot, root) + } +} diff --git a/eth/catalyst/queue.go b/eth/catalyst/queue.go index e8037aaca..634dc1b2e 100644 --- a/eth/catalyst/queue.go +++ b/eth/catalyst/queue.go @@ -73,7 +73,7 @@ func (q *payloadQueue) put(id engine.PayloadID, payload *miner.Payload) { } // get retrieves a previously stored payload item or nil if it does not exist. 
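+// When full is true the payload is resolved via ResolveFull instead of
+// Resolve, i.e. the caller waits for the fully built block rather than
+// accepting a potentially empty interim payload (see the implementation
+// below).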
-func (q *payloadQueue) get(id engine.PayloadID) *engine.ExecutionPayloadEnvelope {
+func (q *payloadQueue) get(id engine.PayloadID, full bool) *engine.ExecutionPayloadEnvelope {
 	q.lock.RLock()
 	defer q.lock.RUnlock()
@@ -82,7 +82,10 @@ func (q *payloadQueue) get(id engine.PayloadID) *engine.ExecutionPayloadEnvelope
 			return nil // no more items
 		}
 		if item.id == id {
-			return item.payload.Resolve()
+			if !full {
+				return item.payload.Resolve()
+			}
+			return item.payload.ResolveFull()
 		}
 	}
 	return nil
diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go
new file mode 100644
index 000000000..1f7a3266c
--- /dev/null
+++ b/eth/catalyst/simulated_beacon.go
@@ -0,0 +1,276 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package catalyst
+
+import (
+	"crypto/rand"
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/beacon/engine"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/rpc"
+)
+
+const devEpochLength = 32
+
+// withdrawalQueue implements a FIFO queue which holds withdrawals that are
+// pending inclusion.
+type withdrawalQueue struct {
+	pending chan *types.Withdrawal
+}
+
+// add queues a withdrawal for future inclusion.
+func (w *withdrawalQueue) add(withdrawal *types.Withdrawal) error {
+	select {
+	case w.pending <- withdrawal:
+		break
+	default:
+		return errors.New("withdrawal queue full")
+	}
+	return nil
+}
+
+// gatherPending returns a number of queued withdrawals up to a maximum count.
+func (w *withdrawalQueue) gatherPending(maxCount int) []*types.Withdrawal {
+	withdrawals := []*types.Withdrawal{}
+	for {
+		select {
+		case withdrawal := <-w.pending:
+			withdrawals = append(withdrawals, withdrawal)
+			if len(withdrawals) == maxCount {
+				// A bare break here would only exit the select, not the
+				// loop, so return directly once the cap is reached.
+				return withdrawals
+			}
+		default:
+			return withdrawals
+		}
+	}
+}
+
+type SimulatedBeacon struct {
+	shutdownCh  chan struct{}
+	eth         *eth.Ethereum
+	period      uint64
+	withdrawals withdrawalQueue
+
+	feeRecipient     common.Address
+	feeRecipientLock sync.Mutex // lock gates concurrent access to the feeRecipient
+
+	engineAPI          *ConsensusAPI
+	curForkchoiceState engine.ForkchoiceStateV1
+	lastBlockTime      uint64
+}
+
+func NewSimulatedBeacon(period uint64, eth *eth.Ethereum) (*SimulatedBeacon, error) {
+	chainConfig := eth.APIBackend.ChainConfig()
+	if !chainConfig.IsDevMode {
+		return nil, errors.New("incompatible pre-existing chain configuration")
+	}
+	block := eth.BlockChain().CurrentBlock()
+	current := engine.ForkchoiceStateV1{
+		HeadBlockHash:      block.Hash(),
+		SafeBlockHash:      block.Hash(),
+		FinalizedBlockHash: block.Hash(),
+	}
+	engineAPI := newConsensusAPIWithoutHeartbeat(eth)
+
+	// if genesis block, send forkchoiceUpdated to trigger transition to PoS
+	if block.Number.Sign() == 0 {
+		if _, err := engineAPI.ForkchoiceUpdatedV2(current, nil); err != nil {
+			return nil, err
+		}
+	}
+	return &SimulatedBeacon{
+		eth:                eth,
+		period:             period,
+		shutdownCh:         make(chan struct{}),
+		engineAPI:          engineAPI,
+		lastBlockTime:      block.Time,
+		curForkchoiceState: current,
+		withdrawals:        withdrawalQueue{make(chan *types.Withdrawal, 20)},
+	}, nil
+}
+
+func (c *SimulatedBeacon) setFeeRecipient(feeRecipient common.Address) {
+	c.feeRecipientLock.Lock()
+	c.feeRecipient = feeRecipient
+	c.feeRecipientLock.Unlock()
+}
+
+// Start invokes the SimulatedBeacon life-cycle function in a goroutine.
+func (c *SimulatedBeacon) Start() error {
+	if c.period == 0 {
+		go c.loopOnDemand()
+	} else {
+		go c.loop()
+	}
+	return nil
+}
+
+// Stop halts the SimulatedBeacon service.
+func (c *SimulatedBeacon) Stop() error {
+	close(c.shutdownCh)
+	return nil
+}
+
+// sealBlock initiates payload building for a new block and creates a new block
+// with the completed payload.
+func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error {
+	tstamp := uint64(time.Now().Unix())
+	if tstamp <= c.lastBlockTime {
+		tstamp = c.lastBlockTime + 1
+	}
+	c.feeRecipientLock.Lock()
+	feeRecipient := c.feeRecipient
+	c.feeRecipientLock.Unlock()
+
+	// Reset to CurrentBlock in case the chain was rewound
+	if header := c.eth.BlockChain().CurrentBlock(); c.curForkchoiceState.HeadBlockHash != header.Hash() {
+		finalizedHash := c.finalizedBlockHash(header.Number.Uint64())
+		c.setCurrentState(header.Hash(), *finalizedHash)
+	}
+
+	var random [32]byte
+	rand.Read(random[:])
+	fcResponse, err := c.engineAPI.ForkchoiceUpdatedV2(c.curForkchoiceState, &engine.PayloadAttributes{
+		Timestamp:             tstamp,
+		SuggestedFeeRecipient: feeRecipient,
+		Withdrawals:           withdrawals,
+		Random:                random,
+	})
+	if err != nil {
+		return err
+	}
+	if fcResponse == engine.STATUS_SYNCING {
+		return errors.New("chain rewind prevented invocation of payload creation")
+	}
+
+	envelope, err := c.engineAPI.getPayload(*fcResponse.PayloadID, true)
+	if err != nil {
+		return err
+	}
+	payload := envelope.ExecutionPayload
+
+	var finalizedHash common.Hash
+	if payload.Number%devEpochLength == 0 {
+		finalizedHash = payload.BlockHash
+	} else {
+		if fh := c.finalizedBlockHash(payload.Number); fh == nil {
+			return errors.New("chain rewind interrupted calculation of finalized block hash")
+		} else {
+			finalizedHash = *fh
+		}
+	}
+
+	// Mark the payload as canon
+	if _, err = c.engineAPI.NewPayloadV2(*payload); err != nil {
+		return err
+	}
+	c.setCurrentState(payload.BlockHash, finalizedHash)
+	// Mark the block containing the payload as canonical
+	if _, err = c.engineAPI.ForkchoiceUpdatedV2(c.curForkchoiceState, nil); err != nil {
+		return err
+	}
+	c.lastBlockTime = payload.Timestamp
+	return nil
+}
+
+// loopOnDemand runs the block production loop for the "on-demand" configuration (period = 0).
+func (c *SimulatedBeacon) loopOnDemand() {
+	var (
+		newTxs = make(chan core.NewTxsEvent)
+		sub    = c.eth.TxPool().SubscribeNewTxsEvent(newTxs)
+	)
+	defer sub.Unsubscribe()
+
+	for {
+		select {
+		case <-c.shutdownCh:
+			return
+		case w := <-c.withdrawals.pending:
+			withdrawals := append(c.withdrawals.gatherPending(9), w)
+			if err := c.sealBlock(withdrawals); err != nil {
+				log.Warn("Error performing sealing work", "err", err)
+			}
+		case <-newTxs:
+			withdrawals := c.withdrawals.gatherPending(10)
+			if err := c.sealBlock(withdrawals); err != nil {
+				log.Warn("Error performing sealing work", "err", err)
+			}
+		}
+	}
+}
+
+// loop runs the block production loop for a non-zero period configuration.
+func (c *SimulatedBeacon) loop() {
+	timer := time.NewTimer(0)
+	for {
+		select {
+		case <-c.shutdownCh:
+			return
+		case <-timer.C:
+			withdrawals := c.withdrawals.gatherPending(10)
+			if err := c.sealBlock(withdrawals); err != nil {
+				log.Warn("Error performing sealing work", "err", err)
+			} else {
+				timer.Reset(time.Second * time.Duration(c.period))
+			}
+		}
+	}
+}
+
+// finalizedBlockHash returns the block hash of the finalized block corresponding
+// to the given number, or nil if it doesn't exist in the chain.
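+// (Worked example: with devEpochLength = 32, block numbers 1-31 finalize block
+// 0, numbers 32-63 finalize block 32, numbers 64-95 finalize block 64, and so
+// on.)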
+func (c *SimulatedBeacon) finalizedBlockHash(number uint64) *common.Hash { + var finalizedNumber uint64 + if number%devEpochLength == 0 { + finalizedNumber = number + } else { + finalizedNumber = (number - 1) / devEpochLength * devEpochLength + } + + if finalizedBlock := c.eth.BlockChain().GetBlockByNumber(finalizedNumber); finalizedBlock != nil { + fh := finalizedBlock.Hash() + return &fh + } + return nil +} + +// setCurrentState sets the current forkchoice state +func (c *SimulatedBeacon) setCurrentState(headHash, finalizedHash common.Hash) { + c.curForkchoiceState = engine.ForkchoiceStateV1{ + HeadBlockHash: headHash, + SafeBlockHash: headHash, + FinalizedBlockHash: finalizedHash, + } +} + +func RegisterSimulatedBeaconAPIs(stack *node.Node, sim *SimulatedBeacon) { + stack.RegisterAPIs([]rpc.API{ + { + Namespace: "dev", + Service: &api{sim}, + Version: "1.0", + }, + }) +} diff --git a/les/downloader/events.go b/eth/catalyst/simulated_beacon_api.go similarity index 61% rename from les/downloader/events.go rename to eth/catalyst/simulated_beacon_api.go index 25255a3a7..93670257f 100644 --- a/les/downloader/events.go +++ b/eth/catalyst/simulated_beacon_api.go @@ -1,4 +1,4 @@ -// Copyright 2015 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,12 +14,23 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package downloader +package catalyst -import "github.com/ethereum/go-ethereum/core/types" +import ( + "context" -type DoneEvent struct { - Latest *types.Header + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +type api struct { + simBeacon *SimulatedBeacon +} + +func (a *api) AddWithdrawal(ctx context.Context, withdrawal *types.Withdrawal) error { + return a.simBeacon.withdrawals.add(withdrawal) +} + +func (a *api) SetFeeRecipient(ctx context.Context, feeRecipient common.Address) { + a.simBeacon.setFeeRecipient(feeRecipient) } -type StartEvent struct{} -type FailedEvent struct{ Err error } diff --git a/eth/catalyst/simulated_beacon_test.go b/eth/catalyst/simulated_beacon_test.go new file mode 100644 index 000000000..0df195fb9 --- /dev/null +++ b/eth/catalyst/simulated_beacon_test.go @@ -0,0 +1,141 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package catalyst + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/params" +) + +func startSimulatedBeaconEthService(t *testing.T, genesis *core.Genesis) (*node.Node, *eth.Ethereum, *SimulatedBeacon) { + t.Helper() + + n, err := node.New(&node.Config{ + P2P: p2p.Config{ + ListenAddr: "127.0.0.1:8545", + NoDiscovery: true, + MaxPeers: 0, + }, + }) + if err != nil { + t.Fatal("can't create node:", err) + } + + ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} + ethservice, err := eth.New(n, ethcfg) + if err != nil { + t.Fatal("can't create eth service:", err) + } + + simBeacon, err := NewSimulatedBeacon(1, ethservice) + if err != nil { + t.Fatal("can't create simulated beacon:", err) + } + + n.RegisterLifecycle(simBeacon) + + if err := n.Start(); err != nil { + t.Fatal("can't start node:", err) + } + + ethservice.SetSynced() + return n, ethservice, simBeacon +} + +// send 20 transactions, >10 withdrawals and ensure they are included in order +// send enough transactions to fill multiple blocks +func TestSimulatedBeaconSendWithdrawals(t *testing.T) { + var withdrawals []types.Withdrawal + txs := make(map[common.Hash]types.Transaction) + + var ( + // testKey is a private key to use for funding a tester account. + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + + // testAddr is the Ethereum address of the tester account. 
+ testAddr = crypto.PubkeyToAddress(testKey.PublicKey) + ) + + // short period (1 second) for testing purposes + var gasLimit uint64 = 10_000_000 + genesis := core.DeveloperGenesisBlock(gasLimit, testAddr) + node, ethService, mock := startSimulatedBeaconEthService(t, genesis) + _ = mock + defer node.Close() + + chainHeadCh := make(chan core.ChainHeadEvent, 10) + subscription := ethService.BlockChain().SubscribeChainHeadEvent(chainHeadCh) + defer subscription.Unsubscribe() + + // generate some withdrawals + for i := 0; i < 20; i++ { + withdrawals = append(withdrawals, types.Withdrawal{Index: uint64(i)}) + if err := mock.withdrawals.add(&withdrawals[i]); err != nil { + t.Fatal("addWithdrawal failed", err) + } + } + + // generate a bunch of transactions + signer := types.NewEIP155Signer(ethService.BlockChain().Config().ChainID) + for i := 0; i < 20; i++ { + tx, err := types.SignTx(types.NewTransaction(uint64(i), common.Address{}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey) + if err != nil { + t.Fatalf("error signing transaction, err=%v", err) + } + txs[tx.Hash()] = *tx + + if err := ethService.APIBackend.SendTx(context.Background(), tx); err != nil { + t.Fatal("SendTx failed", err) + } + } + + includedTxs := make(map[common.Hash]struct{}) + var includedWithdrawals []uint64 + + timer := time.NewTimer(12 * time.Second) + for { + select { + case evt := <-chainHeadCh: + for _, includedTx := range evt.Block.Transactions() { + includedTxs[includedTx.Hash()] = struct{}{} + } + for _, includedWithdrawal := range evt.Block.Withdrawals() { + includedWithdrawals = append(includedWithdrawals, includedWithdrawal.Index) + } + + // ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10 + if len(includedTxs) == len(txs) && len(includedWithdrawals) == len(withdrawals) && evt.Block.Number().Cmp(big.NewInt(2)) == 0 { + return + } + case <-timer.C: + t.Fatal("timed out without including all withdrawals/txs") + } + } +} diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go index c4eafd30d..3e9159a17 100644 --- a/eth/catalyst/tester.go +++ b/eth/catalyst/tester.go @@ -42,7 +42,7 @@ type FullSyncTester struct { // stack for launching and stopping the service controlled by node. func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) { cl := &FullSyncTester{ - api: NewConsensusAPI(backend), + api: newConsensusAPIWithoutHeartbeat(backend), block: block, closed: make(chan struct{}), } @@ -75,7 +75,7 @@ func (tester *FullSyncTester) Start() error { } // Trigger beacon sync with the provided block header as // trusted chain head. 
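The assertion loop above waits on the chain-head subscription with a hard 12-second timeout. The select-with-timer shape it relies on, in isolation; the event payloads and done condition here are stand-ins:

package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan int)
	go func() {
		for i := 1; i <= 2; i++ {
			time.Sleep(10 * time.Millisecond)
			events <- i // stands in for core.ChainHeadEvent deliveries
		}
	}()

	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	for {
		select {
		case n := <-events:
			fmt.Println("head event", n)
			if n == 2 { // done condition, like "all txs and withdrawals included"
				return
			}
		case <-timer.C:
			fmt.Println("timed out")
			return
		}
	}
}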
- err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), nil) + err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header()) if err != nil { log.Info("Failed to beacon sync", "err", err) } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index e9907297a..e55715879 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" ) const ( @@ -796,6 +797,37 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListH return errInvalidBody } } + // Blocks must have a number of blobs corresponding to the header gas usage, + // and zero before the Cancun hardfork. + var blobs int + for _, tx := range txLists[index] { + // Count the number of blobs to validate against the header's blobGasUsed + blobs += len(tx.BlobHashes()) + + // Validate the data blobs individually too + if tx.Type() == types.BlobTxType { + if len(tx.BlobHashes()) == 0 { + return errInvalidBody + } + for _, hash := range tx.BlobHashes() { + if hash[0] != params.BlobTxHashVersion { + return errInvalidBody + } + } + if tx.BlobTxSidecar() != nil { + return errInvalidBody + } + } + } + if header.BlobGasUsed != nil { + if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated + return errInvalidBody + } + } else { + if blobs != 0 { + return errInvalidBody + } + } return nil } diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go index 12eb5700f..59df82bd9 100644 --- a/eth/downloader/skeleton.go +++ b/eth/downloader/skeleton.go @@ -367,13 +367,31 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) { s.filler.resume() } defer func() { - if filled := s.filler.suspend(); filled != nil { - // If something was filled, try to delete stale sync helpers. If - // unsuccessful, warn the user, but not much else we can do (it's - // a programming error, just let users report an issue and don't - // choke in the meantime). - if err := s.cleanStales(filled); err != nil { - log.Error("Failed to clean stale beacon headers", "err", err) + // The filler needs to be suspended, but since it can block for a while + // when there are many blocks queued up for full-sync importing, run it + // on a separate goroutine and consume head messages that need instant + // replies. + done := make(chan struct{}) + go func() { + defer close(done) + if filled := s.filler.suspend(); filled != nil { + // If something was filled, try to delete stale sync helpers. If + // unsuccessful, warn the user, but not much else we can do (it's + // a programming error, just let users report an issue and don't + // choke in the meantime). + if err := s.cleanStales(filled); err != nil { + log.Error("Failed to clean stale beacon headers", "err", err) + } + } + }() + // Wait for the suspend to finish, consuming head events in the meantime + // and dropping them on the floor. 
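The DeliverBodies change in eth/downloader/queue.go above ties a body's blob count to its header: with Cancun active, blobGasUsed must equal the number of blob hashes times the per-blob gas constant, and before Cancun both must be absent. A standalone sketch of that counting rule, using the EIP-4844 constant 131072 (params.BlobTxBlobGasPerBlob):

package main

import (
	"errors"
	"fmt"
)

// blobGasPerBlob mirrors params.BlobTxBlobGasPerBlob (2^17 per EIP-4844).
const blobGasPerBlob = 131072

// validateBlobCount restates the body check in isolation: the blob hashes
// carried by the transactions must account for exactly the header's
// blobGasUsed, and there must be none when the field is nil (pre-Cancun).
func validateBlobCount(blobHashesPerTx []int, blobGasUsed *uint64) error {
	var blobs uint64
	for _, n := range blobHashesPerTx {
		blobs += uint64(n)
	}
	if blobGasUsed == nil {
		if blobs != 0 {
			return errors.New("invalid body: blob txs before Cancun")
		}
		return nil
	}
	if want := *blobGasUsed / blobGasPerBlob; blobs != want {
		return fmt.Errorf("invalid body: have %d blobs, want %d", blobs, want)
	}
	return nil
}

func main() {
	used := uint64(2 * blobGasPerBlob)
	fmt.Println(validateBlobCount([]int{1, 1}, &used)) // <nil>
	fmt.Println(validateBlobCount([]int{3}, &used))    // mismatch error
	fmt.Println(validateBlobCount([]int{1}, nil))      // pre-Cancun error
}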
+ for { + select { + case <-done: + return + case event := <-s.headEvents: + event.errc <- errors.New("beacon syncer reorging") } } }() @@ -630,7 +648,7 @@ func (s *skeleton) processNewHead(head *types.Header, final *types.Header, force } if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash { if force { - log.Warn("Beacon chain forked", "ancestor", parent.Number, "hash", parent.Hash(), "want", head.ParentHash) + log.Warn("Beacon chain forked", "ancestor", number-1, "hash", parent.Hash(), "want", head.ParentHash) } return true } diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index d791723b0..1bf03411d 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // Test chain parameters. @@ -43,7 +44,7 @@ var ( Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, BaseFee: big.NewInt(params.InitialBaseFee), } - testGenesis = testGspec.MustCommit(testDB) + testGenesis = testGspec.MustCommit(testDB, trie.NewDatabase(testDB, trie.HashDefaults)) ) // The common prefix of all test chains: diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 63c3305a4..5eddd7a19 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -27,7 +27,9 @@ import ( "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/txpool/blobpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/ethdb" @@ -58,25 +60,26 @@ var LightClientGPO = gasprice.Config{ // Defaults contains default settings for use on the Ethereum main net. 
var Defaults = Config{ - SyncMode: downloader.SnapSync, - NetworkId: 1, - TxLookupLimit: 2350000, - LightPeers: 100, - UltraLightFraction: 75, - DatabaseCache: 512, - TrieCleanCache: 154, - TrieCleanCacheJournal: "triecache", - TrieCleanCacheRejournal: 60 * time.Minute, - TrieDirtyCache: 256, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 102, - FilterLogCacheSize: 32, - Miner: miner.DefaultConfig, - TxPool: txpool.DefaultConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether + SyncMode: downloader.SnapSync, + NetworkId: 1, + TxLookupLimit: 2350000, + TransactionHistory: 2350000, + StateHistory: params.FullImmutabilityThreshold, + StateScheme: rawdb.HashScheme, + LightPeers: 100, + DatabaseCache: 512, + TrieCleanCache: 154, + TrieDirtyCache: 256, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 102, + FilterLogCacheSize: 32, + Miner: miner.DefaultConfig, + TxPool: legacypool.DefaultConfig, + BlobPool: blobpool.DefaultConfig, + RPCGasCap: 50000000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -99,7 +102,11 @@ type Config struct { NoPruning bool // Whether to disable pruning and flush everything to disk NoPrefetch bool // Whether to disable prefetching and only load state on demand - TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. + // Deprecated, use 'TransactionHistory' instead. + TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. + TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. + StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. + StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top // RequiredBlocks is a set of block number -> hash mappings which must be in the // canonical chain of all remote peers. Setting the option makes geth verify the @@ -114,24 +121,17 @@ type Config struct { LightNoPrune bool `toml:",omitempty"` // Whether to disable light chain pruning LightNoSyncServe bool `toml:",omitempty"` // Whether to serve light clients before syncing - // Ultra Light client options - UltraLightServers []string `toml:",omitempty"` // List of trusted ultra light servers - UltraLightFraction int `toml:",omitempty"` // Percentage of trusted servers to accept an announcement - UltraLightOnlyAnnounce bool `toml:",omitempty"` // Whether to only announce headers, or also serve them - // Database options SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int DatabaseFreezer string - TrieCleanCache int - TrieCleanCacheJournal string `toml:",omitempty"` // Disk journal directory for trie cache to survive node restarts - TrieCleanCacheRejournal time.Duration `toml:",omitempty"` // Time interval to regenerate the journal for clean cache - TrieDirtyCache int - TrieTimeout time.Duration - SnapshotCache int - Preimages bool + TrieCleanCache int + TrieDirtyCache int + TrieTimeout time.Duration + SnapshotCache int + Preimages bool // This is the number of blocks for which logs will be cached in the filter system. 
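TxLookupLimit above is kept only as a deprecated alias for the new TransactionHistory field, so callers need a small migration step. The rule geth actually applies lives in its flag-handling code, which this diff does not show; the hypothetical resolveDeprecated helper below only illustrates the usual shape of such a shim:

package main

import "fmt"

type config struct {
	TxLookupLimit      uint64 // deprecated
	TransactionHistory uint64 // replacement
}

// resolveDeprecated copies the old field onto the new one when only the
// old one was set, so legacy TOML files keep working. Illustrative only.
func resolveDeprecated(c *config) {
	if c.TransactionHistory == 0 && c.TxLookupLimit != 0 {
		c.TransactionHistory = c.TxLookupLimit
	}
}

func main() {
	c := config{TxLookupLimit: 2350000}
	resolveDeprecated(&c)
	fmt.Println(c.TransactionHistory) // 2350000
}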
FilterLogCacheSize int @@ -140,7 +140,8 @@ type Config struct { Miner miner.Config // Transaction pool options - TxPool txpool.Config + TxPool legacypool.Config + BlobPool blobpool.Config // Gas Price Oracle options GPO gasprice.Config @@ -163,6 +164,9 @@ type Config struct { // OverrideCancun (TODO: remove after the fork) OverrideCancun *uint64 `toml:",omitempty"` + + // OverrideVerkle (TODO: remove after the fork) + OverrideVerkle *uint64 `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 0e2f03774..2abddc9e0 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -7,7 +7,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/blobpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/miner" @@ -24,6 +25,9 @@ func (c Config) MarshalTOML() (interface{}, error) { NoPruning bool NoPrefetch bool TxLookupLimit uint64 `toml:",omitempty"` + TransactionHistory uint64 `toml:",omitempty"` + StateHistory uint64 `toml:",omitempty"` + StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` LightServ int `toml:",omitempty"` LightIngress int `toml:",omitempty"` @@ -31,23 +35,19 @@ func (c Config) MarshalTOML() (interface{}, error) { LightPeers int `toml:",omitempty"` LightNoPrune bool `toml:",omitempty"` LightNoSyncServe bool `toml:",omitempty"` - UltraLightServers []string `toml:",omitempty"` - UltraLightFraction int `toml:",omitempty"` - UltraLightOnlyAnnounce bool `toml:",omitempty"` SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int DatabaseFreezer string TrieCleanCache int - TrieCleanCacheJournal string `toml:",omitempty"` - TrieCleanCacheRejournal time.Duration `toml:",omitempty"` TrieDirtyCache int TrieTimeout time.Duration SnapshotCache int Preimages bool FilterLogCacheSize int Miner miner.Config - TxPool txpool.Config + TxPool legacypool.Config + BlobPool blobpool.Config GPO gasprice.Config EnablePreimageRecording bool DocRoot string `toml:"-"` @@ -55,6 +55,7 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCEVMTimeout time.Duration RPCTxFeeCap float64 OverrideCancun *uint64 `toml:",omitempty"` + OverrideVerkle *uint64 `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -65,6 +66,9 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.NoPruning = c.NoPruning enc.NoPrefetch = c.NoPrefetch enc.TxLookupLimit = c.TxLookupLimit + enc.TransactionHistory = c.TransactionHistory + enc.StateHistory = c.StateHistory + enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks enc.LightServ = c.LightServ enc.LightIngress = c.LightIngress @@ -72,16 +76,11 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.LightPeers = c.LightPeers enc.LightNoPrune = c.LightNoPrune enc.LightNoSyncServe = c.LightNoSyncServe - enc.UltraLightServers = c.UltraLightServers - enc.UltraLightFraction = c.UltraLightFraction - enc.UltraLightOnlyAnnounce = c.UltraLightOnlyAnnounce enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache enc.DatabaseFreezer = c.DatabaseFreezer enc.TrieCleanCache = c.TrieCleanCache - enc.TrieCleanCacheJournal = 
c.TrieCleanCacheJournal - enc.TrieCleanCacheRejournal = c.TrieCleanCacheRejournal enc.TrieDirtyCache = c.TrieDirtyCache enc.TrieTimeout = c.TrieTimeout enc.SnapshotCache = c.SnapshotCache @@ -89,6 +88,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.FilterLogCacheSize = c.FilterLogCacheSize enc.Miner = c.Miner enc.TxPool = c.TxPool + enc.BlobPool = c.BlobPool enc.GPO = c.GPO enc.EnablePreimageRecording = c.EnablePreimageRecording enc.DocRoot = c.DocRoot @@ -96,6 +96,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.RPCEVMTimeout = c.RPCEVMTimeout enc.RPCTxFeeCap = c.RPCTxFeeCap enc.OverrideCancun = c.OverrideCancun + enc.OverrideVerkle = c.OverrideVerkle return &enc, nil } @@ -110,6 +111,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { NoPruning *bool NoPrefetch *bool TxLookupLimit *uint64 `toml:",omitempty"` + TransactionHistory *uint64 `toml:",omitempty"` + StateHistory *uint64 `toml:",omitempty"` + StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` LightServ *int `toml:",omitempty"` LightIngress *int `toml:",omitempty"` @@ -117,23 +121,19 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { LightPeers *int `toml:",omitempty"` LightNoPrune *bool `toml:",omitempty"` LightNoSyncServe *bool `toml:",omitempty"` - UltraLightServers []string `toml:",omitempty"` - UltraLightFraction *int `toml:",omitempty"` - UltraLightOnlyAnnounce *bool `toml:",omitempty"` SkipBcVersionCheck *bool `toml:"-"` DatabaseHandles *int `toml:"-"` DatabaseCache *int DatabaseFreezer *string TrieCleanCache *int - TrieCleanCacheJournal *string `toml:",omitempty"` - TrieCleanCacheRejournal *time.Duration `toml:",omitempty"` TrieDirtyCache *int TrieTimeout *time.Duration SnapshotCache *int Preimages *bool FilterLogCacheSize *int Miner *miner.Config - TxPool *txpool.Config + TxPool *legacypool.Config + BlobPool *blobpool.Config GPO *gasprice.Config EnablePreimageRecording *bool DocRoot *string `toml:"-"` @@ -141,6 +141,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCEVMTimeout *time.Duration RPCTxFeeCap *float64 OverrideCancun *uint64 `toml:",omitempty"` + OverrideVerkle *uint64 `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -170,6 +171,15 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.TxLookupLimit != nil { c.TxLookupLimit = *dec.TxLookupLimit } + if dec.TransactionHistory != nil { + c.TransactionHistory = *dec.TransactionHistory + } + if dec.StateHistory != nil { + c.StateHistory = *dec.StateHistory + } + if dec.StateScheme != nil { + c.StateScheme = *dec.StateScheme + } if dec.RequiredBlocks != nil { c.RequiredBlocks = dec.RequiredBlocks } @@ -191,15 +201,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.LightNoSyncServe != nil { c.LightNoSyncServe = *dec.LightNoSyncServe } - if dec.UltraLightServers != nil { - c.UltraLightServers = dec.UltraLightServers - } - if dec.UltraLightFraction != nil { - c.UltraLightFraction = *dec.UltraLightFraction - } - if dec.UltraLightOnlyAnnounce != nil { - c.UltraLightOnlyAnnounce = *dec.UltraLightOnlyAnnounce - } if dec.SkipBcVersionCheck != nil { c.SkipBcVersionCheck = *dec.SkipBcVersionCheck } @@ -215,12 +216,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.TrieCleanCache != nil { c.TrieCleanCache = *dec.TrieCleanCache } - if dec.TrieCleanCacheJournal != nil { - c.TrieCleanCacheJournal = 
*dec.TrieCleanCacheJournal - } - if dec.TrieCleanCacheRejournal != nil { - c.TrieCleanCacheRejournal = *dec.TrieCleanCacheRejournal - } if dec.TrieDirtyCache != nil { c.TrieDirtyCache = *dec.TrieDirtyCache } @@ -242,6 +237,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.TxPool != nil { c.TxPool = *dec.TxPool } + if dec.BlobPool != nil { + c.BlobPool = *dec.BlobPool + } if dec.GPO != nil { c.GPO = *dec.GPO } @@ -263,5 +261,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideCancun != nil { c.OverrideCancun = dec.OverrideCancun } + if dec.OverrideVerkle != nil { + c.OverrideVerkle = dec.OverrideVerkle + } return nil } diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 6d8516fa6..7c490df3f 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -44,7 +44,7 @@ var ( Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, BaseFee: big.NewInt(params.InitialBaseFee), } - genesis = gspec.MustCommit(testdb) + genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, trie.HashDefaults)) unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil)) ) diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go index 39727e007..95fef0cde 100644 --- a/eth/fetcher/tx_fetcher.go +++ b/eth/fetcher/tx_fetcher.go @@ -294,6 +294,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) otherreject int64 ) batch := txs[i:end] + for j, err := range f.addTxs(batch) { // Track the transaction hash if the price is too low for us. // Avoid re-request this transaction when we receive another diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 122676e96..a5750c193 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -106,32 +106,32 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } return f.blockLogs(ctx, header) } - // Short-cut if all we care about is pending logs - if f.begin == rpc.PendingBlockNumber.Int64() { - if f.end != rpc.PendingBlockNumber.Int64() { - return nil, errors.New("invalid block range") - } - return f.pendingLogs() - } - // Figure out the limits of the filter range - header, _ := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) - if header == nil { - return nil, nil - } + var ( - err error - head = header.Number.Int64() - pending = f.end == rpc.PendingBlockNumber.Int64() + beginPending = f.begin == rpc.PendingBlockNumber.Int64() + endPending = f.end == rpc.PendingBlockNumber.Int64() ) + + // special case for pending logs + if beginPending && !endPending { + return nil, errors.New("invalid block range") + } + + // Short-cut if all we care about is pending logs + if beginPending && endPending { + return f.pendingLogs(), nil + } + resolveSpecial := func(number int64) (int64, error) { var hdr *types.Header switch number { - case rpc.LatestBlockNumber.Int64(): - return head, nil - case rpc.PendingBlockNumber.Int64(): + case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64(): // we should return head here since we've already captured // that we need to get the pending logs in the pending boolean above - return head, nil + hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + if hdr == nil { + return 0, errors.New("latest header not found") + } case rpc.FinalizedBlockNumber.Int64(): hdr, _ = 
f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber) if hdr == nil { @@ -147,57 +147,92 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } return hdr.Number.Int64(), nil } + + var err error + // range query need to resolve the special begin/end block number if f.begin, err = resolveSpecial(f.begin); err != nil { return nil, err } if f.end, err = resolveSpecial(f.end); err != nil { return nil, err } - // Gather all indexed logs, and finish with non indexed ones + + logChan, errChan := f.rangeLogsAsync(ctx) + var logs []*types.Log + for { + select { + case log := <-logChan: + logs = append(logs, log) + case err := <-errChan: + if err != nil { + // if an error occurs during extraction, we do return the extracted data + return logs, err + } + // Append the pending ones + if endPending { + pendingLogs := f.pendingLogs() + logs = append(logs, pendingLogs...) + } + return logs, nil + } + } +} + +// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously, +// it creates and returns two channels: one for delivering log data, and one for reporting errors. +func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) { var ( - logs []*types.Log - end = uint64(f.end) - size, sections = f.sys.backend.BloomStatus() + logChan = make(chan *types.Log) + errChan = make(chan error) ) - if indexed := sections * size; indexed > uint64(f.begin) { - if indexed > end { - logs, err = f.indexedLogs(ctx, end) - } else { - logs, err = f.indexedLogs(ctx, indexed-1) + + go func() { + defer func() { + close(errChan) + close(logChan) + }() + + // Gather all indexed logs, and finish with non indexed ones + var ( + end = uint64(f.end) + size, sections = f.sys.backend.BloomStatus() + err error + ) + if indexed := sections * size; indexed > uint64(f.begin) { + if indexed > end { + indexed = end + 1 + } + if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil { + errChan <- err + return + } } - if err != nil { - return logs, err + + if err := f.unindexedLogs(ctx, end, logChan); err != nil { + errChan <- err + return } - } - rest, err := f.unindexedLogs(ctx, end) - logs = append(logs, rest...) - if pending { - pendingLogs, err := f.pendingLogs() - if err != nil { - return nil, err - } - logs = append(logs, pendingLogs...) - } - return logs, err + + errChan <- nil + }() + + return logChan, errChan } // indexedLogs returns the logs matching the filter criteria based on the bloom // bits indexed available locally or via the network. 
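rangeLogsAsync above streams matches over an unbuffered logChan while errChan doubles as the completion signal: an explicit nil send on success, with both channels closed in a defer. The producer/consumer shape in isolation, with ints standing in for logs (produce, out, and errc are illustrative names):

package main

import (
	"context"
	"fmt"
)

// produce streams values on out and signals completion (or failure) on errc,
// mirroring the logChan/errChan shape of rangeLogsAsync.
func produce(ctx context.Context, n int) (<-chan int, <-chan error) {
	out := make(chan int)
	errc := make(chan error)
	go func() {
		defer close(errc)
		defer close(out)
		for i := 0; i < n; i++ {
			select {
			case out <- i:
			case <-ctx.Done():
				errc <- ctx.Err()
				return
			}
		}
		errc <- nil // success sentinel, like the filter's final send
	}()
	return out, errc
}

func main() {
	vals, errc := produce(context.Background(), 3)
	var got []int
	for {
		select {
		case v := <-vals:
			got = append(got, v)
		case err := <-errc:
			fmt.Println(got, err) // [0 1 2] <nil>
			return
		}
	}
}

The consumer mirrors the rewritten Logs method: it accumulates until something arrives on the error channel, which may be a real failure or the nil success sentinel.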
-func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { +func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error { // Create a matcher session and request servicing from the backend matches := make(chan uint64, 64) session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches) if err != nil { - return nil, err + return err } defer session.Close() f.sys.backend.ServiceFilter(ctx, session) - // Iterate over the matches until exhausted or context closed - var logs []*types.Log - for { select { case number, ok := <-matches: @@ -207,47 +242,50 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err if err == nil { f.begin = int64(end) + 1 } - return logs, err + return err } f.begin = int64(number) + 1 // Retrieve the suggested block and pull any truly matching logs header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) if header == nil || err != nil { - return logs, err + return err } found, err := f.checkMatches(ctx, header) if err != nil { - return logs, err + return err + } + for _, log := range found { + logChan <- log } - logs = append(logs, found...) case <-ctx.Done(): - return logs, ctx.Err() + return ctx.Err() } } } // unindexedLogs returns the logs matching the filter criteria based on raw block // iteration and bloom matching. -func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { - var logs []*types.Log - +func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error { for ; f.begin <= int64(end); f.begin++ { - if f.begin%10 == 0 && ctx.Err() != nil { - return logs, ctx.Err() - } header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { - return logs, err + return err } found, err := f.blockLogs(ctx, header) if err != nil { - return logs, err + return err + } + for _, log := range found { + select { + case logChan <- log: + case <-ctx.Done(): + return ctx.Err() + } } - logs = append(logs, found...) } - return logs, nil + return nil } // blockLogs returns the logs matching the filter criteria within a single block. @@ -294,63 +332,62 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*typ } // pendingLogs returns the logs matching the filter criteria within the pending block. -func (f *Filter) pendingLogs() ([]*types.Log, error) { +func (f *Filter) pendingLogs() []*types.Log { block, receipts := f.sys.backend.PendingBlockAndReceipts() - if block == nil { - return nil, errors.New("pending state not available") + if block == nil || receipts == nil { + return nil } if bloomFilter(block.Bloom(), f.addresses, f.topics) { var unfiltered []*types.Log for _, r := range receipts { unfiltered = append(unfiltered, r.Logs...) } - return filterLogs(unfiltered, nil, nil, f.addresses, f.topics), nil + return filterLogs(unfiltered, nil, nil, f.addresses, f.topics) } - return nil, nil + return nil } -func includes(addresses []common.Address, a common.Address) bool { - for _, addr := range addresses { - if addr == a { +// includes returns true if the element is present in the list. +func includes[T comparable](things []T, element T) bool { + for _, thing := range things { + if thing == element { return true } } - return false } // filterLogs creates a slice of logs matching the given criteria. 
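The new includes above is the old address-specific helper generalized over any comparable type, so the same function now serves both addresses and topic hashes. A usage sketch:

package main

import "fmt"

// includes reports whether x is present in list, for any comparable T.
func includes[T comparable](list []T, x T) bool {
	for _, v := range list {
		if v == x {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(includes([]string{"a", "b"}, "b")) // true
	fmt.Println(includes([]int{1, 2, 3}, 5))       // false
}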
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log { - var ret []*types.Log -Logs: - for _, log := range logs { + var check = func(log *types.Log) bool { if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber { - continue + return false } if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber { - continue + return false } - if len(addresses) > 0 && !includes(addresses, log.Address) { - continue + return false } // If the to filtered topics is greater than the amount of topics in logs, skip. if len(topics) > len(log.Topics) { - continue + return false } for i, sub := range topics { - match := len(sub) == 0 // empty rule set == wildcard - for _, topic := range sub { - if log.Topics[i] == topic { - match = true - break - } + if len(sub) == 0 { + continue // empty rule set == wildcard } - if !match { - continue Logs + if !includes(sub, log.Topics[i]) { + return false } } - ret = append(ret, log) + return true + } + var ret []*types.Log + for _, log := range logs { + if check(log) { + ret = append(ret, log) + } } return ret } diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 1768681c1..35e396c23 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -444,15 +444,6 @@ func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) { } } -func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) { - for _, f := range filters[LogsSubscription] { - matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) - if len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } -} - func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) { for _, f := range filters[PendingTransactionsSubscription] { f.txs <- ev.Txs @@ -573,7 +564,7 @@ func (es *EventSystem) eventLoop() { case ev := <-es.logsCh: es.handleLogs(index, ev) case ev := <-es.rmLogsCh: - es.handleRemovedLogs(index, ev) + es.handleLogs(index, ev.Logs) case ev := <-es.pendingLogsCh: es.handlePendingLogs(index, ev) case ev := <-es.chainCh: diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index ae0c83eb2..b5d716ae5 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -50,6 +50,8 @@ type testBackend struct { rmLogsFeed event.Feed pendingLogsFeed event.Feed chainFeed event.Feed + pendingBlock *types.Block + pendingReceipts types.Receipts } func (b *testBackend) ChainConfig() *params.ChainConfig { @@ -119,12 +121,12 @@ func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types. 
} func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { - logs := rawdb.ReadLogs(b.db, hash, number, params.TestChainConfig) + logs := rawdb.ReadLogs(b.db, hash, number) return logs, nil } func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - return nil, nil + return b.pendingBlock, b.pendingReceipts } func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 0e42c798e..a076fb737 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -18,18 +18,23 @@ package filters import ( "context" + "encoding/json" "math/big" - "reflect" + "strings" "testing" + "time" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" ) func makeReceipt(addr common.Address) *types.Receipt { @@ -81,7 +86,7 @@ func BenchmarkFilters(b *testing.B) { // The test txs are not properly signed, can't simply create a chain // and then import blocks. TODO(rjl493456442) try to get rid of the // manual database writes. - gspec.MustCommit(db) + gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) for i, block := range chain { rawdb.WriteBlock(db, block) @@ -103,142 +108,264 @@ func BenchmarkFilters(b *testing.B) { func TestFilters(t *testing.T) { var ( - db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false) - _, sys = newTestFilterSystem(t, db, Config{}) + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + // Sender account key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) + signer = types.NewLondonSigner(big.NewInt(1)) + // Logging contract + contract = common.Address{0xfe} + contract2 = common.Address{0xff} + abiStr = `[{"inputs":[],"name":"log0","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"}],"name":"log1","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"}],"name":"log2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"}],"name":"log3","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"},{"internalType":"uint256","name":"t4","type":"uint256"}],"name":"log4","outputs":[],"stateMutability":"nonpayable","type":"function"}]` + /* + // SPDX-License-Identifier: GPL-3.0 + pragma solidity >=0.7.0 <0.9.0; + + contract Logger { + function log0() external { + assembly { + log0(0, 0) + } + } + + function log1(uint t1) external { + assembly { + log1(0, 0, t1) + } + } + + function log2(uint 
t1, uint t2) external { + assembly { + log2(0, 0, t1, t2) + } + } + + function log3(uint t1, uint t2, uint t3) external { + assembly { + log3(0, 0, t1, t2, t3) + } + } + + function log4(uint t1, uint t2, uint t3, uint t4) external { + assembly { + log4(0, 0, t1, t2, t3, t4) + } + } + } + */ + bytecode = common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b935050604061024d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033") hash1 = common.BytesToHash([]byte("topic1")) hash2 = common.BytesToHash([]byte("topic2")) hash3 = common.BytesToHash([]byte("topic3")) hash4 = common.BytesToHash([]byte("topic4")) + hash5 = common.BytesToHash([]byte("topic5")) gspec = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}}, + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))}, + contract: {Balance: big.NewInt(0), Code: bytecode}, + contract2: {Balance: big.NewInt(0), Code: bytecode}, + }, BaseFee: big.NewInt(params.InitialBaseFee), } ) - defer db.Close() - _, chain, receipts := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1000, func(i int, gen *core.BlockGen) { + contractABI, err := abi.JSON(strings.NewReader(abiStr)) + if err != nil { + t.Fatal(err) + } + + // Hack: GenerateChainWithGenesis creates a new db. + // Commit the genesis manually and use GenerateChain. 
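The abiStr/bytecode pair above drives the Logger contract through ABI-packed calldata: each call is a 4-byte selector (the first four bytes of the Keccak-256 hash of the canonical signature) followed by 32-byte argument words. A minimal check of that selector rule, assuming the go-ethereum module is on the import path:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// contractABI.Pack("log1", ...) in the test emits calldata starting
	// with exactly these four bytes.
	sel := crypto.Keccak256([]byte("log1(uint256)"))[:4]
	fmt.Printf("log1 selector: %#x\n", sel)
}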
+ _, err = gspec.Commit(db, trie.NewDatabase(db, nil)) + if err != nil { + t.Fatal(err) + } + chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) { switch i { case 1: - receipt := types.NewReceipt(nil, false, 0) - receipt.Logs = []*types.Log{ - { - Address: addr, - Topics: []common.Hash{hash1}, - }, + data, err := contractABI.Pack("log1", hash1.Big()) + if err != nil { + t.Fatal(err) } - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, gen.BaseFee(), nil)) + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 0, + GasPrice: gen.BaseFee(), + Gas: 30000, + To: &contract, + Data: data, + }), signer, key1) + gen.AddTx(tx) + tx2, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 1, + GasPrice: gen.BaseFee(), + Gas: 30000, + To: &contract2, + Data: data, + }), signer, key1) + gen.AddTx(tx2) case 2: - receipt := types.NewReceipt(nil, false, 0) - receipt.Logs = []*types.Log{ - { - Address: addr, - Topics: []common.Hash{hash2}, - }, + data, err := contractABI.Pack("log2", hash2.Big(), hash1.Big()) + if err != nil { + t.Fatal(err) } - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, gen.BaseFee(), nil)) - + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 2, + GasPrice: gen.BaseFee(), + Gas: 30000, + To: &contract, + Data: data, + }), signer, key1) + gen.AddTx(tx) case 998: - receipt := types.NewReceipt(nil, false, 0) - receipt.Logs = []*types.Log{ - { - Address: addr, - Topics: []common.Hash{hash3}, - }, + data, err := contractABI.Pack("log1", hash3.Big()) + if err != nil { + t.Fatal(err) } - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, gen.BaseFee(), nil)) + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 3, + GasPrice: gen.BaseFee(), + Gas: 30000, + To: &contract2, + Data: data, + }), signer, key1) + gen.AddTx(tx) case 999: - receipt := types.NewReceipt(nil, false, 0) - receipt.Logs = []*types.Log{ - { - Address: addr, - Topics: []common.Hash{hash4}, - }, + data, err := contractABI.Pack("log1", hash4.Big()) + if err != nil { + t.Fatal(err) } - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 4, + GasPrice: gen.BaseFee(), + Gas: 30000, + To: &contract, + Data: data, + }), signer, key1) + gen.AddTx(tx) } }) - // The test txs are not properly signed, can't simply create a chain - // and then import blocks. TODO(rjl493456442) try to get rid of the - // manual database writes. 
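Each generated block above is filled with hand-signed legacy transactions. The signing recipe in isolation, with a throwaway key and chain ID 1 as in the test; the gas price and recipient here are placeholders:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	signer := types.NewLondonSigner(big.NewInt(1))
	to := common.Address{0xfe}
	tx, err := types.SignTx(types.NewTx(&types.LegacyTx{
		Nonce:    0,
		GasPrice: big.NewInt(1_000_000_000),
		Gas:      30000,
		To:       &to,
	}), signer, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed tx", tx.Hash())
}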
- gspec.MustCommit(db) - for i, block := range chain { - rawdb.WriteBlock(db, block) - rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) - rawdb.WriteHeadBlockHash(db, block.Hash()) - rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) + var l uint64 + bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) + if err != nil { + t.Fatal(err) + } + _, err = bc.InsertChain(chain) + if err != nil { + t.Fatal(err) } // Set block 998 as Finalized (-3) - rawdb.WriteFinalizedBlockHash(db, chain[998].Hash()) + bc.SetFinalized(chain[998].Header()) - filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) - logs, _ := filter.Logs(context.Background()) - if len(logs) != 4 { - t.Error("expected 4 log, got", len(logs)) - } + // Generate pending block + pchain, preceipts := core.GenerateChain(gspec.Config, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) { + data, err := contractABI.Pack("log1", hash5.Big()) + if err != nil { + t.Fatal(err) + } + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ + Nonce: 5, + GasPrice: gen.BaseFee(), + Gas: 30000, + To: &contract, + Data: data, + }), signer, key1) + gen.AddTx(tx) + }) + sys.backend.(*testBackend).pendingBlock = pchain[0] + sys.backend.(*testBackend).pendingReceipts = preceipts[0] for i, tc := range []struct { - f *Filter - wantHashes []common.Hash + f *Filter + want string + err string }{ { - sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}), - []common.Hash{hash3}, + f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil), + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash3}}), - []common.Hash{hash3}, + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}), + want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}), - []common.Hash{hash1, hash2}, + f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}), }, { - sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), - nil, + f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}), + want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), - nil, + f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}), + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), - nil, + f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}), + want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4}, + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), }, { - sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash3, hash4}, + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), }, { - sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), []common.Hash{hash3}, + f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), }, { - sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), nil, + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), nil, + f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + want: 
`[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), nil, + f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), + want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, }, { - sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), nil, + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), }, { - sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), nil, + f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + err: "safe header not found", + }, { + f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), + err: "safe header not found", + }, { + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), + err: "safe header not found", + }, { + f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xc7245899e5817f16fa99cf5ad2d9c1e4b98443a565a673ec9c764640443ef037","logIndex":"0x0","removed":false}]`, + }, { + f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), + want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xc7245899e5817f16fa99cf5ad2d9c1e4b98443a565a673ec9c764640443ef037","logIndex":"0x0","removed":false}]`, + }, { + f: 
sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), + err: "invalid block range", }, } { - logs, _ := tc.f.Logs(context.Background()) - var haveHashes []common.Hash - for _, l := range logs { - haveHashes = append(haveHashes, l.Topics[0]) + logs, err := tc.f.Logs(context.Background()) + if err == nil && tc.err != "" { + t.Fatalf("test %d, expected error %q, got nil", i, tc.err) + } else if err != nil && err.Error() != tc.err { + t.Fatalf("test %d, expected error %q, got %q", i, tc.err, err.Error()) } - if have, want := len(haveHashes), len(tc.wantHashes); have != want { - t.Fatalf("test %d, have %d logs, want %d", i, have, want) - } - if len(haveHashes) == 0 { + if tc.want == "" && len(logs) == 0 { continue } - if !reflect.DeepEqual(tc.wantHashes, haveHashes) { - t.Fatalf("test %d, have %v want %v", i, haveHashes, tc.wantHashes) + have, err := json.Marshal(logs) + if err != nil { + t.Fatal(err) + } + if string(have) != tc.want { + t.Fatalf("test %d, have:\n%s\nwant:\n%s", i, have, tc.want) } } + + t.Run("timeout", func(t *testing.T) { + f := sys.NewRangeFilter(0, -1, nil, nil) + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour)) + defer cancel() + _, err := f.Logs(ctx) + if err == nil { + t.Fatal("expected error") + } + if err != context.DeadlineExceeded { + t.Fatalf("expected context.DeadlineExceeded, got %v", err) + } + }) } diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index ee0b0ebcc..226991b24 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -23,14 +23,14 @@ import ( "fmt" "math" "math/big" - "sort" "sync/atomic" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/exp/slices" ) var ( @@ -69,20 +69,9 @@ type processedFees struct { } // txGasAndReward is sorted in ascending order based on reward -type ( - txGasAndReward struct { - gasUsed uint64 - reward *big.Int - } - sortGasAndReward []txGasAndReward -) - -func (s sortGasAndReward) Len() int { return len(s) } -func (s sortGasAndReward) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sortGasAndReward) Less(i, j int) bool { - return s[i].reward.Cmp(s[j].reward) < 0 +type txGasAndReward struct { + gasUsed uint64 + reward *big.Int } // processBlock takes a blockFees structure with the blockNumber, the header and optionally @@ -94,7 +83,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { bf.results.baseFee = new(big.Int) } if chainconfig.IsLondon(big.NewInt(int64(bf.blockNumber + 1))) { - bf.results.nextBaseFee = misc.CalcBaseFee(chainconfig, bf.header) + bf.results.nextBaseFee = eip1559.CalcBaseFee(chainconfig, bf.header) } else { bf.results.nextBaseFee = new(big.Int) } @@ -117,12 +106,14 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { return } - sorter := make(sortGasAndReward, len(bf.block.Transactions())) + sorter := make([]txGasAndReward, len(bf.block.Transactions())) for i, tx := range bf.block.Transactions() { reward, _ := tx.EffectiveGasTip(bf.block.BaseFee()) sorter[i] = txGasAndReward{gasUsed: bf.receipts[i].GasUsed, reward: reward} } - sort.Stable(sorter) + slices.SortStableFunc(sorter, func(a, b txGasAndReward) int { + return a.reward.Cmp(b.reward) + }) var txIndex int sumGasUsed := sorter[0].gasUsed diff --git 
a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index a3dd83d79..b71964981 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -19,7 +19,6 @@ package gasprice import ( "context" "math/big" - "sort" "sync" "github.com/ethereum/go-ethereum/common" @@ -30,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/exp/slices" ) const sampleNumber = 3 // Number of transactions sampled in a block @@ -208,7 +208,7 @@ func (oracle *Oracle) SuggestTipCap(ctx context.Context) (*big.Int, error) { } price := lastPrice if len(results) > 0 { - sort.Sort(bigIntArray(results)) + slices.SortFunc(results, func(a, b *big.Int) int { return a.Cmp(b) }) price = results[(len(results)-1)*oracle.percentile/100] } if price.Cmp(oracle.maxPrice) > 0 { @@ -227,30 +227,6 @@ type results struct { err error } -type txSorter struct { - txs []*types.Transaction - baseFee *big.Int -} - -func newSorter(txs []*types.Transaction, baseFee *big.Int) *txSorter { - return &txSorter{ - txs: txs, - baseFee: baseFee, - } -} - -func (s *txSorter) Len() int { return len(s.txs) } -func (s *txSorter) Swap(i, j int) { - s.txs[i], s.txs[j] = s.txs[j], s.txs[i] -} -func (s *txSorter) Less(i, j int) bool { - // It's okay to discard the error because a tx would never be - // accepted into a block with an invalid effective tip. - tip1, _ := s.txs[i].EffectiveGasTip(s.baseFee) - tip2, _ := s.txs[j].EffectiveGasTip(s.baseFee) - return tip1.Cmp(tip2) < 0 -} - // getBlockValues calculates the lowest transaction gas price in a given block // and sends it to the result channel. If the block is empty or all transactions // are sent by the miner itself(it doesn't make any sense to include this kind of @@ -267,14 +243,21 @@ func (oracle *Oracle) getBlockValues(ctx context.Context, blockNum uint64, limit signer := types.MakeSigner(oracle.backend.ChainConfig(), block.Number(), block.Time()) // Sort the transaction by effective tip in ascending sort. - txs := make([]*types.Transaction, len(block.Transactions())) - copy(txs, block.Transactions()) - sorter := newSorter(txs, block.BaseFee()) - sort.Sort(sorter) + txs := block.Transactions() + sortedTxs := make([]*types.Transaction, len(txs)) + copy(sortedTxs, txs) + baseFee := block.BaseFee() + slices.SortFunc(sortedTxs, func(a, b *types.Transaction) int { + // It's okay to discard the error because a tx would never be + // accepted into a block with an invalid effective tip. 
+ tip1, _ := a.EffectiveGasTip(baseFee) + tip2, _ := b.EffectiveGasTip(baseFee) + return tip1.Cmp(tip2) + }) var prices []*big.Int - for _, tx := range sorter.txs { - tip, _ := tx.EffectiveGasTip(block.BaseFee()) + for _, tx := range sortedTxs { + tip, _ := tx.EffectiveGasTip(baseFee) if ignoreUnder != nil && tip.Cmp(ignoreUnder) == -1 { continue } @@ -291,9 +274,3 @@ func (oracle *Oracle) getBlockValues(ctx context.Context, blockNum uint64, limit case <-quit: } } - -type bigIntArray []*big.Int - -func (s bigIntArray) Len() int { return len(s) } -func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 } -func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/eth/handler.go b/eth/handler.go index f0b043166..a629ec5ee 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -29,6 +29,8 @@ import ( "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/fetcher" @@ -37,13 +39,20 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) const ( // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 + + // txMaxBroadcastSize is the maximum size of a transaction that will be broadcast. + // All larger transactions will only be announced and need to be fetched + // by the peer. + txMaxBroadcastSize = 4096 ) var ( @@ -61,12 +70,12 @@ type txPool interface { // tx hash. Get(hash common.Hash) *types.Transaction - // AddRemotes should add the given transactions to the pool. - AddRemotes([]*types.Transaction) []error + // Add should add the given transactions to the pool. + Add(txs []*types.Transaction, local bool, sync bool) []error // Pending should return pending transactions. // The slice should be modifiable by the caller. - Pending(enforceTips bool) map[common.Address]types.Transactions + Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction // SubscribeNewTxsEvent should return an event subscription of // NewTxsEvent and send events to the given channel. @@ -117,7 +126,9 @@ type handler struct { chainSync *chainSyncer wg sync.WaitGroup - peerWG sync.WaitGroup + + handlerStartCh chan struct{} + handlerDoneCh chan struct{} } // newHandler returns a handler for all Ethereum chain management protocol. @@ -137,6 +148,8 @@ func newHandler(config *handlerConfig) (*handler, error) { merger: config.Merger, requiredBlocks: config.RequiredBlocks, quitSync: make(chan struct{}), + handlerDoneCh: make(chan struct{}), + handlerStartCh: make(chan struct{}), } if config.Sync == downloader.FullSync { // The database seems empty as the current block is the genesis.
Yet the snap @@ -172,7 +185,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } // If we've successfully finished a sync cycle, accept transactions from // the network - h.acceptTxs.Store(true) + h.enableSyncedFeatures() } // Construct the downloader (long sync) h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, success) @@ -261,7 +274,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } n, err := h.chain.InsertChain(blocks) if err == nil { - h.acceptTxs.Store(true) // Mark initial sync done on any fetcher import + h.enableSyncedFeatures() // Mark initial sync done on any fetcher import } return n, err } @@ -274,14 +287,58 @@ func newHandler(config *handlerConfig) (*handler, error) { } return p.RequestTxs(hashes) } - h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx) + addTxs := func(txs []*types.Transaction) []error { + return h.txpool.Add(txs, false, false) + } + h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx) h.chainSync = newChainSyncer(h) return h, nil } +// protoTracker tracks the number of active protocol handlers. +func (h *handler) protoTracker() { + defer h.wg.Done() + var active int + for { + select { + case <-h.handlerStartCh: + active++ + case <-h.handlerDoneCh: + active-- + case <-h.quitSync: + // Wait for all active handlers to finish. + for ; active > 0; active-- { + <-h.handlerDoneCh + } + return + } + } +} + +// incHandlers signals to increment the number of active handlers if not +// quitting. +func (h *handler) incHandlers() bool { + select { + case h.handlerStartCh <- struct{}{}: + return true + case <-h.quitSync: + return false + } +} + +// decHandlers signals to decrement the number of active handlers. +func (h *handler) decHandlers() { + h.handlerDoneCh <- struct{}{} +} + // runEthPeer registers an eth peer into the joint eth/snap peerset, adds it to // various subsystems and starts handling messages. func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { + if !h.incHandlers() { + return p2p.DiscQuitting + } + defer h.decHandlers() + // If the peer has a `snap` extension, wait for it to connect so we can have // a uniform initialization/teardown mechanism snap, err := h.peers.waitSnapExtension(peer) @@ -289,12 +346,6 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { peer.Log().Error("Snapshot extension barrier failed", "err", err) return err } - // TODO(karalabe): Not sure why this is needed - if !h.chainSync.handlePeerEvent(peer) { - return p2p.DiscQuitting - } - h.peerWG.Add(1) - defer h.peerWG.Done() // Execute the Ethereum handshake var ( @@ -304,7 +355,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { number = head.Number.Uint64() td = h.chain.GetTd(hash, number) ) - forkID := forkid.NewID(h.chain.Config(), genesis.Hash(), number, head.Time) + forkID := forkid.NewID(h.chain.Config(), genesis, number, head.Time) if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil { peer.Log().Debug("Ethereum handshake failed", "err", err) return err @@ -350,7 +401,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { return err } } - h.chainSync.handlePeerEvent(peer) + h.chainSync.handlePeerEvent() // Propagate existing transactions. new transactions appearing // after this will be sent via broadcasts. 
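
The protoTracker/incHandlers/decHandlers trio above replaces the old peerWG wait group: one goroutine owns the active-handler counter, registration and shutdown race in a single select, and Stop drains stragglers before returning. A stripped-down sketch of the same pattern, with hypothetical names outside geth:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// tracker counts active handlers the way protoTracker does: a single
// goroutine owns the counter and serializes start/done/quit events.
type tracker struct {
	startCh chan struct{}
	doneCh  chan struct{}
	quit    chan struct{}
	wg      sync.WaitGroup
}

func newTracker() *tracker {
	t := &tracker{
		startCh: make(chan struct{}),
		doneCh:  make(chan struct{}),
		quit:    make(chan struct{}),
	}
	t.wg.Add(1)
	go t.loop()
	return t
}

func (t *tracker) loop() {
	defer t.wg.Done()
	var active int
	for {
		select {
		case <-t.startCh:
			active++
		case <-t.doneCh:
			active--
		case <-t.quit:
			// Shutdown: wait for every active handler to signal done.
			for ; active > 0; active-- {
				<-t.doneCh
			}
			return
		}
	}
}

// inc registers a handler, refusing once shutdown has begun.
func (t *tracker) inc() bool {
	select {
	case t.startCh <- struct{}{}:
		return true
	case <-t.quit:
		return false
	}
}

func (t *tracker) dec() { t.doneCh <- struct{}{} }

func main() {
	t := newTracker()
	for i := 0; i < 3; i++ {
		if !t.inc() {
			return
		}
		go func(i int) {
			defer t.dec()
			time.Sleep(10 * time.Millisecond)
			fmt.Println("handler", i, "done")
		}(i)
	}
	close(t.quit)
	t.wg.Wait() // returns only after all three handlers have finished
}
```

Unlike a bare WaitGroup, the select in inc means a handler can never register after shutdown starts, which is why runEthPeer can simply return p2p.DiscQuitting instead of racing Stop.
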
@@ -411,10 +462,19 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { // `eth`, all subsystem registrations and lifecycle management will be done by // the main `eth` handler to prevent strange races. func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error { - h.peerWG.Add(1) - defer h.peerWG.Done() + if !h.incHandlers() { + return p2p.DiscQuitting + } + defer h.decHandlers() if err := h.peers.registerSnapExtension(peer); err != nil { + if metrics.Enabled { + if peer.Inbound() { + snap.IngressRegistrationErrorMeter.Mark(1) + } else { + snap.EgressRegistrationErrorMeter.Mark(1) + } + } peer.Log().Warn("Snapshot extension registration failed", "err", err) return err } @@ -477,6 +537,10 @@ func (h *handler) Start(maxPeers int) { // start sync handlers h.wg.Add(1) go h.chainSync.loop() + + // start peer handler tracker + h.wg.Add(1) + go h.protoTracker() } func (h *handler) Stop() { @@ -486,14 +550,13 @@ func (h *handler) Stop() { // Quit chainSync and txsync64. // After this is done, no new peers will be accepted. close(h.quitSync) - h.wg.Wait() // Disconnect existing sessions. // This also closes the gate for any new registrations on the peer set. // sessions which are already established but not added to h.peers yet // will exit when they try to register. h.peers.close() - h.peerWG.Wait() + h.wg.Wait() log.Info("Ethereum protocol stopped") } @@ -560,8 +623,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) { // Broadcast transactions to a batch of peers not knowing about it for _, tx := range txs { peers := h.peers.peersWithoutTransaction(tx.Hash()) + + var numDirect int + if tx.Size() <= txMaxBroadcastSize { + numDirect = int(math.Sqrt(float64(len(peers)))) + } // Send the tx unconditionally to a subset of our peers - numDirect := int(math.Sqrt(float64(len(peers)))) for _, peer := range peers[:numDirect] { txset[peer] = append(txset[peer], tx.Hash()) } @@ -609,3 +676,12 @@ func (h *handler) txBroadcastLoop() { } } } + +// enableSyncedFeatures enables the post-sync functionalities when the initial +// sync is finished. 
+func (h *handler) enableSyncedFeatures() { + h.acceptTxs.Store(true) + if h.chain.TrieDB().Scheme() == rawdb.PathScheme { + h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize) + } +} diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 00be022d9..3a5e6608b 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -134,7 +134,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td // Update the peer's total difficulty if better than the previous if _, td := peer.Head(); trueTD.Cmp(td) > 0 { peer.SetHead(trueHead, trueTD) - h.chainSync.handlePeerEvent(peer) + h.chainSync.handlePeerEvent() } return nil } diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index fd7df2340..41619fe30 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -311,11 +311,10 @@ func testSendTransactions(t *testing.T, protocol uint) { for nonce := range insert { tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240)) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) - insert[nonce] = tx } - go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed - time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) + go handler.txpool.Add(insert, false, false) // Need goroutine to not block on feed + time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) // Create a source handler to send messages through and a sink peer to receive them p2pSrc, p2pSink := p2p.MsgPipe() @@ -437,10 +436,9 @@ func testTransactionPropagation(t *testing.T, protocol uint) { for nonce := range txs { tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) - txs[nonce] = tx } - source.txpool.AddRemotes(txs) + source.txpool.Add(txs, false, false) // Iterate through all the sinks and ensure they all got the transactions for i := range sinks { diff --git a/eth/handler_test.go b/eth/handler_test.go index 8939e53a9..2e0a98845 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -74,13 +75,12 @@ func (p *testTxPool) Has(hash common.Hash) bool { func (p *testTxPool) Get(hash common.Hash) *types.Transaction { p.lock.Lock() defer p.lock.Unlock() - return p.pool[hash] } -// AddRemotes appends a batch of transactions to the pool, and notifies any +// Add appends a batch of transactions to the pool, and notifies any // listeners if the addition channel is non nil -func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error { +func (p *testTxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { p.lock.Lock() defer p.lock.Unlock() @@ -92,11 +92,11 @@ func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error { } // Pending returns all the transactions known to the pool -func (p *testTxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { +func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction { p.lock.RLock() defer 
p.lock.RUnlock() - batches := make(map[common.Address]types.Transactions) + batches := make(map[common.Address][]*types.Transaction) for _, tx := range p.pool { from, _ := types.Sender(types.HomesteadSigner{}, tx) batches[from] = append(batches[from], tx) @@ -104,7 +104,19 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address]types.Transact for _, batch := range batches { sort.Sort(types.TxByNonce(batch)) } - return batches + pending := make(map[common.Address][]*txpool.LazyTransaction) + for addr, batch := range batches { + for _, tx := range batch { + pending[addr] = append(pending[addr], &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx, + Time: tx.Time(), + GasFeeCap: tx.GasFeeCap(), + GasTipCap: tx.GasTipCap(), + }) + } + } + return pending } // SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and diff --git a/eth/protocols/eth/discovery.go b/eth/protocols/eth/discovery.go index 87857244b..a7bdd47da 100644 --- a/eth/protocols/eth/discovery.go +++ b/eth/protocols/eth/discovery.go @@ -61,6 +61,6 @@ func StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) { func currentENREntry(chain *core.BlockChain) *enrEntry { head := chain.CurrentHeader() return &enrEntry{ - ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), head.Number.Uint64(), head.Time), + ForkID: forkid.NewID(chain.Config(), chain.Genesis(), head.Number.Uint64(), head.Time), } } diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 2f2dd1cf6..b2ce883bc 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" @@ -95,11 +96,15 @@ type TxPool interface { // MakeProtocols constructs the P2P protocol definitions for `eth`. func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { - protocols := make([]p2p.Protocol, len(ProtocolVersions)) - for i, version := range ProtocolVersions { + protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) + for _, version := range ProtocolVersions { version := version // Closure - protocols[i] = p2p.Protocol{ + // Path scheme does not support GetNodeData, don't advertise eth66 on it + if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { + continue + } + protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, Length: protocolLengths[version], @@ -119,7 +124,7 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 }, Attributes: []enr.Entry{currentENREntry(backend.Chain())}, DialCandidates: dnsdisc, - } + }) } return protocols } @@ -127,7 +132,7 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 // NodeInfo represents a short summary of the `eth` sub-protocol metadata // known about the host peer. 
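
The Pending rewrite above wraps each pooled transaction in a txpool.LazyTransaction, which carries only the metadata miners and relays need up front. A rough stand-alone sketch of that wrapping, using stand-in types rather than the real core/txpool ones:

```go
package main

import (
	"fmt"
	"math/big"
	"time"
)

// tx is a stand-in for *types.Transaction.
type tx struct {
	hash      string
	time      time.Time
	gasFeeCap *big.Int
	gasTipCap *big.Int
}

// lazyTx mirrors the fields the test helper copies into
// txpool.LazyTransaction: enough to sort and fee-filter without
// materializing the full transaction.
type lazyTx struct {
	Hash      string
	Tx        *tx // resolved eagerly here; a real pool may defer this
	Time      time.Time
	GasFeeCap *big.Int
	GasTipCap *big.Int
}

func toLazy(txs []*tx) []*lazyTx {
	lazies := make([]*lazyTx, 0, len(txs))
	for _, t := range txs {
		lazies = append(lazies, &lazyTx{
			Hash:      t.hash,
			Tx:        t,
			Time:      t.time,
			GasFeeCap: t.gasFeeCap,
			GasTipCap: t.gasTipCap,
		})
	}
	return lazies
}

func main() {
	pending := toLazy([]*tx{
		{hash: "0xaa", time: time.Now(), gasFeeCap: big.NewInt(2), gasTipCap: big.NewInt(1)},
	})
	for _, l := range pending {
		fmt.Println(l.Hash, l.GasFeeCap, l.GasTipCap)
	}
}
```
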
type NodeInfo struct { - Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, Rinkeby=4, Goerli=5) + Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, Goerli=5) Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index bbb9866bd..bf2874721 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -111,21 +112,24 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, panic(err) } for _, block := range bs { - chain.StateCache().TrieDB().Commit(block.Root(), false) + chain.TrieDB().Commit(block.Root(), false) } - txconfig := txpool.DefaultConfig + txconfig := legacypool.DefaultConfig txconfig.Journal = "" // Don't litter the disk with test journals + pool := legacypool.New(txconfig, chain) + txpool, _ := txpool.New(new(big.Int).SetUint64(txconfig.PriceLimit), chain, []txpool.SubPool{pool}) + return &testBackend{ db: db, chain: chain, - txpool: txpool.NewTxPool(txconfig, params.TestChainConfig, chain), + txpool: txpool, } } // close tears down the transaction pool and chain behind the mock backend. func (b *testBackend) close() { - b.txpool.Stop() + b.txpool.Close() b.chain.Stop() } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index 74e514b86..da741791b 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -246,6 +247,10 @@ func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { // ServiceGetNodeDataQuery assembles the response to a node data query. It is // exposed to allow external packages to test protocol behavior. func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { + // Request nodes by hash is not supported in path-based scheme. + if chain.TrieDB().Scheme() == rawdb.PathScheme { + return nil + } // Gather state data until the fetch or network limits is reached var ( bytes int @@ -257,7 +262,7 @@ func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [] break } // Retrieve the requested state entry - entry, err := chain.TrieNode(hash) + entry, err := chain.TrieDB().Node(hash) if len(entry) == 0 || err != nil { // Read the contract code with prefix only to save unnecessary lookups. 
entry, err = chain.ContractCodeWithPrefix(hash) diff --git a/eth/protocols/eth/handshake.go b/eth/protocols/eth/handshake.go index 9a2769fa0..ea16a85b1 100644 --- a/eth/protocols/eth/handshake.go +++ b/eth/protocols/eth/handshake.go @@ -17,12 +17,14 @@ package eth import ( + "errors" "fmt" "math/big" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" ) @@ -59,9 +61,11 @@ func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis select { case err := <-errc: if err != nil { + markError(p, err) return err } case <-timeout.C: + markError(p, p2p.DiscReadTimeout) return p2p.DiscReadTimeout } } @@ -105,3 +109,25 @@ func (p *Peer) readStatus(network uint64, status *StatusPacket, genesis common.H } return nil } + +// markError registers the error with the corresponding metric. +func markError(p *Peer, err error) { + if !metrics.Enabled { + return + } + m := meters.get(p.Inbound()) + switch errors.Unwrap(err) { + case errNetworkIDMismatch: + m.networkIDMismatch.Mark(1) + case errProtocolVersionMismatch: + m.protocolVersionMismatch.Mark(1) + case errGenesisMismatch: + m.genesisMismatch.Mark(1) + case errForkIDRejected: + m.forkidRejected.Mark(1) + case p2p.DiscReadTimeout: + m.timeoutError.Mark(1) + default: + m.peerError.Mark(1) + } +} diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go index 5c6727d91..dca66e0c5 100644 --- a/eth/protocols/eth/handshake_test.go +++ b/eth/protocols/eth/handshake_test.go @@ -40,7 +40,7 @@ func testHandshake(t *testing.T, protocol uint) { genesis = backend.chain.Genesis() head = backend.chain.CurrentBlock() td = backend.chain.GetTd(head.Hash(), head.Number.Uint64()) - forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time) + forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time) ) tests := []struct { code uint64 diff --git a/eth/protocols/eth/metrics.go b/eth/protocols/eth/metrics.go new file mode 100644 index 000000000..5e0aee39f --- /dev/null +++ b/eth/protocols/eth/metrics.go @@ -0,0 +1,81 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package eth + +import "github.com/ethereum/go-ethereum/metrics" + +// meters stores ingress and egress handshake meters. +var meters bidirectionalMeters + +// bidirectionalMeters stores ingress and egress handshake meters. +type bidirectionalMeters struct { + ingress *hsMeters + egress *hsMeters +} + +// get returns the corresponding meter depending on whether ingress or egress is +// desired. +func (h *bidirectionalMeters) get(ingress bool) *hsMeters { + if ingress { + return h.ingress + } + return h.egress +} + +// hsMeters is a collection of meters which track metrics related to the +// eth subprotocol handshake. +type hsMeters struct { + // peerError measures the number of errors related to incorrect peer + // behaviour, such as invalid message code, size, encoding, etc. + peerError metrics.Meter + + // timeoutError measures the number of timeouts. + timeoutError metrics.Meter + + // networkIDMismatch measures the number of network id mismatch errors. + networkIDMismatch metrics.Meter + + // protocolVersionMismatch measures the number of differing protocol + // versions. + protocolVersionMismatch metrics.Meter + + // genesisMismatch measures the number of differing genesis hashes. + genesisMismatch metrics.Meter + + // forkidRejected measures the number of differing forkids. + forkidRejected metrics.Meter +} + +// newHandshakeMeters registers and returns handshake meters for the given +// base. +func newHandshakeMeters(base string) *hsMeters { + return &hsMeters{ + peerError: metrics.NewRegisteredMeter(base+"error/peer", nil), + timeoutError: metrics.NewRegisteredMeter(base+"error/timeout", nil), + networkIDMismatch: metrics.NewRegisteredMeter(base+"error/network", nil), + protocolVersionMismatch: metrics.NewRegisteredMeter(base+"error/version", nil), + genesisMismatch: metrics.NewRegisteredMeter(base+"error/genesis", nil), + forkidRejected: metrics.NewRegisteredMeter(base+"error/forkid", nil), + } +} + +func init() { + meters = bidirectionalMeters{ + ingress: newHandshakeMeters("eth/protocols/eth/ingress/handshake/"), + egress: newHandshakeMeters("eth/protocols/eth/egress/handshake/"), + } +} diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 0d4b36898..4b9f5ad6b 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -59,13 +59,13 @@ const ( GetBlockBodiesMsg = 0x05 BlockBodiesMsg = 0x06 NewBlockMsg = 0x07 + NewPooledTransactionHashesMsg = 0x08 + GetPooledTransactionsMsg = 0x09 + PooledTransactionsMsg = 0x0a GetNodeDataMsg = 0x0d NodeDataMsg = 0x0e GetReceiptsMsg = 0x0f ReceiptsMsg = 0x10 - NewPooledTransactionHashesMsg = 0x08 - GetPooledTransactionsMsg = 0x09 - PooledTransactionsMsg = 0x0a ) var ( @@ -363,6 +363,17 @@ func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } +func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } +func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } + +func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } + +func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } +func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } + func (*GetNodeDataPacket) Name() string { return "GetNodeData" } func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } @@ -374,15 +385,3 @@ func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } func (*ReceiptsPacket) Name() string { return "Receipts" } func (*ReceiptsPacket) Kind() byte { return 
ReceiptsMsg } - -func (*NewPooledTransactionHashesPacket66) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket66) Kind() byte { return NewPooledTransactionHashesMsg } - -func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" } -func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg } - -func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } -func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } - -func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } -func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 55781ac54..b2fd03766 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -284,7 +284,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac req.Bytes = softResponseLimit } // Retrieve the requested state and bail out if non existent - tr, err := trie.New(trie.StateTrieID(req.Root), chain.StateCache().TrieDB()) + tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB()) if err != nil { return nil, nil } @@ -322,12 +322,12 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac // Generate the Merkle proofs for the first and last account proof := light.NewNodeSet() - if err := tr.Prove(req.Origin[:], 0, proof); err != nil { + if err := tr.Prove(req.Origin[:], proof); err != nil { log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) return nil, nil } if last != (common.Hash{}) { - if err := tr.Prove(last[:], 0, proof); err != nil { + if err := tr.Prove(last[:], proof); err != nil { log.Warn("Failed to prove account range", "last", last, "err", err) return nil, nil } @@ -414,7 +414,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP if origin != (common.Hash{}) || (abort && len(storage) > 0) { // Request started at a non-zero hash or was capped prematurely, add // the endpoint Merkle proofs - accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.StateCache().TrieDB()) + accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB()) if err != nil { return nil, nil } @@ -423,17 +423,17 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP return nil, nil } id := trie.StorageTrieID(req.Root, account, acc.Root) - stTrie, err := trie.NewStateTrie(id, chain.StateCache().TrieDB()) + stTrie, err := trie.NewStateTrie(id, chain.TrieDB()) if err != nil { return nil, nil } proof := light.NewNodeSet() - if err := stTrie.Prove(origin[:], 0, proof); err != nil { + if err := stTrie.Prove(origin[:], proof); err != nil { log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) return nil, nil } if last != (common.Hash{}) { - if err := stTrie.Prove(last[:], 0, proof); err != nil { + if err := stTrie.Prove(last[:], proof); err != nil { log.Warn("Failed to prove storage range", "last", last, "err", err) return nil, nil } @@ -487,7 +487,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s req.Bytes = softResponseLimit } // Make sure we have the state associated with the request - triedb := chain.StateCache().TrieDB() + triedb := chain.TrieDB() accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb) if err != nil { diff --git 
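
markError in the handshake hunk above buckets failures by unwrapping the error once and comparing against package-level sentinels. The same technique in miniature, with hypothetical sentinels and labels standing in for geth's meters:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinels standing in for errNetworkIDMismatch and friends.
var (
	errNetworkIDMismatch = errors.New("network ID mismatch")
	errGenesisMismatch   = errors.New("genesis mismatch")
)

// classify mirrors markError's switch: one errors.Unwrap, then a
// comparison against known sentinels; anything else counts as a
// generic peer error.
func classify(err error) string {
	switch errors.Unwrap(err) {
	case errNetworkIDMismatch:
		return "error/network"
	case errGenesisMismatch:
		return "error/genesis"
	default:
		return "error/peer"
	}
}

func main() {
	wrapped := fmt.Errorf("status message: %w (have 5, want 1)", errNetworkIDMismatch)
	fmt.Println(classify(wrapped))                  // error/network
	fmt.Println(classify(errors.New("junk frame"))) // error/peer
}
```

Note that errors.Unwrap returns nil unless the error actually wraps another one, so this shape relies on the handshake code constructing its errors with fmt.Errorf and %w around the sentinels.
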
a/rpc/constants_unix.go b/eth/protocols/snap/metrics.go similarity index 62% rename from rpc/constants_unix.go rename to eth/protocols/snap/metrics.go index 1f04d15d7..a9f35ca44 100644 --- a/rpc/constants_unix.go +++ b/eth/protocols/snap/metrics.go @@ -1,4 +1,4 @@ -// Copyright 2019 The go-ethereum Authors +// Copyright 2023 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,21 +14,16 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. -//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +package snap -package rpc - -/* -#include <sys/un.h> - -int max_socket_path_size() { -struct sockaddr_un s; -return sizeof(s.sun_path); -} -*/ -import "C" +import ( + metrics "github.com/ethereum/go-ethereum/metrics" +) var ( - max_path_size = C.max_socket_path_size() + ingressRegistrationErrorName = "eth/protocols/snap/ingress/registration/error" + egressRegistrationErrorName = "eth/protocols/snap/egress/registration/error" + + IngressRegistrationErrorMeter = metrics.NewRegisteredMeter(ingressRegistrationErrorName, nil) + EgressRegistrationErrorMeter = metrics.NewRegisteredMeter(egressRegistrationErrorName, nil) ) diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go index 60a254f39..0db206b08 100644 --- a/eth/protocols/snap/protocol.go +++ b/eth/protocols/snap/protocol.go @@ -21,7 +21,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" ) @@ -104,7 +104,7 @@ func (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) { accounts = make([][]byte, len(p.Accounts)) ) for i, acc := range p.Accounts { - val, err := snapshot.FullAccountRLP(acc.Body) + val, err := types.FullAccountRLP(acc.Body) if err != nil { return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err) } diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index e99eb8843..0f5f2ccdf 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -33,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" @@ -731,6 +730,8 @@ func (s *Syncer) loadSyncStatus() { } s.tasks = progress.Tasks for _, task := range s.tasks { + task := task // closure for task.genBatch in the stacktrie writer callback + task.genBatch = ethdb.HookedBatch{ Batch: s.db.NewBatch(), OnPut: func(key []byte, value []byte) { @@ -742,6 +743,8 @@ func (s *Syncer) loadSyncStatus() { }) for accountHash, subtasks := range task.SubTasks { for _, subtask := range subtasks { + subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback + subtask.genBatch = ethdb.HookedBatch{ Batch: s.db.NewBatch(), OnPut: func(key []byte, value []byte) { @@ -2277,13 +2280,13 @@ func (s *Syncer) forwardAccountTask(task *accountTask) { if task.needCode[i] || task.needState[i] { break } - slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, 
res.accounts[i].CodeHash) + slim := types.SlimAccountRLP(*res.accounts[i]) rawdb.WriteAccountSnapshot(batch, hash, slim) // If the task is complete, drop it into the stack trie to generate // account trie nodes for it if !task.needHeal[i] { - full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted + full, err := types.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted if err != nil { panic(err) // Really shouldn't ever happen } @@ -2902,7 +2905,7 @@ func (s *Syncer) onHealState(paths [][]byte, value []byte) error { if err := rlp.DecodeBytes(value, &account); err != nil { return nil // Returning the error here would drop the remote peer } - blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash) + blob := types.SlimAccountRLP(account) rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob) s.accountHealed += 1 s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob)) diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 58cde8d1e..1514ad4e1 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -22,7 +22,6 @@ import ( "encoding/binary" "fmt" "math/big" - "sort" "sync" "testing" "time" @@ -36,8 +35,10 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" + "golang.org/x/exp/slices" ) func TestHashing(t *testing.T) { @@ -127,9 +128,9 @@ type testPeer struct { remote *Syncer logger log.Logger accountTrie *trie.Trie - accountValues entrySlice + accountValues []*kv storageTries map[common.Hash]*trie.Trie - storageValues map[common.Hash]entrySlice + storageValues map[common.Hash][]*kv accountRequestHandler accountHandlerFunc storageRequestHandler storageHandlerFunc @@ -273,12 +274,12 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H // Actually, we need to supply proofs either way! This seems to be an implementation // quirk in go-ethereum proof := light.NewNodeSet() - if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { + if err := t.accountTrie.Prove(origin[:], proof); err != nil { t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] - if err := t.accountTrie.Prove(lastK, 0, proof); err != nil { + if err := t.accountTrie.Prove(lastK, proof); err != nil { t.logger.Error("Could not prove last item", "error", err) } } @@ -358,12 +359,12 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm // Here's a potential gotcha: when constructing the proof, we cannot // use the 'origin' slice directly, but must use the full 32-byte // hash form. 
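
The snapshot account helpers referenced above now live in core/types: SlimAccountRLP takes the whole StateAccount rather than four loose fields, and FullAccountRLP reverses it. A small usage sketch, assuming only the signatures visible in this diff:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	account := types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(1000),
		Root:     types.EmptyRootHash,
		CodeHash: types.EmptyCodeHash.Bytes(),
	}
	// Slim encoding elides the empty storage root and code hash, which
	// is why the snapshot layer stores it; FullAccountRLP restores the
	// consensus form for trie insertion.
	slim := types.SlimAccountRLP(account)
	full, err := types.FullAccountRLP(slim)
	if err != nil {
		panic(err)
	}
	fmt.Printf("slim: %d bytes, full: %d bytes\n", len(slim), len(full))
}
```
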
- if err := stTrie.Prove(originHash[:], 0, proof); err != nil { + if err := stTrie.Prove(originHash[:], proof); err != nil { t.logger.Error("Could not prove inexistence of origin", "origin", originHash, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] - if err := stTrie.Prove(lastK, 0, proof); err != nil { + if err := stTrie.Prove(lastK, proof); err != nil { t.logger.Error("Could not prove last item", "error", err) } } @@ -416,13 +417,13 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco // Here's a potential gotcha: when constructing the proof, we cannot // use the 'origin' slice directly, but must use the full 32-byte // hash form. - if err := stTrie.Prove(origin[:], 0, proof); err != nil { + if err := stTrie.Prove(origin[:], proof); err != nil { t.logger.Error("Could not prove inexistence of origin", "origin", origin, "error", err) } if len(keys) > 0 { lastK := (keys[len(keys)-1])[:] - if err := stTrie.Prove(lastK, 0, proof); err != nil { + if err := stTrie.Prove(lastK, proof); err != nil { t.logger.Error("Could not prove last item", "error", err) } } @@ -561,6 +562,11 @@ func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Has func TestSyncBloatedProof(t *testing.T) { t.Parallel() + testSyncBloatedProof(t, rawdb.HashScheme) + testSyncBloatedProof(t, rawdb.PathScheme) +} + +func testSyncBloatedProof(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -570,7 +576,7 @@ func TestSyncBloatedProof(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) source := newTestPeer("source", t, term) source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems @@ -594,12 +600,12 @@ func TestSyncBloatedProof(t *testing.T) { } // The proofs proof := light.NewNodeSet() - if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { + if err := t.accountTrie.Prove(origin[:], proof); err != nil { t.logger.Error("Could not prove origin", "origin", origin, "error", err) } // The bloat: add proof of every single element for _, entry := range t.accountValues { - if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil { + if err := t.accountTrie.Prove(entry.k, proof); err != nil { t.logger.Error("Could not prove item", "error", err) } } @@ -638,6 +644,11 @@ func setupSyncer(scheme string, peers ...*testPeer) *Syncer { func TestSync(t *testing.T) { t.Parallel() + testSync(t, rawdb.HashScheme) + testSync(t, rawdb.PathScheme) +} + +func testSync(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -647,7 +658,7 @@ func TestSync(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -659,7 +670,7 @@ func TestSync(t *testing.T) { if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. 
This caused a @@ -667,6 +678,11 @@ func TestSync(t *testing.T) { func TestSyncTinyTriePanic(t *testing.T) { t.Parallel() + testSyncTinyTriePanic(t, rawdb.HashScheme) + testSyncTinyTriePanic(t, rawdb.PathScheme) +} + +func testSyncTinyTriePanic(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -676,7 +692,7 @@ func TestSyncTinyTriePanic(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -690,13 +706,18 @@ func TestSyncTinyTriePanic(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSync tests a basic sync with multiple peers func TestMultiSync(t *testing.T) { t.Parallel() + testMultiSync(t, rawdb.HashScheme) + testMultiSync(t, rawdb.PathScheme) +} + +func testMultiSync(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -706,7 +727,7 @@ func TestMultiSync(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -720,13 +741,18 @@ func TestMultiSync(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code func TestSyncWithStorage(t *testing.T) { t.Parallel() + testSyncWithStorage(t, rawdb.HashScheme) + testSyncWithStorage(t, rawdb.PathScheme) +} + +func testSyncWithStorage(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -736,7 +762,7 @@ func TestSyncWithStorage(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -752,13 +778,18 @@ func TestSyncWithStorage(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUseless(t *testing.T) { t.Parallel() + testMultiSyncManyUseless(t, rawdb.HashScheme) + testMultiSyncManyUseless(t, rawdb.PathScheme) +} + +func testMultiSyncManyUseless(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -768,7 +799,7 @@ func TestMultiSyncManyUseless(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) @@ -801,11 +832,18 @@ func TestMultiSyncManyUseless(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, 
sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { + t.Parallel() + + testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme) + testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme) +} + +func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -815,7 +853,7 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) @@ -853,11 +891,18 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all func TestMultiSyncManyUnresponsive(t *testing.T) { + t.Parallel() + + testMultiSyncManyUnresponsive(t, rawdb.HashScheme) + testMultiSyncManyUnresponsive(t, rawdb.PathScheme) +} + +func testMultiSyncManyUnresponsive(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -867,7 +912,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) @@ -903,7 +948,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } func checkStall(t *testing.T, term func()) chan struct{} { @@ -925,6 +970,11 @@ func checkStall(t *testing.T, term func()) chan struct{} { func TestSyncBoundaryAccountTrie(t *testing.T) { t.Parallel() + testSyncBoundaryAccountTrie(t, rawdb.HashScheme) + testSyncBoundaryAccountTrie(t, rawdb.PathScheme) +} + +func testSyncBoundaryAccountTrie(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -934,7 +984,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) + nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -952,7 +1002,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is @@ -960,6 +1010,11 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneCappedPeer(t, 
rawdb.HashScheme) + testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -969,7 +1024,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, term) @@ -994,7 +1049,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver @@ -1002,6 +1057,11 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1011,7 +1071,7 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1034,12 +1094,17 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1049,7 +1114,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, accFn accountHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1072,7 +1137,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes @@ -1080,6 +1145,11 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1089,7 +1159,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, codeFn 
codeHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1123,7 +1193,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { if threshold := 100; counter > threshold { t.Logf("Error, expected < %d invocations, got %d", threshold, counter) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the @@ -1131,6 +1201,11 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { func TestSyncBoundaryStorageTrie(t *testing.T) { t.Parallel() + testSyncBoundaryStorageTrie(t, rawdb.HashScheme) + testSyncBoundaryStorageTrie(t, rawdb.PathScheme) +} + +func testSyncBoundaryStorageTrie(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1140,7 +1215,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -1160,7 +1235,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is @@ -1168,6 +1243,11 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Parallel() + testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme) + testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1177,7 +1257,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, term) @@ -1202,7 +1282,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is @@ -1210,6 +1290,11 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Parallel() + testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme) + testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1219,7 +1304,7 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1243,12 
+1328,17 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Parallel() + testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme) + testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1258,7 +1348,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1281,7 +1371,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code, against @@ -1290,6 +1380,12 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { // did not mark the account for healing. func TestSyncWithStorageMisbehavingProve(t *testing.T) { t.Parallel() + + testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme) + testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme) +} + +func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1299,7 +1395,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -1314,19 +1410,16 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) { if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } type kv struct { k, v []byte } -// Some helpers for sorting -type entrySlice []*kv - -func (p entrySlice) Len() int { return len(p) } -func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } -func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (k *kv) cmp(other *kv) int { + return bytes.Compare(k.k, other.k) +} func key32(i uint64) []byte { key := make([]byte, 32) @@ -1367,11 +1460,11 @@ func getCodeByHash(hash common.Hash) []byte { } // makeAccountTrieNoStorage spits out a trie, along with the leafs -func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) { +func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) { var ( - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) - entries entrySlice + entries []*kv ) for i := uint64(1); i <= uint64(n); i++ { value, _ := rlp.EncodeToBytes(&types.StateAccount{ @@ -1385,12 +1478,12 @@ func makeAccountTrieNoStorage(n int) 
(string, *trie.Trie, entrySlice) { accTrie.MustUpdate(elem.k, elem.v) entries = append(entries, elem) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Commit the state changes into db and re-create the trie // for accessing later. - root, nodes := accTrie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := accTrie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) accTrie, _ = trie.New(trie.StateTrieID(root), db) return db.Scheme(), accTrie, entries @@ -1399,12 +1492,12 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) { // makeBoundaryAccountTrie constructs an account trie. Instead of filling // accounts normally, this function will fill a few accounts which have // boundary hash. -func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) { +func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) { var ( - entries entrySlice + entries []*kv boundaries []common.Hash - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) ) // Initialize boundaries @@ -1447,12 +1540,12 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) { accTrie.MustUpdate(elem.k, elem.v) entries = append(entries, elem) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Commit the state changes into db and re-create the trie // for accessing later. - root, nodes := accTrie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := accTrie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) accTrie, _ = trie.New(trie.StateTrieID(root), db) return db.Scheme(), accTrie, entries @@ -1460,14 +1553,14 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) { // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts // has a unique storage set. 
-func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { var ( - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) - entries entrySlice + entries []*kv storageRoots = make(map[common.Hash]common.Hash) storageTries = make(map[common.Hash]*trie.Trie) - storageEntries = make(map[common.Hash]entrySlice) + storageEntries = make(map[common.Hash][]*kv) nodes = trienode.NewMergedNodeSet() ) // Create n accounts in the trie @@ -1494,14 +1587,14 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) storageRoots[common.BytesToHash(key)] = stRoot storageEntries[common.BytesToHash(key)] = stEntries } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Commit account trie - root, set := accTrie.Commit(true) + root, set, _ := accTrie.Commit(true) nodes.Merge(set) // Commit gathered dirty nodes into database - db.Update(root, types.EmptyRootHash, nodes) + db.Update(root, types.EmptyRootHash, 0, nodes, nil) // Re-create tries with new root accTrie, _ = trie.New(trie.StateTrieID(root), db) @@ -1515,14 +1608,14 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) } // makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { +func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { var ( - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) - entries entrySlice + entries []*kv storageRoots = make(map[common.Hash]common.Hash) storageTries = make(map[common.Hash]*trie.Trie) - storageEntries = make(map[common.Hash]entrySlice) + storageEntries = make(map[common.Hash][]*kv) nodes = trienode.NewMergedNodeSet() ) // Create n accounts in the trie @@ -1536,7 +1629,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin var ( stRoot common.Hash stNodes *trienode.NodeSet - stEntries entrySlice + stEntries []*kv ) if boundary { stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db) @@ -1559,14 +1652,14 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin storageRoots[common.BytesToHash(key)] = stRoot storageEntries[common.BytesToHash(key)] = stEntries } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Commit account trie - root, set := accTrie.Commit(true) + root, set, _ := accTrie.Commit(true) nodes.Merge(set) // Commit gathered dirty nodes into database - db.Update(root, types.EmptyRootHash, nodes) + db.Update(root, types.EmptyRootHash, 0, nodes, nil) // Re-create tries with new root accTrie, err := trie.New(trie.StateTrieID(root), db) @@ -1588,9 +1681,9 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin // makeStorageTrieWithSeed fills a storage trie with n items, returning the // not-yet-committed trie and the sorted entries. 
The seeds can be used to ensure // that tries are unique. -func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) { +func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) { trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db) - var entries entrySlice + var entries []*kv for i := uint64(1); i <= n; i++ { // store 'x' at slot 'x' slotValue := key32(i + seed) @@ -1603,17 +1696,17 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas trie.MustUpdate(elem.k, elem.v) entries = append(entries, elem) } - sort.Sort(entries) - root, nodes := trie.Commit(false) + slices.SortFunc(entries, (*kv).cmp) + root, nodes, _ := trie.Commit(false) return root, nodes, entries } // makeBoundaryStorageTrie constructs a storage trie. Instead of filling // storage slots normally, this function will fill a few slots which have // boundary hash. -func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) { +func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) { var ( - entries entrySlice + entries []*kv boundaries []common.Hash trie, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db) ) @@ -1654,20 +1747,20 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo trie.MustUpdate(elem.k, elem.v) entries = append(entries, elem) } - sort.Sort(entries) - root, nodes := trie.Commit(false) + slices.SortFunc(entries, (*kv).cmp) + root, nodes, _ := trie.Commit(false) return root, nodes, entries } -func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { +func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) { t.Helper() - triedb := trie.NewDatabase(rawdb.NewDatabase(db)) + triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme)) accTrie, err := trie.New(trie.StateTrieID(root), triedb) if err != nil { t.Fatal(err) } accounts, slots := 0, 0 - accIt := trie.NewIterator(accTrie.NodeIterator(nil)) + accIt := trie.NewIterator(accTrie.MustNodeIterator(nil)) for accIt.Next() { var acc struct { Nonce uint64 @@ -1685,7 +1778,7 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { if err != nil { t.Fatal(err) } - storeIt := trie.NewIterator(storeTrie.NodeIterator(nil)) + storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil)) for storeIt.Next() { slots++ } @@ -1703,6 +1796,13 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { // TestSyncAccountPerformance tests how efficient the snap algo is at minimizing // state healing func TestSyncAccountPerformance(t *testing.T) { + t.Parallel() + + testSyncAccountPerformance(t, rawdb.HashScheme) + testSyncAccountPerformance(t, rawdb.PathScheme) +} + +func testSyncAccountPerformance(t *testing.T, scheme string) { // Set the account concurrency to 1. 
This _should_ result in the // range root to become correct, and there should be no healing needed defer func(old int) { accountConcurrency = old }(accountConcurrency) @@ -1717,7 +1817,7 @@ func TestSyncAccountPerformance(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -1730,7 +1830,7 @@ func TestSyncAccountPerformance(t *testing.T) { if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) // The trie root will always be requested, since it is added when the snap // sync cycle starts. When popping the queue, we do not look it up again. // Doing so would bring this number down to zero in this artificial testcase, @@ -1790,3 +1890,10 @@ func TestSlotEstimation(t *testing.T) { } } } + +func newDbConfig(scheme string) *trie.Config { + if scheme == rawdb.HashScheme { + return &trie.Config{} + } + return &trie.Config{PathDB: pathdb.Defaults} +} diff --git a/eth/state_accessor.go b/eth/state_accessor.go index 1b0ab0c54..24694df66 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -36,31 +37,11 @@ import ( // for releasing state. var noopReleaser = tracers.StateReleaseFunc(func() {}) -// StateAtBlock retrieves the state database associated with a certain block. -// If no state is locally available for the given block, a number of blocks -// are attempted to be reexecuted to generate the desired state. The optional -// base layer statedb can be provided which is regarded as the statedb of the -// parent block. -// -// An additional release function will be returned if the requested state is -// available. Release is expected to be invoked when the returned state is no longer needed. -// Its purpose is to prevent resource leaking. Though it can be noop in some cases. -// -// Parameters: -// - block: The block for which we want the state(state = block.Root) -// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state -// - base: If the caller is tracing multiple blocks, the caller can provide the parent -// state continuously from the callsite. -// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should -// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. -// Otherwise, the trash generated by caller may be persisted permanently. -// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is -// provided, it would be preferable to start from a fresh state, if we have it -// on disk. 
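The newDbConfig helper added above is what lets each test run under both state schemes: the hash scheme keeps a zero-value trie.Config, while the path scheme opts into pathdb.Defaults. A sketch of standalone usage, assuming the trie/triedb/pathdb import path of the go-ethereum tree this diff targets:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

// newDbConfig mirrors the test helper from the diff.
func newDbConfig(scheme string) *trie.Config {
	if scheme == rawdb.HashScheme {
		return &trie.Config{}
	}
	return &trie.Config{PathDB: pathdb.Defaults}
}

func main() {
	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
		// An in-memory disk database is enough for tests; the config
		// decides whether nodes are stored hash-keyed or path-keyed.
		db := trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
		fmt.Println(db.Scheme())
	}
}
```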
-func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { +func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { var ( current *types.Block database state.Database + triedb *trie.Database report = true origin = block.NumberU64() ) @@ -71,9 +52,9 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe // on top to prevent garbage collection and return a release // function to deref it. if statedb, err = eth.blockchain.StateAt(block.Root()); err == nil { - statedb.Database().TrieDB().Reference(block.Root(), common.Hash{}) + eth.blockchain.TrieDB().Reference(block.Root(), common.Hash{}) return statedb, func() { - statedb.Database().TrieDB().Dereference(block.Root()) + eth.blockchain.TrieDB().Dereference(block.Root()) }, nil } } @@ -84,14 +65,16 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe if preferDisk { // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. - database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + // TODO(rjl493456442), clean cache is disabled to prevent memory leak, + // please re-enable it for better performance. + database = state.NewDatabaseWithConfig(eth.chainDb, trie.HashDefaults) if statedb, err = state.New(block.Root(), database, nil); err == nil { log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) return statedb, noopReleaser, nil } } // The optional base statedb is given, mark the start point as parent block - statedb, database, report = base, base.Database(), false + statedb, database, triedb, report = base, base.Database(), base.Database().TrieDB(), false current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) } else { // Otherwise, try to reexec blocks until we find a state or reach our limit @@ -99,7 +82,10 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. - database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + // TODO(rjl493456442), clean cache is disabled to prevent memory leak, + // please re-enable it for better performance. 
+ triedb = trie.NewDatabase(eth.chainDb, trie.HashDefaults) + database = state.NewDatabaseWithNodeDB(eth.chainDb, triedb) // If we didn't check the live database, do check state over ephemeral database, // otherwise we would rewind past a persisted block (specific corner case is @@ -164,7 +150,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number())) + root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number())) if err != nil { return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", current.NumberU64(), current.Root().Hex(), err) @@ -175,17 +161,58 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe } // Hold the state reference and also drop the parent state // to prevent accumulating too many nodes in memory. - database.TrieDB().Reference(root, common.Hash{}) + triedb.Reference(root, common.Hash{}) if parent != (common.Hash{}) { - database.TrieDB().Dereference(parent) + triedb.Dereference(parent) } parent = root } if report { - nodes, imgs := database.TrieDB().Size() + _, nodes, imgs := triedb.Size() // all memory is contained within the nodes return in hashdb log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } - return statedb, func() { database.TrieDB().Dereference(block.Root()) }, nil + return statedb, func() { triedb.Dereference(block.Root()) }, nil +} + +func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) { + // Check if the requested state is available in the live chain. + statedb, err := eth.blockchain.StateAt(block.Root()) + if err == nil { + return statedb, noopReleaser, nil + } + // TODO historic state is not supported in path-based scheme. + // Fully archive node in pbss will be implemented by relying + // on state history, but needs more work on top. + return nil, nil, errors.New("historical state not available in path scheme yet") +} + +// stateAtBlock retrieves the state database associated with a certain block. +// If no state is locally available for the given block, a number of blocks +// are attempted to be reexecuted to generate the desired state. The optional +// base layer statedb can be provided which is regarded as the statedb of the +// parent block. +// +// An additional release function will be returned if the requested state is +// available. Release is expected to be invoked when the returned state is no +// longer needed. Its purpose is to prevent resource leaking. Though it can be +// noop in some cases. +// +// Parameters: +// - block: The block for which we want the state(state = block.Root) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent +// state continuously from the callsite. +// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should +// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. +// Otherwise, the trash generated by caller may be persisted permanently. 
+// - preferDisk: This arg can be used by the caller to signal that even though the 'base' is +// provided, it would be preferable to start from a fresh state, if we have it +// on disk. +func (eth *Ethereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { + if eth.blockchain.TrieDB().Scheme() == rawdb.HashScheme { + return eth.hashState(ctx, block, reexec, base, readOnly, preferDisk) + } + return eth.pathState(block) } // stateAtTransaction returns the execution environment of a certain transaction. @@ -201,7 +228,7 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, release, err := eth.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := eth.stateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, vm.BlockContext{}, nil, nil, err } diff --git a/eth/sync.go b/eth/sync.go index ace107114..ba7a7427a 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/log" @@ -36,27 +35,15 @@ const ( // syncTransactions starts sending all currently pending transactions to the given peer. func (h *handler) syncTransactions(p *eth.Peer) { - // Assemble the set of transaction to broadcast or announce to the remote - // peer. Fun fact, this is quite an expensive operation as it needs to sort - // the transactions if the sorting is not cached yet. However, with a random - // order, insertions could overflow the non-executable queues and get dropped. - // - // TODO(karalabe): Figure out if we could get away with random order somehow - var txs types.Transactions - pending := h.txpool.Pending(false) - for _, batch := range pending { - txs = append(txs, batch...) + var hashes []common.Hash + for _, batch := range h.txpool.Pending(false) { + for _, tx := range batch { + hashes = append(hashes, tx.Hash) + } } - if len(txs) == 0 { + if len(hashes) == 0 { return } - // The eth/65 protocol introduces proper transaction announcements, so instead - // of dripping transactions across multiple peers, just send the entire list as - // an announcement and let the remote side decide what they need (likely nothing). - hashes := make([]common.Hash, len(txs)) - for i, tx := range txs { - hashes[i] = tx.Hash() - } p.AsyncSendPooledTransactionHashes(hashes) } @@ -89,7 +76,7 @@ func newChainSyncer(handler *handler) *chainSyncer { // handlePeerEvent notifies the syncer about a change in the peer set. // This is called for new peers and every time a peer announces a new // chain head. 
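In the syncTransactions rewrite above, the handler no longer materializes full transactions: pending batches are flattened straight into a hash list for AsyncSendPooledTransactionHashes, and tx.Hash is a plain field access, which suggests Pending now yields lightweight summaries rather than *types.Transaction values. The flattening step in isolation; lazyTx here is a stand-in for the pool's real summary type, not its actual name:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// lazyTx stands in for the pool's pending-transaction summary, which
// carries a precomputed Hash field per the tx.Hash access in the diff.
type lazyTx struct {
	Hash common.Hash
}

// collectHashes flattens per-account batches into one announcement
// list, mirroring the loop in the rewritten syncTransactions.
func collectHashes(pending map[common.Address][]*lazyTx) []common.Hash {
	var hashes []common.Hash
	for _, batch := range pending {
		for _, tx := range batch {
			hashes = append(hashes, tx.Hash)
		}
	}
	return hashes
}

func main() {
	pending := map[common.Address][]*lazyTx{
		{0x01}: {{Hash: common.Hash{0xaa}}, {Hash: common.Hash{0xbb}}},
		{0x02}: {{Hash: common.Hash{0xcc}}},
	}
	fmt.Println("announcing", len(collectHashes(pending)), "hashes")
}
```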
-func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool { +func (cs *chainSyncer) handlePeerEvent() bool { select { case cs.peerEventCh <- struct{}{}: return true diff --git a/eth/tracers/api.go b/eth/tracers/api.go index e4a65e628..6c7023f1c 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -18,7 +18,6 @@ package tracers import ( "bufio" - "bytes" "context" "encoding/json" "errors" @@ -369,8 +368,8 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed // if the relevant state is available in disk. var preferDisk bool if statedb != nil { - s1, s2 := statedb.Database().TrieDB().Size() - preferDisk = s1+s2 > defaultTracechainMemLimit + s1, s2, s3 := statedb.Database().TrieDB().Size() + preferDisk = s1+s2+s3 > defaultTracechainMemLimit } statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk) if err != nil { @@ -453,7 +452,7 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config * // and returns them as a JSON object. func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) { block := new(types.Block) - if err := rlp.Decode(bytes.NewReader(blob), block); err != nil { + if err := rlp.DecodeBytes(blob, block); err != nil { return nil, fmt.Errorf("could not decode block: %v", err) } return api.traceBlock(ctx, block, config) @@ -1027,6 +1026,10 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) copy.PragueTime = timestamp canon = false } + if timestamp := override.VerkleTime; timestamp != nil { + copy.VerkleTime = timestamp + canon = false + } return copy, canon } diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index fdb02b189..0f78af9a0 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -17,7 +17,6 @@ package tracers import ( - "bytes" "context" "crypto/ecdsa" "encoding/json" @@ -25,7 +24,6 @@ import ( "fmt" "math/big" "reflect" - "sort" "sync/atomic" "testing" "time" @@ -45,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/exp/slices" ) var ( @@ -785,19 +784,13 @@ type Account struct { addr common.Address } -type Accounts []Account - -func (a Accounts) Len() int { return len(a) } -func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } - -func newAccounts(n int) (accounts Accounts) { +func newAccounts(n int) (accounts []Account) { for i := 0; i < n; i++ { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) accounts = append(accounts, Account{key: key, addr: addr}) } - sort.Sort(accounts) + slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) }) return accounts } diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index bf1c31c08..6df49a90c 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -137,8 +137,10 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { GasLimit: uint64(test.Context.GasLimit), BaseFee: test.Genesis.BaseFee, } - _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) ) + triedb.Close() 
+ tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) if err != nil { t.Fatalf("failed to create call tracer: %v", err) @@ -237,7 +239,8 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) + defer triedb.Close() b.ReportAllocs() b.ResetTimer() @@ -302,13 +305,13 @@ func TestInternals(t *testing.T) { byte(vm.CALL), }, tracer: mkTracer("callTracer", nil), - want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, }, { name: "Stack depletion in LOG0", code: []byte{byte(vm.LOG3)}, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0xc350","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, }, { name: "Mem expansion in LOG0", @@ -321,11 +324,11 @@ func TestInternals(t *testing.T) { byte(vm.LOG0), }, tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), - want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`, + want: 
`{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`, }, { // Leads to OOM on the prestate tracer - name: "Prestate-tracer - mem expansion in CREATE2", + name: "Prestate-tracer - CREATE2 OOM", code: []byte{ byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, @@ -339,41 +342,64 @@ func TestInternals(t *testing.T) { byte(vm.PUSH1), 0x0, byte(vm.LOG0), }, - tracer: mkTracer("prestateTracer", json.RawMessage(`{ "withLog": true }`)), - want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52640350"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`, + tracer: mkTracer("prestateTracer", nil), + want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`, + }, + { + // CREATE2 which requires padding memory by prestate tracer + name: "Prestate-tracer - CREATE2 Memory padding", + code: []byte{ + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.MSTORE), + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.CREATE2), + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x0, + byte(vm.LOG0), + }, + tracer: mkTracer("prestateTracer", nil), + want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0"},"0x91ff9a805d36f54e3e272e230f3e3f5c1b330804":{"balance":"0x0"}}`, }, } { - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), - core.GenesisAlloc{ - to: core.GenesisAccount{ - Code: tc.code, - }, - origin: core.GenesisAccount{ - Balance: big.NewInt(500000000000000), - }, - }, false) - evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer}) - msg := &core.Message{ - To: &to, - From: origin, - Value: big.NewInt(0), - GasLimit: 50000, - GasPrice: big.NewInt(0), - GasFeeCap: big.NewInt(0), - GasTipCap: big.NewInt(0), - SkipAccountChecks: false, - } - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)) - if _, err := st.TransitionDb(); err != nil { - t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err) - } - // Retrieve the trace result and compare against the expected - res, err := tc.tracer.GetResult() - if err != nil { - t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err) - } - if string(res) != tc.want { - t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), 
tc.want) - } + t.Run(tc.name, func(t *testing.T) { + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), + core.GenesisAlloc{ + to: core.GenesisAccount{ + Code: tc.code, + }, + origin: core.GenesisAccount{ + Balance: big.NewInt(500000000000000), + }, + }, false, rawdb.HashScheme) + defer triedb.Close() + + evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer}) + msg := &core.Message{ + To: &to, + From: origin, + Value: big.NewInt(0), + GasLimit: 80000, + GasPrice: big.NewInt(0), + GasFeeCap: big.NewInt(0), + GasTipCap: big.NewInt(0), + SkipAccountChecks: false, + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)) + if _, err := st.TransitionDb(); err != nil { + t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err) + } + // Retrieve the trace result and compare against the expected + res, err := tc.tracer.GetResult() + if err != nil { + t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err) + } + if string(res) != tc.want { + t.Errorf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want) + } + }) } } diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go index 85e95401a..423167b13 100644 --- a/eth/tracers/internal/tracetest/flat_calltrace_test.go +++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go @@ -100,7 +100,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) + defer triedb.Close() // Create the tracer, the EVM environment and run it tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go index 991da10b3..b4fa5b627 100644 --- a/eth/tracers/internal/tracetest/prestate_test.go +++ b/eth/tracers/internal/tracetest/prestate_test.go @@ -108,8 +108,10 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) { GasLimit: uint64(test.Context.GasLimit), BaseFee: test.Genesis.BaseFee, } - _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) ) + defer triedb.Close() + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) if err != nil { t.Fatalf("failed to create call tracer: %v", err) diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go index 3fa1613da..d22d14098 100644 --- a/eth/tracers/js/goja.go +++ b/eth/tracers/js/goja.go @@ -86,7 +86,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b if !obj.Get("constructor").SameAs(bufType) { break } - b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes() + b := obj.Export().([]byte) return b, nil } return nil, errors.New("invalid buffer type") @@ -236,7 +236,12 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr t.ctx["to"] = t.vm.ToValue(to.Bytes()) t.ctx["input"] = t.vm.ToValue(input) t.ctx["gas"] = t.vm.ToValue(t.gasLimit) - t.ctx["gasPrice"] = 
t.vm.ToValue(env.TxContext.GasPrice) + gasPriceBig, err := t.toBig(t.vm, env.TxContext.GasPrice.String()) + if err != nil { + t.err = err + return + } + t.ctx["gasPrice"] = gasPriceBig valueBig, err := t.toBig(t.vm, value.String()) if err != nil { t.err = err diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index c7f171c5b..4c9b910a2 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -418,6 +418,7 @@ type StructLogRes struct { Depth int `json:"depth"` Error string `json:"error,omitempty"` Stack *[]string `json:"stack,omitempty"` + ReturnData string `json:"returnData,omitempty"` Memory *[]string `json:"memory,omitempty"` Storage *map[string]string `json:"storage,omitempty"` RefundCounter uint64 `json:"refund,omitempty"` @@ -443,6 +444,9 @@ func formatLogs(logs []StructLog) []StructLogRes { } formatted[index].Stack = &stack } + if trace.ReturnData != nil && len(trace.ReturnData) > 0 { + formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String() + } if trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 4ac03e512..34cf027ac 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/log" ) //go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go @@ -183,6 +184,7 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco data, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(mStart.Uint64()), int64(mSize.Uint64())) if err != nil { // mSize was unrealistically large + log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "callTracer", "offset", mStart, "size", mSize) return } diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index 8612c2338..266ab9900 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -248,7 +248,7 @@ func flatFromNested(input *callFrame, traceAddress []int, convertErrs bool, ctx case vm.CREATE, vm.CREATE2: frame = newFlatCreate(input) case vm.SELFDESTRUCT: - frame = newFlatSuicide(input) + frame = newFlatSelfdestruct(input) case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL: frame = newFlatCall(input) default: @@ -330,7 +330,7 @@ func newFlatCall(input *callFrame) *flatCallFrame { } } -func newFlatSuicide(input *callFrame) *flatCallFrame { +func newFlatSelfdestruct(input *callFrame) *flatCallFrame { return &flatCallFrame{ Type: "suicide", Action: flatCallAction{ diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index b71d5d621..82451c40a 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/log" ) //go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go @@ -165,7 +166,11 @@ func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, case stackLen >= 4 && op == vm.CREATE2: offset := stackData[stackLen-2] size := stackData[stackLen-3] - init := 
scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + init, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(offset.Uint64()), int64(size.Uint64())) + if err != nil { + log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "prestateTracer", "offset", offset, "size", size) + return + } inithash := crypto.Keccak256(init) salt := stackData[stackLen-4] addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash) diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 759e3a4dd..b4989ec98 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -79,7 +79,9 @@ func BenchmarkTransactionTrace(b *testing.B) { Code: []byte{}, Balance: big.NewInt(500000000000000), } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false) + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme) + defer triedb.Close() + // Create the tracer, the EVM environment and run it tracer := logger.NewStructLogger(&logger.Config{ Debug: false, diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 4508027fa..a21d8ff67 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -108,6 +108,16 @@ func (ec *Client) PeerCount(ctx context.Context) (uint64, error) { return uint64(result), err } +// BlockReceipts returns the receipts of a given block number or hash +func (ec *Client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { + var r []*types.Receipt + err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash) + if err == nil && r == nil { + return nil, ethereum.NotFound + } + return r, err +} + type rpcBlock struct { Hash common.Hash `json:"hash"` Transactions []rpcTransaction `json:"transactions"` @@ -609,7 +619,7 @@ func toCallArg(msg ethereum.CallMsg) interface{} { "to": msg.To, } if len(msg.Data) > 0 { - arg["data"] = hexutil.Bytes(msg.Data) + arg["input"] = hexutil.Bytes(msg.Data) } if msg.Value != nil { arg["value"] = (*hexutil.Big)(msg.Value) diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 385676075..d2f371093 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -250,7 +250,7 @@ func generateTestChain() []*types.Block { func TestEthClient(t *testing.T) { backend, chain := newTestBackend(t) - client, _ := backend.Attach() + client := backend.Attach() defer backend.Close() defer client.Close() diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go index c02961167..e2c0ef3ed 100644 --- a/ethclient/gethclient/gethclient.go +++ b/ethclient/gethclient/gethclient.go @@ -225,7 +225,7 @@ func toCallArg(msg ethereum.CallMsg) interface{} { "to": msg.To, } if len(msg.Data) > 0 { - arg["data"] = hexutil.Bytes(msg.Data) + arg["input"] = hexutil.Bytes(msg.Data) } if msg.Value != nil { arg["value"] = (*hexutil.Big)(msg.Value) diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go index 296d32848..5a0f4d253 100644 --- a/ethclient/gethclient/gethclient_test.go +++ b/ethclient/gethclient/gethclient_test.go @@ -94,10 +94,7 @@ func generateTestChain() (*core.Genesis, []*types.Block) { func TestGethClient(t *testing.T) { backend, _ := newTestBackend(t) - client, err := backend.Attach() - if err != nil { - t.Fatal(err) - } + client := backend.Attach() defer backend.Close() defer client.Close() @@ -108,6 +105,9 @@ func TestGethClient(t *testing.T) { { "TestGetProof", func(t *testing.T) { testGetProof(t, 
client) }, + }, { + "TestGetProofCanonicalizeKeys", + func(t *testing.T) { testGetProofCanonicalizeKeys(t, client) }, }, { "TestGCStats", func(t *testing.T) { testGCStats(t, client) }, @@ -221,6 +221,7 @@ func testGetProof(t *testing.T, client *rpc.Client) { if result.Balance.Cmp(balance) != 0 { t.Fatalf("invalid balance, want: %v got: %v", balance, result.Balance) } + // test storage if len(result.StorageProof) != 1 { t.Fatalf("invalid storage proof, want 1 proof, got %v proof(s)", len(result.StorageProof)) @@ -231,7 +232,37 @@ func testGetProof(t *testing.T, client *rpc.Client) { t.Fatalf("invalid storage proof value, want: %v, got: %v", slotValue, proof.Value.Bytes()) } if proof.Key != testSlot.String() { - t.Fatalf("invalid storage proof key, want: %v, got: %v", testSlot.String(), proof.Key) + t.Fatalf("invalid storage proof key, want: %q, got: %q", testSlot.String(), proof.Key) + } +} + +func testGetProofCanonicalizeKeys(t *testing.T, client *rpc.Client) { + ec := New(client) + + // Tests with non-canon input for storage keys. + // Here we check that the storage key is canonicalized. + result, err := ec.GetProof(context.Background(), testAddr, []string{"0x0dEadbeef"}, nil) + if err != nil { + t.Fatal(err) + } + if result.StorageProof[0].Key != "0xdeadbeef" { + t.Fatalf("wrong storage key encoding in proof: %q", result.StorageProof[0].Key) + } + if result, err = ec.GetProof(context.Background(), testAddr, []string{"0x000deadbeef"}, nil); err != nil { + t.Fatal(err) + } + if result.StorageProof[0].Key != "0xdeadbeef" { + t.Fatalf("wrong storage key encoding in proof: %q", result.StorageProof[0].Key) + } + + // If the requested storage key is 32 bytes long, it will be returned as is. + hashSizedKey := "0x00000000000000000000000000000000000000000000000000000000deadbeef" + result, err = ec.GetProof(context.Background(), testAddr, []string{hashSizedKey}, nil) + if err != nil { + t.Fatal(err) + } + if result.StorageProof[0].Key != hashSizedKey { + t.Fatalf("wrong storage key encoding in proof: %q", result.StorageProof[0].Key) } } @@ -368,17 +399,17 @@ func testCallContract(t *testing.T, client *rpc.Client) { func TestOverrideAccountMarshal(t *testing.T) { om := map[common.Address]OverrideAccount{ - common.Address{0x11}: OverrideAccount{ + {0x11}: { // Zero-valued nonce is not overriddden, but simply dropped by the encoder. Nonce: 0, }, - common.Address{0xaa}: OverrideAccount{ + {0xaa}: { Nonce: 5, }, - common.Address{0xbb}: OverrideAccount{ + {0xbb}: { Code: []byte{1}, }, - common.Address{0xcc}: OverrideAccount{ + {0xcc}: { // 'code', 'balance', 'state' should be set when input is // a non-nil but empty value. Code: []byte{}, diff --git a/ethdb/database.go b/ethdb/database.go index 361218f24..4d4817daf 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -79,9 +79,10 @@ type AncientReaderOp interface { // AncientRange retrieves multiple items in sequence, starting from the index 'start'. // It will return - // - at most 'count' items, - // - at least 1 item (even if exceeding the maxBytes), but will otherwise - // return as many items as fit into maxBytes. + // - at most 'count' items, + // - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize), + // but will otherwise return as many items as fit into maxByteSize. + // - if maxBytes is not specified, 'count' items will be returned if they are present AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) // Ancients returns the ancient item numbers in the ancient store. 
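The new ethclient.BlockReceipts above wraps eth_getBlockReceipts and maps a null response to ethereum.NotFound. A usage sketch; the endpoint URL is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; point this at a real node.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// One round trip fetches every receipt in the latest block.
	receipts, err := client.BlockReceipts(context.Background(),
		rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
	if err != nil {
		log.Fatal(err) // ethereum.NotFound if the block is unknown
	}
	for _, r := range receipts {
		fmt.Println(r.TxHash, r.Status)
	}
}
```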
@@ -113,14 +114,14 @@ type AncientWriter interface { // TruncateHead discards all but the first n ancient data from the ancient store. // After the truncation, the latest item that can be accessed is item_n-1 (start from 0). - TruncateHead(n uint64) error + TruncateHead(n uint64) (uint64, error) // TruncateTail discards the first n ancient data from the ancient store. The already // deleted items are ignored. After the truncation, the earliest item that can be accessed // is item_n (start from 0). The deleted items may not be removed from the ancient store // immediately, but only once the accumulated deleted data reaches the threshold, at which point they are removed all together. - TruncateTail(n uint64) error + TruncateTail(n uint64) (uint64, error) // Sync flushes all in-memory ancient store data to disk. Sync() error diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go index d82dbd699..0d3d5f5aa 100644 --- a/ethdb/dbtest/testsuite.go +++ b/ethdb/dbtest/testsuite.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/ethdb" + "golang.org/x/exp/slices" ) // TestDatabaseSuite runs a suite of tests against a KeyValueStore database @@ -526,7 +527,7 @@ func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) { vals = append(vals, randBytes(vsize)) } if order { - sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 }) + slices.SortFunc(keys, func(a, b []byte) int { return bytes.Compare(a, b) }) } return keys, vals } diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index ce13659d9..c0e0eb250 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -22,8 +22,6 @@ package leveldb import ( "fmt" - "strconv" - "strings" "sync" "time" @@ -77,6 +75,8 @@ type Database struct { seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt manualMemAllocGauge metrics.Gauge // Gauge to track the amount of memory that has been manually allocated (not a part of runtime/GC) + levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels + quitLock sync.Mutex // Mutex protecting the quit channel access quitChan chan chan error // Quit channel to stop the metrics collection before closing the database @@ -147,7 +147,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option ldb.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil) // Start up the metrics gathering and return - go ldb.meter(metricsGatheringInterval) + go ldb.meter(metricsGatheringInterval, namespace) return ldb, nil } @@ -266,122 +266,63 @@ func (db *Database) Path() string { // meter periodically retrieves internal leveldb counters and reports them to // the metrics subsystem.
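The meter rewrite below replaces all the GetProperty string scraping (stats-table splitting plus Sscanf of the write-delay and iostats lines) with goleveldb's structured Stats call, which fills a leveldb.DBStats in one shot. The same counters read standalone; the database path is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// Placeholder path; any writable directory will do.
	db, err := leveldb.OpenFile("/tmp/example-ldb", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Stats fills the struct directly; no text parsing required.
	var stats leveldb.DBStats
	if err := db.Stats(&stats); err != nil {
		log.Fatal(err)
	}
	fmt.Println("disk size:   ", stats.LevelSizes.Sum())
	fmt.Println("comp read:   ", stats.LevelRead.Sum())
	fmt.Println("comp write:  ", stats.LevelWrite.Sum())
	fmt.Println("write delays:", stats.WriteDelayCount, stats.WriteDelayDuration)
	fmt.Println("tables/level:", stats.LevelTablesCounts)
}
```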
-// -// This is how a LevelDB stats table looks like (currently): -// -// Compactions -// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB) -// -------+------------+---------------+---------------+---------------+--------------- -// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098 -// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294 -// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884 -// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000 -// -// This is how the write delay look like (currently): -// DelayN:5 Delay:406.604657ms Paused: false -// -// This is how the iostats look like (currently): -// Read(MB):3895.04860 Write(MB):3654.64712 -func (db *Database) meter(refresh time.Duration) { +func (db *Database) meter(refresh time.Duration, namespace string) { // Create the counters to store current and previous compaction values - compactions := make([][]float64, 2) + compactions := make([][]int64, 2) for i := 0; i < 2; i++ { - compactions[i] = make([]float64, 4) + compactions[i] = make([]int64, 4) } - // Create storage for iostats. - var iostats [2]float64 - - // Create storage and warning log tracer for write delay. - var ( - delaystats [2]int64 - lastWritePaused time.Time - ) - + // Create storages for states and warning log tracer. var ( errc chan error merr error - ) + stats leveldb.DBStats + iostats [2]int64 + delaystats [2]int64 + lastWritePaused time.Time + ) timer := time.NewTimer(refresh) defer timer.Stop() // Iterate ad infinitum and collect the stats for i := 1; errc == nil && merr == nil; i++ { // Retrieve the database stats - stats, err := db.db.GetProperty("leveldb.stats") + // Stats method resets buffers inside therefore it's okay to just pass the struct. + err := db.db.Stats(&stats) if err != nil { db.log.Error("Failed to read database stats", "err", err) merr = err continue } - // Find the compaction table, skip the header - lines := strings.Split(stats, "\n") - for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" { - lines = lines[1:] - } - if len(lines) <= 3 { - db.log.Error("Compaction leveldbTable not found") - merr = errors.New("compaction leveldbTable not found") - continue - } - lines = lines[3:] - // Iterate over all the leveldbTable rows, and accumulate the entries for j := 0; j < len(compactions[i%2]); j++ { compactions[i%2][j] = 0 } - for _, line := range lines { - parts := strings.Split(line, "|") - if len(parts) != 6 { - break - } - for idx, counter := range parts[2:] { - value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64) - if err != nil { - db.log.Error("Compaction entry parsing failed", "err", err) - merr = err - continue - } - compactions[i%2][idx] += value - } + compactions[i%2][0] = stats.LevelSizes.Sum() + for _, t := range stats.LevelDurations { + compactions[i%2][1] += t.Nanoseconds() } + compactions[i%2][2] = stats.LevelRead.Sum() + compactions[i%2][3] = stats.LevelWrite.Sum() // Update all the requested meters if db.diskSizeGauge != nil { - db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024)) + db.diskSizeGauge.Update(compactions[i%2][0]) } if db.compTimeMeter != nil { - db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000)) + db.compTimeMeter.Mark(compactions[i%2][1] - compactions[(i-1)%2][1]) } if db.compReadMeter != nil { - db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024)) + db.compReadMeter.Mark(compactions[i%2][2] - compactions[(i-1)%2][2]) } if db.compWriteMeter != nil { - 
db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024)) - } - // Retrieve the write delay statistic - writedelay, err := db.db.GetProperty("leveldb.writedelay") - if err != nil { - db.log.Error("Failed to read database write delay statistic", "err", err) - merr = err - continue + db.compWriteMeter.Mark(compactions[i%2][3] - compactions[(i-1)%2][3]) } var ( - delayN int64 - delayDuration string - duration time.Duration - paused bool + delayN = int64(stats.WriteDelayCount) + duration = stats.WriteDelayDuration + paused = stats.WritePaused ) - if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil { - db.log.Error("Write delay statistic not found") - merr = err - continue - } - duration, err = time.ParseDuration(delayDuration) - if err != nil { - db.log.Error("Failed to parse delay duration", "err", err) - merr = err - continue - } if db.writeDelayNMeter != nil { db.writeDelayNMeter.Mark(delayN - delaystats[0]) } @@ -397,60 +338,30 @@ func (db *Database) meter(refresh time.Duration) { } delaystats[0], delaystats[1] = delayN, duration.Nanoseconds() - // Retrieve the database iostats. - ioStats, err := db.db.GetProperty("leveldb.iostats") - if err != nil { - db.log.Error("Failed to read database iostats", "err", err) - merr = err - continue - } - var nRead, nWrite float64 - parts := strings.Split(ioStats, " ") - if len(parts) < 2 { - db.log.Error("Bad syntax of ioStats", "ioStats", ioStats) - merr = fmt.Errorf("bad syntax of ioStats %s", ioStats) - continue - } - if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { - db.log.Error("Bad syntax of read entry", "entry", parts[0]) - merr = err - continue - } - if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { - db.log.Error("Bad syntax of write entry", "entry", parts[1]) - merr = err - continue - } + var ( + nRead = int64(stats.IORead) + nWrite = int64(stats.IOWrite) + ) if db.diskReadMeter != nil { - db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024)) + db.diskReadMeter.Mark(nRead - iostats[0]) } if db.diskWriteMeter != nil { - db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024)) + db.diskWriteMeter.Mark(nWrite - iostats[1]) } iostats[0], iostats[1] = nRead, nWrite - compCount, err := db.db.GetProperty("leveldb.compcount") - if err != nil { - db.log.Error("Failed to read database iostats", "err", err) - merr = err - continue - } + db.memCompGauge.Update(int64(stats.MemComp)) + db.level0CompGauge.Update(int64(stats.Level0Comp)) + db.nonlevel0CompGauge.Update(int64(stats.NonLevel0Comp)) + db.seekCompGauge.Update(int64(stats.SeekComp)) - var ( - memComp uint32 - level0Comp uint32 - nonLevel0Comp uint32 - seekComp uint32 - ) - if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil { - db.log.Error("Compaction count statistic not found") - merr = err - continue + for i, tables := range stats.LevelTablesCounts { + // Append metrics for additional layers + if i >= len(db.levelsGauge) { + db.levelsGauge = append(db.levelsGauge, metrics.NewRegisteredGauge(namespace+fmt.Sprintf("tables/level%v", i), nil)) + } + db.levelsGauge[i].Update(int64(tables)) } - db.memCompGauge.Update(int64(memComp)) - db.level0CompGauge.Update(int64(level0Comp)) - db.nonlevel0CompGauge.Update(int64(nonLevel0Comp)) - db.seekCompGauge.Update(int64(seekComp)) // Sleep a bit, then repeat 
the stats collection select { diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index b43529edd..12a84cc91 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -84,6 +84,8 @@ type Database struct { writeDelayStartTime time.Time // The start time of the latest write stall writeDelayCount atomic.Int64 // Total number of write stall counts writeDelayTime atomic.Int64 // Total time spent in write stalls + + writeOptions *pebble.WriteOptions } func (d *Database) onCompactionBegin(info pebble.CompactionInfo) { @@ -118,7 +120,7 @@ func (d *Database) onWriteStallEnd() { // New returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. -func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { +func New(file string, cache int, handles int, namespace string, readonly bool, ephemeral bool) (*Database, error) { // Ensure we have some minimal caching and file guarantees if cache < minCache { cache = minCache @@ -142,9 +144,10 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( memTableSize = maxMemTableSize } db := &Database{ - fn: file, - log: logger, - quitChan: make(chan chan error), + fn: file, + log: logger, + quitChan: make(chan chan error), + writeOptions: &pebble.WriteOptions{Sync: !ephemeral}, } opt := &pebble.Options{ // Pebble has a single combined cache area and the write @@ -279,7 +282,7 @@ func (d *Database) Put(key []byte, value []byte) error { if d.closed { return pebble.ErrClosed } - return d.db.Set(key, value, pebble.NoSync) + return d.db.Set(key, value, d.writeOptions) } // Delete removes the key from the key-value store. @@ -535,7 +538,7 @@ func (b *batch) Write() error { if b.db.closed { return pebble.ErrClosed } - return b.b.Commit(pebble.NoSync) + return b.b.Commit(b.db.writeOptions) } // Reset resets the batch for reuse. diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go index 9ce657d78..c1c803caf 100644 --- a/ethdb/remotedb/remotedb.go +++ b/ethdb/remotedb/remotedb.go @@ -98,11 +98,11 @@ func (db *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, e panic("not supported") } -func (db *Database) TruncateHead(n uint64) error { +func (db *Database) TruncateHead(n uint64) (uint64, error) { panic("not supported") } -func (db *Database) TruncateTail(n uint64) error { +func (db *Database) TruncateTail(n uint64) (uint64, error) { panic("not supported") } diff --git a/event/feed.go b/event/feed.go index 33dafe588..d94bd820f 100644 --- a/event/feed.go +++ b/event/feed.go @@ -57,7 +57,8 @@ func (e feedTypeError) Error() string { return "event: wrong type in " + e.op + " got " + e.got.String() + ", want " + e.want.String() } -func (f *Feed) init() { +func (f *Feed) init(etype reflect.Type) { + f.etype = etype f.removeSub = make(chan interface{}) f.sendLock = make(chan struct{}, 1) f.sendLock <- struct{}{} @@ -70,8 +71,6 @@ func (f *Feed) init() { // The channel should have ample buffer space to avoid blocking other subscribers. // Slow subscribers are not dropped. 
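The feed change below replaces the lazy typecheck helper with typed initialization: whichever of Subscribe or Send runs first pins the element type through once.Do, and any later mismatch panics with a feedTypeError. The observable behavior through the public API:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.Feed

	// First use pins the element type: this feed now carries int.
	ch := make(chan int, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	sent := feed.Send(42)
	fmt.Println("delivered to", sent, "subscriber(s):", <-ch)

	// A value of any other type now panics with a feedTypeError.
	defer func() { fmt.Println("recovered:", recover()) }()
	feed.Send("not an int")
}
```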
func (f *Feed) Subscribe(channel interface{}) Subscription { - f.once.Do(f.init) - chanval := reflect.ValueOf(channel) chantyp := chanval.Type() if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.SendDir == 0 { @@ -79,11 +78,13 @@ func (f *Feed) Subscribe(channel interface{}) Subscription { } sub := &feedSub{feed: f, channel: chanval, err: make(chan error, 1)} - f.mu.Lock() - defer f.mu.Unlock() - if !f.typecheck(chantyp.Elem()) { + f.once.Do(func() { f.init(chantyp.Elem()) }) + if f.etype != chantyp.Elem() { panic(feedTypeError{op: "Subscribe", got: chantyp, want: reflect.ChanOf(reflect.SendDir, f.etype)}) } + + f.mu.Lock() + defer f.mu.Unlock() // Add the select case to the inbox. // The next Send will add it to f.sendCases. cas := reflect.SelectCase{Dir: reflect.SelectSend, Chan: chanval} @@ -91,15 +92,6 @@ func (f *Feed) Subscribe(channel interface{}) Subscription { return sub } -// note: callers must hold f.mu -func (f *Feed) typecheck(typ reflect.Type) bool { - if f.etype == nil { - f.etype = typ - return true - } - return f.etype == typ -} - func (f *Feed) remove(sub *feedSub) { // Delete from inbox first, which covers channels // that have not been added to f.sendCases yet. @@ -128,19 +120,17 @@ func (f *Feed) Send(value interface{}) (nsent int) { rvalue := reflect.ValueOf(value) - f.once.Do(f.init) + f.once.Do(func() { f.init(rvalue.Type()) }) + if f.etype != rvalue.Type() { + panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype}) + } + <-f.sendLock // Add new cases from the inbox after taking the send lock. f.mu.Lock() f.sendCases = append(f.sendCases, f.inbox...) f.inbox = nil - - if !f.typecheck(rvalue.Type()) { - f.sendLock <- struct{}{} - f.mu.Unlock() - panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype}) - } f.mu.Unlock() // Set the sent value on all channels. diff --git a/event/multisub.go b/event/multisub.go new file mode 100644 index 000000000..5c8d2df48 --- /dev/null +++ b/event/multisub.go @@ -0,0 +1,50 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package event + +// JoinSubscriptions joins multiple subscriptions to be able to track them as +// one entity and collectively cancel them or consume any errors from them.
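Caller-side usage of the helper defined just below: two independent feeds tracked as one subscription and torn down together (the feeds and payload types are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var (
		headFeed event.Feed // illustrative stand-ins for real event sources
		txFeed   event.Feed
	)
	heads := make(chan int, 1)
	txs := make(chan string, 1)

	// Track both subscriptions as a single entity.
	sub := event.JoinSubscriptions(
		headFeed.Subscribe(heads),
		txFeed.Subscribe(txs),
	)
	defer sub.Unsubscribe() // unsubscribes every joined member

	headFeed.Send(1)
	txFeed.Send("tx")
	fmt.Println(<-heads, <-txs)
	// sub.Err() yields the first error from any member, and is closed
	// once Unsubscribe has run.
}
```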
+func JoinSubscriptions(subs ...Subscription) Subscription { + return NewSubscription(func(unsubbed <-chan struct{}) error { + // Unsubscribe all subscriptions before returning + defer func() { + for _, sub := range subs { + sub.Unsubscribe() + } + }() + // Wait for an error on any of the subscriptions and propagate up + errc := make(chan error, len(subs)) + for i := range subs { + go func(sub Subscription) { + select { + case err := <-sub.Err(): + if err != nil { + errc <- err + } + case <-unsubbed: + } + }(subs[i]) + } + + select { + case err := <-errc: + return err + case <-unsubbed: + return nil + } + }) +} diff --git a/event/multisub_test.go b/event/multisub_test.go new file mode 100644 index 000000000..c92bcfae9 --- /dev/null +++ b/event/multisub_test.go @@ -0,0 +1,175 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package event + +import ( + "testing" + "time" +) + +func TestMultisub(t *testing.T) { + // Create a double subscription and ensure events propagate through + var ( + feed1 Feed + feed2 Feed + ) + sink1 := make(chan int, 1) + sink2 := make(chan int, 1) + + sub1 := feed1.Subscribe(sink1) + sub2 := feed2.Subscribe(sink2) + + sub := JoinSubscriptions(sub1, sub2) + + feed1.Send(1) + select { + case n := <-sink1: + if n != 1 { + t.Errorf("sink 1 delivery mismatch: have %d, want %d", n, 1) + } + default: + t.Error("sink 1 missing delivery") + } + + feed2.Send(2) + select { + case n := <-sink2: + if n != 2 { + t.Errorf("sink 2 delivery mismatch: have %d, want %d", n, 2) + } + default: + t.Error("sink 2 missing delivery") + } + // Unsubscribe and ensure no more events are delivered + sub.Unsubscribe() + select { + case <-sub.Err(): + case <-time.After(50 * time.Millisecond): + t.Error("multisub didn't propagate closure") + } + + feed1.Send(11) + select { + case n := <-sink1: + t.Errorf("sink 1 unexpected delivery: %d", n) + default: + } + + feed2.Send(22) + select { + case n := <-sink2: + t.Errorf("sink 2 unexpected delivery: %d", n) + default: + } +} + +func TestMultisubPartialUnsubscribe(t *testing.T) { + // Create a double subscription but terminate one half, ensuring no error + // is propagated yet up to the outer subscription + var ( + feed1 Feed + feed2 Feed + ) + sink1 := make(chan int, 1) + sink2 := make(chan int, 1) + + sub1 := feed1.Subscribe(sink1) + sub2 := feed2.Subscribe(sink2) + + sub := JoinSubscriptions(sub1, sub2) + + sub1.Unsubscribe() + select { + case <-sub.Err(): + t.Error("multisub propagated closure") + case <-time.After(50 * time.Millisecond): + } + // Ensure that events cross only the second feed + feed1.Send(1) + select { + case n := <-sink1: + t.Errorf("sink 1 unexpected delivery: %d", n) + default: + } + + feed2.Send(2) + select { + case n := <-sink2: + if n != 2 { + t.Errorf("sink 2 delivery mismatch: have %d, want %d", n, 2) + }
+ default: + t.Error("sink 2 missing delivery") + } + // Unsubscribe and ensure no more events are delivered + sub.Unsubscribe() + select { + case <-sub.Err(): + case <-time.After(50 * time.Millisecond): + t.Error("multisub didn't propagate closure") + } + + feed1.Send(11) + select { + case n := <-sink1: + t.Errorf("sink 1 unexpected delivery: %d", n) + default: + } + + feed2.Send(22) + select { + case n := <-sink2: + t.Errorf("sink 2 unexpected delivery: %d", n) + default: + } +} + +func TestMultisubFullUnsubscribe(t *testing.T) { + // Create a double subscription and terminate the multi sub, ensuring an + // error is propagated up. + var ( + feed1 Feed + feed2 Feed + ) + sink1 := make(chan int, 1) + sink2 := make(chan int, 1) + + sub1 := feed1.Subscribe(sink1) + sub2 := feed2.Subscribe(sink2) + + sub := JoinSubscriptions(sub1, sub2) + sub.Unsubscribe() + select { + case <-sub.Err(): + case <-time.After(50 * time.Millisecond): + t.Error("multisub didn't propagate closure") + } + // Ensure no more events are delivered + feed1.Send(1) + select { + case n := <-sink1: + t.Errorf("sink 1 unexpected delivery: %d", n) + default: + } + + feed2.Send(2) + select { + case n := <-sink2: + t.Errorf("sink 2 unexpected delivery: %d", n) + default: + } +} diff --git a/go.mod b/go.mod index e258e5191..cb9d6a3bb 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,10 @@ module github.com/ethereum/go-ethereum -go 1.19 +go 1.20 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect + github.com/Microsoft/go-winio v0.6.1 github.com/VictoriaMetrics/fastcache v1.6.0 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 @@ -13,20 +13,20 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.2.0 github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 github.com/consensys/gnark-crypto v0.10.0 - github.com/crate-crypto/go-kzg-4844 v0.2.0 + github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 - github.com/docker/docker v1.6.2 - github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 - github.com/ethereum/c-kzg-4844 v0.2.0 + github.com/docker/docker v24.0.5+incompatible + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 + github.com/ethereum/c-kzg-4844 v0.3.1 github.com/fatih/color v1.7.0 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 + github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.3.0 @@ -37,15 +37,16 @@ require ( github.com/gorilla/websocket v1.4.2 github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-bexpr v0.1.10 + github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 github.com/holiman/bloomfilter/v2 v2.0.3 - github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c - github.com/huin/goupnp v1.0.3 + github.com/holiman/uint256 v1.2.3 + github.com/huin/goupnp v1.3.0 github.com/influxdata/influxdb-client-go/v2 v2.4.0 github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c 
github.com/jackpal/go-nat-pmp v1.0.2 - github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 github.com/julienschmidt/httprouter v1.3.0 - github.com/karalabe/usb v0.0.2 + github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.16 @@ -58,17 +59,18 @@ require ( github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 github.com/stretchr/testify v1.8.1 - github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b + github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.1.0 - github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa - golang.org/x/crypto v0.1.0 - golang.org/x/exp v0.0.0-20230206171751-46f607a40771 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.8.0 - golang.org/x/text v0.9.0 - golang.org/x/time v0.0.0-20220922220347-f3bd1da661af - golang.org/x/tools v0.7.0 + github.com/urfave/cli/v2 v2.24.1 + go.uber.org/automaxprocs v1.5.2 + golang.org/x/crypto v0.12.0 + golang.org/x/exp v0.0.0-20230810033253-352e893a4cad + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.11.0 + golang.org/x/text v0.12.0 + golang.org/x/time v0.3.0 + golang.org/x/tools v0.9.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/yaml.v3 v3.0.1 @@ -77,36 +79,39 @@ require ( require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect - github.com/DataDog/zstd v1.5.2 // indirect + github.com/DataDog/zstd v1.4.5 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.5.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.9.1 // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/errors v1.8.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect + github.com/cockroachdb/redact v1.0.8 // indirect + github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/deepmap/oapi-codegen v1.8.2 // indirect + github.com/deepmap/oapi-codegen v1.6.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect - github.com/getsentry/sentry-go v0.18.0 // indirect - github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect 
github.com/gogo/protobuf v1.3.2 // indirect - github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect github.com/klauspost/compress v1.15.15 // indirect - github.com/kr/pretty v0.3.1 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect @@ -114,19 +119,19 @@ require ( github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/prometheus/client_golang v1.12.0 // indirect + github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect - google.golang.org/protobuf v1.28.1 // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/net v0.10.0 // indirect + google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gotest.tools/v3 v3.5.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/graphql/graphql.go b/graphql/graphql.go index 1ac439753..6417fc9ed 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -31,7 +31,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/filters" @@ -183,6 +183,31 @@ func (at *AccessTuple) StorageKeys(ctx context.Context) []common.Hash { return at.storageKeys } +// Withdrawal represents a withdrawal of value from the beacon chain +// by a validator. For details see EIP-4895. 
+type Withdrawal struct { + index uint64 + validator uint64 + address common.Address + amount uint64 +} + +func (w *Withdrawal) Index(ctx context.Context) hexutil.Uint64 { + return hexutil.Uint64(w.index) +} + +func (w *Withdrawal) Validator(ctx context.Context) hexutil.Uint64 { + return hexutil.Uint64(w.validator) +} + +func (w *Withdrawal) Address(ctx context.Context) common.Address { + return w.address +} + +func (w *Withdrawal) Amount(ctx context.Context) hexutil.Uint64 { + return hexutil.Uint64(w.amount) +} + // Transaction represents an Ethereum transaction. // backend and hash are mandatory; all others will be fetched when required. type Transaction struct { @@ -197,11 +222,11 @@ type Transaction struct { // resolve returns the internal transaction object, fetching it if needed. // It also returns the block the tx belongs to, unless it is a pending tx. -func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, *Block, error) { +func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, *Block) { t.mu.Lock() defer t.mu.Unlock() if t.tx != nil { - return t.tx, t.block, nil + return t.tx, t.block } // Try to return an already finalized transaction tx, blockHash, _, index, err := t.r.backend.GetTransaction(ctx, t.hash) @@ -214,58 +239,58 @@ func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, *Block, hash: blockHash, } t.index = index - return t.tx, t.block, nil + return t.tx, t.block } // No finalized transaction, try to retrieve it from the pool t.tx = t.r.backend.GetPoolTransaction(t.hash) - return t.tx, nil, nil + return t.tx, nil } func (t *Transaction) Hash(ctx context.Context) common.Hash { return t.hash } -func (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return hexutil.Bytes{}, err +func (t *Transaction) InputData(ctx context.Context) hexutil.Bytes { + tx, _ := t.resolve(ctx) + if tx == nil { + return hexutil.Bytes{} } - return tx.Data(), nil + return tx.Data() } -func (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return 0, err +func (t *Transaction) Gas(ctx context.Context) hexutil.Uint64 { + tx, _ := t.resolve(ctx) + if tx == nil { + return 0 } - return hexutil.Uint64(tx.Gas()), nil + return hexutil.Uint64(tx.Gas()) } -func (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) { - tx, block, err := t.resolve(ctx) - if err != nil || tx == nil { - return hexutil.Big{}, err +func (t *Transaction) GasPrice(ctx context.Context) hexutil.Big { + tx, block := t.resolve(ctx) + if tx == nil { + return hexutil.Big{} } switch tx.Type() { case types.AccessListTxType: - return hexutil.Big(*tx.GasPrice()), nil + return hexutil.Big(*tx.GasPrice()) case types.DynamicFeeTxType: if block != nil { if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil { - // price = min(tip, gasFeeCap - baseFee) + baseFee - return (hexutil.Big)(*math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee.ToInt()), tx.GasFeeCap())), nil + // price = min(gasTipCap + baseFee, gasFeeCap) + return (hexutil.Big)(*math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee.ToInt()), tx.GasFeeCap())) } } - return hexutil.Big(*tx.GasPrice()), nil + return hexutil.Big(*tx.GasPrice()) default: - return hexutil.Big(*tx.GasPrice()), nil + return hexutil.Big(*tx.GasPrice()) } } func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, error) { - tx, block, err := t.resolve(ctx) - if 
err != nil || tx == nil { - return nil, err + tx, block := t.resolve(ctx) + if tx == nil { + return nil, nil } // Pending tx if block == nil { @@ -281,40 +306,40 @@ func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, erro return (*hexutil.Big)(math.BigMin(new(big.Int).Add(tx.GasTipCap(), header.BaseFee), tx.GasFeeCap())), nil } -func (t *Transaction) MaxFeePerGas(ctx context.Context) (*hexutil.Big, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err +func (t *Transaction) MaxFeePerGas(ctx context.Context) *hexutil.Big { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil } switch tx.Type() { case types.AccessListTxType: - return nil, nil + return nil case types.DynamicFeeTxType: - return (*hexutil.Big)(tx.GasFeeCap()), nil + return (*hexutil.Big)(tx.GasFeeCap()) default: - return nil, nil + return nil } } -func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err +func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) *hexutil.Big { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil } switch tx.Type() { case types.AccessListTxType: - return nil, nil + return nil case types.DynamicFeeTxType: - return (*hexutil.Big)(tx.GasTipCap()), nil + return (*hexutil.Big)(tx.GasTipCap()) default: - return nil, nil + return nil } } func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) { - tx, block, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err + tx, block := t.resolve(ctx) + if tx == nil { + return nil, nil } // Pending tx if block == nil { @@ -336,9 +361,9 @@ func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) { } func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return hexutil.Big{}, err + tx, _ := t.resolve(ctx) + if tx == nil { + return hexutil.Big{}, nil } if tx.Value() == nil { return hexutil.Big{}, fmt.Errorf("invalid transaction value %x", t.hash) @@ -346,34 +371,34 @@ func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) { return hexutil.Big(*tx.Value()), nil } -func (t *Transaction) Nonce(ctx context.Context) (hexutil.Uint64, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return 0, err +func (t *Transaction) Nonce(ctx context.Context) hexutil.Uint64 { + tx, _ := t.resolve(ctx) + if tx == nil { + return 0 } - return hexutil.Uint64(tx.Nonce()), nil + return hexutil.Uint64(tx.Nonce()) } -func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err +func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) *Account { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil } to := tx.To() if to == nil { - return nil, nil + return nil } return &Account{ r: t.r, address: *to, blockNrOrHash: args.NumberOrLatest(), - }, nil + } } -func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err +func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) *Account { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil } signer := types.LatestSigner(t.r.backend.ChainConfig()) from, _ := types.Sender(signer, tx) @@ -381,36 +406,27 @@ func (t *Transaction) From(ctx context.Context, args 
BlockNumberArgs) (*Account, r: t.r, address: from, blockNrOrHash: args.NumberOrLatest(), - }, nil + } } -func (t *Transaction) Block(ctx context.Context) (*Block, error) { - _, block, err := t.resolve(ctx) - if err != nil { - return nil, err - } - return block, nil +func (t *Transaction) Block(ctx context.Context) *Block { + _, block := t.resolve(ctx) + return block } -func (t *Transaction) Index(ctx context.Context) (*hexutil.Uint64, error) { - _, block, err := t.resolve(ctx) - if err != nil { - return nil, err - } +func (t *Transaction) Index(ctx context.Context) *hexutil.Uint64 { + _, block := t.resolve(ctx) // Pending tx if block == nil { - return nil, nil + return nil } index := hexutil.Uint64(t.index) - return &index, nil + return &index } // getReceipt returns the receipt associated with this transaction, if any. func (t *Transaction) getReceipt(ctx context.Context) (*types.Receipt, error) { - _, block, err := t.resolve(ctx) - if err != nil { - return nil, err - } + _, block := t.resolve(ctx) // Pending tx if block == nil { return nil, nil @@ -465,10 +481,7 @@ func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) } func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) { - _, block, err := t.resolve(ctx) - if err != nil { - return nil, err - } + _, block := t.resolve(ctx) // Pending tx if block == nil { return nil, nil @@ -504,19 +517,16 @@ func (t *Transaction) getLogs(ctx context.Context, hash common.Hash) (*[]*Log, e return &ret, nil } -func (t *Transaction) Type(ctx context.Context) (*hexutil.Uint64, error) { - tx, _, err := t.resolve(ctx) - if err != nil { - return nil, err - } +func (t *Transaction) Type(ctx context.Context) *hexutil.Uint64 { + tx, _ := t.resolve(ctx) txType := hexutil.Uint64(tx.Type()) - return &txType, nil + return &txType } -func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return nil, err +func (t *Transaction) AccessList(ctx context.Context) *[]*AccessTuple { + tx, _ := t.resolve(ctx) + if tx == nil { + return nil } accessList := tx.AccessList() ret := make([]*AccessTuple, 0, len(accessList)) @@ -526,40 +536,50 @@ func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) { storageKeys: al.StorageKeys, }) } + return &ret +} + +func (t *Transaction) R(ctx context.Context) hexutil.Big { + tx, _ := t.resolve(ctx) + if tx == nil { + return hexutil.Big{} + } + _, r, _ := tx.RawSignatureValues() + return hexutil.Big(*r) +} + +func (t *Transaction) S(ctx context.Context) hexutil.Big { + tx, _ := t.resolve(ctx) + if tx == nil { + return hexutil.Big{} + } + _, _, s := tx.RawSignatureValues() + return hexutil.Big(*s) +} + +func (t *Transaction) V(ctx context.Context) hexutil.Big { + tx, _ := t.resolve(ctx) + if tx == nil { + return hexutil.Big{} + } + v, _, _ := tx.RawSignatureValues() + return hexutil.Big(*v) +} + +func (t *Transaction) YParity(ctx context.Context) (*hexutil.Uint64, error) { + tx, _ := t.resolve(ctx) + if tx == nil || tx.Type() == types.LegacyTxType { + return nil, nil + } + v, _, _ := tx.RawSignatureValues() + ret := hexutil.Uint64(v.Int64()) return &ret, nil } -func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return hexutil.Big{}, err - } - _, r, _ := tx.RawSignatureValues() - return hexutil.Big(*r), nil -} - -func (t *Transaction) S(ctx context.Context) (hexutil.Big, error) { - tx, _, err := t.resolve(ctx) - if 
err != nil || tx == nil { - return hexutil.Big{}, err - } - _, _, s := tx.RawSignatureValues() - return hexutil.Big(*s), nil -} - -func (t *Transaction) V(ctx context.Context) (hexutil.Big, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return hexutil.Big{}, err - } - v, _, _ := tx.RawSignatureValues() - return hexutil.Big(*v), nil -} - func (t *Transaction) Raw(ctx context.Context) (hexutil.Bytes, error) { - tx, _, err := t.resolve(ctx) - if err != nil || tx == nil { - return hexutil.Bytes{}, err + tx, _ := t.resolve(ctx) + if tx == nil { + return hexutil.Bytes{}, nil } return tx.MarshalBinary() } @@ -704,7 +724,7 @@ func (b *Block) NextBaseFeePerGas(ctx context.Context) (*hexutil.Big, error) { return nil, nil } } - nextBaseFee := misc.CalcBaseFee(chaincfg, header) + nextBaseFee := eip1559.CalcBaseFee(chaincfg, header) return (*hexutil.Big)(nextBaseFee), nil } @@ -966,6 +986,39 @@ func (b *Block) OmmerAt(ctx context.Context, args struct{ Index Long }) (*Block, }, nil } +func (b *Block) WithdrawalsRoot(ctx context.Context) (*common.Hash, error) { + header, err := b.resolveHeader(ctx) + if err != nil { + return nil, err + } + // Pre-shanghai blocks + if header.WithdrawalsHash == nil { + return nil, nil + } + return header.WithdrawalsHash, nil +} + +func (b *Block) Withdrawals(ctx context.Context) (*[]*Withdrawal, error) { + block, err := b.resolve(ctx) + if err != nil || block == nil { + return nil, err + } + // Pre-shanghai blocks + if block.Header().WithdrawalsHash == nil { + return nil, nil + } + ret := make([]*Withdrawal, 0, len(block.Withdrawals())) + for _, w := range block.Withdrawals() { + ret = append(ret, &Withdrawal{ + index: w.Index, + validator: w.Validator, + address: w.Address, + amount: w.Amount, + }) + } + return &ret, nil +} + // BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside // a block. type BlockFilterCriteria struct { @@ -1087,7 +1140,7 @@ func (b *Block) Call(ctx context.Context, args struct { func (b *Block) EstimateGas(ctx context.Context, args struct { Data ethapi.TransactionArgs }) (hexutil.Uint64, error) { - return ethapi.DoEstimateGas(ctx, b.r.backend, args.Data, *b.numberOrHash, b.r.backend.RPCGasCap()) + return ethapi.DoEstimateGas(ctx, b.r.backend, args.Data, *b.numberOrHash, nil, b.r.backend.RPCGasCap()) } type Pending struct { @@ -1151,7 +1204,7 @@ func (p *Pending) EstimateGas(ctx context.Context, args struct { Data ethapi.TransactionArgs }) (hexutil.Uint64, error) { latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) - return ethapi.DoEstimateGas(ctx, p.r.backend, args.Data, latestBlockNr, p.r.backend.RPCGasCap()) + return ethapi.DoEstimateGas(ctx, p.r.backend, args.Data, latestBlockNr, nil, p.r.backend.RPCGasCap()) } // Resolver is the top-level object in the GraphQL hierarchy. 
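The NextBaseFeePerGas resolver above now calls CalcBaseFee from the relocated consensus/misc/eip1559 package. A minimal sketch of that call in isolation; the header values are illustrative, chosen so the parent block sits exactly at its gas target:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Target = GasLimit / ElasticityMultiplier (2), so a parent at exactly
	// half its limit keeps the base fee unchanged in the next block.
	parent := &types.Header{
		Number:   big.NewInt(1),
		GasLimit: 30_000_000,
		GasUsed:  15_000_000,
		BaseFee:  big.NewInt(params.InitialBaseFee),
	}
	next := eip1559.CalcBaseFee(params.AllEthashProtocolChanges, parent)
	fmt.Println(next) // 1000000000: unchanged at the gas target
}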
@@ -1164,6 +1217,9 @@ func (r *Resolver) Block(ctx context.Context, args struct { Number *Long Hash *common.Hash }) (*Block, error) { + if args.Number != nil && args.Hash != nil { + return nil, errors.New("only one of number or hash must be specified") + } var numberOrHash rpc.BlockNumberOrHash if args.Number != nil { if *args.Number < 0 { @@ -1207,7 +1263,7 @@ func (r *Resolver) Blocks(ctx context.Context, args struct { if to < from { return []*Block{}, nil } - ret := make([]*Block, 0, to-from+1) + var ret []*Block for i := from; i <= to; i++ { numberOrHash := rpc.BlockNumberOrHashWithNumber(i) block := &Block{ @@ -1225,6 +1281,9 @@ func (r *Resolver) Blocks(ctx context.Context, args struct { break } ret = append(ret, block) + if err := ctx.Err(); err != nil { + return nil, err + } } return ret, nil } @@ -1233,19 +1292,17 @@ func (r *Resolver) Pending(ctx context.Context) *Pending { return &Pending{r} } -func (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Hash }) (*Transaction, error) { +func (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Hash }) *Transaction { tx := &Transaction{ r: r, hash: args.Hash, } // Resolve the transaction; if it doesn't exist, return nil. - t, _, err := tx.resolve(ctx) - if err != nil { - return nil, err - } else if t == nil { - return nil, nil + t, _ := tx.resolve(ctx) + if t == nil { + return nil } - return tx, nil + return tx } func (r *Resolver) SendRawTransaction(ctx context.Context, args struct{ Data hexutil.Bytes }) (common.Hash, error) { diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index 0efee51a6..4bbfb7251 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -28,6 +28,8 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -67,7 +69,7 @@ func TestGraphQLBlockSerialization(t *testing.T) { GasLimit: 11500000, Difficulty: big.NewInt(1048576), } - newGQLService(t, stack, genesis, 10, func(i int, gen *core.BlockGen) {}) + newGQLService(t, stack, false, genesis, 10, func(i int, gen *core.BlockGen) {}) // start node if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) @@ -191,7 +193,7 @@ func TestGraphQLBlockSerializationEIP2718(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), } signer := types.LatestSigner(genesis.Config) - newGQLService(t, stack, genesis, 1, func(i int, gen *core.BlockGen) { + newGQLService(t, stack, false, genesis, 1, func(i int, gen *core.BlockGen) { gen.SetCoinbase(common.Address{1}) tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ Nonce: uint64(0), @@ -292,7 +294,7 @@ func TestGraphQLConcurrentResolvers(t *testing.T) { defer stack.Close() var tx *types.Transaction - handler, chain := newGQLService(t, stack, genesis, 1, func(i int, gen *core.BlockGen) { + handler, chain := newGQLService(t, stack, false, genesis, 1, func(i int, gen *core.BlockGen) { tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) gen.AddTx(tx) tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Nonce: 1, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) @@ -360,6 +362,66 @@ func TestGraphQLConcurrentResolvers(t *testing.T) { } } +func TestWithdrawals(t *testing.T) { + var ( + key, _ = crypto.GenerateKey() + addr = 
crypto.PubkeyToAddress(key.PublicKey) + + genesis = &core.Genesis{ + Config: params.AllEthashProtocolChanges, + GasLimit: 11500000, + Difficulty: common.Big1, + Alloc: core.GenesisAlloc{ + addr: {Balance: big.NewInt(params.Ether)}, + }, + } + signer = types.LatestSigner(genesis.Config) + stack = createNode(t) + ) + defer stack.Close() + + handler, _ := newGQLService(t, stack, true, genesis, 1, func(i int, gen *core.BlockGen) { + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{To: &common.Address{}, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) + gen.AddTx(tx) + gen.AddWithdrawal(&types.Withdrawal{ + Validator: 5, + Address: common.Address{}, + Amount: 10, + }) + }) + // start node + if err := stack.Start(); err != nil { + t.Fatalf("could not start node: %v", err) + } + + for i, tt := range []struct { + body string + want string + }{ + // Genesis block has no withdrawals. + { + body: "{block(number: 0) { withdrawalsRoot withdrawals { index } } }", + want: `{"block":{"withdrawalsRoot":null,"withdrawals":null}}`, + }, + { + body: "{block(number: 1) { withdrawalsRoot withdrawals { validator amount } } }", + want: `{"block":{"withdrawalsRoot":"0x8418fc1a48818928f6692f148e9b10e99a88edc093b095cb8ca97950284b553d","withdrawals":[{"validator":"0x5","amount":"0xa"}]}}`, + }, + } { + res := handler.Schema.Exec(context.Background(), tt.body, "", map[string]interface{}{}) + if res.Errors != nil { + t.Fatalf("failed to execute query for testcase #%d: %v", i, res.Errors) + } + have, err := json.Marshal(res.Data) + if err != nil { + t.Fatalf("failed to encode graphql response for testcase #%d: %s", i, err) + } + if string(have) != tt.want { + t.Errorf("response unmatch for testcase #%d.\nhave:\n%s\nwant:\n%s", i, have, tt.want) + } + } +} + func createNode(t *testing.T) *node.Node { stack, err := node.New(&node.Config{ HTTPHost: "127.0.0.1", @@ -374,16 +436,25 @@ func createNode(t *testing.T) *node.Node { return stack } -func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) (*handler, []*types.Block) { +func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) (*handler, []*types.Block) { ethConf := ðconfig.Config{ - Genesis: gspec, - NetworkId: 1337, - TrieCleanCache: 5, - TrieCleanCacheJournal: "triecache", - TrieCleanCacheRejournal: 60 * time.Minute, - TrieDirtyCache: 5, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 5, + Genesis: gspec, + NetworkId: 1337, + TrieCleanCache: 5, + TrieDirtyCache: 5, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 5, + } + var engine consensus.Engine = ethash.NewFaker() + if shanghai { + engine = beacon.NewFaker() + chainCfg := gspec.Config + chainCfg.TerminalTotalDifficultyPassed = true + chainCfg.TerminalTotalDifficulty = common.Big0 + // GenerateChain will increment timestamps by 10. + // Shanghai upgrade at block 1. 
+ shanghaiTime := uint64(5) + chainCfg.ShanghaiTime = &shanghaiTime } ethBackend, err := eth.New(stack, ethConf) if err != nil { @@ -391,7 +462,7 @@ func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlock } // Create some blocks and import them chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(), - ethash.NewFaker(), ethBackend.ChainDb(), genBlocks, genfunc) + engine, ethBackend.ChainDb(), genBlocks, genfunc) _, err = ethBackend.BlockChain().InsertChain(chain) if err != nil { t.Fatalf("could not create import blocks: %v", err) diff --git a/graphql/schema.go b/graphql/schema.go index e4ce7b9af..5de5bad30 100644 --- a/graphql/schema.go +++ b/graphql/schema.go @@ -77,6 +77,18 @@ const schema string = ` storageKeys : [Bytes32!]! } + # EIP-4895 + type Withdrawal { + # Index is a monotonically increasing identifier issued by the consensus layer. + index: Long! + # Validator is the index of the validator associated with the withdrawal. + validator: Long! + # Recipient address of the withdrawn amount. + address: Address! + # Amount is the withdrawal value in Gwei. + amount: Long! + } + # Transaction is an Ethereum transaction. type Transaction { # Hash is the hash of this transaction. @@ -139,6 +151,7 @@ const schema string = ` r: BigInt! s: BigInt! v: BigInt! + yParity: Long # Envelope transaction support type: Long accessList: [AccessTuple!] @@ -248,6 +261,12 @@ const schema string = ` rawHeader: Bytes! # Raw is the RLP encoding of the block. raw: Bytes! + # WithdrawalsRoot is the withdrawals trie root in this block. + # If withdrawals are unavailable for this block, this field will be null. + withdrawalsRoot: Bytes32 + # Withdrawals is a list of withdrawals associated with this block. If + # withdrawals are unavailable for this block, this field will be null. + withdrawals: [Withdrawal!] } # CallData represents the data associated with a local contract call. diff --git a/graphql/service.go b/graphql/service.go index 4ca427658..f33e76305 100644 --- a/graphql/service.go +++ b/graphql/service.go @@ -88,7 +88,9 @@ func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } response := h.Schema.Exec(ctx, params.Query, params.OperationName, params.Variables) - timer.Stop() + if timer != nil { + timer.Stop() + } responded.Do(func() { responseJSON, err := json.Marshal(response) if err != nil { diff --git a/internal/blocktest/test_hash.go b/internal/blocktest/test_hash.go new file mode 100644 index 000000000..4d2b077e8 --- /dev/null +++ b/internal/blocktest/test_hash.go @@ -0,0 +1,59 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +// Package blocktest provides helpers for testing block-related code. +// +// This package exists because the canonical hasher for transaction and +// receipt lists lives in the trie package; the stand-in hasher here lets +// tests compute those hashes without creating an import cycle. + +package blocktest + +import ( + "hash" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/crypto/sha3" +) + +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher lives in the trie package; to get rid of the import +// cycle, tests use this hasher instead. +type testHasher struct { + hasher hash.Hash +} + +// NewHasher returns a new testHasher instance. +func NewHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +// Reset resets the hash state. +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +// Update updates the hash state with the given key and value. +func (h *testHasher) Update(key, val []byte) error { + h.hasher.Write(key) + h.hasher.Write(val) + return nil +} + +// Hash returns the hash value. +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} diff --git a/internal/build/env.go b/internal/build/env.go index d70c0d50a..0854fba24 100644 --- a/internal/build/env.go +++ b/internal/build/env.go @@ -28,12 +28,13 @@ import ( var ( // These flags override values in build env. - GitCommitFlag = flag.String("git-commit", "", `Overrides git commit hash embedded into executables`) - GitBranchFlag = flag.String("git-branch", "", `Overrides git branch being built`) - GitTagFlag = flag.String("git-tag", "", `Overrides git tag being built`) - BuildnumFlag = flag.String("buildnum", "", `Overrides CI build number`) - PullRequestFlag = flag.Bool("pull-request", false, `Overrides pull request status of the build`) - CronJobFlag = flag.Bool("cron-job", false, `Overrides cron job status of the build`) + GitCommitFlag = flag.String("git-commit", "", `Overrides git commit hash embedded into executables`) + GitBranchFlag = flag.String("git-branch", "", `Overrides git branch being built`) + GitTagFlag = flag.String("git-tag", "", `Overrides git tag being built`) + BuildnumFlag = flag.String("buildnum", "", `Overrides CI build number`) + PullRequestFlag = flag.Bool("pull-request", false, `Overrides pull request status of the build`) + CronJobFlag = flag.Bool("cron-job", false, `Overrides cron job status of the build`) + UbuntuVersionFlag = flag.String("ubuntu", "", `Sets the ubuntu version being built for`) ) // Environment contains metadata provided by the build environment. @@ -43,6 +44,7 @@ type Environment struct { Repo string // name of GitHub repo Commit, Date, Branch, Tag string // Git info Buildnum string + UbuntuVersion string // Ubuntu version being built for IsPullRequest bool IsCronJob bool } @@ -168,5 +170,8 @@ func applyEnvFlags(env Environment) Environment { if *CronJobFlag { env.IsCronJob = true } + if *UbuntuVersionFlag != "" { + env.UbuntuVersion = *UbuntuVersionFlag + } return env } diff --git a/internal/build/gotool.go b/internal/build/gotool.go index b5452f80c..296ba8c36 100644 --- a/internal/build/gotool.go +++ b/internal/build/gotool.go @@ -54,8 +54,8 @@ func (g *GoToolchain) Go(command string, args ...string) *exec.Cmd { tool.Env = append(tool.Env, "CC="+os.Getenv("CC")) } // CKZG by default is not portable, append the necessary build flags to make - // it not rely on modern CPU instructions and enable linking against - tool.Env = append(tool.Env, "CGO_CFLAGS=-D__BLST_PORTABLE__") + // it not rely on modern CPU instructions and enable linking against.
+ tool.Env = append(tool.Env, "CGO_CFLAGS=-O2 -g -D__BLST_PORTABLE__") return tool } diff --git a/internal/build/util.go b/internal/build/util.go index 3a0b384e5..5c77b236d 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -197,6 +197,9 @@ func FindMainPackages(dir string) []string { } for _, cmd := range cmds { pkgdir := filepath.Join(dir, cmd.Name()) + if !cmd.IsDir() { + continue + } pkgs, err := parser.ParseDir(token.NewFileSet(), pkgdir, nil, parser.PackageClauseOnly) if err != nil { log.Fatal(err) diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go index df5107952..43137053c 100644 --- a/internal/cmdtest/test_cmd.go +++ b/internal/cmdtest/test_cmd.go @@ -55,13 +55,13 @@ type TestCmd struct { Err error } -var id int32 +var id atomic.Int32 // Run exec's the current binary using name as argv[0] which will trigger the // reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) func (tt *TestCmd) Run(name string, args ...string) { - id := atomic.AddInt32(&id, 1) - tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} + id.Add(1) + tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id.Load())} tt.cmd = &exec.Cmd{ Path: reexec.Self(), Args: append([]string{name}, args...), diff --git a/internal/debug/flags.go b/internal/debug/flags.go index d425a17db..52a634245 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -19,6 +19,7 @@ package debug import ( "fmt" "io" + "net" "net/http" _ "net/http/pprof" "os" @@ -308,7 +309,7 @@ func Setup(ctx *cli.Context) error { port := ctx.Int(pprofPortFlag.Name) - address := fmt.Sprintf("%s:%d", listenHost, port) + address := net.JoinHostPort(listenHost, fmt.Sprintf("%d", port)) // This context value ("metrics.addr") represents the utils.MetricsHTTPFlag.Name. // It cannot be imported because it will cause a cyclical dependency. StartPProf(address, !ctx.IsSet("metrics.addr")) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 69b5bf130..c37e716de 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -34,7 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -46,6 +46,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" "github.com/tyler-smith/go-bip39" ) @@ -655,54 +656,93 @@ type StorageResult struct { Proof []string `json:"proof"` } +// proofList implements ethdb.KeyValueWriter and collects the proofs as +// hex-strings for delivery to rpc-caller. +type proofList []string + +func (n *proofList) Put(key []byte, value []byte) error { + *n = append(*n, hexutil.Encode(value)) + return nil +} + +func (n *proofList) Delete(key []byte) error { + panic("not supported") +} + // GetProof returns the Merkle-proof for a given account and optionally some storage keys. 
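For context on the GetProof rewrite that follows: trie proofs are written through the narrow ethdb.KeyValueWriter interface, so Prove emits each trie node along the path as a Put, and the proofList above simply records the values in order. A self-contained sketch of that collector pattern (hexCollector and the fake node values are illustrative, not from the patch):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// hexCollector mimics proofList: every Put appends the hex-encoded value,
// so whatever walks the trie ends up serialized as an ordered []string.
type hexCollector []string

func (c *hexCollector) Put(key, value []byte) error {
	*c = append(*c, hexutil.Encode(value))
	return nil
}

func (c *hexCollector) Delete(key []byte) error {
	panic("not supported") // proofs are append-only
}

func main() {
	var proof hexCollector
	// Stand-ins for trie nodes emitted root-to-leaf by Trie.Prove.
	proof.Put([]byte("node-0"), []byte{0xde, 0xad})
	proof.Put([]byte("node-1"), []byte{0xbe, 0xef})
	fmt.Println(proof) // [0xdead 0xbeef]
}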
func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) { - state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) - if state == nil || err != nil { - return nil, err - } - storageTrie, err := state.StorageTrie(address) - if err != nil { - return nil, err - } - storageHash := types.EmptyRootHash - codeHash := state.GetCodeHash(address) - storageProof := make([]StorageResult, len(storageKeys)) + var ( + keys = make([]common.Hash, len(storageKeys)) + keyLengths = make([]int, len(storageKeys)) + storageProof = make([]StorageResult, len(storageKeys)) - // if we have a storageTrie, (which means the account exists), we can update the storagehash - if storageTrie != nil { - storageHash = storageTrie.Hash() - } else { - // no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray. - codeHash = crypto.Keccak256Hash(nil) - } - - // create the proof for the storageKeys + storageTrie state.Trie + storageHash = types.EmptyRootHash + codeHash = types.EmptyCodeHash + ) + // Deserialize all keys. This prevents state access on invalid input. for i, hexKey := range storageKeys { - key, err := decodeHash(hexKey) + var err error + keys[i], keyLengths[i], err = decodeHash(hexKey) if err != nil { return nil, err } - if storageTrie != nil { - proof, storageError := state.GetStorageProof(address, key) - if storageError != nil { - return nil, storageError - } - storageProof[i] = StorageResult{hexKey, (*hexutil.Big)(state.GetState(address, key).Big()), toHexSlice(proof)} - } else { - storageProof[i] = StorageResult{hexKey, &hexutil.Big{}, []string{}} + } + state, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) + if state == nil || err != nil { + return nil, err + } + if storageRoot := state.GetStorageRoot(address); storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) { + id := trie.StorageTrieID(header.Root, crypto.Keccak256Hash(address.Bytes()), storageRoot) + tr, err := trie.NewStateTrie(id, state.Database().TrieDB()) + if err != nil { + return nil, err } + storageTrie = tr + } + // If we have a storageTrie, the account exists and we must update + // the storage root hash and the code hash. + if storageTrie != nil { + storageHash = storageTrie.Hash() + codeHash = state.GetCodeHash(address) + } + // Create the proofs for the storageKeys. + for i, key := range keys { + // Output key encoding is a bit special: if the input was a 32-byte hash, it is + // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the + // JSON-RPC spec for getProof. This behavior exists to preserve backwards + // compatibility with older client versions. + var outputKey string + if keyLengths[i] != 32 { + outputKey = hexutil.EncodeBig(key.Big()) + } else { + outputKey = hexutil.Encode(key[:]) + } + + if storageTrie == nil { + storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}} + continue + } + var proof proofList + if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil { + return nil, err + } + value := (*hexutil.Big)(state.GetState(address, key).Big()) + storageProof[i] = StorageResult{outputKey, value, proof} } - // create the accountProof - accountProof, proofErr := state.GetProof(address) - if proofErr != nil { - return nil, proofErr + // Create the accountProof. 
+ tr, err := trie.NewStateTrie(trie.StateTrieID(header.Root), state.Database().TrieDB()) + if err != nil { + return nil, err + } + var accountProof proofList + if err := tr.Prove(crypto.Keccak256(address.Bytes()), &accountProof); err != nil { + return nil, err } - return &AccountResult{ Address: address, - AccountProof: toHexSlice(accountProof), + AccountProof: accountProof, Balance: (*hexutil.Big)(state.GetBalance(address)), CodeHash: codeHash, Nonce: hexutil.Uint64(state.GetNonce(address)), @@ -713,7 +753,7 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st // decodeHash parses a hex-encoded 32-byte hash. The input may optionally // be prefixed by 0x and can have a byte length up to 32. -func decodeHash(s string) (common.Hash, error) { +func decodeHash(s string) (h common.Hash, inputLength int, err error) { if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { s = s[2:] } @@ -722,17 +762,19 @@ func decodeHash(s string) (common.Hash, error) { } b, err := hex.DecodeString(s) if err != nil { - return common.Hash{}, errors.New("hex string invalid") + return common.Hash{}, 0, errors.New("hex string invalid") } if len(b) > 32 { - return common.Hash{}, errors.New("hex string too long, want at most 32 bytes") + return common.Hash{}, len(b), errors.New("hex string too long, want at most 32 bytes") } - return common.BytesToHash(b), nil + return common.BytesToHash(b), len(b), nil } // GetHeaderByNumber returns the requested canonical block header. -// * When blockNr is -1 the chain head is returned. -// * When blockNr is -2 the pending chain head is returned. +// - When blockNr is -1 the chain pending header is returned. +// - When blockNr is -2 the chain latest header is returned. +// - When blockNr is -3 the chain finalized header is returned. +// - When blockNr is -4 the chain safe header is returned. func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { header, err := s.b.HeaderByNumber(ctx, number) if header != nil && err == nil { @@ -758,8 +800,10 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m } // GetBlockByNumber returns the requested canonical block. -// - When blockNr is -1 the chain head is returned. -// - When blockNr is -2 the pending chain head is returned. +// - When blockNr is -1 the chain pending block is returned. +// - When blockNr is -2 the chain latest block is returned. +// - When blockNr is -3 the chain finalized block is returned. +// - When blockNr is -4 the chain safe block is returned. // - When fullTx is true all transactions in the block are returned, otherwise // only the transaction hash is returned. func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { @@ -853,7 +897,7 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address if state == nil || err != nil { return nil, err } - key, err := decodeHash(hexKey) + key, _, err := decodeHash(hexKey) if err != nil { return nil, fmt.Errorf("unable to decode storage key: %s", err) } @@ -861,6 +905,34 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address return res[:], state.Error() } +// GetBlockReceipts returns the block receipts for the given block hash or number or tag. 
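The new decodeHash return value feeds the response-key rule described above: storage keys shorter than 32 bytes are echoed back QUANTITY-encoded, while full 32-byte hashes round-trip unchanged. A small sketch of just that branch (outputKey is an illustrative helper, not a function in the patch):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// outputKey reproduces the getProof response-key rule: inputs shorter than
// 32 bytes use the JSON-RPC QUANTITY encoding, exact 32-byte hashes are
// returned verbatim.
func outputKey(key common.Hash, inputLength int) string {
	if inputLength != 32 {
		return hexutil.EncodeBig(key.Big())
	}
	return hexutil.Encode(key[:])
}

func main() {
	key := common.HexToHash("0x1")
	fmt.Println(outputKey(key, 1))  // 0x1 (QUANTITY form, no zero padding)
	fmt.Println(outputKey(key, 32)) // 0x00...01 (full 32-byte hash)
}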
+func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) { + block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if block == nil || err != nil { + // When the block doesn't exist, the RPC method should return JSON null + // as per specification. + return nil, nil + } + receipts, err := s.b.GetReceipts(ctx, block.Hash()) + if err != nil { + return nil, err + } + txs := block.Transactions() + if len(txs) != len(receipts) { + return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts)) + } + + // Derive the sender. + signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time()) + + result := make([]map[string]interface{}, len(receipts)) + for i, receipt := range receipts { + result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) + } + + return result, nil +} + // OverrideAccount indicates the overriding fields of account during the execution // of a message call. // Note, state and stateDiff can't be specified at the same time. If state is @@ -988,13 +1060,7 @@ func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.H return header } -func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { - defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) - - state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) - if state == nil || err != nil { - return nil, err - } +func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { if err := overrides.Apply(state); err != nil { return nil, err } @@ -1045,6 +1111,17 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash return result, nil } +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { + defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) + + state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) + if state == nil || err != nil { + return nil, err + } + + return doCall(ctx, b, args, state, header, overrides, blockOverrides, timeout, globalGasCap) +} + func newRevertError(result *core.ExecutionResult) *revertError { reason, errUnpack := abi.UnpackRevert(result.Revert()) err := errors.New("execution reverted") @@ -1093,12 +1170,30 @@ func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrO return result.Return(), result.Err } -func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) { - // Binary search the gas requirement, as it may be higher than the amount used +// executeEstimate is a helper that executes the transaction under a given gas limit and returns +// true if the transaction fails for a reason that might be related to not enough gas. A non-nil +// error means execution failed due to reasons unrelated to the gas limit. 
+func executeEstimate(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, gasCap uint64, gasLimit uint64) (bool, *core.ExecutionResult, error) {
+	args.Gas = (*hexutil.Uint64)(&gasLimit)
+	result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap)
+	if err != nil {
+		if errors.Is(err, core.ErrIntrinsicGas) {
+			return true, nil, nil // Special case, raise gas limit
+		}
+		return true, nil, err // Bail out
+	}
+	return result.Failed(), result, nil
+}
+
+// DoEstimateGas returns the lowest possible gas limit that allows the transaction to run
+// successfully at block `blockNrOrHash`. It returns an error if the transaction would revert, or
+// if there are unexpected failures. The gas limit is capped by both `args.Gas` (if non-nil &
+// non-zero) and `gasCap` (if non-zero).
+func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) {
+	// Binary search the gas limit, as it may need to be higher than the amount used
 	var (
-		lo  uint64 = params.TxGas - 1
-		hi  uint64
-		cap uint64
+		lo uint64 // lowest-known gas limit where tx execution fails
+		hi uint64 // lowest-known gas limit where tx execution succeeds
 	)
 	// Use zero address if sender unspecified.
 	if args.From == nil {
@@ -1129,12 +1224,17 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
 	} else {
 		feeCap = common.Big0
 	}
+
+	state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+	if state == nil || err != nil {
+		return 0, err
+	}
+	if err := overrides.Apply(state); err != nil {
+		return 0, err
+	}
+
 	// Recap the highest gas limit with account's available balance.
 	if feeCap.BitLen() != 0 {
-		state, _, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
-		if err != nil {
-			return 0, err
-		}
 		balance := state.GetBalance(*args.From) // from can't be nil
 		available := new(big.Int).Set(balance)
 		if args.Value != nil {
@@ -1161,30 +1261,42 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
 		log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
 		hi = gasCap
 	}
-	cap = hi
 
-	// Create a helper to check if a gas allowance results in an executable transaction
-	executable := func(gas uint64) (bool, *core.ExecutionResult, error) {
-		args.Gas = (*hexutil.Uint64)(&gas)
-
-		result, err := DoCall(ctx, b, args, blockNrOrHash, nil, nil, 0, gasCap)
-		if err != nil {
-			if errors.Is(err, core.ErrIntrinsicGas) {
-				return true, nil, nil // Special case, raise gas limit
-			}
-			return true, nil, err // Bail out
-		}
-		return result.Failed(), result, nil
+	// We first execute the transaction at the highest allowable gas limit, since if this fails we
+	// can return the error immediately.
+	failed, result, err := executeEstimate(ctx, b, args, state.Copy(), header, gasCap, hi)
+	if err != nil {
+		return 0, err
 	}
-	// Execute the binary search and hone in on an executable gas limit
+	if failed {
+		if result != nil && result.Err != vm.ErrOutOfGas {
+			if len(result.Revert()) > 0 {
+				return 0, newRevertError(result)
+			}
+			return 0, result.Err
+		}
+		return 0, fmt.Errorf("gas required exceeds allowance (%d)", hi)
+	}
+	// For almost any transaction, the gas consumed by the unconstrained execution above
+	// lower-bounds the gas limit required for it to succeed. One exception is those txs that
+	// explicitly check gas remaining in order to successfully execute within a given limit, but we
+	// probably don't want to return a lowest possible gas limit for these cases anyway.
+	lo = result.UsedGas - 1
+
+	// Binary search for the smallest gas limit that allows the tx to execute successfully.
 	for lo+1 < hi {
 		mid := (hi + lo) / 2
-		failed, _, err := executable(mid)
-
-		// If the error is not nil(consensus error), it means the provided message
-		// call or transaction will never be accepted no matter how much gas it is
-		// assigned. Return the error directly, don't struggle any more.
+		if mid > lo*2 {
+			// Most txs don't need a gas limit much higher than their gas used, and most txs don't
+			// require near the full block limit of gas, so the selection of where to bisect the
+			// range here is skewed to favor the low side.
+			mid = lo * 2
+		}
+		failed, _, err = executeEstimate(ctx, b, args, state.Copy(), header, gasCap, mid)
 		if err != nil {
+			// This should not happen under normal conditions since if we make it this far the
+			// transaction has already run without error at least once before.
+			log.Error("execution error in estimate gas", "err", err)
 			return 0, err
 		}
 		if failed {
@@ -1193,34 +1305,20 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
 			hi = mid
 		}
 	}
-	// Reject the transaction as invalid if it still fails at the highest allowance
-	if hi == cap {
-		failed, result, err := executable(hi)
-		if err != nil {
-			return 0, err
-		}
-		if failed {
-			if result != nil && result.Err != vm.ErrOutOfGas {
-				if len(result.Revert()) > 0 {
-					return 0, newRevertError(result)
-				}
-				return 0, result.Err
-			}
-			// Otherwise, the specified gas cap is too low
-			return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap)
-		}
-	}
 	return hexutil.Uint64(hi), nil
 }
 
-// EstimateGas returns an estimate of the amount of gas needed to execute the
-// given transaction against the current pending block.
-func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) {
+// EstimateGas returns the lowest possible gas limit that allows the transaction to run
+// successfully at block `blockNrOrHash`, or the latest block if `blockNrOrHash` is unspecified. It
+// returns an error if the transaction would revert or if there are unexpected failures. The
+// returned value is capped by both `args.Gas` (if non-nil & non-zero) and the backend's RPCGasCap
+// configuration (if non-zero).
+func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) {
 	bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
 	if blockNrOrHash != nil {
 		bNrOrHash = *blockNrOrHash
 	}
-	return DoEstimateGas(ctx, s.b, args, bNrOrHash, s.b.RPCGasCap())
+	return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCGasCap())
 }
 
 // RPCMarshalHeader converts the given header to the RPC output .
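
The search strategy introduced above (one unconstrained run whose gas used lower-bounds the answer, then a bisection whose probe is capped at twice the known-failing bound) can be illustrated in isolation. The following sketch is not geth code: `execute`, `usedGas`, and the numbers in main are hypothetical stand-ins for executeEstimate, result.UsedGas, and real block limits, and it assumes `execute` is monotonic in gas (once it succeeds, more gas also succeeds).

package main

import "fmt"

// estimate returns the smallest gas limit for which execute succeeds.
func estimate(execute func(gas uint64) bool, usedGas, hi uint64) (uint64, error) {
	// One run at the highest allowable limit: if even that fails, no limit helps.
	if !execute(hi) {
		return 0, fmt.Errorf("gas required exceeds allowance (%d)", hi)
	}
	// The gas consumed by the unconstrained run lower-bounds the answer, so the
	// largest known-failing limit starts just below it.
	lo := usedGas - 1

	// Bisect, but never probe above 2*lo: most transactions need only slightly
	// more than their gas used, so skewing the probe low converges in a handful
	// of steps instead of halving down from a whole block's gas limit.
	for lo+1 < hi {
		mid := (hi + lo) / 2
		if mid > lo*2 {
			mid = lo * 2
		}
		if execute(mid) {
			hi = mid
		} else {
			lo = mid
		}
	}
	return hi, nil
}

func main() {
	need := uint64(21432) // toy transaction that needs exactly this much gas
	gas, err := estimate(func(gas uint64) bool { return gas >= need }, need, 30_000_000)
	fmt.Println(gas, err) // prints: 21432 <nil>
}

With the 2*lo cap, a transfer that used roughly 21k gas is resolved with probes near 21k rather than repeated halvings from a 30M allowance, which is the rationale stated in the bisection comment above.
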
@@ -1237,29 +1335,34 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { "miner": head.Coinbase, "difficulty": (*hexutil.Big)(head.Difficulty), "extraData": hexutil.Bytes(head.Extra), - "size": hexutil.Uint64(head.Size()), "gasLimit": hexutil.Uint64(head.GasLimit), "gasUsed": hexutil.Uint64(head.GasUsed), "timestamp": hexutil.Uint64(head.Time), "transactionsRoot": head.TxHash, "receiptsRoot": head.ReceiptHash, } - if head.BaseFee != nil { result["baseFeePerGas"] = (*hexutil.Big)(head.BaseFee) } - if head.WithdrawalsHash != nil { result["withdrawalsRoot"] = head.WithdrawalsHash } - + if head.BlobGasUsed != nil { + result["blobGasUsed"] = hexutil.Uint64(*head.BlobGasUsed) + } + if head.ExcessBlobGas != nil { + result["excessBlobGas"] = hexutil.Uint64(*head.ExcessBlobGas) + } + if head.ParentBeaconRoot != nil { + result["parentBeaconBlockRoot"] = head.ParentBeaconRoot + } return result } // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. -func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) (map[string]interface{}, error) { +func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) @@ -1288,7 +1391,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param if block.Header().WithdrawalsHash != nil { fields["withdrawals"] = block.Withdrawals() } - return fields, nil + return fields } // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires @@ -1302,37 +1405,37 @@ func (s *BlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Head // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires // a `BlockchainAPI`. 
func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) - if err != nil { - return nil, err - } + fields := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) if inclTx { fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(ctx, b.Hash())) } - return fields, err + return fields, nil } // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction type RPCTransaction struct { - BlockHash *common.Hash `json:"blockHash"` - BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` - GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` - Hash common.Hash `json:"hash"` - Input hexutil.Bytes `json:"input"` - Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` - Value *hexutil.Big `json:"value"` - Type hexutil.Uint64 `json:"type"` - Accesses *types.AccessList `json:"accessList,omitempty"` - ChainID *hexutil.Big `json:"chainId,omitempty"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` + BlockHash *common.Hash `json:"blockHash"` + BlockNumber *hexutil.Big `json:"blockNumber"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` + GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` + MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"` + Hash common.Hash `json:"hash"` + Input hexutil.Bytes `json:"input"` + Nonce hexutil.Uint64 `json:"nonce"` + To *common.Address `json:"to"` + TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` + Value *hexutil.Big `json:"value"` + Type hexutil.Uint64 `json:"type"` + Accesses *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` + BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` + YParity *hexutil.Uint64 `json:"yParity,omitempty"` } // newRPCTransaction returns a transaction that will serialize to the RPC @@ -1360,34 +1463,69 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) result.TransactionIndex = (*hexutil.Uint64)(&index) } + switch tx.Type() { case types.LegacyTxType: // if a legacy transaction has an EIP-155 chain id, include it explicitly if id := tx.ChainId(); id.Sign() != 0 { result.ChainID = (*hexutil.Big)(id) } + case types.AccessListTxType: al := tx.AccessList() + yparity := hexutil.Uint64(v.Sign()) result.Accesses = &al result.ChainID = (*hexutil.Big)(tx.ChainId()) + result.YParity = &yparity + case types.DynamicFeeTxType: al := tx.AccessList() + yparity := hexutil.Uint64(v.Sign()) result.Accesses = &al result.ChainID = (*hexutil.Big)(tx.ChainId()) + result.YParity = &yparity result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap()) result.GasTipCap = (*hexutil.Big)(tx.GasTipCap()) // if the transaction has been mined, compute the effective gas price if baseFee != nil && blockHash != (common.Hash{}) { - // price = min(tip, gasFeeCap - baseFee) + baseFee - price := 
math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
-			result.GasPrice = (*hexutil.Big)(price)
+			// price = min(gasTipCap + baseFee, gasFeeCap)
+			result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
 		} else {
 			result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
 		}
+
+	case types.BlobTxType:
+		al := tx.AccessList()
+		yparity := hexutil.Uint64(v.Sign())
+		result.Accesses = &al
+		result.ChainID = (*hexutil.Big)(tx.ChainId())
+		result.YParity = &yparity
+		result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
+		result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
+		// if the transaction has been mined, compute the effective gas price
+		if baseFee != nil && blockHash != (common.Hash{}) {
+			result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
+		} else {
+			result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
+		}
+		result.MaxFeePerBlobGas = (*hexutil.Big)(tx.BlobGasFeeCap())
+		result.BlobVersionedHashes = tx.BlobHashes()
 	}
 	return result
 }
 
+// effectiveGasPrice computes the transaction gas fee, based on the given basefee value.
+//
+//	price = min(gasTipCap + baseFee, gasFeeCap)
+func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int {
+	fee := tx.GasTipCap()
+	fee = fee.Add(fee, baseFee)
+	if tx.GasFeeCapIntCmp(fee) < 0 {
+		return tx.GasFeeCap()
+	}
+	return fee
+}
+
 // NewRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation
 func NewRPCPendingTransaction(tx *types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction {
 	var (
@@ -1396,7 +1534,7 @@ func NewRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf
 		blockTime   = uint64(0)
 	)
 	if current != nil {
-		baseFee = misc.CalcBaseFee(config, current)
+		baseFee = eip1559.CalcBaseFee(config, current)
 		blockNumber = current.Number.Uint64()
 		blockTime = current.Time
 	}
@@ -1639,7 +1777,7 @@ func (s *TransactionAPI) GetRawTransactionByHash(ctx context.Context, hash commo
 // GetTransactionReceipt returns the transaction receipt for the given transaction hash.
 func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) {
 	tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash)
-	if err != nil {
+	if tx == nil || err != nil {
 		// When the transaction doesn't exist, the RPC method should return JSON null
 		// as per specification.
 		return nil, nil
@@ -1659,13 +1797,18 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.
 	// Derive the sender.
 	signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time)
+	return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil
+}
+
+// marshalReceipt marshals a transaction receipt into a JSON object.
+func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} {
 	from, _ := types.Sender(signer, tx)
 
 	fields := map[string]interface{}{
 		"blockHash":        blockHash,
 		"blockNumber":      hexutil.Uint64(blockNumber),
-		"transactionHash":  hash,
-		"transactionIndex": hexutil.Uint64(index),
+		"transactionHash":  tx.Hash(),
+		"transactionIndex": hexutil.Uint64(txIndex),
 		"from":             from,
 		"to":               tx.To(),
 		"gasUsed":          hexutil.Uint64(receipt.GasUsed),
@@ -1687,11 +1830,16 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.
fields["logs"] = []*types.Log{} } + if tx.Type() == types.BlobTxType { + fields["blobGasUsed"] = hexutil.Uint64(receipt.BlobGasUsed) + fields["blobGasPrice"] = (*hexutil.Big)(receipt.BlobGasPrice) + } + // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation if receipt.ContractAddress != (common.Address{}) { fields["contractAddress"] = receipt.ContractAddress } - return fields, nil + return fields } // sign is a helper function that signs a transaction with the private key of the given address. @@ -2105,12 +2253,3 @@ func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error { } return nil } - -// toHexSlice creates a slice of hex-strings based on []byte. -func toHexSlice(b [][]byte) []string { - r := make([]string, len(b)) - for i := range b { - r[i] = hexutil.Encode(b[i]) - } - return r -} diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 61024900b..fc135c377 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -17,15 +17,15 @@ package ethapi import ( - "bytes" "context" "crypto/ecdsa" "encoding/json" "errors" - "hash" + "fmt" "math/big" + "os" + "path/filepath" "reflect" - "sort" "testing" "time" @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" @@ -44,22 +45,24 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/blocktest" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" - "golang.org/x/crypto/sha3" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) -func TestTransaction_RoundTripRpcJSON(t *testing.T) { +func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) { + t.Parallel() var ( - config = params.AllEthashProtocolChanges signer = types.LatestSigner(config) key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - tests = allTransactionTypes(common.Address{0xde, 0xad}, config) ) - t.Parallel() + for i, tt := range tests { var tx2 types.Transaction - tx, err := types.SignNewTx(key, signer, tt) + tx, err := types.SignNewTx(key, signer, tt.Tx) if err != nil { t.Fatalf("test %d: signing failed: %v", i, err) } @@ -72,7 +75,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) { t.Fatalf("test %d: stx changed, want %x have %x", i, want, have) } - // rpcTransaction + // rpcTransaction rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config) if data, err := json.Marshal(rpcTx); err != nil { t.Fatalf("test %d: marshalling failed; %v", i, err) @@ -80,117 +83,334 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) { t.Fatalf("test %d: unmarshal failed: %v", i, err) } else if want, have := tx.Hash(), tx2.Hash(); want != have { t.Fatalf("test %d: tx changed, want %x have %x", i, want, have) + } else { + want, have := tt.Want, string(data) + require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have) } } } -func allTransactionTypes(addr common.Address, config *params.ChainConfig) []types.TxData { - return []types.TxData{ - &types.LegacyTx{ - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: &addr, 
- Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - V: big.NewInt(9), - R: big.NewInt(10), - S: big.NewInt(11), - }, - &types.LegacyTx{ - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - &types.AccessListTx{ - ChainID: config.ChainID, - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, +func TestTransaction_RoundTripRpcJSON(t *testing.T) { + var ( + config = params.AllEthashProtocolChanges + tests = allTransactionTypes(common.Address{0xde, 0xad}, config) + ) + testTransactionMarshal(t, tests, config) +} + +func TestTransactionBlobTx(t *testing.T) { + config := *params.TestChainConfig + config.ShanghaiTime = new(uint64) + config.CancunTime = new(uint64) + tests := allBlobTxs(common.Address{0xde, 0xad}, &config) + + testTransactionMarshal(t, tests, &config) +} + +type txData struct { + Tx types.TxData + Want string +} + +func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txData { + return []txData{ + { + Tx: &types.LegacyTx{ + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + V: big.NewInt(9), + R: big.NewInt(10), + S: big.NewInt(11), }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - &types.AccessListTx{ - ChainID: config.ChainID, - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x7", + "gasPrice": "0x6", + "hash": "0x5f3240454cd09a5d8b1c5d651eefae7a339262875bcd2d0e6676f3d989967008", + "input": "0x0001020304", + "nonce": "0x5", + "to": "0xdead000000000000000000000000000000000000", + "transactionIndex": null, + "value": "0x8", + "type": "0x0", + "chainId": "0x539", + "v": "0xa96", + "r": "0xbc85e96592b95f7160825d837abb407f009df9ebe8f1b9158a4b8dd093377f75", + "s": "0x1b55ea3af5574c536967b039ba6999ef6c89cf22fc04bcb296e0e8b0b9b576f5" + }`, + }, { + Tx: &types.LegacyTx{ + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x7", + "gasPrice": "0x6", + "hash": "0x806e97f9d712b6cb7e781122001380a2837531b0fc1e5f5d78174ad4cb699873", + "input": "0x0001020304", + "nonce": "0x5", + "to": null, + "transactionIndex": null, + "value": "0x8", + "type": "0x0", + "chainId": "0x539", + "v": "0xa96", + "r": "0x9dc28b267b6ad4e4af6fe9289668f9305c2eb7a3241567860699e478af06835a", + "s": "0xa0b51a071aa9bed2cd70aedea859779dff039e3630ea38497d95202e9b1fec7" + }`, }, - &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 5, - GasTipCap: big.NewInt(6), - GasFeeCap: big.NewInt(9), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, 
- StorageKeys: []common.Hash{types.EmptyRootHash}, + { + Tx: &types.AccessListTx{ + ChainID: config.ChainID, + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, + }, }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x7", + "gasPrice": "0x6", + "hash": "0x121347468ee5fe0a29f02b49b4ffd1c8342bc4255146bb686cd07117f79e7129", + "input": "0x0001020304", + "nonce": "0x5", + "to": "0xdead000000000000000000000000000000000000", + "transactionIndex": null, + "value": "0x8", + "type": "0x1", + "accessList": [ + { + "address": "0x0200000000000000000000000000000000000000", + "storageKeys": [ + "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ] + } + ], + "chainId": "0x539", + "v": "0x0", + "r": "0xf372ad499239ae11d91d34c559ffc5dab4daffc0069e03afcabdcdf231a0c16b", + "s": "0x28573161d1f9472fa0fd4752533609e72f06414f7ab5588699a7141f65d2abf", + "yParity": "0x0" + }`, + }, { + Tx: &types.AccessListTx{ + ChainID: config.ChainID, + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, + }, + }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x7", + "gasPrice": "0x6", + "hash": "0x067c3baebede8027b0f828a9d933be545f7caaec623b00684ac0659726e2055b", + "input": "0x0001020304", + "nonce": "0x5", + "to": null, + "transactionIndex": null, + "value": "0x8", + "type": "0x1", + "accessList": [ + { + "address": "0x0200000000000000000000000000000000000000", + "storageKeys": [ + "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ] + } + ], + "chainId": "0x539", + "v": "0x1", + "r": "0x542981b5130d4613897fbab144796cb36d3cb3d7807d47d9c7f89ca7745b085c", + "s": "0x7425b9dd6c5deaa42e4ede35d0c4570c4624f68c28d812c10d806ffdf86ce63", + "yParity": "0x1" + }`, + }, { + Tx: &types.DynamicFeeTx{ + ChainID: config.ChainID, + Nonce: 5, + GasTipCap: big.NewInt(6), + GasFeeCap: big.NewInt(9), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, + }, + }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x7", + "gasPrice": "0x9", + "maxFeePerGas": "0x9", + "maxPriorityFeePerGas": "0x6", + "hash": "0xb63e0b146b34c3e9cb7fbabb5b3c081254a7ded6f1b65324b5898cc0545d79ff", + "input": "0x0001020304", + "nonce": "0x5", + "to": "0xdead000000000000000000000000000000000000", + "transactionIndex": null, + "value": "0x8", + "type": "0x2", + "accessList": [ + { + "address": "0x0200000000000000000000000000000000000000", + "storageKeys": [ + "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + ] + } + ], + "chainId": "0x539", + "v": "0x1", + "r": 
"0x3b167e05418a8932cd53d7578711fe1a76b9b96c48642402bb94978b7a107e80", + "s": "0x22f98a332d15ea2cc80386c1ebaa31b0afebfa79ebc7d039a1e0074418301fef", + "yParity": "0x1" + }`, + }, { + Tx: &types.DynamicFeeTx{ + ChainID: config.ChainID, + Nonce: 5, + GasTipCap: big.NewInt(6), + GasFeeCap: big.NewInt(9), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{}, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x7", + "gasPrice": "0x9", + "maxFeePerGas": "0x9", + "maxPriorityFeePerGas": "0x6", + "hash": "0xcbab17ee031a9d5b5a09dff909f0a28aedb9b295ac0635d8710d11c7b806ec68", + "input": "0x0001020304", + "nonce": "0x5", + "to": null, + "transactionIndex": null, + "value": "0x8", + "type": "0x2", + "accessList": [], + "chainId": "0x539", + "v": "0x0", + "r": "0x6446b8a682db7e619fc6b4f6d1f708f6a17351a41c7fbd63665f469bc78b41b9", + "s": "0x7626abc15834f391a117c63450047309dbf84c5ce3e8e609b607062641e2de43", + "yParity": "0x0" + }`, }, - &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 5, - GasTipCap: big.NewInt(6), - GasFeeCap: big.NewInt(9), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{}, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), + } +} + +func allBlobTxs(addr common.Address, config *params.ChainConfig) []txData { + return []txData{ + { + Tx: &types.BlobTx{ + Nonce: 6, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(5), + Gas: 6, + To: addr, + BlobFeeCap: uint256.NewInt(1), + BlobHashes: []common.Hash{{1}}, + Value: new(uint256.Int), + V: uint256.NewInt(32), + R: uint256.NewInt(10), + S: uint256.NewInt(11), + }, + Want: `{ + "blockHash": null, + "blockNumber": null, + "from": "0x71562b71999873db5b286df957af199ec94617f7", + "gas": "0x6", + "gasPrice": "0x5", + "maxFeePerGas": "0x5", + "maxPriorityFeePerGas": "0x1", + "maxFeePerBlobGas": "0x1", + "hash": "0x1f2b59a20e61efc615ad0cbe936379d6bbea6f938aafaf35eb1da05d8e7f46a3", + "input": "0x", + "nonce": "0x6", + "to": "0xdead000000000000000000000000000000000000", + "transactionIndex": null, + "value": "0x0", + "type": "0x3", + "accessList": [], + "chainId": "0x1", + "blobVersionedHashes": [ + "0x0100000000000000000000000000000000000000000000000000000000000000" + ], + "v": "0x0", + "r": "0x618be8908e0e5320f8f3b48042a079fe5a335ebd4ed1422a7d2207cd45d872bc", + "s": "0x27b2bc6c80e849a8e8b764d4549d8c2efac3441e73cf37054eb0a9b9f8e89b27", + "yParity": "0x0" + }`, }, } } type testBackend struct { - db ethdb.Database - chain *core.BlockChain + db ethdb.Database + chain *core.BlockChain + pending *types.Block } -func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { +func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.Engine, generator func(i int, b *core.BlockGen)) *testBackend { var ( - engine = ethash.NewFaker() - backend = &testBackend{ - db: rawdb.NewMemoryDatabase(), - } cacheConfig = &core.CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, @@ -200,18 +420,24 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i } ) // Generate blocks for testing - _, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator) - chain, err := core.NewBlockChain(backend.db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, nil) + db, blocks, _ := 
core.GenerateChainWithGenesis(gspec, engine, n, generator) + txlookupLimit := uint64(0) + chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, &txlookupLimit) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } - backend.chain = chain + + backend := &testBackend{db: db, chain: chain} return backend } +func (b *testBackend) setPendingBlock(block *types.Block) { + b.pending = block +} + func (b testBackend) SyncProgress() ethereum.SyncProgress { return ethereum.SyncProgress{} } func (b testBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { return big.NewInt(0), nil @@ -231,31 +457,46 @@ func (b testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) if number == rpc.LatestBlockNumber { return b.chain.CurrentBlock(), nil } + if number == rpc.PendingBlockNumber && b.pending != nil { + return b.pending.Header(), nil + } return b.chain.GetHeaderByNumber(uint64(number)), nil } func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - panic("implement me") + return b.chain.GetHeaderByHash(hash), nil } func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { - panic("implement me") + if blockNr, ok := blockNrOrHash.Number(); ok { + return b.HeaderByNumber(ctx, blockNr) + } + if blockHash, ok := blockNrOrHash.Hash(); ok { + return b.HeaderByHash(ctx, blockHash) + } + panic("unknown type rpc.BlockNumberOrHash") } -func (b testBackend) CurrentHeader() *types.Header { panic("implement me") } -func (b testBackend) CurrentBlock() *types.Header { panic("implement me") } +func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentBlock() } +func (b testBackend) CurrentBlock() *types.Header { return b.chain.CurrentBlock() } func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { if number == rpc.LatestBlockNumber { head := b.chain.CurrentBlock() return b.chain.GetBlock(head.Hash(), head.Number.Uint64()), nil } + if number == rpc.PendingBlockNumber { + return b.pending, nil + } return b.chain.GetBlockByNumber(uint64(number)), nil } func (b testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - panic("implement me") + return b.chain.GetBlockByHash(hash), nil } func (b testBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) } - panic("implement me") + if blockHash, ok := blockNrOrHash.Hash(); ok { + return b.BlockByHash(ctx, blockHash) + } + panic("unknown type rpc.BlockNumberOrHash") } func (b testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { return b.chain.GetBlock(hash, uint64(number.Int64())).Body(), nil @@ -282,9 +523,19 @@ func (b testBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOr } func (b testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { panic("implement me") } func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - panic("implement me") + header, err := b.HeaderByHash(ctx, hash) + if header == nil || err != nil { + return nil, err + } + receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, 
b.chain.Config()) + return receipts, nil +} +func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { + if b.pending != nil && hash == b.pending.Hash() { + return nil + } + return big.NewInt(1) } -func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { panic("implement me") } func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) (*vm.EVM, func() error) { vmError := func() error { return nil } if vmConfig == nil { @@ -310,7 +561,8 @@ func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) er panic("implement me") } func (b testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - panic("implement me") + tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.db, txHash) + return tx, blockHash, blockNumber, index, nil } func (b testBackend) GetPoolTransactions() (types.Transactions, error) { panic("implement me") } func (b testBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { panic("implement me") } @@ -318,10 +570,10 @@ func (b testBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uin panic("implement me") } func (b testBackend) Stats() (pending int, queued int) { panic("implement me") } -func (b testBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { +func (b testBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { panic("implement me") } -func (b testBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { +func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { panic("implement me") } func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription { @@ -362,7 +614,7 @@ func TestEstimateGas(t *testing.T) { signer = types.HomesteadSigner{} randomAccounts = newAccounts(2) ) - api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei @@ -372,6 +624,7 @@ func TestEstimateGas(t *testing.T) { var testSuite = []struct { blockNumber rpc.BlockNumber call TransactionArgs + overrides StateOverride expectErr error want uint64 }{ @@ -404,9 +657,30 @@ func TestEstimateGas(t *testing.T) { expectErr: nil, want: 53000, }, + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{}, + overrides: StateOverride{ + randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, + }, + expectErr: nil, + want: 53000, + }, + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &randomAccounts[0].addr, + To: &randomAccounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + overrides: StateOverride{ + randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(0))}, + }, + expectErr: core.ErrInsufficientFunds, + }, } for i, tc := range testSuite { - result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}) + result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, 
&tc.overrides) if tc.expectErr != nil { if err == nil { t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) @@ -443,7 +717,7 @@ func TestCall(t *testing.T) { genBlocks = 10 signer = types.HomesteadSigner{} ) - api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei @@ -601,19 +875,13 @@ type Account struct { addr common.Address } -type Accounts []Account - -func (a Accounts) Len() int { return len(a) } -func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } - -func newAccounts(n int) (accounts Accounts) { +func newAccounts(n int) (accounts []Account) { for i := 0; i < n; i++ { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) accounts = append(accounts, Account{key: key, addr: addr}) } - sort.Sort(accounts) + slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) }) return accounts } @@ -627,32 +895,8 @@ func hex2Bytes(str string) *hexutil.Bytes { return &rpcBytes } -// testHasher is the helper tool for transaction/receipt list hashing. -// The original hasher is trie, in order to get rid of import cycle, -// use the testing hasher instead. -type testHasher struct { - hasher hash.Hash -} - -func newHasher() *testHasher { - return &testHasher{hasher: sha3.NewLegacyKeccak256()} -} - -func (h *testHasher) Reset() { - h.hasher.Reset() -} - -func (h *testHasher) Update(key, val []byte) error { - h.hasher.Write(key) - h.hasher.Write(val) - return nil -} - -func (h *testHasher) Hash() common.Hash { - return common.BytesToHash(h.hasher.Sum(nil)) -} - func TestRPCMarshalBlock(t *testing.T) { + t.Parallel() var ( txs []*types.Transaction to = common.BytesToAddress([]byte{0x11}) @@ -681,7 +925,7 @@ func TestRPCMarshalBlock(t *testing.T) { } txs = append(txs, tx) } - block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, newHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher()) var testSuite = []struct { inclTx bool @@ -692,36 +936,705 @@ func TestRPCMarshalBlock(t *testing.T) { { inclTx: false, fullTx: false, - want: 
`{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, + want: `{ + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0x0", + "gasUsed": "0x0", + "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x64", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x296", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x0", + "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", + "uncles": [] + }`, }, // only tx hashes { inclTx: true, fullTx: false, - want: 
`{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":["0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, + want: `{ + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0x0", + "gasUsed": "0x0", + "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x64", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x296", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x0", + "transactions": [ + "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605", + "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4", + "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5", + "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1" + ], + "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", + "uncles": [] + }`, }, - // full tx details { inclTx: true, fullTx: true, - want: 
`{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":[{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","input":"0x111111","nonce":"0x1","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x0","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","input":"0x111111","nonce":"0x2","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x1","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","input":"0x111111","nonce":"0x3","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x2","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1","input":"0x111111","nonce":"0x4","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x3","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"}],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, + want: `{ + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0x0", + "gasUsed": "0x0", + "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x64", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x296", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x0", + "transactions": [ + { + "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "blockNumber": "0x64", + "from": "0x0000000000000000000000000000000000000000", + "gas": "0x457", + "gasPrice": "0x2b67", + "hash": "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605", + "input": "0x111111", + "nonce": "0x1", + "to": "0x0000000000000000000000000000000000000011", + "transactionIndex": "0x0", + "value": "0x6f", + "type": "0x1", + "accessList": [], + "chainId": "0x539", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "yParity": "0x0" + }, + { + "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "blockNumber": "0x64", + "from": "0x0000000000000000000000000000000000000000", + "gas": "0x457", + "gasPrice": "0x2b67", + "hash": "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4", + "input": "0x111111", + "nonce": "0x2", + "to": "0x0000000000000000000000000000000000000011", + "transactionIndex": "0x1", + "value": "0x6f", + "type": "0x0", + "chainId": "0x7fffffffffffffee", + "v": "0x0", + "r": "0x0", + "s": "0x0" + }, + { + "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "blockNumber": "0x64", + "from": "0x0000000000000000000000000000000000000000", + "gas": "0x457", + "gasPrice": "0x2b67", + "hash": "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5", + "input": "0x111111", + "nonce": "0x3", + "to": "0x0000000000000000000000000000000000000011", + "transactionIndex": "0x2", + "value": "0x6f", + "type": "0x1", + "accessList": [], + "chainId": "0x539", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "yParity": "0x0" + }, + { + "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", + "blockNumber": "0x64", + "from": "0x0000000000000000000000000000000000000000", + "gas": "0x457", + "gasPrice": "0x2b67", + "hash": "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1", + "input": "0x111111", + "nonce": "0x4", + "to": "0x0000000000000000000000000000000000000011", + "transactionIndex": "0x3", + "value": "0x6f", + "type": "0x0", + "chainId": "0x7fffffffffffffee", + "v": "0x0", + "r": "0x0", + "s": "0x0" + } + ], + "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", + "uncles": [] + }`, }, } for i, tc := range testSuite { - resp, err := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.MainnetChainConfig) - if err != 
nil { - t.Errorf("test %d: got error %v", i, err) - continue - } + resp := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.MainnetChainConfig) out, err := json.Marshal(resp) if err != nil { t.Errorf("test %d: json marshal error: %v", i, err) continue } - if have := string(out); have != tc.want { - t.Errorf("test %d: want: %s have: %s", i, tc.want, have) - } + require.JSONEqf(t, tc.want, string(out), "test %d", i) } } + +func TestRPCGetBlockOrHeader(t *testing.T) { + t.Parallel() + + // Initialize test accounts + var ( + acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) + genesis = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + acc1Addr: {Balance: big.NewInt(params.Ether)}, + acc2Addr: {Balance: big.NewInt(params.Ether)}, + }, + } + genBlocks = 10 + signer = types.HomesteadSigner{} + tx = types.NewTx(&types.LegacyTx{ + Nonce: 11, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &acc2Addr, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + withdrawal = &types.Withdrawal{ + Index: 0, + Validator: 1, + Address: common.Address{0x12, 0x34}, + Amount: 10, + } + pending = types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, []*types.Withdrawal{withdrawal}, blocktest.NewHasher()) + ) + backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, acc1Key) + b.AddTx(tx) + }) + backend.setPendingBlock(pending) + api := NewBlockChainAPI(backend) + blockHashes := make([]common.Hash, genBlocks+1) + ctx := context.Background() + for i := 0; i <= genBlocks; i++ { + header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) + if err != nil { + t.Errorf("failed to get block: %d err: %v", i, err) + } + blockHashes[i] = header.Hash() + } + pendingHash := pending.Hash() + + var testSuite = []struct { + blockNumber rpc.BlockNumber + blockHash *common.Hash + fullTx bool + reqHeader bool + file string + expectErr error + }{ + // 0. latest header + { + blockNumber: rpc.LatestBlockNumber, + reqHeader: true, + file: "tag-latest", + }, + // 1. genesis header + { + blockNumber: rpc.BlockNumber(0), + reqHeader: true, + file: "number-0", + }, + // 2. #1 header + { + blockNumber: rpc.BlockNumber(1), + reqHeader: true, + file: "number-1", + }, + // 3. latest-1 header + { + blockNumber: rpc.BlockNumber(9), + reqHeader: true, + file: "number-latest-1", + }, + // 4. latest+1 header + { + blockNumber: rpc.BlockNumber(11), + reqHeader: true, + file: "number-latest+1", + }, + // 5. pending header + { + blockNumber: rpc.PendingBlockNumber, + reqHeader: true, + file: "tag-pending", + }, + // 6. latest block + { + blockNumber: rpc.LatestBlockNumber, + file: "tag-latest", + }, + // 7. genesis block + { + blockNumber: rpc.BlockNumber(0), + file: "number-0", + }, + // 8. #1 block + { + blockNumber: rpc.BlockNumber(1), + file: "number-1", + }, + // 9. latest-1 block + { + blockNumber: rpc.BlockNumber(9), + fullTx: true, + file: "number-latest-1", + }, + // 10. 
latest+1 block + { + blockNumber: rpc.BlockNumber(11), + fullTx: true, + file: "number-latest+1", + }, + // 11. pending block + { + blockNumber: rpc.PendingBlockNumber, + file: "tag-pending", + }, + // 12. pending block + fullTx + { + blockNumber: rpc.PendingBlockNumber, + fullTx: true, + file: "tag-pending-fullTx", + }, + // 13. latest header by hash + { + blockHash: &blockHashes[len(blockHashes)-1], + reqHeader: true, + file: "hash-latest", + }, + // 14. genesis header by hash + { + blockHash: &blockHashes[0], + reqHeader: true, + file: "hash-0", + }, + // 15. #1 header + { + blockHash: &blockHashes[1], + reqHeader: true, + file: "hash-1", + }, + // 16. latest-1 header + { + blockHash: &blockHashes[len(blockHashes)-2], + reqHeader: true, + file: "hash-latest-1", + }, + // 17. empty hash + { + blockHash: &common.Hash{}, + reqHeader: true, + file: "hash-empty", + }, + // 18. pending hash + { + blockHash: &pendingHash, + reqHeader: true, + file: `hash-pending`, + }, + // 19. latest block + { + blockHash: &blockHashes[len(blockHashes)-1], + file: "hash-latest", + }, + // 20. genesis block + { + blockHash: &blockHashes[0], + file: "hash-genesis", + }, + // 21. #1 block + { + blockHash: &blockHashes[1], + file: "hash-1", + }, + // 22. latest-1 block + { + blockHash: &blockHashes[len(blockHashes)-2], + fullTx: true, + file: "hash-latest-1-fullTx", + }, + // 23. empty hash + body + { + blockHash: &common.Hash{}, + fullTx: true, + file: "hash-empty-fullTx", + }, + // 24. pending block + { + blockHash: &pendingHash, + file: `hash-pending`, + }, + // 25. pending block + fullTx + { + blockHash: &pendingHash, + fullTx: true, + file: "hash-pending-fullTx", + }, + } + + for i, tt := range testSuite { + var ( + result map[string]interface{} + err error + rpc string + ) + if tt.blockHash != nil { + if tt.reqHeader { + result = api.GetHeaderByHash(context.Background(), *tt.blockHash) + rpc = "eth_getHeaderByHash" + } else { + result, err = api.GetBlockByHash(context.Background(), *tt.blockHash, tt.fullTx) + rpc = "eth_getBlockByHash" + } + } else { + if tt.reqHeader { + result, err = api.GetHeaderByNumber(context.Background(), tt.blockNumber) + rpc = "eth_getHeaderByNumber" + } else { + result, err = api.GetBlockByNumber(context.Background(), tt.blockNumber, tt.fullTx) + rpc = "eth_getBlockByNumber" + } + } + if tt.expectErr != nil { + if err == nil { + t.Errorf("test %d: want error %v, have nothing", i, tt.expectErr) + continue + } + if !errors.Is(err, tt.expectErr) { + t.Errorf("test %d: error mismatch, want %v, have %v", i, tt.expectErr, err) + } + continue + } + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + + testRPCResponseWithFile(t, i, result, rpc, tt.file) + } +} + +func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Hash) { + config := *params.TestChainConfig + config.ShanghaiTime = new(uint64) + config.CancunTime = new(uint64) + var ( + acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) + contract = common.HexToAddress("0000000000000000000000000000000000031ec7") + genesis = &core.Genesis{ + Config: &config, + ExcessBlobGas: new(uint64), + BlobGasUsed: new(uint64), + Alloc: core.GenesisAlloc{ + acc1Addr: {Balance: big.NewInt(params.Ether)}, + acc2Addr: {Balance: 
big.NewInt(params.Ether)}, + // // SPDX-License-Identifier: GPL-3.0 + // pragma solidity >=0.7.0 <0.9.0; + // + // contract Token { + // event Transfer(address indexed from, address indexed to, uint256 value); + // function transfer(address to, uint256 value) public returns (bool) { + // emit Transfer(msg.sender, to, value); + // return true; + // } + // } + contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")}, + }, + } + signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID) + txHashes = make([]common.Hash, genBlocks) + ) + + // Set the terminal total difficulty in the config + genesis.Config.TerminalTotalDifficulty = big.NewInt(0) + genesis.Config.TerminalTotalDifficultyPassed = true + backend := newTestBackend(t, genBlocks, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { + var ( + tx *types.Transaction + err error + ) + switch i { + case 0: + // transfer 1000wei + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key) + case 1: + // create contract + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key) + case 2: + // with logs + // transfer(address to, uint256 value) + data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key) + case 3: + // dynamic fee with logs + // transfer(address to, uint256 value) + data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) + fee := big.NewInt(500) + fee.Add(fee, b.BaseFee()) + tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key) + case 4: + // access list with contract create + accessList := types.AccessList{{ + Address: contract, + StorageKeys: 
+ case 5: + // blob tx + fee := big.NewInt(500) + fee.Add(fee, b.BaseFee()) + tx, err = types.SignTx(types.NewTx(&types.BlobTx{ + Nonce: uint64(i), + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.MustFromBig(fee), + Gas: params.TxGas, + To: acc2Addr, + BlobFeeCap: uint256.NewInt(1), + BlobHashes: []common.Hash{{1}}, + Value: new(uint256.Int), + }), signer, acc1Key) + } + if err != nil { + t.Errorf("failed to sign tx: %v", err) + } + if tx != nil { + b.AddTx(tx) + txHashes[i] = tx.Hash() + } + if i == 5 { + b.SetBlobGas(params.BlobTxBlobGasPerBlob) + } + b.SetPoS() + }) + return backend, txHashes +} + +func TestRPCGetTransactionReceipt(t *testing.T) { + t.Parallel() + + var ( + backend, txHashes = setupReceiptBackend(t, 6) + api = NewTransactionAPI(backend, new(AddrLocker)) + ) + + var testSuite = []struct { + txHash common.Hash + file string + }{ + // 0. normal success + { + txHash: txHashes[0], + file: "normal-transfer-tx", + }, + // 1. create contract + { + txHash: txHashes[1], + file: "create-contract-tx", + }, + // 2. with logs success + { + txHash: txHashes[2], + file: "with-logs", + }, + // 3. dynamic tx with logs success + { + txHash: txHashes[3], + file: "dynamic-tx-with-logs", + }, + // 4. access list tx with create contract + { + txHash: txHashes[4], + file: "create-contract-with-access-list", + }, + // 5. txhash empty + { + txHash: common.Hash{}, + file: "txhash-empty", + }, + // 6. txhash not found + { + txHash: common.HexToHash("deadbeef"), + file: "txhash-notfound", + }, + // 7. blob tx + { + txHash: txHashes[5], + file: "blob-tx", + }, + } + + for i, tt := range testSuite { + var ( + result interface{} + err error + ) + result, err = api.GetTransactionReceipt(context.Background(), tt.txHash) + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + testRPCResponseWithFile(t, i, result, "eth_getTransactionReceipt", tt.file) + } +} +
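+// TestRPCGetBlockReceipts exercises eth_getBlockReceipts over the chain built +// by setupReceiptBackend, selecting blocks by hash, by number and by tag, and +// comparing each response against a golden file under testdata.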
+func TestRPCGetBlockReceipts(t *testing.T) { + t.Parallel() + + var ( + genBlocks = 6 + backend, _ = setupReceiptBackend(t, genBlocks) + api = NewBlockChainAPI(backend) + ) + blockHashes := make([]common.Hash, genBlocks+1) + ctx := context.Background() + for i := 0; i <= genBlocks; i++ { + header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) + if err != nil { + t.Errorf("failed to get header %d: %v", i, err) + } + blockHashes[i] = header.Hash() + } + + var testSuite = []struct { + test rpc.BlockNumberOrHash + file string + }{ + // 0. block without any txs (hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[0], false), + file: "number-0", + }, + // 1. block without any txs (number) + { + test: rpc.BlockNumberOrHashWithNumber(0), + file: "number-1", + }, + // 2. earliest tag + { + test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber), + file: "tag-earliest", + }, + // 3. latest tag + { + test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), + file: "tag-latest", + }, + // 4. block with legacy transfer tx (hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false), + file: "block-with-legacy-transfer-tx", + }, + // 5. block with contract create tx (number) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2)), + file: "block-with-contract-create-tx", + }, + // 6. block with legacy contract call tx (hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[3], false), + file: "block-with-legacy-contract-call-tx", + }, + // 7. block with dynamic fee tx (number) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4)), + file: "block-with-dynamic-fee-tx", + }, + // 8. block is empty + { + test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false), + file: "hash-empty", + }, + // 9. block not found (unknown hash) + { + test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false), + file: "hash-notfound", + }, + // 10. block not found (number beyond head) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)), + file: "block-notfound", + }, + // 11. block with blob tx + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(6)), + file: "block-with-blob-tx", + }, + } + + for i, tt := range testSuite { + var ( + result interface{} + err error + ) + result, err = api.GetBlockReceipts(context.Background(), tt.test) + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + testRPCResponseWithFile(t, i, result, "eth_getBlockReceipts", tt.file) + } +} + +// testRPCResponseWithFile compares the JSON encoding of result against the +// golden file testdata/<rpcMethod>-<file>.json. Set WRITE_TEST_FILES to any +// non-empty value to (re)generate the golden files. +func testRPCResponseWithFile(t *testing.T, testid int, result interface{}, rpcMethod string, file string) { + data, err := json.MarshalIndent(result, "", " ") + if err != nil { + t.Errorf("test %d: json marshal error: %v", testid, err) + return + } + outputFile := filepath.Join("testdata", fmt.Sprintf("%s-%s.json", rpcMethod, file)) + if os.Getenv("WRITE_TEST_FILES") != "" { + if err := os.WriteFile(outputFile, data, 0644); err != nil { + t.Fatalf("test %d: failed to write test file %s: %v", testid, outputFile, err) + } + } + want, err := os.ReadFile(outputFile) + if err != nil { + t.Fatalf("error reading expected test file %s: %v", outputFile, err) + } + require.JSONEqf(t, string(want), string(data), "test %d: json not match, want: %s, have: %s", testid, string(want), string(data)) +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 918b3b630..458fb811e 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -80,8 +80,8 @@ type Backend interface { GetPoolTransaction(txHash common.Hash) *types.Transaction GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) Stats() (pending int, queued int) - TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) - TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) + TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) + TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription ChainConfig() *params.ChainConfig diff --git a/internal/ethapi/testdata/eth_getBlockByHash-hash-1.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-1.json new file mode 100644 index 000000000..379636d5f --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-1.json @@ -0,0 +1,25 @@ +{ + "baseFeePerGas": "0x342770c0", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x1", + "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x26a", + "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22", + "timestamp": "0xa", + "totalDifficulty": "0x1", + "transactions": [ + "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e" + ], + "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByHash-hash-empty-fullTx.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-empty-fullTx.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-empty-fullTx.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByHash-hash-genesis.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-genesis.json new file mode 100644 index 000000000..759dbf69e --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-genesis.json @@ -0,0 +1,23 @@ +{ + "baseFeePerGas": "0x3b9aca00", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x0", + "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x200", + "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", + "timestamp": "0x0", + "totalDifficulty": "0x1", + "transactions": [], + "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles": [] +} \ No newline at end of file diff --git 
a/internal/ethapi/testdata/eth_getBlockByHash-hash-latest-1-fullTx.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-latest-1-fullTx.json new file mode 100644 index 000000000..3526da121 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-latest-1-fullTx.json @@ -0,0 +1,41 @@ +{ + "baseFeePerGas": "0x121a9cca", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x9", + "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x26a", + "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0", + "timestamp": "0x5a", + "totalDifficulty": "0x1", + "transactions": [ + { + "blockHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "blockNumber": "0x9", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gas": "0x5208", + "gasPrice": "0x121a9cca", + "hash": "0xecd155a61a5734b3efab75924e3ae34026c7c4133d8c2a46122bd03d7d199725", + "input": "0x", + "nonce": "0x8", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionIndex": "0x0", + "value": "0x3e8", + "type": "0x0", + "v": "0x1b", + "r": "0xc6028b8e983d62fa8542f8a7633fb23cc941be2c897134352d95a7d9b19feafd", + "s": "0xeb6adcaaae3bed489c6cce4435f9db05d23a52820c78bd350e31eec65ed809d" + } + ], + "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByHash-hash-latest.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-latest.json new file mode 100644 index 000000000..32fee8326 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-latest.json @@ -0,0 +1,25 @@ +{ + "baseFeePerGas": "0xfdc7303", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0xa", + "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x26a", + "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91", + "timestamp": "0x64", + "totalDifficulty": "0x1", + "transactions": [ + "0x3ee4094ca1e0b07a66dd616a057e081e53144ca7e9685a126fd4dda9ca042644" + ], + "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByHash-hash-pending-fullTx.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-pending-fullTx.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-pending-fullTx.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByHash-hash-pending.json b/internal/ethapi/testdata/eth_getBlockByHash-hash-pending.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByHash-hash-pending.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-number-0.json b/internal/ethapi/testdata/eth_getBlockByNumber-number-0.json new file mode 100644 index 000000000..759dbf69e --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-number-0.json @@ -0,0 +1,23 @@ +{ + "baseFeePerGas": "0x3b9aca00", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x0", + "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x200", + "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", + "timestamp": "0x0", + "totalDifficulty": "0x1", + "transactions": [], + "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-number-1.json b/internal/ethapi/testdata/eth_getBlockByNumber-number-1.json new file mode 100644 index 000000000..379636d5f --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-number-1.json @@ -0,0 +1,25 @@ +{ + "baseFeePerGas": "0x342770c0", + "difficulty": 
"0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x1", + "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x26a", + "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22", + "timestamp": "0xa", + "totalDifficulty": "0x1", + "transactions": [ + "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e" + ], + "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-number-latest+1.json b/internal/ethapi/testdata/eth_getBlockByNumber-number-latest+1.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-number-latest+1.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-number-latest-1.json b/internal/ethapi/testdata/eth_getBlockByNumber-number-latest-1.json new file mode 100644 index 000000000..3526da121 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-number-latest-1.json @@ -0,0 +1,41 @@ +{ + "baseFeePerGas": "0x121a9cca", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x9", + "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x26a", + "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0", + "timestamp": "0x5a", + "totalDifficulty": "0x1", + "transactions": [ + { + 
"blockHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "blockNumber": "0x9", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gas": "0x5208", + "gasPrice": "0x121a9cca", + "hash": "0xecd155a61a5734b3efab75924e3ae34026c7c4133d8c2a46122bd03d7d199725", + "input": "0x", + "nonce": "0x8", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionIndex": "0x0", + "value": "0x3e8", + "type": "0x0", + "v": "0x1b", + "r": "0xc6028b8e983d62fa8542f8a7633fb23cc941be2c897134352d95a7d9b19feafd", + "s": "0xeb6adcaaae3bed489c6cce4435f9db05d23a52820c78bd350e31eec65ed809d" + } + ], + "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-tag-latest.json b/internal/ethapi/testdata/eth_getBlockByNumber-tag-latest.json new file mode 100644 index 000000000..32fee8326 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-tag-latest.json @@ -0,0 +1,25 @@ +{ + "baseFeePerGas": "0xfdc7303", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0xa", + "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x26a", + "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91", + "timestamp": "0x64", + "totalDifficulty": "0x1", + "transactions": [ + "0x3ee4094ca1e0b07a66dd616a057e081e53144ca7e9685a126fd4dda9ca042644" + ], + "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445", + "uncles": [] +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json new file mode 100644 index 000000000..1d524d6ec --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json @@ -0,0 +1,50 @@ +{ + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0x0", + "gasUsed": "0x0", + "hash": null, + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": null, + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": null, + "number": "0xb", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x256", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x2a", + "totalDifficulty": null, + "transactions": [ + { + "blockHash": "0x6cebd9f966ea686f44b981685e3f0eacea28591a7a86d7fbbe521a86e9f81165", + "blockNumber": "0xb", + "from": "0x0000000000000000000000000000000000000000", + "gas": "0x457", + "gasPrice": "0x2b67", + "hash": "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298", + "input": "0x111111", + "nonce": "0xb", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionIndex": "0x0", + "value": "0x6f", + "type": "0x0", + "chainId": "0x7fffffffffffffee", + "v": "0x0", + "r": "0x0", + "s": "0x0" + } + ], + "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37", + "uncles": [], + "withdrawals": [ + { + "index": "0x0", + "validatorIndex": "0x1", + "address": "0x1234000000000000000000000000000000000000", + "amount": "0xa" + } + ], + "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json new file mode 100644 index 000000000..c0e2b07bb --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json @@ -0,0 +1,33 @@ +{ + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0x0", + "gasUsed": "0x0", + "hash": null, + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": null, + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": null, + "number": "0xb", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x256", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x2a", + "totalDifficulty": null, + "transactions": [ + 
"0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298" + ], + "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37", + "uncles": [], + "withdrawals": [ + { + "index": "0x0", + "validatorIndex": "0x1", + "address": "0x1234000000000000000000000000000000000000", + "amount": "0xa" + } + ], + "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-notfound.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-notfound.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-notfound.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json new file mode 100644 index 000000000..591fab673 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-blob-tx.json @@ -0,0 +1,20 @@ +[ + { + "blobGasPrice": "0x1", + "blobGasUsed": "0x20000", + "blockHash": "0xe724dfd4349861f4dceef2bc4df086d0a3d88858214f6bee9fcf1bebd1edc2a6", + "blockNumber": "0x6", + "contractAddress": null, + "cumulativeGasUsed": "0x5208", + "effectiveGasPrice": "0x1b09d63b", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5208", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionHash": "0xb51ee3d2a89ba5d5623c73133c8d7a6ba9fb41194c17f4302c21b30994a1180f", + "transactionIndex": "0x0", + "type": "0x3" + } +] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json new file mode 100644 index 000000000..f1e0db22c --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-contract-create-tx.json @@ -0,0 +1,18 @@ +[ + { + "blockHash": "0x1e7dcf3abe8bf05d32367a5dc387caa32578b15871bf8b3cbeedf2d8d530f844", + "blockNumber": "0x2", + "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592", + "cumulativeGasUsed": "0xcf50", + "effectiveGasPrice": "0x2db16291", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0xcf50", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": null, + "transactionHash": 
"0x340e58cda5086495010b571fe25067fecc9954dc4ee3cedece00691fa3f5904a", + "transactionIndex": "0x0", + "type": "0x0" + } +] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json new file mode 100644 index 000000000..520e30e4e --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-dynamic-fee-tx.json @@ -0,0 +1,18 @@ +[ + { + "blockHash": "0xffa737e6ce9a9162ffd411dd06169114b3ed5ee9fc1474a2625c92548e4455e0", + "blockNumber": "0x4", + "contractAddress": null, + "cumulativeGasUsed": "0x538d", + "effectiveGasPrice": "0x2325c42f", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x538d", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x0", + "to": "0x0000000000000000000000000000000000031ec7", + "transactionHash": "0xdcde2574628c9d7dff22b9afa19f235959a924ceec65a9df903a517ae91f5c84", + "transactionIndex": "0x0", + "type": "0x2" + } +] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json new file mode 100644 index 000000000..a71cf4b37 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-contract-call-tx.json @@ -0,0 +1,34 @@ +[ + { + "blockHash": "0x173dcd9d22ce71929cd17e84ea88702a0f84d6244c6898d2a4f48722e494fe9c", + "blockNumber": "0x3", + "contractAddress": null, + "cumulativeGasUsed": "0x5e28", + "effectiveGasPrice": "0x281c2585", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5e28", + "logs": [ + { + "address": "0x0000000000000000000000000000000000031ec7", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000000d", + "blockNumber": "0x3", + "transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287", + "transactionIndex": "0x0", + "blockHash": "0x173dcd9d22ce71929cd17e84ea88702a0f84d6244c6898d2a4f48722e494fe9c", + "logIndex": "0x0", + "removed": false + } + ], + "logsBloom": "0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000", + "status": "0x1", + "to": "0x0000000000000000000000000000000000031ec7", + "transactionHash": 
"0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287", + "transactionIndex": "0x0", + "type": "0x0" + } +] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json new file mode 100644 index 000000000..3e16c3062 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-block-with-legacy-transfer-tx.json @@ -0,0 +1,18 @@ +[ + { + "blockHash": "0xa8a067b3cb3b9ddc6cfb8317bfd08b266fcf9994fc870c1f7ed394acecfadf39", + "blockNumber": "0x1", + "contractAddress": null, + "cumulativeGasUsed": "0x5208", + "effectiveGasPrice": "0x342770c0", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5208", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionHash": "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e", + "transactionIndex": "0x0", + "type": "0x0" + } +] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-hash-empty.json b/internal/ethapi/testdata/eth_getBlockReceipts-hash-empty.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-hash-empty.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-hash-notfound.json b/internal/ethapi/testdata/eth_getBlockReceipts-hash-notfound.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-hash-notfound.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-number-0.json b/internal/ethapi/testdata/eth_getBlockReceipts-number-0.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-number-0.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-number-1.json b/internal/ethapi/testdata/eth_getBlockReceipts-number-1.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-number-1.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-tag-earliest.json b/internal/ethapi/testdata/eth_getBlockReceipts-tag-earliest.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-tag-earliest.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json b/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json new file mode 100644 index 000000000..591fab673 --- /dev/null +++ b/internal/ethapi/testdata/eth_getBlockReceipts-tag-latest.json @@ -0,0 +1,20 @@ +[ + { + "blobGasPrice": "0x1", + "blobGasUsed": "0x20000", + "blockHash": 
"0xe724dfd4349861f4dceef2bc4df086d0a3d88858214f6bee9fcf1bebd1edc2a6", + "blockNumber": "0x6", + "contractAddress": null, + "cumulativeGasUsed": "0x5208", + "effectiveGasPrice": "0x1b09d63b", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5208", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionHash": "0xb51ee3d2a89ba5d5623c73133c8d7a6ba9fb41194c17f4302c21b30994a1180f", + "transactionIndex": "0x0", + "type": "0x3" + } +] \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByHash-hash-0.json b/internal/ethapi/testdata/eth_getHeaderByHash-hash-0.json new file mode 100644 index 000000000..dc61aa9a2 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByHash-hash-0.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0x3b9aca00", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x0", + "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", + "timestamp": "0x0", + "totalDifficulty": "0x1", + "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByHash-hash-1.json b/internal/ethapi/testdata/eth_getHeaderByHash-hash-1.json new file mode 100644 index 000000000..c1dc70f64 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByHash-hash-1.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0x342770c0", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x1", + "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22", + "timestamp": "0xa", + "totalDifficulty": "0x1", + "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByHash-hash-empty.json b/internal/ethapi/testdata/eth_getHeaderByHash-hash-empty.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByHash-hash-empty.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByHash-hash-latest-1.json b/internal/ethapi/testdata/eth_getHeaderByHash-hash-latest-1.json new file mode 100644 index 000000000..a63ff8670 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByHash-hash-latest-1.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0x121a9cca", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x9", + "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0", + "timestamp": "0x5a", + "totalDifficulty": "0x1", + "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByHash-hash-latest.json b/internal/ethapi/testdata/eth_getHeaderByHash-hash-latest.json new file mode 100644 index 000000000..f2affcc1c --- /dev/null +++ 
b/internal/ethapi/testdata/eth_getHeaderByHash-hash-latest.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0xfdc7303", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0xa", + "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91", + "timestamp": "0x64", + "totalDifficulty": "0x1", + "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByHash-hash-pending.json b/internal/ethapi/testdata/eth_getHeaderByHash-hash-pending.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByHash-hash-pending.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-number-0.json b/internal/ethapi/testdata/eth_getHeaderByNumber-number-0.json new file mode 100644 index 000000000..dc61aa9a2 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-number-0.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0x3b9aca00", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x0", + "hash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", + "timestamp": "0x0", + "totalDifficulty": "0x1", + "transactionsRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-number-1.json b/internal/ethapi/testdata/eth_getHeaderByNumber-number-1.json new file mode 100644 index 000000000..c1dc70f64 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-number-1.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0x342770c0", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x0da274b315de8e4d5bf8717218ec43540464ef36378cb896469bb731e1d3f3cb", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x1", + "parentHash": "0xbdc7d83b8f876938810462fe8d053263a482e44201e3883d4ae204ff4de7eff5", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0x92c5c55a698963f5b06e3aee415630f5c48b0760e537af94917ce9c4f42a2e22", + "timestamp": "0xa", + "totalDifficulty": "0x1", + "transactionsRoot": "0xca0ebcce920d2cdfbf9e1dbe90ed3441a1a576f344bd80e60508da814916f4e7" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-number-latest+1.json b/internal/ethapi/testdata/eth_getHeaderByNumber-number-latest+1.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-number-latest+1.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-number-latest-1.json b/internal/ethapi/testdata/eth_getHeaderByNumber-number-latest-1.json new file mode 100644 index 000000000..a63ff8670 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-number-latest-1.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0x121a9cca", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x9", + "parentHash": "0x5abd19c39d9f1c6e52998e135ea14e1fbc5db3fa2a108f4538e238ca5c2e68d7", + "receiptsRoot": 
"0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0xbd4aa2c2873df709151075250a8c01c9a14d2b0e2f715dbdd16e0ef8030c2cf0", + "timestamp": "0x5a", + "totalDifficulty": "0x1", + "transactionsRoot": "0x0767ed8359337dc6a8fdc77fe52db611bed1be87aac73c4556b1bf1dd3d190a5" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-tag-latest.json b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-latest.json new file mode 100644 index 000000000..f2affcc1c --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-latest.json @@ -0,0 +1,20 @@ +{ + "baseFeePerGas": "0xfdc7303", + "difficulty": "0x20000", + "extraData": "0x", + "gasLimit": "0x47e7c4", + "gasUsed": "0x5208", + "hash": "0x97f540a3577c0f645c5dada5da86f38350e8f847e71f21124f917835003e2607", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0xa", + "parentHash": "0xda97ed946e0d502fb898b0ac881bd44da3c7fee5eaf184431e1ec3d361dad17e", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": "0xbb62872e4023fa8a8b17b9cc37031f4817d9595779748d01cba408b495707a91", + "timestamp": "0x64", + "totalDifficulty": "0x1", + "transactionsRoot": "0xb0893d21a4a44dc26a962a6e91abae66df87fb61ac9c60e936aee89c76331445" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json new file mode 100644 index 000000000..da177f218 --- /dev/null +++ b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json @@ -0,0 +1,20 @@ +{ + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0x0", + "gasUsed": "0x0", + "hash": null, + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": null, + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": null, + "number": "0xb", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "stateRoot": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x2a", + "totalDifficulty": null, + "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37", + "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json b/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json new file mode 100644 index 000000000..c3a4a0dee --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-blob-tx.json @@ -0,0 +1,18 @@ +{ + "blobGasPrice": "0x1", + "blobGasUsed": "0x20000", + "blockHash": "0xe724dfd4349861f4dceef2bc4df086d0a3d88858214f6bee9fcf1bebd1edc2a6", + "blockNumber": "0x6", + "contractAddress": null, + "cumulativeGasUsed": "0x5208", + "effectiveGasPrice": "0x1b09d63b", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5208", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionHash": "0xb51ee3d2a89ba5d5623c73133c8d7a6ba9fb41194c17f4302c21b30994a1180f", + "transactionIndex": "0x0", + "type": "0x3" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json new file mode 100644 index 000000000..ad6d6152e --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-tx.json @@ -0,0 +1,16 @@ +{ + "blockHash": "0x1e7dcf3abe8bf05d32367a5dc387caa32578b15871bf8b3cbeedf2d8d530f844", + "blockNumber": "0x2", + "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592", + "cumulativeGasUsed": "0xcf50", + "effectiveGasPrice": "0x2db16291", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0xcf50", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": null, + "transactionHash": "0x340e58cda5086495010b571fe25067fecc9954dc4ee3cedece00691fa3f5904a", + "transactionIndex": "0x0", + "type": "0x0" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json new file mode 100644 index 000000000..b3362260a --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-create-contract-with-access-list.json @@ -0,0 +1,16 @@ +{ + "blockHash": 
"0x3fadc5bc916018a326732be829a2565b3acb960a8406f0f151a5e1fa971ea7dd", + "blockNumber": "0x5", + "contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18", + "cumulativeGasUsed": "0xe01c", + "effectiveGasPrice": "0x1ecb3fb4", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0xe01c", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": null, + "transactionHash": "0xb5a1148819cfdfff9bfe70035524fec940eb735d89b76960b97751d01ae2a9f2", + "transactionIndex": "0x0", + "type": "0x1" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json b/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json new file mode 100644 index 000000000..cc0be1809 --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-dynamic-tx-with-logs.json @@ -0,0 +1,16 @@ +{ + "blockHash": "0xffa737e6ce9a9162ffd411dd06169114b3ed5ee9fc1474a2625c92548e4455e0", + "blockNumber": "0x4", + "contractAddress": null, + "cumulativeGasUsed": "0x538d", + "effectiveGasPrice": "0x2325c42f", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x538d", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x0", + "to": "0x0000000000000000000000000000000000031ec7", + "transactionHash": "0xdcde2574628c9d7dff22b9afa19f235959a924ceec65a9df903a517ae91f5c84", + "transactionIndex": "0x0", + "type": "0x2" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json b/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json new file mode 100644 index 000000000..d3b6ef1c9 --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-normal-transfer-tx.json @@ -0,0 +1,16 @@ +{ + "blockHash": "0xa8a067b3cb3b9ddc6cfb8317bfd08b266fcf9994fc870c1f7ed394acecfadf39", + "blockNumber": "0x1", + "contractAddress": null, + "cumulativeGasUsed": "0x5208", + "effectiveGasPrice": "0x342770c0", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5208", + "logs": [], + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", + "transactionHash": "0x644a31c354391520d00e95b9affbbb010fc79ac268144ab8e28207f4cf51097e", + "transactionIndex": "0x0", + "type": "0x0" +} \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-txhash-empty.json b/internal/ethapi/testdata/eth_getTransactionReceipt-txhash-empty.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-txhash-empty.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-txhash-notfound.json b/internal/ethapi/testdata/eth_getTransactionReceipt-txhash-notfound.json new file mode 100644 index 000000000..ec747fa47 --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-txhash-notfound.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json b/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json new file mode 100644 index 000000000..45a4f6d67 --- /dev/null +++ b/internal/ethapi/testdata/eth_getTransactionReceipt-with-logs.json @@ -0,0 +1,32 @@ +{ + "blockHash": "0x173dcd9d22ce71929cd17e84ea88702a0f84d6244c6898d2a4f48722e494fe9c", + "blockNumber": "0x3", + "contractAddress": null, + "cumulativeGasUsed": "0x5e28", + "effectiveGasPrice": "0x281c2585", + "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", + "gasUsed": "0x5e28", + "logs": [ + { + "address": "0x0000000000000000000000000000000000031ec7", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000000d", + "blockNumber": "0x3", + "transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287", + "transactionIndex": "0x0", + "blockHash": "0x173dcd9d22ce71929cd17e84ea88702a0f84d6244c6898d2a4f48722e494fe9c", + "logIndex": "0x0", + "removed": false + } + ], + "logsBloom": "0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000", + "status": "0x1", + "to": "0x0000000000000000000000000000000000031ec7", + "transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287", + "transactionIndex": "0x0", + "type": "0x0" +} \ No newline at end of file diff --git a/internal/ethapi/transaction_args.go 
b/internal/ethapi/transaction_args.go index 6e2690e09..e4cf81a3f 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -111,7 +111,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { AccessList: args.AccessList, } pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) + estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap()) if err != nil { return err } diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 0868f8762..9161d5e68 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -325,10 +325,10 @@ func (b *backendMock) GetPoolNonce(ctx context.Context, addr common.Address) (ui return 0, nil } func (b *backendMock) Stats() (pending int, queued int) { return 0, 0 } -func (b *backendMock) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { +func (b *backendMock) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { return nil, nil } -func (b *backendMock) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { +func (b *backendMock) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { return nil, nil } func (b *backendMock) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription { return nil } diff --git a/internal/flags/categories.go b/internal/flags/categories.go index c2db6c6c1..487684d98 100644 --- a/internal/flags/categories.go +++ b/internal/flags/categories.go @@ -22,8 +22,9 @@ const ( EthCategory = "ETHEREUM" LightCategory = "LIGHT CLIENT" DevCategory = "DEVELOPER CHAIN" - EthashCategory = "ETHASH" - TxPoolCategory = "TRANSACTION POOL" + StateCategory = "STATE HISTORY MANAGEMENT" + TxPoolCategory = "TRANSACTION POOL (EVM)" + BlobPoolCategory = "TRANSACTION POOL (BLOB)" PerfCategory = "PERFORMANCE TUNING" AccountCategory = "ACCOUNT" APICategory = "API AND CONSOLE" diff --git a/internal/jsre/deps/web3.js b/internal/jsre/deps/web3.js index a291218ec..7a09fddab 100644 --- a/internal/jsre/deps/web3.js +++ b/internal/jsre/deps/web3.js @@ -518,7 +518,7 @@ var f = require('./formatters'); var SolidityType = require('./type'); /** - * SolidityTypeAddress is a prootype that represents address type + * SolidityTypeAddress is a prototype that represents address type * It matches: * address * address[] @@ -546,7 +546,7 @@ var f = require('./formatters'); var SolidityType = require('./type'); /** - * SolidityTypeBool is a prootype that represents bool type + * SolidityTypeBool is a prototype that represents bool type * It matches: * bool * bool[] @@ -1146,7 +1146,7 @@ var f = require('./formatters'); var SolidityType = require('./type'); /** - * SolidityTypeInt is a prootype that represents int type + * SolidityTypeInt is a prototype that represents int type * It matches: * int * int[] @@ -1334,7 +1334,7 @@ var f = require('./formatters'); var SolidityType = require('./type'); /** - * SolidityTypeReal is a prootype that represents real type + * SolidityTypeReal is a prototype that represents real type * It matches: * real * real[] @@ -1647,7 +1647,7 @@ var f = require('./formatters'); var SolidityType = require('./type'); /** - * SolidityTypeUInt is a prootype that represents uint type + * SolidityTypeUInt is a prototype that represents uint 
type * It matches: * uint * uint[] @@ -1681,7 +1681,7 @@ var f = require('./formatters'); var SolidityType = require('./type'); /** - * SolidityTypeUReal is a prootype that represents ureal type + * SolidityTypeUReal is a prototype that represents ureal type * It matches: * ureal * ureal[] @@ -2307,7 +2307,7 @@ var toChecksumAddress = function (address) { }; /** - * Transforms given string to valid 20 bytes-length addres with 0x prefix + * Transforms given string to valid 20 bytes-length address with 0x prefix * * @method toAddress * @param {String} address diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index a2fc3b5a9..b86b5909d 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -30,6 +30,7 @@ var Modules = map[string]string{ "txpool": TxpoolJs, "les": LESJs, "vflux": VfluxJs, + "dev": DevJs, } const CliqueJs = ` @@ -495,6 +496,11 @@ web3._extend({ call: 'debug_setTrieFlushInterval', params: 1 }), + new web3._extend.Method({ + name: 'getTrieFlushInterval', + call: 'debug_getTrieFlushInterval', + params: 0 + }), ], properties: [] }); @@ -530,8 +536,8 @@ web3._extend({ new web3._extend.Method({ name: 'estimateGas', call: 'eth_estimateGas', - params: 2, - inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputBlockNumberFormatter], + params: 3, + inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputBlockNumberFormatter, null], outputFormatter: web3._extend.utils.toDecimal }), new web3._extend.Method({ @@ -611,6 +617,11 @@ web3._extend({ params: 4, inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputDefaultBlockNumberFormatter, null, null], }), + new web3._extend.Method({ + name: 'getBlockReceipts', + call: 'eth_getBlockReceipts', + params: 1, + }), ], properties: [ new web3._extend.Property({ @@ -641,8 +652,6 @@ web3._extend({ new web3._extend.Method({ name: 'start', call: 'miner_start', - params: 1, - inputFormatter: [null] }), new web3._extend.Method({ name: 'stop', @@ -883,3 +892,22 @@ web3._extend({ ] }); ` + +const DevJs = ` +web3._extend({ + property: 'dev', + methods: + [ + new web3._extend.Method({ + name: 'addWithdrawal', + call: 'dev_addWithdrawal', + params: 1 + }), + new web3._extend.Method({ + name: 'setFeeRecipient', + call: 'dev_setFeeRecipient', + params: 1 + }), + ], +}); +` diff --git a/les/api_backend.go b/les/api_backend.go index 97665757b..3e9dbadce 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -57,7 +57,6 @@ func (b *LesApiBackend) CurrentBlock() *types.Header { } func (b *LesApiBackend) SetHead(number uint64) { - b.eth.handler.downloader.Cancel() b.eth.blockchain.SetHead(number) } @@ -224,11 +223,11 @@ func (b *LesApiBackend) Stats() (pending int, queued int) { return b.eth.txPool.Stats(), 0 } -func (b *LesApiBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { +func (b *LesApiBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { return b.eth.txPool.Content() } -func (b *LesApiBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { +func (b *LesApiBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { return b.eth.txPool.ContentFrom(addr) } @@ -264,7 +263,7 @@ func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven } func (b *LesApiBackend) SyncProgress() ethereum.SyncProgress { - return 
b.eth.Downloader().Progress() + return ethereum.SyncProgress{} } func (b *LesApiBackend) ProtocolVersion() int { diff --git a/les/api_test.go b/les/api_test.go index ac895362a..484c95504 100644 --- a/les/api_test.go +++ b/les/api_test.go @@ -31,9 +31,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/eth" - ethdownloader "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/les/downloader" "github.com/ethereum/go-ethereum/les/flowcontrol" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -148,7 +147,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { var wg sync.WaitGroup stop := make(chan struct{}) - reqCount := make([]uint64, len(clientRpcClients)) + reqCount := make([]atomic.Uint64, len(clientRpcClients)) // Send light request like crazy. for i, c := range clientRpcClients { @@ -158,7 +157,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { defer wg.Done() queue := make(chan struct{}, 100) - reqCount[i] = 0 + reqCount[i].Store(0) for { select { case queue <- struct{}{}: @@ -174,8 +173,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { wg.Done() <-queue if ok { - count := atomic.AddUint64(&reqCount[i], 1) - if count%10000 == 0 { + if reqCount[i].Add(1)%10000 == 0 { freezeClient(ctx, t, serverRpcClient, clients[i].ID()) } } @@ -193,7 +191,7 @@ func testCapacityAPI(t *testing.T, clientCount int) { processedSince := func(start []uint64) []uint64 { res := make([]uint64, len(reqCount)) for i := range reqCount { - res[i] = atomic.LoadUint64(&reqCount[i]) + res[i] = reqCount[i].Load() if start != nil { res[i] -= start[i] } @@ -293,8 +291,8 @@ func testCapacityAPI(t *testing.T, clientCount int) { close(stop) wg.Wait() - for i, count := range reqCount { - t.Log("client", i, "processed", count) + for i := range reqCount { + t.Log("client", i, "processed", reqCount[i].Load()) } return true }) { @@ -493,13 +491,13 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir [] func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { config := ethconfig.Defaults - config.SyncMode = (ethdownloader.SyncMode)(downloader.LightSync) + config.SyncMode = downloader.LightSync return New(stack, &config) } func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { config := ethconfig.Defaults - config.SyncMode = (ethdownloader.SyncMode)(downloader.FullSync) + config.SyncMode = downloader.FullSync config.LightServ = testServerCapacity config.LightPeers = testMaxClients ethereum, err := eth.New(stack, &config) diff --git a/les/benchmark.go b/les/benchmark.go index c63a2564d..ab9351834 100644 --- a/les/benchmark.go +++ b/les/benchmark.go @@ -117,7 +117,7 @@ func (b *benchmarkProofsOrCode) request(peer *serverPeer, index int) error { key := make([]byte, 32) crand.Read(key) if b.code { - return peer.requestCode(0, []CodeReq{{BHash: b.headHash, AccKey: key}}) + return peer.requestCode(0, []CodeReq{{BHash: b.headHash, AccountAddress: key}}) } return peer.requestProofs(0, []ProofReq{{BHash: b.headHash, Key: key}}) } diff --git a/les/catalyst/api.go b/les/catalyst/api.go deleted file mode 100644 index 30b41101d..000000000 --- a/les/catalyst/api.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the 
go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Package catalyst implements the temporary eth1/eth2 RPC integration. -package catalyst - -import ( - "errors" - "fmt" - - "github.com/ethereum/go-ethereum/beacon/engine" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/les" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/rpc" -) - -// Register adds catalyst APIs to the light client. -func Register(stack *node.Node, backend *les.LightEthereum) error { - log.Warn("Catalyst mode enabled", "protocol", "les") - stack.RegisterAPIs([]rpc.API{ - { - Namespace: "engine", - Service: NewConsensusAPI(backend), - Authenticated: true, - }, - }) - return nil -} - -type ConsensusAPI struct { - les *les.LightEthereum -} - -// NewConsensusAPI creates a new consensus api for the given backend. -// The underlying blockchain needs to have a valid terminal total difficulty set. -func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI { - if les.BlockChain().Config().TerminalTotalDifficulty == nil { - log.Warn("Catalyst started without valid total difficulty") - } - return &ConsensusAPI{les: les} -} - -// ForkchoiceUpdatedV1 has several responsibilities: -// -// We try to set our blockchain to the headBlock. -// -// If the method is called with an empty head block: we return success, which can be used -// to check if the catalyst mode is enabled. -// -// If the total difficulty was not reached: we return INVALID. -// -// If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db, -// if not we start a sync. -// -// If there are payloadAttributes: we return an error since block creation is not -// supported in les mode. -func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { - if heads.HeadBlockHash == (common.Hash{}) { - log.Warn("Forkchoice requested update to zero hash") - return engine.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this?
- } - if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil { - if header := api.les.BlockChain().GetHeaderByHash(heads.HeadBlockHash); header == nil { - // TODO (MariusVanDerWijden) trigger sync - return engine.STATUS_SYNCING, nil - } - return engine.STATUS_INVALID, err - } - // If the finalized block is set, check if it is in our blockchain - if heads.FinalizedBlockHash != (common.Hash{}) { - if header := api.les.BlockChain().GetHeaderByHash(heads.FinalizedBlockHash); header == nil { - // TODO (MariusVanDerWijden) trigger sync - return engine.STATUS_SYNCING, nil - } - } - // SetHead - if err := api.setCanonical(heads.HeadBlockHash); err != nil { - return engine.STATUS_INVALID, err - } - if payloadAttributes != nil { - return engine.STATUS_INVALID, errors.New("not supported") - } - return api.validForkChoiceResponse(), nil -} - -// GetPayloadV1 returns a cached payload by id. It's not supported in les mode. -func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.ExecutableData, error) { - return nil, engine.GenericServerError.With(errors.New("not supported in light client mode")) -} - -// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. -func (api *ConsensusAPI) ExecutePayloadV1(params engine.ExecutableData) (engine.PayloadStatusV1, error) { - block, err := engine.ExecutableDataToBlock(params) - if err != nil { - return api.invalid(), err - } - if !api.les.BlockChain().HasHeader(block.ParentHash(), block.NumberU64()-1) { - /* - TODO (MariusVanDerWijden) reenable once sync is merged - if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil { - return SYNCING, err - } - */ - // TODO (MariusVanDerWijden) we should return nil here not empty hash - return engine.PayloadStatusV1{Status: engine.SYNCING, LatestValidHash: nil}, nil - } - parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash) - if parent == nil { - return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash) - } - td := api.les.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1) - ttd := api.les.BlockChain().Config().TerminalTotalDifficulty - if td.Cmp(ttd) < 0 { - return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd) - } - if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil { - return api.invalid(), err - } - if merger := api.les.Merger(); !merger.TDDReached() { - merger.ReachTTD() - } - hash := block.Hash() - return engine.PayloadStatusV1{Status: engine.VALID, LatestValidHash: &hash}, nil -} - -func (api *ConsensusAPI) validForkChoiceResponse() engine.ForkChoiceResponse { - currentHash := api.les.BlockChain().CurrentHeader().Hash() - return engine.ForkChoiceResponse{ - PayloadStatus: engine.PayloadStatusV1{Status: engine.VALID, LatestValidHash: &currentHash}, - } -} - -// invalid returns a response "INVALID" with the latest valid hash set to the current head.
-func (api *ConsensusAPI) invalid() engine.PayloadStatusV1 { - currentHash := api.les.BlockChain().CurrentHeader().Hash() - return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: &currentHash} -} - -func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error { - // shortcut if we entered PoS already - if api.les.Merger().PoSFinalized() { - return nil - } - // make sure the parent has enough terminal total difficulty - header := api.les.BlockChain().GetHeaderByHash(head) - if header == nil { - return errors.New("unknown header") - } - td := api.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64()) - if td != nil && td.Cmp(api.les.BlockChain().Config().TerminalTotalDifficulty) < 0 { - return errors.New("invalid ttd") - } - return nil -} - -// setCanonical is called to perform a force choice. -func (api *ConsensusAPI) setCanonical(newHead common.Hash) error { - log.Info("Setting head", "head", newHead) - - headHeader := api.les.BlockChain().CurrentHeader() - if headHeader.Hash() == newHead { - return nil - } - newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead) - if newHeadHeader == nil { - return errors.New("unknown header") - } - if err := api.les.BlockChain().SetCanonical(newHeadHeader); err != nil { - return err - } - // Trigger the transition if it's the first `NewHead` event. - if merger := api.les.Merger(); !merger.PoSFinalized() { - merger.FinalizePoS() - } - return nil -} - -// ExchangeTransitionConfigurationV1 checks the given configuration against -// the configuration of the node. -func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config engine.TransitionConfigurationV1) (*engine.TransitionConfigurationV1, error) { - log.Trace("Engine API request received", "method", "ExchangeTransitionConfiguration", "ttd", config.TerminalTotalDifficulty) - if config.TerminalTotalDifficulty == nil { - return nil, errors.New("invalid terminal total difficulty") - } - - ttd := api.les.BlockChain().Config().TerminalTotalDifficulty - if ttd == nil || ttd.Cmp(config.TerminalTotalDifficulty.ToInt()) != 0 { - log.Warn("Invalid TTD configured", "geth", ttd, "beacon", config.TerminalTotalDifficulty) - return nil, fmt.Errorf("invalid ttd: execution %v consensus %v", ttd, config.TerminalTotalDifficulty) - } - - if config.TerminalBlockHash != (common.Hash{}) { - if hash := api.les.BlockChain().GetCanonicalHash(uint64(config.TerminalBlockNumber)); hash == config.TerminalBlockHash { - return &engine.TransitionConfigurationV1{ - TerminalTotalDifficulty: (*hexutil.Big)(ttd), - TerminalBlockHash: config.TerminalBlockHash, - TerminalBlockNumber: config.TerminalBlockNumber, - }, nil - } - return nil, errors.New("invalid terminal block hash") - } - - return &engine.TransitionConfigurationV1{TerminalTotalDifficulty: (*hexutil.Big)(ttd)}, nil -} diff --git a/les/catalyst/api_test.go b/les/catalyst/api_test.go deleted file mode 100644 index 2405499d3..000000000 --- a/les/catalyst/api_test.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version.
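For readers skimming this deletion: the control flow that the removed ForkchoiceUpdatedV1 implemented boils down to a short decision ladder. Below is a condensed sketch using a hypothetical chain interface rather than the deleted les types; it is illustrative only, not the removed implementation.

package sketch

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

// chain is an assumed stand-in for the light chain used above.
type chain interface {
	HasHeader(hash common.Hash) bool  // do we know this header at all?
	ReachedTTD(hash common.Hash) bool // has it passed terminal total difficulty?
	SetHead(hash common.Hash) error   // make it the canonical head
}

// forkchoice restates the deleted ladder: empty head -> INVALID, unknown
// head -> SYNCING, pre-TTD head -> INVALID, unknown finalized block ->
// SYNCING, payload attributes -> unsupported in les mode.
func forkchoice(c chain, head, finalized common.Hash, wantPayload bool) (string, error) {
	if head == (common.Hash{}) {
		return "INVALID", nil
	}
	if !c.HasHeader(head) {
		return "SYNCING", nil
	}
	if !c.ReachedTTD(head) {
		return "INVALID", errors.New("invalid ttd")
	}
	if finalized != (common.Hash{}) && !c.HasHeader(finalized) {
		return "SYNCING", nil
	}
	if err := c.SetHead(head); err != nil {
		return "INVALID", err
	}
	if wantPayload {
		return "INVALID", errors.New("block creation not supported in les mode")
	}
	return "VALID", nil
}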
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package catalyst - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/beacon/engine" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/les" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" -) - -var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - - // testAddr is the Ethereum address of the tester account. - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) - - testBalance = big.NewInt(2e18) -) - -func generatePreMergeChain(pre, post int) (*core.Genesis, []*types.Header, []*types.Block, []*types.Header, []*types.Block) { - config := *params.AllEthashProtocolChanges - genesis := &core.Genesis{ - Config: &config, - Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}}, - ExtraData: []byte("test genesis"), - Timestamp: 9000, - BaseFee: big.NewInt(params.InitialBaseFee), - } - // Pre-merge blocks - db, preBLocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), pre, nil) - totalDifficulty := new(big.Int).Set(params.GenesisDifficulty) - - var preHeaders []*types.Header - for _, b := range preBLocks { - totalDifficulty.Add(totalDifficulty, b.Difficulty()) - preHeaders = append(preHeaders, b.Header()) - } - config.TerminalTotalDifficulty = totalDifficulty - // Post-merge blocks - postBlocks, _ := core.GenerateChain(genesis.Config, - preBLocks[len(preBLocks)-1], ethash.NewFaker(), db, post, - func(i int, b *core.BlockGen) { - b.SetPoS() - }) - - var postHeaders []*types.Header - for _, b := range postBlocks { - postHeaders = append(postHeaders, b.Header()) - } - - return genesis, preHeaders, preBLocks, postHeaders, postBlocks -} - -func TestSetHeadBeforeTotalDifficulty(t *testing.T) { - genesis, headers, blocks, _, _ := generatePreMergeChain(10, 0) - n, lesService := startLesService(t, genesis, headers) - defer n.Close() - - api := NewConsensusAPI(lesService) - fcState := engine.ForkchoiceStateV1{ - HeadBlockHash: blocks[5].Hash(), - SafeBlockHash: common.Hash{}, - FinalizedBlockHash: common.Hash{}, - } - if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil { - t.Errorf("fork choice updated before total terminal difficulty should fail") - } -} - -func TestExecutePayloadV1(t *testing.T) { - genesis, headers, _, _, postBlocks := generatePreMergeChain(10, 2) - n, lesService := startLesService(t, genesis, headers) - lesService.Merger().ReachTTD() - defer n.Close() - - api := NewConsensusAPI(lesService) - fcState := engine.ForkchoiceStateV1{ - HeadBlockHash: postBlocks[0].Hash(), - SafeBlockHash: common.Hash{}, - FinalizedBlockHash: common.Hash{}, - } - if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { - t.Errorf("Failed to update
head %v", err) - } - block := postBlocks[0] - - fakeBlock := types.NewBlock(&types.Header{ - ParentHash: block.ParentHash(), - UncleHash: crypto.Keccak256Hash(nil), - Coinbase: block.Coinbase(), - Root: block.Root(), - TxHash: crypto.Keccak256Hash(nil), - ReceiptHash: crypto.Keccak256Hash(nil), - Bloom: block.Bloom(), - Difficulty: big.NewInt(0), - Number: block.Number(), - GasLimit: block.GasLimit(), - GasUsed: block.GasUsed(), - Time: block.Time(), - Extra: block.Extra(), - MixDigest: block.MixDigest(), - Nonce: types.BlockNonce{}, - BaseFee: block.BaseFee(), - }, nil, nil, nil, trie.NewStackTrie(nil)) - - _, err := api.ExecutePayloadV1(engine.ExecutableData{ - ParentHash: fakeBlock.ParentHash(), - FeeRecipient: fakeBlock.Coinbase(), - StateRoot: fakeBlock.Root(), - ReceiptsRoot: fakeBlock.ReceiptHash(), - LogsBloom: fakeBlock.Bloom().Bytes(), - Random: fakeBlock.MixDigest(), - Number: fakeBlock.NumberU64(), - GasLimit: fakeBlock.GasLimit(), - GasUsed: fakeBlock.GasUsed(), - Timestamp: fakeBlock.Time(), - ExtraData: fakeBlock.Extra(), - BaseFeePerGas: fakeBlock.BaseFee(), - BlockHash: fakeBlock.Hash(), - Transactions: encodeTransactions(fakeBlock.Transactions()), - }) - if err != nil { - t.Errorf("Failed to execute payload %v", err) - } - headHeader := api.les.BlockChain().CurrentHeader() - if headHeader.Number.Uint64() != fakeBlock.NumberU64()-1 { - t.Fatal("Unexpected chain head update") - } - fcState = engine.ForkchoiceStateV1{ - HeadBlockHash: fakeBlock.Hash(), - SafeBlockHash: common.Hash{}, - FinalizedBlockHash: common.Hash{}, - } - if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { - t.Fatal("Failed to update head") - } - headHeader = api.les.BlockChain().CurrentHeader() - if headHeader.Number.Uint64() != fakeBlock.NumberU64() { - t.Fatal("Failed to update chain head") - } -} - -func TestEth2DeepReorg(t *testing.T) { - // TODO (MariusVanDerWijden) TestEth2DeepReorg is currently broken, because it tries to reorg - // before the totalTerminalDifficulty threshold - /* - genesis, preMergeBlocks := generatePreMergeChain(core.TriesInMemory * 2) - n, ethservice := startEthService(t, genesis, preMergeBlocks) - defer n.Close() - - var ( - api = NewConsensusAPI(ethservice, nil) - parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1] - head = ethservice.BlockChain().CurrentBlock().NumberU64() - ) - if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) { - t.Errorf("Block %d not pruned", parent.NumberU64()) - } - for i := 0; i < 10; i++ { - execData, err := api.assembleBlock(AssembleBlockParams{ - ParentHash: parent.Hash(), - Timestamp: parent.Time() + 5, - }) - if err != nil { - t.Fatalf("Failed to create the executable data %v", err) - } - block, err := ExecutableDataToBlock(ethservice.BlockChain().Config(), parent.Header(), *execData) - if err != nil { - t.Fatalf("Failed to convert executable data to block %v", err) - } - newResp, err := api.ExecutePayload(*execData) - if err != nil || newResp.Status != "VALID" { - t.Fatalf("Failed to insert block: %v", err) - } - if ethservice.BlockChain().CurrentBlock().NumberU64() != head { - t.Fatalf("Chain head shouldn't be updated") - } - if err := api.setCanonical(block.Hash()); err != nil { - t.Fatalf("Failed to set head: %v", err) - } - if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() { - t.Fatalf("Chain head should be updated") - } - parent, head = block, block.NumberU64() - } - */ -} - -// startEthService creates a full node instance for testing. 
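The deleted TestExecutePayloadV1 above hand-copies every header field into an engine.ExecutableData. The mapping is mechanical, so it may help to see it in one place; here is a sketch of a hypothetical helper (not part of the codebase) that performs the same flattening for any block, with field names taken verbatim from the test:

package sketch

import (
	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/core/types"
)

// toExecutableData flattens a block into the engine-API payload shape
// exercised by the deleted test above.
func toExecutableData(b *types.Block) engine.ExecutableData {
	enc := make([][]byte, len(b.Transactions()))
	for i, tx := range b.Transactions() {
		enc[i], _ = tx.MarshalBinary() // binary (typed or legacy RLP) encoding per tx
	}
	return engine.ExecutableData{
		ParentHash:    b.ParentHash(),
		FeeRecipient:  b.Coinbase(),
		StateRoot:     b.Root(),
		ReceiptsRoot:  b.ReceiptHash(),
		LogsBloom:     b.Bloom().Bytes(),
		Random:        b.MixDigest(),
		Number:        b.NumberU64(),
		GasLimit:      b.GasLimit(),
		GasUsed:       b.GasUsed(),
		Timestamp:     b.Time(),
		ExtraData:     b.Extra(),
		BaseFeePerGas: b.BaseFee(),
		BlockHash:     b.Hash(),
		Transactions:  enc,
	}
}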
-func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Header) (*node.Node, *les.LightEthereum) { - t.Helper() - - n, err := node.New(&node.Config{}) - if err != nil { - t.Fatal("can't create node:", err) - } - ethcfg := &ethconfig.Config{ - Genesis: genesis, - SyncMode: downloader.LightSync, - TrieDirtyCache: 256, - TrieCleanCache: 256, - LightPeers: 10, - } - lesService, err := les.New(n, ethcfg) - if err != nil { - t.Fatal("can't create eth service:", err) - } - if err := n.Start(); err != nil { - t.Fatal("can't start node:", err) - } - if _, err := lesService.BlockChain().InsertHeaderChain(headers); err != nil { - n.Close() - t.Fatal("can't import test headers:", err) - } - return n, lesService -} - -func encodeTransactions(txs []*types.Transaction) [][]byte { - var enc = make([][]byte, len(txs)) - for i, tx := range txs { - enc[i], _ = tx.MarshalBinary() - } - return enc -} diff --git a/les/client.go b/les/client.go index bd6667429..be5e9fd56 100644 --- a/les/client.go +++ b/les/client.go @@ -36,7 +36,6 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/shutdowncheck" - "github.com/ethereum/go-ethereum/les/downloader" "github.com/ethereum/go-ethereum/les/vflux" vfc "github.com/ethereum/go-ethereum/les/vflux/client" "github.com/ethereum/go-ethereum/light" @@ -64,7 +63,6 @@ type LightEthereum struct { blockchain *light.LightChain serverPool *vfc.ServerPool serverPoolIterator enode.Iterator - pruner *pruner merger *consensus.Merger bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests @@ -97,7 +95,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if config.OverrideCancun != nil { overrides.OverrideCancun = config.OverrideCancun } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, &overrides) + if config.OverrideVerkle != nil { + overrides.OverrideVerkle = config.OverrideVerkle + } + triedb := trie.NewDatabase(chainDb, trie.HashDefaults) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, triedb, config.Genesis, &overrides) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { return nil, genesisErr } @@ -143,7 +145,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if leth.udpEnabled { prenegQuery = leth.prenegQuery } - leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, config.UltraLightServers, requestList) + leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, nil, requestList) leth.serverPool.AddMetrics(suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge, sessionValueMeter, serverDialedMeter) leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool.GetTimeout) @@ -167,9 +169,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { leth.chtIndexer.Start(leth.blockchain) leth.bloomIndexer.Start(leth.blockchain) - // Start a light chain pruner to delete useless historical data. - leth.pruner = newPruner(chainDb, leth.chtIndexer, leth.bloomTrieIndexer) - - // Rewind the chain in case of an incompatible config upgrade.
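Note the substantive change buried in the les/client.go hunk above: trie.NewDatabase now takes an explicit configuration, and the light client selects the legacy hash-based node layout via trie.HashDefaults. A minimal sketch of the new call shape, assuming the trie API as it appears in this diff (the in-memory database is just for illustration):

package sketch

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func newHashTrieDB() *trie.Database {
	db := rawdb.NewMemoryDatabase()
	// trie.HashDefaults picks the hash-based scheme; the second argument
	// replaces the old single-argument trie.NewDatabase(db) form.
	return trie.NewDatabase(db, trie.HashDefaults)
}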
if compat, ok := genesisErr.(*params.ConfigCompatError); ok { log.Warn("Rewinding chain to upgrade configuration", "err", compat) @@ -188,7 +187,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { } leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams) - leth.handler = newClientHandler(config.UltraLightServers, config.UltraLightFraction, leth) + leth.handler = newClientHandler(leth) leth.netRPCService = ethapi.NewNetAPI(leth.p2pServer, leth.config.NetworkId) // Register the backend on the node @@ -266,12 +265,12 @@ func (s *LightEthereum) prenegQuery(n *enode.Node) int { type LightDummyAPI struct{} -// Etherbase is the address that mining rewards will be send to +// Etherbase is the address that mining rewards will be sent to func (s *LightDummyAPI) Etherbase() (common.Address, error) { return common.Address{}, errors.New("mining is not supported in light mode") } -// Coinbase is the address that mining rewards will be send to (alias for Etherbase) +// Coinbase is the address that mining rewards will be sent to (alias for Etherbase) func (s *LightDummyAPI) Coinbase() (common.Address, error) { return common.Address{}, errors.New("mining is not supported in light mode") } @@ -295,9 +294,6 @@ func (s *LightEthereum) APIs() []rpc.API { { Namespace: "eth", Service: &LightDummyAPI{}, - }, { - Namespace: "eth", - Service: downloader.NewDownloaderAPI(s.handler.downloader, s.eventMux), }, { Namespace: "net", Service: s.netRPCService, @@ -312,13 +308,12 @@ func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) { s.blockchain.ResetWithGenesisBlock(gb) } -func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain } -func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool } -func (s *LightEthereum) Engine() consensus.Engine { return s.engine } -func (s *LightEthereum) LesVersion() int { return int(ClientProtocolVersions[0]) } -func (s *LightEthereum) Downloader() *downloader.Downloader { return s.handler.downloader } -func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux } -func (s *LightEthereum) Merger() *consensus.Merger { return s.merger } +func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain } +func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool } +func (s *LightEthereum) Engine() consensus.Engine { return s.engine } +func (s *LightEthereum) LesVersion() int { return int(ClientProtocolVersions[0]) } +func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux } +func (s *LightEthereum) Merger() *consensus.Merger { return s.merger } // Protocols returns all the currently configured network protocols to start. func (s *LightEthereum) Protocols() []p2p.Protocol { @@ -351,7 +346,6 @@ func (s *LightEthereum) Start() error { // Start bloom request workers. 
s.wg.Add(bloomServiceThreads) s.startBloomHandlers(params.BloomBitsBlocksClient) - s.handler.start() return nil } @@ -371,7 +365,6 @@ func (s *LightEthereum) Stop() error { s.handler.stop() s.txPool.Stop() s.engine.Close() - s.pruner.close() s.eventMux.Stop() // Clean shutdown marker as the last thing before closing db s.shutdownTracker.Stop() diff --git a/les/client_handler.go b/les/client_handler.go index 6e7d88fc5..4cfeba08f 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -17,9 +17,6 @@ package les import ( - "context" - "math/big" - "math/rand" "sync" "time" @@ -27,68 +24,37 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/les/downloader" "github.com/ethereum/go-ethereum/light" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" ) // clientHandler is responsible for receiving and processing all incoming server // responses. type clientHandler struct { - ulc *ulc forkFilter forkid.Filter - fetcher *lightFetcher - downloader *downloader.Downloader backend *LightEthereum closeCh chan struct{} wg sync.WaitGroup // WaitGroup used to track all connected peers. - - // Hooks used in the testing - syncStart func(header *types.Header) // Hook called when the syncing is started - syncEnd func(header *types.Header) // Hook called when the syncing is done } -func newClientHandler(ulcServers []string, ulcFraction int, backend *LightEthereum) *clientHandler { +func newClientHandler(backend *LightEthereum) *clientHandler { handler := &clientHandler{ forkFilter: forkid.NewFilter(backend.blockchain), backend: backend, closeCh: make(chan struct{}), } - if ulcServers != nil { - ulc, err := newULC(ulcServers, ulcFraction) - if err != nil { - log.Error("Failed to initialize ultra light client") - } - handler.ulc = ulc - log.Info("Enable ultra light client mode") - } - handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise) - handler.downloader = downloader.New(0, backend.chainDb, backend.eventMux, nil, backend.blockchain, handler.removePeer) - handler.backend.peers.subscribe((*downloaderPeerNotify)(handler)) return handler } -func (h *clientHandler) start() { - h.fetcher.start() -} - func (h *clientHandler) stop() { close(h.closeCh) - h.downloader.Terminate() - h.fetcher.stop() h.wg.Wait() } // runPeer is the p2p protocol run function for the given version. 
func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error { - trusted := false - if h.ulc != nil { - trusted = h.ulc.trusted(p.ID()) - } - peer := newServerPeer(int(version), h.backend.config.NetworkId, trusted, p, newMeteredMsgWriter(rw, int(version))) + peer := newServerPeer(int(version), h.backend.config.NetworkId, false, p, newMeteredMsgWriter(rw, int(version))) defer peer.close() h.wg.Add(1) defer h.wg.Done() @@ -103,7 +69,7 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error { p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) // Execute the LES handshake - forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64(), h.backend.blockchain.CurrentHeader().Time) + forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.BlockChain().Genesis(), h.backend.blockchain.CurrentHeader().Number.Uint64(), h.backend.blockchain.CurrentHeader().Time) if err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil { p.Log().Debug("Light Ethereum handshake failed", "err", err) return err @@ -136,12 +102,6 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error { serverConnectionGauge.Update(int64(h.backend.peers.len())) }() - // Discard all the announces after the transition - // Also discarding initial signal to prevent syncing during testing. - if !(noInitAnnounce || h.backend.merger.TDDReached()) { - h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}) - } - // Mark the peer starts to be served. p.serving.Store(true) defer p.serving.Store(false) @@ -206,11 +166,6 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { // Update peer head information first and then notify the announcement p.updateHead(req.Hash, req.Number, req.Td) - - // Discard all the announces after the transition - if !h.backend.merger.TDDReached() { - h.fetcher.announce(p, &req) - } } case msg.Code == BlockHeadersMsg: p.Log().Trace("Received block header response message") @@ -221,28 +176,13 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { if err := msg.Decode(&resp); err != nil { return errResp(ErrDecode, "msg %v: %v", msg, err) } - headers := resp.Headers p.fcServer.ReceivedReply(resp.ReqID, resp.BV) p.answeredRequest(resp.ReqID) - // Filter out the explicitly requested header by the retriever - if h.backend.retriever.requested(resp.ReqID) { - deliverMsg = &Msg{ - MsgType: MsgBlockHeaders, - ReqID: resp.ReqID, - Obj: resp.Headers, - } - } else { - // Filter out any explicitly requested headers, deliver the rest to the downloader - filter := len(headers) == 1 - if filter { - headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers) - } - if len(headers) != 0 || !filter { - if err := h.downloader.DeliverHeaders(p.id, headers); err != nil { - log.Debug("Failed to deliver headers", "err", err) - } - } + deliverMsg = &Msg{ + MsgType: MsgBlockHeaders, + ReqID: resp.ReqID, + Obj: resp.Headers, } case msg.Code == BlockBodiesMsg: p.Log().Trace("Received block bodies response") @@ -366,117 +306,3 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { } return nil } - -func (h *clientHandler) removePeer(id string) { - h.backend.peers.unregister(id) -} - -type peerConnection struct { - handler *clientHandler - peer *serverPeer -} - -func (pc *peerConnection) Head() (common.Hash, *big.Int) { - return pc.peer.HeadAndTd() -} - -func (pc *peerConnection) 
RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { - rq := &distReq{ - getCost: func(dp distPeer) uint64 { - peer := dp.(*serverPeer) - return peer.getRequestCost(GetBlockHeadersMsg, amount) - }, - canSend: func(dp distPeer) bool { - return dp.(*serverPeer) == pc.peer - }, - request: func(dp distPeer) func() { - reqID := rand.Uint64() - peer := dp.(*serverPeer) - cost := peer.getRequestCost(GetBlockHeadersMsg, amount) - peer.fcServer.QueuedRequest(reqID, cost) - return func() { peer.requestHeadersByHash(reqID, origin, amount, skip, reverse) } - }, - } - _, ok := <-pc.handler.backend.reqDist.queue(rq) - if !ok { - return light.ErrNoPeers - } - return nil -} - -func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { - rq := &distReq{ - getCost: func(dp distPeer) uint64 { - peer := dp.(*serverPeer) - return peer.getRequestCost(GetBlockHeadersMsg, amount) - }, - canSend: func(dp distPeer) bool { - return dp.(*serverPeer) == pc.peer - }, - request: func(dp distPeer) func() { - reqID := rand.Uint64() - peer := dp.(*serverPeer) - cost := peer.getRequestCost(GetBlockHeadersMsg, amount) - peer.fcServer.QueuedRequest(reqID, cost) - return func() { peer.requestHeadersByNumber(reqID, origin, amount, skip, reverse) } - }, - } - _, ok := <-pc.handler.backend.reqDist.queue(rq) - if !ok { - return light.ErrNoPeers - } - return nil -} - -// RetrieveSingleHeaderByNumber requests a single header by the specified block -// number. This function will wait the response until it's timeout or delivered. -func (pc *peerConnection) RetrieveSingleHeaderByNumber(context context.Context, number uint64) (*types.Header, error) { - reqID := rand.Uint64() - rq := &distReq{ - getCost: func(dp distPeer) uint64 { - peer := dp.(*serverPeer) - return peer.getRequestCost(GetBlockHeadersMsg, 1) - }, - canSend: func(dp distPeer) bool { - return dp.(*serverPeer) == pc.peer - }, - request: func(dp distPeer) func() { - peer := dp.(*serverPeer) - cost := peer.getRequestCost(GetBlockHeadersMsg, 1) - peer.fcServer.QueuedRequest(reqID, cost) - return func() { peer.requestHeadersByNumber(reqID, number, 1, 0, false) } - }, - } - var header *types.Header - if err := pc.handler.backend.retriever.retrieve(context, reqID, rq, func(peer distPeer, msg *Msg) error { - if msg.MsgType != MsgBlockHeaders { - return errInvalidMessageType - } - headers := msg.Obj.([]*types.Header) - if len(headers) != 1 { - return errInvalidEntryCount - } - header = headers[0] - return nil - }, nil); err != nil { - return nil, err - } - return header, nil -} - -// downloaderPeerNotify implements peerSetNotify -type downloaderPeerNotify clientHandler - -func (d *downloaderPeerNotify) registerPeer(p *serverPeer) { - h := (*clientHandler)(d) - pc := &peerConnection{ - handler: h, - peer: p, - } - h.downloader.RegisterLightPeer(p.id, eth.ETH66, pc) -} - -func (d *downloaderPeerNotify) unregisterPeer(p *serverPeer) { - h := (*clientHandler)(d) - h.downloader.UnregisterPeer(p.id) -} diff --git a/les/commons.go b/les/commons.go index b35d6b51d..cb3fc430b 100644 --- a/les/commons.go +++ b/les/commons.go @@ -58,7 +58,7 @@ type lesCommons struct { // NodeInfo represents a short summary of the Ethereum sub-protocol metadata // known about the host peer. 
type NodeInfo struct { - Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, Rinkeby=4, Goerli=5) + Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, Goerli=5) Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules diff --git a/les/costtracker.go b/les/costtracker.go index 43e32a5b2..695d54e14 100644 --- a/les/costtracker.go +++ b/les/costtracker.go @@ -128,7 +128,7 @@ type costTracker struct { reqInfoCh chan reqInfo totalRechargeCh chan uint64 - stats map[uint64][]uint64 // Used for testing purpose. + stats map[uint64][]atomic.Uint64 // Used for testing purpose. // TestHooks testing bool // Disable real cost evaluation for testing purpose. @@ -152,9 +152,9 @@ func newCostTracker(db ethdb.Database, config *ethconfig.Config) (*costTracker, ct.outSizeFactor = utilTarget / float64(config.LightEgress) } if makeCostStats { - ct.stats = make(map[uint64][]uint64) + ct.stats = make(map[uint64][]atomic.Uint64) for code := range reqAvgTimeCost { - ct.stats[code] = make([]uint64, 10) + ct.stats[code] = make([]atomic.Uint64, 10) } } ct.gfLoop() @@ -423,7 +423,7 @@ func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) { l++ realCost >>= 1 } - atomic.AddUint64(&ct.stats[code][l], 1) + ct.stats[code][l].Add(1) } } @@ -454,7 +454,7 @@ func (ct *costTracker) printStats() { return } for code, arr := range ct.stats { - log.Info("Request cost statistics", "code", code, "1/16", arr[0], "1/8", arr[1], "1/4", arr[2], "1/2", arr[3], "1", arr[4], "2", arr[5], "4", arr[6], "8", arr[7], "16", arr[8], ">16", arr[9]) + log.Info("Request cost statistics", "code", code, "1/16", arr[0].Load(), "1/8", arr[1].Load(), "1/4", arr[2].Load(), "1/2", arr[3].Load(), "1", arr[4].Load(), "2", arr[5].Load(), "4", arr[6].Load(), "8", arr[7].Load(), "16", arr[8].Load(), ">16", arr[9].Load()) } } diff --git a/les/downloader/api.go b/les/downloader/api.go deleted file mode 100644 index 21200b676..000000000 --- a/les/downloader/api.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package downloader - -import ( - "context" - "sync" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/rpc" -) - -// DownloaderAPI provides an API which gives information about the current synchronisation status. -// It offers only methods that operates on data that can be available to anyone without security risks.
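The costtracker hunk above and the earlier les/api_test.go changes are the same Go 1.19+ migration: a plain integer slice updated through the atomic free functions becomes a slice of typed atomic.Uint64 values, trading &slice[i] plumbing for methods. A runnable before/after sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: raw uint64 slice, mutated via the free functions.
	old := make([]uint64, 4)
	atomic.AddUint64(&old[1], 1)

	// New style: typed atomics with methods. Note that a []atomic.Uint64
	// must not be copied by value once it is in use.
	counters := make([]atomic.Uint64, 4)
	counters[1].Add(1)

	fmt.Println(atomic.LoadUint64(&old[1]), counters[1].Load()) // 1 1
}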
-type DownloaderAPI struct { - d *Downloader - mux *event.TypeMux - installSyncSubscription chan chan interface{} - uninstallSyncSubscription chan *uninstallSyncSubscriptionRequest -} - -// NewDownloaderAPI create a new PublicDownloaderAPI. The API has an internal event loop that -// listens for events from the downloader through the global event mux. In case it receives one of -// these events it broadcasts it to all syncing subscriptions that are installed through the -// installSyncSubscription channel. -func NewDownloaderAPI(d *Downloader, m *event.TypeMux) *DownloaderAPI { - api := &DownloaderAPI{ - d: d, - mux: m, - installSyncSubscription: make(chan chan interface{}), - uninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest), - } - - go api.eventLoop() - - return api -} - -// eventLoop runs a loop until the event mux closes. It will install and uninstall new -// sync subscriptions and broadcasts sync status updates to the installed sync subscriptions. -func (api *DownloaderAPI) eventLoop() { - var ( - sub = api.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{}) - syncSubscriptions = make(map[chan interface{}]struct{}) - ) - - for { - select { - case i := <-api.installSyncSubscription: - syncSubscriptions[i] = struct{}{} - case u := <-api.uninstallSyncSubscription: - delete(syncSubscriptions, u.c) - close(u.uninstalled) - case event := <-sub.Chan(): - if event == nil { - return - } - - var notification interface{} - switch event.Data.(type) { - case StartEvent: - notification = &SyncingResult{ - Syncing: true, - Status: api.d.Progress(), - } - case DoneEvent, FailedEvent: - notification = false - } - // broadcast - for c := range syncSubscriptions { - c <- notification - } - } - } -} - -// Syncing provides information when this nodes starts synchronising with the Ethereum network and when it's finished. -func (api *DownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) { - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - - rpcSub := notifier.CreateSubscription() - - go func() { - statuses := make(chan interface{}) - sub := api.SubscribeSyncStatus(statuses) - - for { - select { - case status := <-statuses: - notifier.Notify(rpcSub.ID, status) - case <-rpcSub.Err(): - sub.Unsubscribe() - return - case <-notifier.Closed(): - sub.Unsubscribe() - return - } - } - }() - - return rpcSub, nil -} - -// SyncingResult provides information about the current synchronisation status for this node. -type SyncingResult struct { - Syncing bool `json:"syncing"` - Status ethereum.SyncProgress `json:"status"` -} - -// uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop. -type uninstallSyncSubscriptionRequest struct { - c chan interface{} - uninstalled chan interface{} -} - -// SyncStatusSubscription represents a syncing subscription. -type SyncStatusSubscription struct { - api *DownloaderAPI // register subscription in event loop of this api instance - c chan interface{} // channel where events are broadcasted to - unsubOnce sync.Once // make sure unsubscribe logic is executed once -} - -// Unsubscribe uninstalls the subscription from the DownloadAPI event loop. -// The status channel that was passed to subscribeSyncStatus isn't used anymore -// after this method returns. 
-func (s *SyncStatusSubscription) Unsubscribe() { - s.unsubOnce.Do(func() { - req := uninstallSyncSubscriptionRequest{s.c, make(chan interface{})} - s.api.uninstallSyncSubscription <- &req - - for { - select { - case <-s.c: - // drop new status events until uninstall confirmation - continue - case <-req.uninstalled: - return - } - } - }) -} - -// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates. -// The given channel must receive interface values, the result can either -func (api *DownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription { - api.installSyncSubscription <- status - return &SyncStatusSubscription{api: api, c: status} -} diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go deleted file mode 100644 index 5ffcbfa42..000000000 --- a/les/downloader/downloader.go +++ /dev/null @@ -1,1979 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Package downloader is a temporary package whilst working on the eth/66 blocking refactors. -// After that work is done, les needs to be refactored to use the new package, -// or alternatively use a stripped down version of it. Either way, we need to -// keep the changes scoped so duplicating temporarily seems the sanest.
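The DownloaderAPI deleted above is a textbook single-owner event loop: one goroutine owns the subscriber set, install/uninstall requests arrive over channels so no mutex is needed, and unsubscribers keep draining their channel until the loop confirms removal so the broadcaster can never block. The skeleton of that pattern, reduced to hypothetical minimal types (not the deleted code verbatim):

package sketch

// unsubReq mirrors the deleted uninstallSyncSubscriptionRequest.
type unsubReq struct {
	c    chan interface{}
	done chan interface{}
}

// eventLoop owns the subscriber set; all mutation happens on this
// goroutine, so no locking is required.
func eventLoop(install chan chan interface{}, uninstall chan unsubReq, events chan interface{}) {
	subs := make(map[chan interface{}]struct{})
	for {
		select {
		case c := <-install:
			subs[c] = struct{}{}
		case req := <-uninstall:
			delete(subs, req.c)
			close(req.done) // confirm removal to the unsubscriber
		case ev := <-events:
			for c := range subs {
				c <- ev // safe: every live subscriber keeps draining
			}
		}
	}
}

// unsubscribe mirrors the deleted Unsubscribe: drain pending events
// until the loop confirms removal, so eventLoop never blocks on us.
func unsubscribe(uninstall chan unsubReq, c chan interface{}) {
	req := unsubReq{c: c, done: make(chan interface{})}
	uninstall <- req
	for {
		select {
		case <-c:
			// drop events delivered before removal took effect
		case <-req.done:
			return
		}
	}
}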
-package downloader - -import ( - "errors" - "fmt" - "math/big" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state/snapshot" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/eth/protocols/snap" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/params" -) - -var ( - MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request - MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request - MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly - MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request - MaxStateFetch = 384 // Amount of node state values to allow fetching per request - - maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) - maxHeadersProcess = 2048 // Number of header download results to import at once into the chain - maxResultsProcess = 2048 // Number of content download results to import at once into the chain - fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) - - reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection - reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs - - fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected - fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download - fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync -) - -var ( - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer is unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errStallingPeer = errors.New("peer is stalling") - errUnsyncedPeer = errors.New("unsynced peer") - errNoPeers = errors.New("no peers to keep download active") - errTimeout = errors.New("timeout") - errEmptyHeaderSet = errors.New("empty header set by peer") - errPeersUnavailable = errors.New("no peers available or all tried for download") - errInvalidAncestor = errors.New("retrieved ancestor is invalid") - errInvalidChain = errors.New("retrieved hash chain is invalid") - errInvalidBody = errors.New("retrieved block body is invalid") - errInvalidReceipt = errors.New("retrieved receipt is invalid") - errCancelStateFetch = errors.New("state data download canceled (requested)") - errCancelContentProcessing = errors.New("content processing canceled (requested)") - errCanceled = errors.New("syncing canceled (requested)") - errNoSyncActive = errors.New("no sync active") - errTooOld = errors.New("peer's protocol version too old") - errNoAncestorFound = errors.New("no common ancestor found") -) - -type Downloader struct { - mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode - mux *event.TypeMux // Event multiplexer to announce sync operation events - - checkpoint uint64 //
Checkpoint block number to enforce head against (e.g. fast sync) - genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) - queue *queue // Scheduler for selecting the hashes to download - peers *peerSet // Set of active peers from which download can proceed - - stateDB ethdb.Database // Database to state sync into (and deduplicate via) - - // Statistics - syncStatsChainOrigin uint64 // Origin block number where syncing started - syncStatsChainHeight uint64 // Highest block number known when syncing started - syncStatsState stateSyncStats - syncStatsLock sync.RWMutex // Lock protecting the sync stats fields - - lightchain LightChain - blockchain BlockChain - - // Callbacks - dropPeer peerDropFn // Drops a peer for misbehaving - - // Status - synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing - synchronising int32 - notified int32 - committed int32 - ancientLimit uint64 // The maximum block number which can be regarded as ancient data. - - // Channels - headerCh chan dataPack // Channel receiving inbound block headers - bodyCh chan dataPack // Channel receiving inbound block bodies - receiptCh chan dataPack // Channel receiving inbound receipts - bodyWakeCh chan bool // Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // Channel to signal the receipt fetcher of new tasks - headerProcCh chan []*types.Header // Channel to feed the header processor new tasks - - // State sync - pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root - pivotLock sync.RWMutex // Lock protecting pivot header reads from updates - - snapSync bool // Whether to run state sync over the snap protocol - SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now - stateSyncStart chan *stateSync - trackStateReq chan *stateReq - stateCh chan dataPack // Channel receiving inbound node state data - - // Cancellation and termination - cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) - cancelCh chan struct{} // Channel to cancel mid-flight syncs - cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers - cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. - - quitCh chan struct{} // Quit channel to signal termination - quitLock sync.Mutex // Lock to prevent double closes - - // Testing hooks - syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run - bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch - receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch - chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) -} - -// LightChain encapsulates functions required to synchronise a light chain. -type LightChain interface { - // HasHeader verifies a header's presence in the local chain. - HasHeader(common.Hash, uint64) bool - - // GetHeaderByHash retrieves a header from the local chain. - GetHeaderByHash(common.Hash) *types.Header - - // CurrentHeader retrieves the head header from the local chain. - CurrentHeader() *types.Header - - // GetTd returns the total difficulty of a local block. - GetTd(common.Hash, uint64) *big.Int - - // InsertHeaderChain inserts a batch of headers into the local chain. - InsertHeaderChain([]*types.Header) (int, error) - - // SetHead rewinds the local chain to a new head.
- SetHead(uint64) error -} - -// BlockChain encapsulates functions required to sync a (full or fast) blockchain. -type BlockChain interface { - LightChain - - // HasBlock verifies a block's presence in the local chain. - HasBlock(common.Hash, uint64) bool - - // HasFastBlock verifies a fast block's presence in the local chain. - HasFastBlock(common.Hash, uint64) bool - - // GetBlockByHash retrieves a block from the local chain. - GetBlockByHash(common.Hash) *types.Block - - // CurrentBlock retrieves the head block from the local chain. - CurrentBlock() *types.Block - - // CurrentFastBlock retrieves the head fast block from the local chain. - CurrentFastBlock() *types.Block - - // FastSyncCommitHead directly commits the head block to a certain entity. - FastSyncCommitHead(common.Hash) error - - // InsertChain inserts a batch of blocks into the local chain. - InsertChain(types.Blocks) (int, error) - - // InsertReceiptChain inserts a batch of receipts into the local chain. - InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error) - - // Snapshots returns the blockchain snapshot tree so it can be paused during sync. - Snapshots() *snapshot.Tree -} - -// New creates a new downloader to fetch hashes and blocks from remote peers. -func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { - if lightchain == nil { - lightchain = chain - } - dl := &Downloader{ - stateDB: stateDb, - mux: mux, - checkpoint: checkpoint, - queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), - peers: newPeerSet(), - blockchain: chain, - lightchain: lightchain, - dropPeer: dropPeer, - headerCh: make(chan dataPack, 1), - bodyCh: make(chan dataPack, 1), - receiptCh: make(chan dataPack, 1), - bodyWakeCh: make(chan bool, 1), - receiptWakeCh: make(chan bool, 1), - headerProcCh: make(chan []*types.Header, 1), - quitCh: make(chan struct{}), - stateCh: make(chan dataPack), - SnapSyncer: snap.NewSyncer(stateDb, rawdb.HashScheme), - stateSyncStart: make(chan *stateSync), - //syncStatsState: stateSyncStats{ - // processed: rawdb.ReadFastTrieProgress(stateDb), - //}, - trackStateReq: make(chan *stateReq), - } - go dl.stateFetcher() - return dl -} - -// Progress retrieves the synchronisation boundaries, specifically the origin -// block where synchronisation started (may have failed/suspended); the block -// or header the sync is currently at; and the latest known block which the sync targets. -// -// In addition, during the state download phase of fast synchronisation the number -// of processed and the total number of known states are also returned. Otherwise -// these are zero.
-func (d *Downloader) Progress() ethereum.SyncProgress { - // Lock the current stats and return the progress - d.syncStatsLock.RLock() - defer d.syncStatsLock.RUnlock() - - current := uint64(0) - mode := d.getMode() - switch { - case d.blockchain != nil && mode == FullSync: - current = d.blockchain.CurrentBlock().NumberU64() - case d.blockchain != nil && mode == FastSync: - current = d.blockchain.CurrentFastBlock().NumberU64() - case d.lightchain != nil: - current = d.lightchain.CurrentHeader().Number.Uint64() - default: - log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) - } - return ethereum.SyncProgress{ - StartingBlock: d.syncStatsChainOrigin, - CurrentBlock: current, - HighestBlock: d.syncStatsChainHeight, - //PulledStates: d.syncStatsState.processed, - //KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, - } -} - -// Synchronising returns whether the downloader is currently retrieving blocks. -func (d *Downloader) Synchronising() bool { - return atomic.LoadInt32(&d.synchronising) > 0 -} - -// RegisterPeer injects a new download peer into the set of block sources to be -// used for fetching hashes and blocks from. -func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { - var logger log.Logger - if len(id) < 16 { - // Tests use short IDs, don't choke on them - logger = log.New("peer", id) - } else { - logger = log.New("peer", id[:8]) - } - logger.Trace("Registering sync peer") - if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { - logger.Error("Failed to register sync peer", "err", err) - return err - } - return nil -} - -// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. -func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { - return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) -} - -// UnregisterPeer removes a peer from the known list, preventing any action from -// the specified peer. An effort is also made to return any pending fetches into -// the queue. -func (d *Downloader) UnregisterPeer(id string) error { - // Unregister the peer from the active peer set and revoke any fetch tasks - var logger log.Logger - if len(id) < 16 { - // Tests use short IDs, don't choke on them - logger = log.New("peer", id) - } else { - logger = log.New("peer", id[:8]) - } - logger.Trace("Unregistering sync peer") - if err := d.peers.Unregister(id); err != nil { - logger.Error("Failed to unregister sync peer", "err", err) - return err - } - d.queue.Revoke(id) - - return nil -} - -// Synchronise tries to sync up our local block chain with a remote peer, both -// adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { - err := d.synchronise(id, head, td, mode) - - switch err { - case nil, errBusy, errCanceled: - return err - } - if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || - errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || - errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { - log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g.
compaction hits at the wrong time, and can be ignored - log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) - } else { - d.dropPeer(id) - } - return err - } - log.Warn("Synchronisation failed, retrying", "err", err) - return err -} - -// synchronise will select the peer and use it for synchronising. If an empty string is given -// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the -// checks fail, an error is returned. This method is synchronous. -func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { - // Mock out the synchronisation if testing - if d.synchroniseMock != nil { - return d.synchroniseMock(id, hash) - } - // Make sure only one goroutine is ever allowed past this point at once - if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { - return errBusy - } - defer atomic.StoreInt32(&d.synchronising, 0) - - // Post a user notification of the sync (only once per session) - if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { - log.Info("Block synchronisation started") - } - // If snap sync was requested, create the snap scheduler and switch to fast - // sync mode. Long term we could drop fast sync or merge the two together, - // but until snap becomes prevalent, we should support both. TODO(karalabe). - if mode == SnapSync { - if !d.snapSync { - // Snap sync uses the snapshot namespace to store potentially flakey data until - // sync completely heals and finishes. Pause snapshot maintenance in the mean - // time to prevent access. - if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests - snapshots.Disable() - } - log.Warn("Enabling snapshot sync prototype") - d.snapSync = true - } - mode = FastSync - } - // Reset the queue, peer set and wake channels to clean any internal leftover state - d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems) - d.peers.Reset() - - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { - select { - case <-ch: - default: - } - } - for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { - for empty := false; !empty; { - select { - case <-ch: - default: - empty = true - } - } - } - for empty := false; !empty; { - select { - case <-d.headerProcCh: - default: - empty = true - } - } - // Create cancel channel for aborting mid-flight and mark the master peer - d.cancelLock.Lock() - d.cancelCh = make(chan struct{}) - d.cancelPeer = id - d.cancelLock.Unlock() - - defer d.Cancel() // No matter what, we can't leave the cancel channel open - - // Atomically set the requested sync mode - atomic.StoreUint32(&d.mode, uint32(mode)) - - // Retrieve the origin peer and initiate the downloading process - p := d.peers.Peer(id) - if p == nil { - return errUnknownPeer - } - return d.syncWithPeer(p, hash, td) -} - -func (d *Downloader) getMode() SyncMode { - return SyncMode(atomic.LoadUint32(&d.mode)) -} - -// syncWithPeer starts a block synchronization based on the hash chain from the -// specified peer and head hash.
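The `synchronise` method above admits only one sync cycle at a time by flipping an atomic flag with compare-and-swap instead of holding a mutex for the whole run; concurrent callers fail fast with errBusy rather than queueing. A minimal sketch of just that guard, with illustrative names:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errBusy = errors.New("busy")

type syncer struct {
	synchronising int32 // 1 while a sync cycle is in flight
}

// run admits exactly one caller at a time: late arrivals get errBusy
// immediately instead of blocking behind the active cycle.
func (s *syncer) run(job func() error) error {
	if !atomic.CompareAndSwapInt32(&s.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&s.synchronising, 0)
	return job()
}

func main() {
	var s syncer
	fmt.Println(s.run(func() error { return nil })) // <nil>
	fmt.Println(s.run(func() error {
		return s.run(func() error { return nil }) // overlapping call: busy
	}))
}
```

The CAS-then-defer-store pair is what makes the guard panic-safe: the flag is released on any exit path, and the cheap failure mode lets callers simply retry on the next head announcement.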
-func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { - d.mux.Post(StartEvent{}) - defer func() { - // reset on error - if err != nil { - d.mux.Post(FailedEvent{err}) - } else { - latest := d.lightchain.CurrentHeader() - d.mux.Post(DoneEvent{latest}) - } - }() - if p.version < eth.ETH66 { - return fmt.Errorf("%w: advertised %d < required %d", errTooOld, p.version, eth.ETH66) - } - mode := d.getMode() - - log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) - defer func(start time.Time) { - log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) - }(time.Now()) - - // Look up the sync boundaries: the common ancestor and the target block - latest, pivot, err := d.fetchHead(p) - if err != nil { - return err - } - if mode == FastSync && pivot == nil { - // If no pivot block was returned, the head is below the min full block - // threshold (i.e. new chain). In that case we won't really fast sync - // anyway, but still need a valid pivot block to avoid some code hitting - // nil panics on an access. - pivot = d.blockchain.CurrentBlock().Header() - } - height := latest.Number.Uint64() - - origin, err := d.findAncestor(p, latest) - if err != nil { - return err - } - d.syncStatsLock.Lock() - if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { - d.syncStatsChainOrigin = origin - } - d.syncStatsChainHeight = height - d.syncStatsLock.Unlock() - - // Ensure our origin point is below any fast sync pivot point - if mode == FastSync { - if height <= uint64(fsMinFullBlocks) { - origin = 0 - } else { - pivotNumber := pivot.Number.Uint64() - if pivotNumber <= origin { - origin = pivotNumber - 1 - } - // Write out the pivot into the database so a rollback beyond it will - // reenable fast sync - rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) - } - } - d.committed = 1 - if mode == FastSync && pivot.Number.Uint64() != 0 { - d.committed = 0 - } - if mode == FastSync { - // Set the ancient data limitation. - // If we are running fast sync, all block data older than ancientLimit will be - // written to the ancient store. More recent data will be written to the active - // database and will wait for the freezer to migrate. - // - // If there is a checkpoint available, then calculate the ancientLimit through - // that. Otherwise calculate the ancient limit through the advertised height - // of the remote peer. - // - // The reason for picking checkpoint first is that a malicious peer can give us - // a fake (very high) height, forcing the ancient limit to also be very high. - // The peer would start to feed us valid blocks until head, so that all of - // the blocks might be written into the ancient store. A following mini-reorg - // could then cause issues. - if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 { - d.ancientLimit = d.checkpoint - } else if height > fullMaxForkAncestry+1 { - d.ancientLimit = height - fullMaxForkAncestry - 1 - } else { - d.ancientLimit = 0 - } - frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. - - // If a part of blockchain data has already been written into active store, - // disable the ancient style insertion explicitly.
- if origin >= frozen && frozen != 0 { - d.ancientLimit = 0 - log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1) - } else if d.ancientLimit > 0 { - log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit) - } - // Rewind the ancient store and blockchain if reorg happens. - if origin+1 < frozen { - if err := d.lightchain.SetHead(origin + 1); err != nil { - return err - } - } - } - // Initiate the sync using a concurrent header and content retrieval algorithm - d.queue.Prepare(origin+1, mode) - if d.syncInitHook != nil { - d.syncInitHook(origin, height) - } - fetchers := []func() error{ - func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved - func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync - func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync - func() error { return d.processHeaders(origin+1, td) }, - } - if mode == FastSync { - d.pivotLock.Lock() - d.pivotHeader = pivot - d.pivotLock.Unlock() - - fetchers = append(fetchers, func() error { return d.processFastSyncContent() }) - } else if mode == FullSync { - fetchers = append(fetchers, d.processFullSyncContent) - } - return d.spawnSync(fetchers) -} - -// spawnSync runs d.process and all given fetcher functions to completion in -// separate goroutines, returning the first error that appears. -func (d *Downloader) spawnSync(fetchers []func() error) error { - errc := make(chan error, len(fetchers)) - d.cancelWg.Add(len(fetchers)) - for _, fn := range fetchers { - fn := fn - go func() { defer d.cancelWg.Done(); errc <- fn() }() - } - // Wait for the first error, then terminate the others. - var err error - for i := 0; i < len(fetchers); i++ { - if i == len(fetchers)-1 { - // Close the queue when all fetchers have exited. - // This will cause the block processor to end when - // it has processed the queue. - d.queue.Close() - } - if err = <-errc; err != nil && err != errCanceled { - break - } - } - d.queue.Close() - d.Cancel() - return err -} - -// cancel aborts all of the operations and resets the queue. However, cancel does -// not wait for the running download goroutines to finish. This method should be -// used when cancelling the downloads from inside the downloader. -func (d *Downloader) cancel() { - // Close the current cancel channel - d.cancelLock.Lock() - defer d.cancelLock.Unlock() - - if d.cancelCh != nil { - select { - case <-d.cancelCh: - // Channel was already closed - default: - close(d.cancelCh) - } - } -} - -// Cancel aborts all of the operations and waits for all download goroutines to -// finish before returning. -func (d *Downloader) Cancel() { - d.cancel() - d.cancelWg.Wait() -} - -// Terminate interrupts the downloader, canceling all pending operations. -// The downloader cannot be reused after calling Terminate. -func (d *Downloader) Terminate() { - // Close the termination channel (make sure double close is allowed) - d.quitLock.Lock() - select { - case <-d.quitCh: - default: - close(d.quitCh) - } - d.quitLock.Unlock() - - // Cancel any pending download requests - d.Cancel() -} - -// fetchHead retrieves the head header and prior pivot block (if available) from -// a remote peer. 
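The ancient-limit selection in syncWithPeer above is self-contained arithmetic: prefer the trusted checkpoint so a malicious peer advertising an inflated height cannot force reorg-able blocks into the freezer, otherwise derive the limit from the advertised height. A sketch of just that decision; the helper name is hypothetical, and fullMaxForkAncestry mirrors params.FullImmutabilityThreshold (90000 at the time this code existed):

```go
package main

import "fmt"

const fullMaxForkAncestry uint64 = 90000 // params.FullImmutabilityThreshold

// ancientLimit picks the highest block number that may go straight into the
// ancient store: the trusted checkpoint wins over the peer-advertised height,
// so a bogus (huge) height cannot push reorg-able blocks into the freezer.
func ancientLimit(checkpoint, height uint64) uint64 {
	switch {
	case checkpoint != 0 && checkpoint > fullMaxForkAncestry+1:
		return checkpoint
	case height > fullMaxForkAncestry+1:
		return height - fullMaxForkAncestry - 1
	default:
		return 0 // young chain: nothing is immutable yet
	}
}

func main() {
	fmt.Println(ancientLimit(0, 18_000_000))          // height-derived: 17909999
	fmt.Println(ancientLimit(17_000_000, 18_000_000)) // checkpoint wins: 17000000
	fmt.Println(ancientLimit(0, 1_000))               // 0
}
```

As the surrounding code notes, the limit is additionally zeroed out when part of the chain already lives in the active store, so direct-ancient insertion never races an existing database.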
-func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { - p.log.Debug("Retrieving remote chain head") - mode := d.getMode() - - // Request the advertised remote head block and wait for the response - latest, _ := p.peer.Head() - fetch := 1 - if mode == FastSync { - fetch = 2 // head + pivot headers - } - go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true) - - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - for { - select { - case <-d.cancelCh: - return nil, nil, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer gave us at least one and at most the requested headers - headers := packet.(*headerPack).headers - if len(headers) == 0 || len(headers) > fetch { - return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) - } - // The first header needs to be the head, validate against the checkpoint - // and request. If only 1 header was returned, make sure there's no pivot - // or there was not one requested. - head := headers[0] - if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { - return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) - } - if len(headers) == 1 { - if mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) - } - p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash()) - return head, nil, nil - } - // At this point we have 2 headers in total and the first is the - // validated head of the chain. Check the pivot number and return. - pivot := headers[1] - if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) - } - return head, pivot, nil - - case <-timeout: - p.log.Debug("Waiting for head header timed out", "elapsed", ttl) - return nil, nil, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } - } -} - -// calculateRequestSpan calculates what headers to request from a peer when trying to determine the -// common ancestor. -// It returns parameters to be used for peer.RequestHeadersByNumber: -// -// from - starting block number -// count - number of headers to request -// skip - number of headers to skip -// -// and also returns 'max', the last block which is expected to be returned by the remote peers, -// given the (from,count,skip) -func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { - var ( - from int - count int - MaxCount = MaxHeaderFetch / 16 - ) - // requestHead is the highest block that we will ask for.
If requestHead is not offset, - // the highest block that we will get is 16 blocks back from head, which means we - // will fetch 14 or 15 blocks unnecessarily in the case the height difference - // between us and the peer is 1-2 blocks, which is most common - requestHead := int(remoteHeight) - 1 - if requestHead < 0 { - requestHead = 0 - } - // requestBottom is the lowest block we want included in the query - // Ideally, we want to include the one just below our own head - requestBottom := int(localHeight - 1) - if requestBottom < 0 { - requestBottom = 0 - } - totalSpan := requestHead - requestBottom - span := 1 + totalSpan/MaxCount - if span < 2 { - span = 2 - } - if span > 16 { - span = 16 - } - - count = 1 + totalSpan/span - if count > MaxCount { - count = MaxCount - } - if count < 2 { - count = 2 - } - from = requestHead - (count-1)*span - if from < 0 { - from = 0 - } - max := from + (count-1)*span - return int64(from), count, span - 1, uint64(max) -} - -// findAncestor tries to locate the common ancestor link of the local chain and -// a remote peer's blockchain. In the general case when our node was in sync and -// on the correct chain, checking the top N links should already get us a match. -// In the rare scenario when we ended up on a long reorganisation (i.e. none of -// the head links match), we do a binary search to find the common ancestor. -func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { - // Figure out the valid ancestor range to prevent rewrite attacks - var ( - floor = int64(-1) - localHeight uint64 - remoteHeight = remoteHeader.Number.Uint64() - ) - mode := d.getMode() - switch mode { - case FullSync: - localHeight = d.blockchain.CurrentBlock().NumberU64() - case FastSync: - localHeight = d.blockchain.CurrentFastBlock().NumberU64() - default: - localHeight = d.lightchain.CurrentHeader().Number.Uint64() - } - p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) - - // Recap floor value for binary search - maxForkAncestry := fullMaxForkAncestry - if d.getMode() == LightSync { - maxForkAncestry = lightMaxForkAncestry - } - if localHeight >= maxForkAncestry { - // We're above the max reorg threshold, find the earliest fork point - floor = int64(localHeight - maxForkAncestry) - } - // If we're doing a light sync, ensure the floor doesn't go below the CHT, as - // all headers before that point will be missing. - if mode == LightSync { - // If we don't know the current CHT position, find it - if d.genesis == 0 { - header := d.lightchain.CurrentHeader() - for header != nil { - d.genesis = header.Number.Uint64() - if floor >= int64(d.genesis)-1 { - break - } - header = d.lightchain.GetHeaderByHash(header.ParentHash) - } - } - // We already know the "genesis" block number, cap floor to that - if floor < int64(d.genesis)-1 { - floor = int64(d.genesis) - 1 - } - } - - ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) - if err == nil { - return ancestor, nil - } - // The returned error was not nil. - // If the error returned does not reflect that a common ancestor was not found, return it. - // If the error reflects that a common ancestor was not found, continue to binary search, - // where the error value will be reassigned.
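calculateRequestSpan above is pure arithmetic, which makes it easy to check in isolation. Below is a rough standalone replica with a worked example; the authoritative version is the deleted function above, and this copy exists only to show the numbers it produces:

```go
package main

import "fmt"

const maxHeaderFetch = 192 // MaxHeaderFetch in the deleted package

// requestSpan mirrors calculateRequestSpan: spread at most MaxHeaderFetch/16
// probe headers over the gap between the local and remote heads, clamping the
// spacing into [2,16] and the probe count into [2,12].
func requestSpan(remoteHeight, localHeight uint64) (from int64, count, skip int, max uint64) {
	maxCount := maxHeaderFetch / 16
	requestHead := int(remoteHeight) - 1 // computed in int to avoid uint64 underflow at height 0
	if requestHead < 0 {
		requestHead = 0
	}
	requestBottom := int(localHeight) - 1
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom
	span := 1 + totalSpan/maxCount
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}
	count = 1 + totalSpan/span
	if count > maxCount {
		count = maxCount
	}
	if count < 2 {
		count = 2
	}
	f := requestHead - (count-1)*span
	if f < 0 {
		f = 0
	}
	return int64(f), count, span - 1, uint64(f + (count-1)*span)
}

func main() {
	// Local node 1000 blocks behind a peer whose head is at 1.5M:
	fmt.Println(requestSpan(1_500_000, 1_499_000))
	// 1499823 12 15 1499999: twelve probes, 15-block gaps, the last probe
	// landing one block below the remote head - the window findAncestor scans.
}
```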
- if !errors.Is(err, errNoAncestorFound) { - return 0, err - } - - ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) - if err != nil { - return 0, err - } - return ancestor, nil -} - -func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) { - from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) - - p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) - go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false) - - // Wait for the remote response to the head fetch - number, hash := uint64(0), common.Hash{} - - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - - for finished := false; !finished; { - select { - case <-d.cancelCh: - return 0, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) == 0 { - p.log.Warn("Empty head header set") - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i, header := range headers { - expectNumber := from + int64(i)*int64(skip+1) - if number := header.Number.Int64(); number != expectNumber { - p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) - return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) - } - } - // Check if a common ancestor was found - finished = true - for i := len(headers) - 1; i >= 0; i-- { - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { - continue - } - // Otherwise check if we already know the header or not - h := headers[i].Hash() - n := headers[i].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case FastSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if known { - number, hash = n, h - break - } - } - - case <-timeout: - p.log.Debug("Waiting for head header timed out", "elapsed", ttl) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } - } - // If the head fetch already found an ancestor, return - if hash != (common.Hash{}) { - if int64(number) <= floor { - p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", number, "hash", hash) - return number, nil - } - return 0, errNoAncestorFound -} - -func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) { - hash := common.Hash{} - - // Ancestor not found, we need to binary search over our chain - start, end := uint64(0), remoteHeight - if floor > 0 { - start = uint64(floor) - } - p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) - - for start+1 < end { - // Split our chain interval in two, and request the hash to cross check - check := (start + end) / 2 - - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - - go p.peer.RequestHeadersByNumber(check, 1, 0, false) 
- - // Wait until a reply arrives to this request - for arrived := false; !arrived; { - select { - case <-d.cancelCh: - return 0, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) != 1 { - p.log.Warn("Multiple headers for single request", "headers", len(headers)) - return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) - } - arrived = true - - // Modify the search interval based on the response - h := headers[0].Hash() - n := headers[0].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case FastSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if !known { - end = check - break - } - header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - p.log.Warn("Received non-requested header", "number", header.Number, "hash", header.Hash(), "request", check) - return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) - } - start = check - hash = h - - case <-timeout: - p.log.Debug("Waiting for search header timed out", "elapsed", ttl) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } - } - } - // Ensure valid ancestry and return - if int64(start) <= floor { - p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) - return 0, errInvalidAncestor - } - p.log.Debug("Found common ancestor", "number", start, "hash", hash) - return start, nil -} - -// fetchHeaders keeps retrieving headers concurrently from the number -// requested, until no more are returned, potentially throttling on the way. To -// facilitate concurrency but still protect against malicious nodes sending bad -// headers, we construct a header chain skeleton using the "origin" peer we are -// syncing with, and fill in the missing headers using anyone else. Headers from -// other peers are only accepted if they map cleanly to the skeleton. If no one -// can fill in the skeleton - not even the origin peer - it's assumed invalid and -// the origin is dropped.
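findAncestorBinarySearch above is ordinary bisection dressed in networking: each round requests the single header in the middle of the remaining interval and keeps the half whose boundary the local chain recognises. Stripped of peers and timeouts, the control flow looks like this sketch, where the `known` predicate is a stand-in for the HasBlock/HasFastBlock/HasHeader lookups:

```go
package main

import "fmt"

// bisectAncestor returns the highest number in (floor, remote) for which known
// reports true, assuming known is monotone: true up to the common ancestor,
// false above it - the same invariant the downloader relies on.
func bisectAncestor(remote uint64, floor int64, known func(uint64) bool) uint64 {
	start, end := uint64(0), remote
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		check := (start + end) / 2 // the one header we would request from the peer
		if known(check) {
			start = check
		} else {
			end = check
		}
	}
	return start
}

func main() {
	const ancestor = 123_456
	known := func(n uint64) bool { return n <= ancestor } // stand-in for the chain lookup
	fmt.Println(bisectAncestor(200_000, -1, known))       // 123456
}
```

The floor argument plays the same role as in the deleted code: it caps how deep the search may go, so a hostile peer cannot talk the node into rewriting more history than the immutability threshold allows.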
-func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { - p.log.Debug("Directing header downloads", "origin", from) - defer p.log.Debug("Header download terminated") - - // Create a timeout timer, and the associated header fetcher - skeleton := true // Skeleton assembly phase or finishing up - pivoting := false // Whether the next request is pivot verification - request := time.Now() // time of the last skeleton fetch request - timeout := time.NewTimer(0) // timer to dump a non-responsive active peer - <-timeout.C // timeout channel should be initially empty - defer timeout.Stop() - - var ttl time.Duration - getHeaders := func(from uint64) { - request = time.Now() - - ttl = d.peers.rates.TargetTimeout() - timeout.Reset(ttl) - - if skeleton { - p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) - go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - } else { - p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) - go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) - } - } - getNextPivot := func() { - pivoting = true - request = time.Now() - - ttl = d.peers.rates.TargetTimeout() - timeout.Reset(ttl) - - d.pivotLock.RLock() - pivot := d.pivotHeader.Number.Uint64() - d.pivotLock.RUnlock() - - p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) - go p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - } - // Start pulling the header chain skeleton until all is done - ancestor := from - getHeaders(from) - - mode := d.getMode() - for { - select { - case <-d.cancelCh: - return errCanceled - - case packet := <-d.headerCh: - // Make sure the active peer is giving us the skeleton headers - if packet.PeerId() != p.id { - log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) - break - } - headerReqTimer.UpdateSince(request) - timeout.Stop() - - // If the pivot is being checked, move if it became stale and run the real retrieval - var pivot uint64 - - d.pivotLock.RLock() - if d.pivotHeader != nil { - pivot = d.pivotHeader.Number.Uint64() - } - d.pivotLock.RUnlock() - - if pivoting { - if packet.Items() == 2 { - // Retrieve the headers and do some sanity checks, just in case - headers := packet.(*headerPack).headers - - if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { - log.Warn("Peer sent invalid next pivot", "have", have, "want", want) - return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) - } - if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { - log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) - return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) - } - log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) - pivot = headers[0].Number.Uint64() - - d.pivotLock.Lock() - d.pivotHeader = headers[0] - d.pivotLock.Unlock() - - // Write out the pivot into the database so a rollback beyond - // it will reenable fast sync and update the state root that - // the state syncer will be downloading. 
- rawdb.WriteLastPivotNumber(d.stateDB, pivot) - } - pivoting = false - getHeaders(from) - continue - } - // If the skeleton's finished, pull any remaining head headers directly from the origin - if skeleton && packet.Items() == 0 { - skeleton = false - getHeaders(from) - continue - } - // If no more headers are inbound, notify the content fetchers and return - if packet.Items() == 0 { - // Don't abort header fetches while the pivot is downloading - if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { - p.log.Debug("No headers, waiting for pivot commit") - select { - case <-time.After(fsHeaderContCheck): - getHeaders(from) - continue - case <-d.cancelCh: - return errCanceled - } - } - // Pivot done (or not in fast sync) and no more headers, terminate the process - p.log.Debug("No more headers available") - select { - case d.headerProcCh <- nil: - return nil - case <-d.cancelCh: - return errCanceled - } - } - headers := packet.(*headerPack).headers - - // If we received a skeleton batch, resolve internals concurrently - if skeleton { - filled, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - p.log.Debug("Skeleton chain invalid", "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - headers = filled[proced:] - from += uint64(proced) - } else { - // If we're closing in on the chain head, but haven't yet reached it, delay - // the last few headers so mini reorgs on the head don't cause invalid hash - // chain errors. - if n := len(headers); n > 0 { - // Retrieve the current head we're at - var head uint64 - if mode == LightSync { - head = d.lightchain.CurrentHeader().Number.Uint64() - } else { - head = d.blockchain.CurrentFastBlock().NumberU64() - if full := d.blockchain.CurrentBlock().NumberU64(); head < full { - head = full - } - } - // If the head is below the common ancestor, we're actually deduplicating - // already existing chain segments, so use the ancestor as the fake head. - // Otherwise we might end up delaying header deliveries pointlessly. - if head < ancestor { - head = ancestor - } - // If the head is way older than this batch, delay the last few headers - if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { - delay := reorgProtHeaderDelay - if delay > n { - delay = n - } - headers = headers[:n-delay] - } - } - } - // Insert all the new headers and fetch the next batch - if len(headers) > 0 { - p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) - select { - case d.headerProcCh <- headers: - case <-d.cancelCh: - return errCanceled - } - from += uint64(len(headers)) - - // If we're still skeleton filling fast sync, check pivot staleness - // before continuing to the next skeleton filling - if skeleton && pivot > 0 { - getNextPivot() - } else { - getHeaders(from) - } - } else { - // No headers delivered, or all of them being delayed, sleep a bit and retry - p.log.Trace("All headers delayed, waiting") - select { - case <-time.After(fsHeaderContCheck): - getHeaders(from) - continue - case <-d.cancelCh: - return errCanceled - } - } - - case <-timeout.C: - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) - break - } - // Header retrieval timed out, consider the peer bad and drop - p.log.Debug("Header request timed out", "elapsed", ttl) - headerTimeoutMeter.Mark(1) - d.dropPeer(p.id) - - // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - select { - case d.headerProcCh <- nil: - case <-d.cancelCh: - } - return fmt.Errorf("%w: header request timed out", errBadPeer) - } - } -} - -// fillHeaderSkeleton concurrently retrieves headers from all our available peers -// and maps them to the provided skeleton header chain. -// -// Any partial results from the beginning of the skeleton are (if possible) forwarded -// immediately to the header processor to keep the rest of the pipeline full even -// in the case of header stalls. -// -// The method returns the entire filled skeleton and also the number of headers -// already forwarded for processing. -func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { - log.Debug("Filling up skeleton", "from", from) - d.queue.ScheduleSkeleton(from, skeleton) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*headerPack) - return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh) - } - expire = func() map[string]int { return d.queue.ExpireHeaders(d.peers.rates.TargetTimeout()) } - reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) { - return d.queue.ReserveHeaders(p, count), false, false - } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } - capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { - p.SetHeadersIdle(accepted, deliveryTime) - } - ) - err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire, - d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve, - nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") - - log.Debug("Skeleton fill terminated", "err", err) - - filled, proced := d.queue.RetrieveHeaders() - return filled, proced, err -} - -// fetchBodies iteratively downloads the scheduled block bodies, taking any -// available peers, reserving a chunk of blocks for each, waiting for delivery -// and also periodically checking for timeouts.
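fillHeaderSkeleton above, and fetchBodies and fetchReceipts below, all drive the same engine and differ only in the closures they hand to fetchParts. The dependency-injection shape, reduced to a toy: the `hooks` struct and `fetchLoop` here are invented for illustration, while the real fetchParts takes roughly fourteen such parameters and also multiplexes deliveries, timeouts and throttling:

```go
package main

import "fmt"

// hooks bundles the type-specific behaviour the shared loop needs; the deleted
// fetchParts takes one closure per concern (deliver, expire, reserve, fetch,
// capacity, setIdle, ...) in exactly this spirit.
type hooks struct {
	pending func() int            // tasks still needing download
	reserve func(peer string) int // assign a chunk of tasks to a peer
	fetch   func(peer string, n int) error
	kind    string // label used only for logging
}

// fetchLoop is a toy scheduler: hand every idle peer a reserved chunk until
// nothing is pending. (The real loop also waits on deliveries and timeouts
// instead of spinning.)
func fetchLoop(peers []string, h hooks) error {
	for h.pending() > 0 {
		for _, p := range peers {
			n := h.reserve(p)
			if n == 0 {
				continue
			}
			if err := h.fetch(p, n); err != nil {
				return fmt.Errorf("%s fetch from %s failed: %w", h.kind, p, err)
			}
		}
	}
	return nil
}

func main() {
	remaining := 5
	err := fetchLoop([]string{"peer-a"}, hooks{
		pending: func() int { return remaining },
		reserve: func(string) int { n := remaining; remaining = 0; return n },
		fetch: func(p string, n int) error {
			fmt.Println("fetching", n, "bodies from", p)
			return nil
		},
		kind: "bodies",
	})
	fmt.Println("done:", err)
}
```

Injecting the per-type behaviour this way is what lets one heavily-tested loop serve headers, bodies and receipts alike, at the cost of the long parameter list documented before fetchParts below.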
-func (d *Downloader) fetchBodies(from uint64) error { - log.Debug("Downloading block bodies", "origin", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*bodyPack) - return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles) - } - expire = func() map[string]int { return d.queue.ExpireBodies(d.peers.rates.TargetTimeout()) } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } - capacity = func(p *peerConnection) int { return p.BlockCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) } - ) - err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies, - d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") - - log.Debug("Block body download terminated", "err", err) - return err -} - -// fetchReceipts iteratively downloads the scheduled block receipts, taking any -// available peers, reserving a chunk of receipts for each, waiting for delivery -// and also periodically checking for timeouts. -func (d *Downloader) fetchReceipts(from uint64) error { - log.Debug("Downloading transaction receipts", "origin", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*receiptPack) - return d.queue.DeliverReceipts(pack.peerID, pack.receipts) - } - expire = func() map[string]int { return d.queue.ExpireReceipts(d.peers.rates.TargetTimeout()) } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } - capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { - p.SetReceiptsIdle(accepted, deliveryTime) - } - ) - err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire, - d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts, - d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") - - log.Debug("Transaction receipt download terminated", "err", err) - return err -} - -// fetchParts iteratively downloads scheduled block parts, taking any available -// peers, reserving a chunk of fetch requests for each, waiting for delivery and -// also periodically checking for timeouts. -// -// As the scheduling/timeout logic is mostly the same for all downloaded data -// types, this method is used by each for data gathering and is instrumented with -// various callbacks to handle the slight differences between processing them.
-// -// The instrumentation parameters: -// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) -// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) -// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) -// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) -// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) -// - pending: task callback for the number of requests still needing download (detect completion/non-completability) -// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) -// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) -// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) -// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) -// - fetch: network callback to actually send a particular download request to a physical remote peer -// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) -// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) -// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks -// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) -// - kind: textual label of the type being downloaded to display in log messages -func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, - expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool), - fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, - idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error { - // Create a ticker to detect expired retrieval tasks - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - update := make(chan struct{}, 1) - - // Prepare the queue and fetch block parts until the block header fetcher's done - finished := false - for { - select { - case <-d.cancelCh: - return errCanceled - - case packet := <-deliveryCh: - deliveryTime := time.Now() - // If the peer was previously banned and failed to deliver its pack - // in a reasonable time frame, ignore its message. - if peer := d.peers.Peer(packet.PeerId()); peer != nil { - // Deliver the received chunk of data and check chain validity - accepted, err := deliver(packet) - if errors.Is(err, errInvalidChain) { - return err - } - // Unless a peer delivered something entirely different from what was requested - // (usually caused by a timed out request which came through in the end), set it to - // idle. If the delivery's stale, the peer should have already been idled.
- if !errors.Is(err, errStaleDelivery) { - setIdle(peer, accepted, deliveryTime) - } - // Issue a log to the user to see what's going on - switch { - case err == nil && packet.Items() == 0: - peer.log.Trace("Requested data not delivered", "type", kind) - case err == nil: - peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) - default: - peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err) - } - } - // Blocks assembled, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case cont := <-wakeCh: - // The header fetcher sent a continuation flag, check if it's done - if !cont { - finished = true - } - // Headers arrive, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case <-ticker.C: - // Sanity check update the progress - select { - case update <- struct{}{}: - default: - } - - case <-update: - // Short circuit if we lost all our peers - if d.peers.Len() == 0 { - return errNoPeers - } - // Check for fetch request timeouts and demote the responsible peers - for pid, fails := range expire() { - if peer := d.peers.Peer(pid); peer != nil { - // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps - // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal request - // times out, we need to get rid of the peer sync-wise. - // - // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth - // and latency of a peer separately, which requires pushing the measured capacity a bit and seeing - // how response times react, so it always requests one more than the minimum (i.e. min 2). - if fails > 2 { - peer.log.Trace("Data delivery timed out", "type", kind) - setIdle(peer, 0, time.Now()) - } else { - peer.log.Debug("Stalling delivery, dropping", "type", kind) - - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored - peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) - } else { - d.dropPeer(pid) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := pid == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - return errTimeout - } - } - } - } - } - // If there's nothing more to fetch, wait or terminate - if pending() == 0 { - if !inFlight() && finished { - log.Debug("Data fetching completed", "type", kind) - return nil - } - break - } - // Send a download request to all idle peers, until throttled - progressed, throttled, running := false, false, inFlight() - idles, total := idle() - pendCount := pending() - for _, peer := range idles { - // Short circuit if throttling activated - if throttled { - break - } - // Short circuit if there is no more available task. - if pendCount = pending(); pendCount == 0 { - break - } - // Reserve a chunk of fetches for a peer. A nil can mean either that - // no more headers are available, or that the peer is known not to - // have them.
- request, progress, throttle := reserve(peer, capacity(peer)) - if progress { - progressed = true - } - if throttle { - throttled = true - throttleCounter.Inc(1) - } - if request == nil { - continue - } - if request.From > 0 { - peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) - } else { - peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) - } - // Fetch the chunk and make sure any errors return the hashes to the queue - if fetchHook != nil { - fetchHook(request.Headers) - } - if err := fetch(peer, request); err != nil { - // Although we could try to fix this, this error really - // means that we've double allocated a fetch task to a peer. If that is the - // case, the internal state of the downloader and the queue is very wrong so - // better hard crash and note the error instead of silently accumulating into - // a much bigger issue. - panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) - } - running = true - } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed, throw an error - if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 { - return errPeersUnavailable - } - } - } -} - -// processHeaders takes batches of retrieved headers from an input channel and -// keeps processing and scheduling them into the header chain and downloader's -// queue until the stream ends or a failure occurs. -func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { - // Keep a count of uncertain headers to roll back - var ( - rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) - rollbackErr error - mode = d.getMode() - ) - defer func() { - if rollback > 0 { - lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 - if mode != LightSync { - lastFastBlock = d.blockchain.CurrentFastBlock().Number() - lastBlock = d.blockchain.CurrentBlock().Number() - } - if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block - // We're already unwinding the stack, only print the error to make it more visible - log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) - } - curFastBlock, curBlock := common.Big0, common.Big0 - if mode != LightSync { - curFastBlock = d.blockchain.CurrentFastBlock().Number() - curBlock = d.blockchain.CurrentBlock().Number() - } - log.Warn("Rolled back chain segment", - "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), - "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), - "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) - } - }() - // Wait for batches of headers to process - gotHeaders := false - - for { - select { - case <-d.cancelCh: - rollbackErr = errCanceled - return errCanceled - - case headers := <-d.headerProcCh: - // Terminate header processing if we synced up - if len(headers) == 0 { - // Notify everyone that headers are fully processed - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: - } - } - // If no headers were retrieved at all, the peer violated its TD promise that it had a - // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g.
fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if mode != LightSync { - head := d.blockchain.CurrentBlock() - if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { - return errStallingPeer - } - } - // If fast or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if mode == FastSync || mode == LightSync { - head := d.lightchain.CurrentHeader() - if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer - } - } - // Disable any rollback and return - rollback = 0 - return nil - } - // Otherwise split the chunk of headers into batches and process them - gotHeaders = true - for len(headers) > 0 { - // Terminate if something failed in between processing chunks - select { - case <-d.cancelCh: - rollbackErr = errCanceled - return errCanceled - default: - } - // Select the next chunk of headers to import - limit := maxHeadersProcess - if limit > len(headers) { - limit = len(headers) - } - chunk := headers[:limit] - - // In case of header only syncing, validate the chunk immediately - if mode == FastSync || mode == LightSync { - if n, err := d.lightchain.InsertHeaderChain(chunk); err != nil { - rollbackErr = err - - // If some headers were inserted, track them as uncertain - if mode == FastSync && n > 0 && rollback == 0 { - rollback = chunk[0].Number.Uint64() - } - log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - // All verifications passed, track all headers within the allotted limits - if mode == FastSync { - head := chunk[len(chunk)-1].Number.Uint64() - if head-rollback > uint64(fsHeaderSafetyNet) { - rollback = head - uint64(fsHeaderSafetyNet) - } else { - rollback = 1 - } - } - } - // Unless we're doing light chains, schedule the headers for associated content retrieval - if mode == FullSync || mode == FastSync { - // If we've reached the allowed number of pending headers, stall a bit - for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { - select { - case <-d.cancelCh: - rollbackErr = errCanceled - return errCanceled - case <-time.After(time.Second): - } - } - // Otherwise insert the headers for content retrieval - inserts := d.queue.Schedule(chunk, origin) - if len(inserts) != len(chunk) { - rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk)) - return fmt.Errorf("%w: stale headers", errBadPeer) - } - } - headers = headers[limit:] - origin += uint64(limit) - } - // Update the highest block number we know if a higher one is found. 
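The chunked processing above caps each pass at `maxHeadersProcess` and advances `origin` by the batch size. Below is a minimal sketch of just that arithmetic, with plain ints standing in for headers and an illustrative cap value (not necessarily geth's):

```go
package main

import "fmt"

const maxHeadersProcess = 2048 // illustrative cap, mirroring the constant above

// processInChunks walks items in batches of at most maxHeadersProcess,
// advancing origin by the batch size after every pass, as the loop above does.
func processInChunks(items []int, origin uint64, handle func(chunk []int, origin uint64)) uint64 {
	for len(items) > 0 {
		limit := maxHeadersProcess
		if limit > len(items) {
			limit = len(items)
		}
		handle(items[:limit], origin)
		items = items[limit:]
		origin += uint64(limit)
	}
	return origin
}

func main() {
	items := make([]int, 5000)
	end := processInChunks(items, 100, func(chunk []int, origin uint64) {
		fmt.Printf("chunk of %d starting at %d\n", len(chunk), origin)
	})
	fmt.Println("next origin:", end) // 5100
}
```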
- d.syncStatsLock.Lock()
- if d.syncStatsChainHeight < origin {
- d.syncStatsChainHeight = origin - 1
- }
- d.syncStatsLock.Unlock()
-
- // Signal the content downloaders of the availability of new tasks
- for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
- select {
- case ch <- true:
- default:
- }
- }
- }
- }
-}
-
-// processFullSyncContent takes fetch results from the queue and imports them into the chain.
-func (d *Downloader) processFullSyncContent() error {
- for {
- results := d.queue.Results(true)
- if len(results) == 0 {
- return nil
- }
- if d.chainInsertHook != nil {
- d.chainInsertHook(results)
- }
- if err := d.importBlockResults(results); err != nil {
- return err
- }
- }
-}
-
-func (d *Downloader) importBlockResults(results []*fetchResult) error {
- // Check for any early termination requests
- if len(results) == 0 {
- return nil
- }
- select {
- case <-d.quitCh:
- return errCancelContentProcessing
- default:
- }
- // Retrieve a batch of results to import
- first, last := results[0].Header, results[len(results)-1].Header
- log.Debug("Inserting downloaded chain", "items", len(results),
- "firstnum", first.Number, "firsthash", first.Hash(),
- "lastnum", last.Number, "lasthash", last.Hash(),
- )
- blocks := make([]*types.Block, len(results))
- for i, result := range results {
- blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- }
- if index, err := d.blockchain.InsertChain(blocks); err != nil {
- if index < len(results) {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- } else {
- // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
- // when it needs to preprocess blocks to import a sidechain.
- // The importer will put together a new list of blocks to import, which is a superset
- // of the blocks delivered from the downloader, and the indexing will be off.
- log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
- }
- return fmt.Errorf("%w: %v", errInvalidChain, err)
- }
- return nil
-}
-
-// processFastSyncContent takes fetch results from the queue and writes them to the
-// database. It also controls the synchronisation of state nodes of the pivot block.
-func (d *Downloader) processFastSyncContent() error {
- // Start syncing state of the reported head block. This should get us most of
- // the state of the pivot block.
- d.pivotLock.RLock()
- sync := d.syncState(d.pivotHeader.Root)
- d.pivotLock.RUnlock()
-
- defer func() {
- // The `sync` object is replaced every time the pivot moves. We need to
- // defer close the very last active one, hence the lazy evaluation vs.
- // calling defer sync.Cancel() !!!
- sync.Cancel()
- }()
-
- closeOnErr := func(s *stateSync) {
- if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
- d.queue.Close() // wake up Results
- }
- }
- go closeOnErr(sync)
-
- // To cater for moving pivot points, track the pivot block and subsequently
- // accumulated download results separately.
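The `select { case ch <- true: default: }` idiom above signals the body and receipt fetchers without ever blocking: the wake channels are buffered, so one pending notification is kept and any further ones coalesce. A standalone sketch of the pattern:

```go
package main

import "fmt"

// signal performs a non-blocking send: if nobody is ready and the buffer is
// already full, the notification is dropped, because one pending wake-up is
// all a consumer needs.
func signal(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}

func main() {
	wake := make(chan bool, 1)
	signal(wake) // buffer empty: stored
	signal(wake) // buffer full: coalesced, does not block
	fmt.Println(<-wake, len(wake)) // true 0
}
```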
- var ( - oldPivot *fetchResult // Locked in pivot block, might change eventually - oldTail []*fetchResult // Downloaded content after the pivot - ) - for { - // Wait for the next batch of downloaded data to be available, and if the pivot - // block became stale, move the goalpost - results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness - if len(results) == 0 { - // If pivot sync is done, stop - if oldPivot == nil { - return sync.Cancel() - } - // If sync failed, stop - select { - case <-d.cancelCh: - sync.Cancel() - return errCanceled - default: - } - } - if d.chainInsertHook != nil { - d.chainInsertHook(results) - } - // If we haven't downloaded the pivot block yet, check pivot staleness - // notifications from the header downloader - d.pivotLock.RLock() - pivot := d.pivotHeader - d.pivotLock.RUnlock() - - if oldPivot == nil { - if pivot.Root != sync.root { - sync.Cancel() - sync = d.syncState(pivot.Root) - - go closeOnErr(sync) - } - } else { - results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) - } - // Split around the pivot block and process the two sides via fast/full sync - if atomic.LoadInt32(&d.committed) == 0 { - latest := results[len(results)-1].Header - // If the height is above the pivot block by 2 sets, it means the pivot - // become stale in the network and it was garbage collected, move to a - // new pivot. - // - // Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those - // need to be taken into account, otherwise we're detecting the pivot move - // late and will drop peers due to unavailable state!!! - if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) { - log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay)) - pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted - - d.pivotLock.Lock() - d.pivotHeader = pivot - d.pivotLock.Unlock() - - // Write out the pivot into the database so a rollback beyond it will - // reenable fast sync - rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64()) - } - } - P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results) - if err := d.commitFastSyncData(beforeP, sync); err != nil { - return err - } - if P != nil { - // If new pivot block found, cancel old state retrieval and restart - if oldPivot != P { - sync.Cancel() - sync = d.syncState(P.Header.Root) - - go closeOnErr(sync) - oldPivot = P - } - // Wait for completion, occasionally checking for pivot staleness - select { - case <-sync.done: - if sync.err != nil { - return sync.err - } - if err := d.commitPivotBlock(P); err != nil { - return err - } - oldPivot = nil - - case <-time.After(time.Second): - oldTail = afterP - continue - } - } - // Fast sync done, pivot commit done, full import - if err := d.importBlockResults(afterP); err != nil { - return err - } - } -} - -func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { - if len(results) == 0 { - return nil, nil, nil - } - if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot { - // the pivot is somewhere in the future - return nil, results, nil - } - // This can also be optimized, but only happens very seldom - for _, result := range results { - num := result.Header.Number.Uint64() - switch { - case num < pivot: - before = append(before, 
result)
- case num == pivot:
- p = result
- default:
- after = append(after, result)
- }
- }
- return p, before, after
-}
-
-func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
- // Check for any early termination requests
- if len(results) == 0 {
- return nil
- }
- select {
- case <-d.quitCh:
- return errCancelContentProcessing
- case <-stateSync.done:
- if err := stateSync.Wait(); err != nil {
- return err
- }
- default:
- }
- // Retrieve a batch of results to import
- first, last := results[0].Header, results[len(results)-1].Header
- log.Debug("Inserting fast-sync blocks", "items", len(results),
- "firstnum", first.Number, "firsthash", first.Hash(),
- "lastnum", last.Number, "lasthash", last.Hash(),
- )
- blocks := make([]*types.Block, len(results))
- receipts := make([]types.Receipts, len(results))
- for i, result := range results {
- blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- receipts[i] = result.Receipts
- }
- if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
- log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- return fmt.Errorf("%w: %v", errInvalidChain, err)
- }
- return nil
-}
-
-func (d *Downloader) commitPivotBlock(result *fetchResult) error {
- block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
-
- // Commit the pivot block as the new head, will require full sync from here on
- if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
- return err
- }
- if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
- return err
- }
- atomic.StoreInt32(&d.committed, 1)
- return nil
-}
-
-// DeliverHeaders injects a new batch of block headers received from a remote
-// node into the download schedule.
-func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
- return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
-}
-
-// DeliverBodies injects a new batch of block bodies received from a remote node.
-func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {
- return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
-}
-
-// DeliverReceipts injects a new batch of receipts received from a remote node.
-func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error {
- return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
-}
-
-// DeliverNodeData injects a new batch of node state data received from a remote node.
-func (d *Downloader) DeliverNodeData(id string, data [][]byte) error {
- return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
-}
-
-// DeliverSnapPacket is invoked from a peer's message handler when it transmits a
-// data packet for the local node to consume.
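`splitAroundPivot` above partitions a sorted result batch into the slice committed via fast sync, the pivot itself, and the tail that goes through full import. Here is the same three-way split over bare block numbers, a sketch independent of the `fetchResult` type:

```go
package main

import "fmt"

// splitAroundPivot partitions ascending block numbers around pivot, mirroring
// the split above: before takes the fast-sync path, after the full-import one.
func splitAroundPivot(pivot uint64, nums []uint64) (p *uint64, before, after []uint64) {
	if len(nums) == 0 {
		return nil, nil, nil
	}
	if nums[len(nums)-1] < pivot {
		return nil, nums, nil // the pivot is still in the future
	}
	for _, num := range nums {
		n := num
		switch {
		case num < pivot:
			before = append(before, num)
		case num == pivot:
			p = &n
		default:
			after = append(after, num)
		}
	}
	return p, before, after
}

func main() {
	p, before, after := splitAroundPivot(5, []uint64{3, 4, 5, 6, 7})
	fmt.Println(*p, before, after) // 5 [3 4] [6 7]
}
```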
-func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error { - switch packet := packet.(type) { - case *snap.AccountRangePacket: - hashes, accounts, err := packet.Unpack() - if err != nil { - return err - } - return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof) - - case *snap.StorageRangesPacket: - hashset, slotset := packet.Unpack() - return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof) - - case *snap.ByteCodesPacket: - return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes) - - case *snap.TrieNodesPacket: - return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes) - - default: - return fmt.Errorf("unexpected snap packet type: %T", packet) - } -} - -// deliver injects a new batch of data received from a remote node. -func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { - // Update the delivery metrics for both good and failed deliveries - inMeter.Mark(int64(packet.Items())) - defer func() { - if err != nil { - dropMeter.Mark(int64(packet.Items())) - } - }() - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - if cancel == nil { - return errNoSyncActive - } - select { - case destCh <- packet: - return nil - case <-cancel: - return errNoSyncActive - } -} diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go deleted file mode 100644 index ae0d5ccda..000000000 --- a/les/downloader/downloader_test.go +++ /dev/null @@ -1,1621 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "errors" - "fmt" - "math/big" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state/snapshot" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/trie" -) - -// Reduce some of the parameters to make the tester faster. -func init() { - fullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 - blockCacheMaxItems = 1024 - fsHeaderContCheck = 500 * time.Millisecond -} - -// downloadTester is a test simulator for mocking out local block chain. 
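The `deliver` helper above is the template behind every `Deliver*` wrapper: attempt the send only as long as the active sync's cancel channel is open. Below is a reduced sketch of that guarded send; `errNoSyncActive` matches the sentinel used above, everything else (the generic signature, the channel types) is simplified:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoSyncActive = errors.New("no sync active")

// deliver sends packet to dest unless cancel fires first, matching the
// select used by the downloader's deliver method.
func deliver[T any](dest chan T, cancel chan struct{}, packet T) error {
	if cancel == nil {
		return errNoSyncActive // no sync ever started
	}
	select {
	case dest <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}

func main() {
	dest := make(chan string, 1)
	cancel := make(chan struct{})
	fmt.Println(deliver(dest, cancel, "headers")) // <nil>
	close(cancel)
	// dest's buffer is full and cancel is closed, so this delivery is refused.
	fmt.Println(deliver(dest, cancel, "bodies")) // no sync active
}
```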
-type downloadTester struct { - downloader *Downloader - - genesis *types.Block // Genesis blocks used by the tester and peers - stateDb ethdb.Database // Database used by the tester for syncing from peers - peerDb ethdb.Database // Database of the peers containing all data - peers map[string]*downloadTesterPeer - - ownHashes []common.Hash // Hash chain belonging to the tester - ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester - ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain - - ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester - ancientBlocks map[common.Hash]*types.Block // Ancient blocks belonging to the tester - ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester - ancientChainTd map[common.Hash]*big.Int // Ancient total difficulties of the blocks in the local chain - - lock sync.RWMutex -} - -// newTester creates a new downloader test mocker. -func newTester() *downloadTester { - tester := &downloadTester{ - genesis: testGenesis, - peerDb: testDB, - peers: make(map[string]*downloadTesterPeer), - ownHashes: []common.Hash{testGenesis.Hash()}, - ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, - ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, - ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, - ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, - - // Initialize ancient store with test genesis block - ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, - ancientBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, - ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, - ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, - } - tester.stateDb = rawdb.NewMemoryDatabase() - tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00}) - - tester.downloader = New(0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer) - return tester -} - -// terminate aborts any operations on the embedded downloader and releases all -// held resources. -func (dl *downloadTester) terminate() { - dl.downloader.Terminate() -} - -// sync starts synchronizing with a remote peer, blocking until it completes. -func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - dl.lock.RLock() - hash := dl.peers[id].chain.headBlock().Hash() - // If no particular TD was requested, load from the peer's blockchain - if td == nil { - td = dl.peers[id].chain.td(hash) - } - dl.lock.RUnlock() - - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, hash, td, mode) - select { - case <-dl.downloader.cancelCh: - // Ok, downloader fully cancelled after sync cycle - default: - // Downloader is still accepting packets, can block a peer up - panic("downloader active post sync cycle") // panic will be caught by tester - } - return err -} - -// HasHeader checks if a header is present in the testers canonical chain. -func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool { - return dl.GetHeaderByHash(hash) != nil -} - -// HasBlock checks if a block is present in the testers canonical chain. 
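Most of the tester's accessors below follow one lookup rule: probe the ancient (frozen) maps first, then fall back to the active ones. A minimal sketch of that two-tier store, with strings standing in for headers and blocks:

```go
package main

import "fmt"

// tieredStore mimics the tester's ancient/own split: reads prefer the
// frozen store and fall back to the active one.
type tieredStore struct {
	ancient map[uint64]string
	own     map[uint64]string
}

func (s *tieredStore) get(n uint64) (string, bool) {
	if v, ok := s.ancient[n]; ok {
		return v, ok
	}
	v, ok := s.own[n]
	return v, ok
}

func main() {
	s := &tieredStore{
		ancient: map[uint64]string{0: "genesis"},
		own:     map[uint64]string{1: "block 1"},
	}
	fmt.Println(s.get(0)) // genesis true
	fmt.Println(s.get(1)) // block 1 true
	fmt.Println(s.get(2)) // "" false
}
```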
-func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool { - return dl.GetBlockByHash(hash) != nil -} - -// HasFastBlock checks if a block is present in the testers canonical chain. -func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool { - dl.lock.RLock() - defer dl.lock.RUnlock() - - if _, ok := dl.ancientReceipts[hash]; ok { - return true - } - _, ok := dl.ownReceipts[hash] - return ok -} - -// GetHeader retrieves a header from the testers canonical chain. -func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - return dl.getHeaderByHash(hash) -} - -// getHeaderByHash returns the header if found either within ancients or own blocks) -// This method assumes that the caller holds at least the read-lock (dl.lock) -func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header { - header := dl.ancientHeaders[hash] - if header != nil { - return header - } - return dl.ownHeaders[hash] -} - -// GetBlock retrieves a block from the testers canonical chain. -func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - block := dl.ancientBlocks[hash] - if block != nil { - return block - } - return dl.ownBlocks[hash] -} - -// CurrentHeader retrieves the current head header from the canonical chain. -func (dl *downloadTester) CurrentHeader() *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil { - return header - } - if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil { - return header - } - } - return dl.genesis.Header() -} - -// CurrentBlock retrieves the current head block from the canonical chain. -func (dl *downloadTester) CurrentBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block - } - return block - } - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block - } - } - } - return dl.genesis -} - -// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain. -func (dl *downloadTester) CurrentFastBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil { - return block - } - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - return block - } - } - return dl.genesis -} - -// FastSyncCommitHead manually sets the head block to a given hash. -func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error { - // For now only check that the state trie is correct - if block := dl.GetBlockByHash(hash); block != nil { - _, err := trie.NewStateTrie(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb)) - return err - } - return fmt.Errorf("non existent block: %x", hash[:4]) -} - -// GetTd retrieves the block's total difficulty from the canonical chain. -func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.getTd(hash) -} - -// getTd retrieves the block's total difficulty if found either within -// ancients or own blocks). 
-// This method assumes that the caller holds at least the read-lock (dl.lock) -func (dl *downloadTester) getTd(hash common.Hash) *big.Int { - if td := dl.ancientChainTd[hash]; td != nil { - return td - } - return dl.ownChainTd[hash] -} - -// InsertHeaderChain injects a new batch of headers into the simulated chain. -func (dl *downloadTester) InsertHeaderChain(headers []*types.Header) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors - if dl.getHeaderByHash(headers[0].ParentHash) == nil { - return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number) - } - var hashes []common.Hash - for i := 1; i < len(headers); i++ { - hash := headers[i-1].Hash() - if headers[i].ParentHash != headers[i-1].Hash() { - return i, fmt.Errorf("non-contiguous import at position %d", i) - } - hashes = append(hashes, hash) - } - hashes = append(hashes, headers[len(headers)-1].Hash()) - // Do a full insert if pre-checks passed - for i, header := range headers { - hash := hashes[i] - if dl.getHeaderByHash(hash) != nil { - continue - } - if dl.getHeaderByHash(header.ParentHash) == nil { - // This _should_ be impossible, due to precheck and induction - return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i) - } - dl.ownHashes = append(dl.ownHashes, hash) - dl.ownHeaders[hash] = header - - td := dl.getTd(header.ParentHash) - dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty) - } - return len(headers), nil -} - -// InsertChain injects a new batch of blocks into the simulated chain. -func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - for i, block := range blocks { - if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok { - return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks)) - } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil { - return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err) - } - if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil { - dl.ownHashes = append(dl.ownHashes, block.Hash()) - dl.ownHeaders[block.Hash()] = block.Header() - } - dl.ownBlocks[block.Hash()] = block - dl.ownReceipts[block.Hash()] = make(types.Receipts, 0) - dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) - td := dl.getTd(block.ParentHash()) - dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty()) - } - return len(blocks), nil -} - -// InsertReceiptChain injects a new batch of receipts into the simulated chain. 
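`InsertHeaderChain` above front-loads a contiguity pre-check so that nothing is written when a batch is internally broken. The check in isolation, over a simplified header type whose fields are stand-ins for the real `Hash()`/`ParentHash`:

```go
package main

import "fmt"

// header is a simplified stand-in for types.Header.
type header struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// checkContiguous returns the index of the first non-contiguous header, or -1
// if every header links to its predecessor, as the pre-check above requires.
func checkContiguous(headers []header) int {
	for i := 1; i < len(headers); i++ {
		if headers[i].ParentHash != headers[i-1].Hash {
			return i
		}
	}
	return -1
}

func main() {
	ok := []header{{1, "a", "g"}, {2, "b", "a"}, {3, "c", "b"}}
	bad := []header{{1, "a", "g"}, {3, "c", "x"}}
	fmt.Println(checkContiguous(ok))  // -1
	fmt.Println(checkContiguous(bad)) // 1
}
```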
-func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - for i := 0; i < len(blocks) && i < len(receipts); i++ { - if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok { - return i, errors.New("unknown owner") - } - if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok { - if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { - return i, errors.New("InsertReceiptChain: unknown parent") - } - } - if blocks[i].NumberU64() <= ancientLimit { - dl.ancientBlocks[blocks[i].Hash()] = blocks[i] - dl.ancientReceipts[blocks[i].Hash()] = receipts[i] - - // Migrate from active db to ancient db - dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header() - dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty()) - delete(dl.ownHeaders, blocks[i].Hash()) - delete(dl.ownChainTd, blocks[i].Hash()) - } else { - dl.ownBlocks[blocks[i].Hash()] = blocks[i] - dl.ownReceipts[blocks[i].Hash()] = receipts[i] - } - } - return len(blocks), nil -} - -// SetHead rewinds the local chain to a new head. -func (dl *downloadTester) SetHead(head uint64) error { - dl.lock.Lock() - defer dl.lock.Unlock() - - // Find the hash of the head to reset to - var hash common.Hash - for h, header := range dl.ownHeaders { - if header.Number.Uint64() == head { - hash = h - } - } - for h, header := range dl.ancientHeaders { - if header.Number.Uint64() == head { - hash = h - } - } - if hash == (common.Hash{}) { - return fmt.Errorf("unknown head to set: %d", head) - } - // Find the offset in the header chain - var offset int - for o, h := range dl.ownHashes { - if h == hash { - offset = o - break - } - } - // Remove all the hashes and associated data afterwards - for i := offset + 1; i < len(dl.ownHashes); i++ { - delete(dl.ownChainTd, dl.ownHashes[i]) - delete(dl.ownHeaders, dl.ownHashes[i]) - delete(dl.ownReceipts, dl.ownHashes[i]) - delete(dl.ownBlocks, dl.ownHashes[i]) - - delete(dl.ancientChainTd, dl.ownHashes[i]) - delete(dl.ancientHeaders, dl.ownHashes[i]) - delete(dl.ancientReceipts, dl.ownHashes[i]) - delete(dl.ancientBlocks, dl.ownHashes[i]) - } - dl.ownHashes = dl.ownHashes[:offset+1] - return nil -} - -// Rollback removes some recently added elements from the chain. -func (dl *downloadTester) Rollback(hashes []common.Hash) { -} - -// newPeer registers a new block download source into the downloader. -func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error { - dl.lock.Lock() - defer dl.lock.Unlock() - - peer := &downloadTesterPeer{dl: dl, id: id, chain: chain} - dl.peers[id] = peer - return dl.downloader.RegisterPeer(id, version, peer) -} - -// dropPeer simulates a hard peer removal from the connection pool. -func (dl *downloadTester) dropPeer(id string) { - dl.lock.Lock() - defer dl.lock.Unlock() - - delete(dl.peers, id) - dl.downloader.UnregisterPeer(id) -} - -// Snapshots implements the BlockChain interface for the downloader, but is a noop. -func (dl *downloadTester) Snapshots() *snapshot.Tree { - return nil -} - -type downloadTesterPeer struct { - dl *downloadTester - id string - chain *testChain - missingStates map[common.Hash]bool // State entries that fast sync should not return -} - -// Head constructs a function to retrieve a peer's current head hash -// and total difficulty. 
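`InsertReceiptChain` above freezes every block at or below `ancientLimit` into the ancient maps and evicts it from the active ones. The migration rule reduced to bare block numbers, a sketch rather than the tester's full bookkeeping:

```go
package main

import "fmt"

// migrate moves numbers at or below ancientLimit from active into ancient,
// the same split InsertReceiptChain applies per block.
func migrate(active, ancient map[uint64]bool, ancientLimit uint64) {
	for n := range active {
		if n <= ancientLimit {
			ancient[n] = true
			delete(active, n) // safe: deleting during range is well-defined in Go
		}
	}
}

func main() {
	active := map[uint64]bool{10: true, 90: true, 120: true}
	ancient := map[uint64]bool{}
	migrate(active, ancient, 100)
	fmt.Println(len(ancient), len(active)) // 2 1
}
```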
-func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { - b := dlp.chain.headBlock() - return b.Hash(), dlp.chain.td(b.Hash()) -} - -// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { - result := dlp.chain.headersByHash(origin, amount, skip, reverse) - go dlp.dl.downloader.DeliverHeaders(dlp.id, result) - return nil -} - -// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { - result := dlp.chain.headersByNumber(origin, amount, skip, reverse) - go dlp.dl.downloader.DeliverHeaders(dlp.id, result) - return nil -} - -// RequestBodies constructs a getBlockBodies method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block bodies from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error { - txs, uncles := dlp.chain.bodies(hashes) - go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles) - return nil -} - -// RequestReceipts constructs a getReceipts method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block receipts from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error { - receipts := dlp.chain.receipts(hashes) - go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts) - return nil -} - -// RequestNodeData constructs a getNodeData method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of node state data from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error { - dlp.dl.lock.RLock() - defer dlp.dl.lock.RUnlock() - - results := make([][]byte, 0, len(hashes)) - for _, hash := range hashes { - if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil { - if !dlp.missingStates[hash] { - results = append(results, data) - } - } - } - go dlp.dl.downloader.DeliverNodeData(dlp.id, results) - return nil -} - -// assertOwnChain checks if the local chain contains the correct number of items -// of the various chain components. -func assertOwnChain(t *testing.T, tester *downloadTester, length int) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - assertOwnForkedChain(t, tester, 1, []int{length}) -} - -// assertOwnForkedChain checks if the local forked chain contains the correct -// number of items of the various chain components. 
-func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - // Initialize the counters for the first fork - headers, blocks, receipts := lengths[0], lengths[0], lengths[0] - - // Update the counters for each subsequent fork - for _, length := range lengths[1:] { - headers += length - common - blocks += length - common - receipts += length - common - } - if tester.downloader.getMode() == LightSync { - blocks, receipts = 1, 1 - } - if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) - } - if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) - } - if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts { - t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) - } -} - -func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } -func TestCanonicalSynchronisation66Fast(t *testing.T) { testCanonSync(t, eth.ETH66, FastSync) } -func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } - -func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Create a small enough block chain to download - chain := testChainBase.shorten(blockCacheMaxItems - 15) - tester.newPeer("peer", protocol, chain) - - // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chain.len()) -} - -// Tests that if a large batch of blocks are being downloaded, it is throttled -// until the cached blocks are retrieved. -func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } -func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) } - -func testThrottling(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() - - // Create a long block chain to download and the tester - targetBlocks := testChainBase.len() - 1 - tester.newPeer("peer", protocol, testChainBase) - - // Wrap the importer to allow stepping - blocked, proceed := uint32(0), make(chan struct{}) - tester.downloader.chainInsertHook = func(results []*fetchResult) { - atomic.StoreUint32(&blocked, uint32(len(results))) - <-proceed - } - // Start a synchronisation concurrently - errc := make(chan error, 1) - go func() { - errc <- tester.sync("peer", nil, mode) - }() - // Iteratively take some blocks, always checking the retrieval count - for { - // Check the retrieval count synchronously (! 
reason for this ugly block) - tester.lock.RLock() - retrieved := len(tester.ownBlocks) - tester.lock.RUnlock() - if retrieved >= targetBlocks+1 { - break - } - // Wait a bit for sync to throttle itself - var cached, frozen int - for start := time.Now(); time.Since(start) < 3*time.Second; { - time.Sleep(25 * time.Millisecond) - - tester.lock.Lock() - tester.downloader.queue.lock.Lock() - tester.downloader.queue.resultCache.lock.Lock() - { - cached = tester.downloader.queue.resultCache.countCompleted() - frozen = int(atomic.LoadUint32(&blocked)) - retrieved = len(tester.ownBlocks) - } - tester.downloader.queue.resultCache.lock.Unlock() - tester.downloader.queue.lock.Unlock() - tester.lock.Unlock() - - if cached == blockCacheMaxItems || - cached == blockCacheMaxItems-reorgProtHeaderDelay || - retrieved+cached+frozen == targetBlocks+1 || - retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay { - break - } - } - // Make sure we filled up the cache, then exhaust it - time.Sleep(25 * time.Millisecond) // give it a chance to screw up - tester.lock.RLock() - retrieved = len(tester.ownBlocks) - tester.lock.RUnlock() - if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay { - t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1) - } - - // Permit the blocked blocks to import - if atomic.LoadUint32(&blocked) > 0 { - atomic.StoreUint32(&blocked, uint32(0)) - proceed <- struct{}{} - } - } - // Check that we haven't pulled more blocks than available - assertOwnChain(t, tester, targetBlocks+1) - if err := <-errc; err != nil { - t.Fatalf("block synchronization failed: %v", err) - } - tester.terminate() -} - -// Tests that simple synchronization against a forked chain works correctly. In -// this test common ancestor lookup should *not* be short circuited, and a full -// binary search should be executed. -func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } -func TestForkedSync66Fast(t *testing.T) { testForkedSync(t, eth.ETH66, FastSync) } -func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } - -func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - chainA := testChainForkLightA.shorten(testChainBase.len() + 80) - chainB := testChainForkLightB.shorten(testChainBase.len() + 80) - tester.newPeer("fork A", protocol, chainA) - tester.newPeer("fork B", protocol, chainB) - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chainA.len()) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()}) -} - -// Tests that synchronising against a much shorter but much heavier fork works -// correctly and is not dropped. 
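The throttling test above steps the importer manually: `chainInsertHook` records the batch size and then parks on a channel until the test has inspected the cache. The hook-and-step handshake in isolation, with ints standing in for fetch results:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

func main() {
	var blocked atomic.Uint32
	proceed := make(chan struct{})

	// The hook: record how many results are parked, then wait for the test.
	chainInsertHook := func(results []int) {
		blocked.Store(uint32(len(results)))
		<-proceed
	}

	done := make(chan struct{})
	go func() { // plays the role of the importing downloader
		chainInsertHook([]int{1, 2, 3})
		close(done)
	}()

	for blocked.Load() == 0 {
		runtime.Gosched() // spin until the importer is parked
	}
	fmt.Println("importer blocked on", blocked.Load(), "results")

	blocked.Store(0)
	proceed <- struct{}{} // release the importer, as the test loop does
	<-done
}
```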
-func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
-func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FastSync) }
-func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
-
-func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Parallel()
-
- tester := newTester()
- defer tester.terminate()
-
- chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
- chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
- tester.newPeer("light", protocol, chainA)
- tester.newPeer("heavy", protocol, chainB)
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("light", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- // Synchronise with the second peer and make sure that the fork is pulled too
- if err := tester.sync("heavy", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
-}
-
-// Tests that chain forks are contained within a certain interval of the current
-// chain head, ensuring that malicious peers cannot waste resources by feeding
-// long dead chains.
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) }
-func TestBoundedForkedSync66Fast(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FastSync) }
-func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
-
-func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Parallel()
-
- tester := newTester()
- defer tester.terminate()
-
- chainA := testChainForkLightA
- chainB := testChainForkLightB
- tester.newPeer("original", protocol, chainA)
- tester.newPeer("rewriter", protocol, chainB)
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("original", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- // Synchronise with the second peer and ensure that the fork is rejected for being too old
- if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
- t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
- }
-}
-
-// Tests that chain forks are contained within a certain interval of the current
-// chain head for short but heavy forks too. These are a bit special because they
-// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync66Full(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
-}
-func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
-}
-func TestBoundedHeavyForkedSync66Light(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
-}
-
-func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Parallel()
- tester := newTester()
-
- // Create a long enough forked chain
- chainA := testChainForkLightA
- chainB := testChainForkHeavy
- tester.newPeer("original", protocol, chainA)
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("original", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- tester.newPeer("heavy-rewriter", protocol, chainB)
- // Synchronise with the second peer and ensure that the fork is rejected for being too old
- if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
- t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
- }
- tester.terminate()
-}
-
-// Tests that an inactive downloader will not accept incoming block headers,
-// bodies and receipts.
-func TestInactiveDownloader63(t *testing.T) {
- t.Parallel()
-
- tester := newTester()
- defer tester.terminate()
-
- // Check that neither block headers nor bodies are accepted
- if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
- if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
- if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
-}
-
-// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
-func TestCancel66Fast(t *testing.T) { testCancel(t, eth.ETH66, FastSync) }
-func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
-
-func testCancel(t *testing.T, protocol uint, mode SyncMode) {
- t.Parallel()
-
- tester := newTester()
- defer tester.terminate()
-
- chain := testChainBase.shorten(MaxHeaderFetch)
- tester.newPeer("peer", protocol, chain)
-
- // Make sure canceling works with a pristine downloader
- tester.downloader.Cancel()
- if !tester.downloader.queue.Idle() {
- t.Errorf("download queue not idle")
- }
- // Synchronise with the peer, but cancel afterwards
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- tester.downloader.Cancel()
- if !tester.downloader.queue.Idle() {
- t.Errorf("download queue not idle")
- }
-}
-
-// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } -func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FastSync) } -func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } - -func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Create various peers with various parts of the chain - targetPeers := 8 - chain := testChainBase.shorten(targetPeers * 100) - - for i := 0; i < targetPeers; i++ { - id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1))) - } - if err := tester.sync("peer #0", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chain.len()) -} - -// Tests that synchronisations behave well in multi-version protocol environments -// and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } -func TestMultiProtoSynchronisation66Fast(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FastSync) } -func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } - -func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Create a small enough block chain to download - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Create peers of every type - tester.newPeer("peer 66", eth.ETH66, chain) - //tester.newPeer("peer 65", eth.ETH67, chain) - - // Synchronise with the requested peer and make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chain.len()) - - // Check that no peers have been dropped off - for _, version := range []int{66} { - peer := fmt.Sprintf("peer %d", version) - if _, ok := tester.peers[peer]; !ok { - t.Errorf("%s dropped", peer) - } - } -} - -// Tests that if a block is empty (e.g. header only), no body request should be -// made, and instead the header should be assembled into a whole block in itself. 
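The empty-block short circuit verified by the test that follows reduces to a single predicate: a body is only worth requesting when the header commits to transactions or uncles. A sketch of that predicate; the `block` type here is a stand-in for `types.Block`, carrying just the counts that matter:

```go
package main

import "fmt"

// block is a simplified stand-in for types.Block.
type block struct {
	txs, uncles int
}

// needsBody reports whether a body request is required at all; empty blocks
// are assembled from the header alone, which is what the test verifies.
func needsBody(b block) bool {
	return b.txs > 0 || b.uncles > 0
}

func main() {
	fmt.Println(needsBody(block{0, 0})) // false: header-only block
	fmt.Println(needsBody(block{2, 0})) // true
}
```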
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } -func TestEmptyShortCircuit66Fast(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FastSync) } -func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } - -func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Create a block chain to download - chain := testChainBase - tester.newPeer("peer", protocol, chain) - - // Instrument the downloader to signal body requests - bodiesHave, receiptsHave := int32(0), int32(0) - tester.downloader.bodyFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&bodiesHave, int32(len(headers))) - } - tester.downloader.receiptFetchHook = func(headers []*types.Header) { - atomic.AddInt32(&receiptsHave, int32(len(headers))) - } - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chain.len()) - - // Validate the number of block bodies that should have been requested - bodiesNeeded, receiptsNeeded := 0, 0 - for _, block := range chain.blockm { - if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { - bodiesNeeded++ - } - } - for _, receipt := range chain.receiptm { - if mode == FastSync && len(receipt) > 0 { - receiptsNeeded++ - } - } - if int(bodiesHave) != bodiesNeeded { - t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded) - } - if int(receiptsHave) != receiptsNeeded { - t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded) - } -} - -// Tests that headers are enqueued continuously, preventing malicious nodes from -// stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } -func TestMissingHeaderAttack66Fast(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FastSync) } -func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } - -func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - brokenChain := chain.shorten(chain.len()) - delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2]) - tester.newPeer("attack", protocol, brokenChain) - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chain.len()) -} - -// Tests that if requested headers are shifted (i.e. first is missing), the queue -// detects the invalid numbering. 
-func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } -func TestShiftedHeaderAttack66Fast(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FastSync) } -func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } - -func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Attempt a full sync with an attacker feeding shifted headers - brokenChain := chain.shorten(chain.len()) - delete(brokenChain.headerm, brokenChain.chain[1]) - delete(brokenChain.blockm, brokenChain.chain[1]) - delete(brokenChain.receiptm, brokenChain.chain[1]) - tester.newPeer("attack", protocol, brokenChain) - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, chain.len()) -} - -// Tests that upon detecting an invalid header, the recent ones are rolled back -// for various failure scenarios. Afterwards a full sync is attempted to make -// sure no state was corrupted. -func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) } - -func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - - // Create a small enough block chain to download - targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks - chain := testChainBase.shorten(targetBlocks) - - // Attempt to sync with an attacker that feeds junk during the fast sync phase. - // This should result in the last fsHeaderSafetyNet headers being rolled back. - missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 - fastAttackChain := chain.shorten(chain.len()) - delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) - tester.newPeer("fast-attack", protocol, fastAttackChain) - - if err := tester.sync("fast-attack", nil, mode); err == nil { - t.Fatalf("succeeded fast attacker synchronisation") - } - if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) - } - - // Attempt to sync with an attacker that feeds junk during the block import phase. - // This should result in both the last fsHeaderSafetyNet number of headers being - // rolled back, and also the pivot point being reverted to a non-block status. 
- missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - blockAttackChain := chain.shorten(chain.len()) - delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in - delete(blockAttackChain.headerm, blockAttackChain.chain[missing]) - tester.newPeer("block-attack", protocol, blockAttackChain) - - if err := tester.sync("block-attack", nil, mode); err == nil { - t.Fatalf("succeeded block attacker synchronisation") - } - if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == FastSync { - if head := tester.CurrentBlock().NumberU64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - - // Attempt to sync with an attacker that withholds promised blocks after the - // fast sync pivot point. This could be a trial to leave the node with a bad - // but already imported pivot block. - withholdAttackChain := chain.shorten(chain.len()) - tester.newPeer("withhold-attack", protocol, withholdAttackChain) - tester.downloader.syncInitHook = func(uint64, uint64) { - for i := missing; i < withholdAttackChain.len(); i++ { - delete(withholdAttackChain.headerm, withholdAttackChain.chain[i]) - } - tester.downloader.syncInitHook = nil - } - if err := tester.sync("withhold-attack", nil, mode); err == nil { - t.Fatalf("succeeded withholding attacker synchronisation") - } - if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { - t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) - } - if mode == FastSync { - if head := tester.CurrentBlock().NumberU64(); head != 0 { - t.Errorf("fast sync pivot block #%d not rolled back", head) - } - } - - // synchronise with the valid peer and make sure sync succeeds. Since the last rollback - // should also disable fast syncing for this process, verify that we did a fresh full - // sync. Note, we can't assert anything about the receipts since we won't purge the - // database of them, hence we can't use assertOwnChain. - tester.newPeer("valid", protocol, chain) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - if hs := len(tester.ownHeaders); hs != chain.len() { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len()) - } - if mode != LightSync { - if bs := len(tester.ownBlocks); bs != chain.len() { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len()) - } - } - tester.terminate() -} - -// Tests that a peer advertising a high TD doesn't get to stall the downloader -// afterwards by not sending any useful hashes. 
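Every rollback assertion above bounds the surviving head using the safety-net arithmetic: at most `fsHeaderSafetyNet` unverified headers may outlive a rollback. A sketch of that target computation; the constant's value here is illustrative, not necessarily geth's:

```go
package main

import "fmt"

const fsHeaderSafetyNet = 2048 // illustrative value for the sketch

// rollbackTarget returns the header to unwind to after an invalid header at
// head: keep at most fsHeaderSafetyNet unverified headers, never unroll genesis.
func rollbackTarget(head uint64) uint64 {
	if head > fsHeaderSafetyNet {
		return head - fsHeaderSafetyNet
	}
	return 1
}

func main() {
	fmt.Println(rollbackTarget(10000)) // 7952
	fmt.Println(rollbackTarget(100))   // 1
}
```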
-func TestHighTDStarvationAttack66Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FullSync) -} -func TestHighTDStarvationAttack66Fast(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FastSync) -} -func TestHighTDStarvationAttack66Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, LightSync) -} - -func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - - chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain) - if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) - } - tester.terminate() -} - -// Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } - -func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - t.Parallel() - - // Define the disconnection requirement for individual hash fetch errors - tests := []struct { - result error - drop bool - }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - } - // Run the tests and check disconnection status - tester := newTester() - defer tester.terminate() - chain := testChainBase.shorten(1) - - for i, tt := range tests { - // Register a new peer and ensure its presence - id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, chain); err != nil { - t.Fatalf("test %d: failed to register new peer: %v", i, err) - } - if _, ok := tester.peers[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Simulate a synchronisation and check the required result - tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - - tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync) - if _, ok := tester.peers[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) - } - } -} - -// Tests that synchronisation progress (origin block number, current block number -// and highest block number) is tracked and updated correctly. 
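The attacker-dropping test above is a standard table-driven test mapping each sync error to a keep/drop verdict. Its skeleton as a runnable sketch, with local stand-in errors and a `shouldDrop` that mirrors a few rows of the table:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errBadPeer      = errors.New("bad peer")
	errStallingPeer = errors.New("stalling peer")
	errNoPeers      = errors.New("no peers")
)

// shouldDrop mirrors the verdict column of the table above for a few entries.
func shouldDrop(err error) bool {
	switch {
	case err == nil, errors.Is(err, errNoPeers):
		return false // benign outcomes keep the peer
	case errors.Is(err, errBadPeer), errors.Is(err, errStallingPeer):
		return true // protocol violations drop it
	default:
		return false
	}
}

func main() {
	tests := []struct {
		result error
		drop   bool
	}{
		{nil, false},
		{errNoPeers, false},
		{errBadPeer, true},
		{errStallingPeer, true},
	}
	for i, tt := range tests {
		if got := shouldDrop(tt.result); got != tt.drop {
			fmt.Printf("test %d: have %v, want %v\n", i, got, tt.drop)
		}
	}
	fmt.Println("table checked")
}
```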
-func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } -func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, eth.ETH66, FastSync) } -func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } - -func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2)) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-half", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(chain.len()/2 - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("peer-full", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(chain.len()/2 - 1), - CurrentBlock: uint64(chain.len()/2 - 1), - HighestBlock: uint64(chain.len() - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(chain.len()/2 - 1), - CurrentBlock: uint64(chain.len() - 1), - HighestBlock: uint64(chain.len() - 1), - }) -} - -func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - p := d.Progress() - //p.KnownStates, p.PulledStates = 0, 0 - //want.KnownStates, want.PulledStates = 0, 0 - if p != want { - t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want) - } -} - -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of a fork (or manual head -// revertal). 
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } -func TestForkedSyncProgress66Fast(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FastSync) } -func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } - -func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork A", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(chainA.len() - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Simulate a successful sync above the fork - tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - - // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork B", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(testChainBase.len()) - 1, - CurrentBlock: uint64(chainA.len() - 1), - HighestBlock: uint64(chainB.len() - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(testChainBase.len()) - 1, - CurrentBlock: uint64(chainB.len() - 1), - HighestBlock: uint64(chainB.len() - 1), - }) -} - -// Tests that if synchronisation is aborted due to some failure, then the progress -// origin is not updated in the next sync cycle, as it should be considered the -// continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } -func TestFailedSyncProgress66Fast(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FastSync) } -func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } - -func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Attempt a full sync with a faulty peer - brokenChain := chain.shorten(chain.len()) - missing := brokenChain.len() / 2 - delete(brokenChain.headerm, brokenChain.chain[missing]) - delete(brokenChain.blockm, brokenChain.chain[missing]) - delete(brokenChain.receiptm, brokenChain.chain[missing]) - tester.newPeer("faulty", protocol, brokenChain) - - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("faulty", nil, mode); err == nil { - panic("succeeded faulty synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(brokenChain.len() - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress origin remains the same - // after a failure - tester.newPeer("valid", protocol, chain) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", afterFailedSync) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(chain.len() - 1), - HighestBlock: uint64(chain.len() - 1), - }) -} - -// Tests that if an attacker fakes a chain height, after the attack is detected, -// the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } -func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FastSync) } -func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } - -func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Create and sync with an attacker that promises a higher chain than available. 
- brokenChain := chain.shorten(chain.len()) - numMissing := 5 - for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- { - delete(brokenChain.headerm, brokenChain.chain[i]) - } - tester.newPeer("attack", protocol, brokenChain) - - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("attack", nil, mode); err == nil { - panic("succeeded attacker synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(brokenChain.len() - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress height has been reduced to - // the true value. - validChain := chain.shorten(chain.len() - numMissing) - tester.newPeer("valid", protocol, validChain) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(validChain.len() - 1), - }) - - // Check final progress after successful sync. - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(validChain.len() - 1), - HighestBlock: uint64(validChain.len() - 1), - }) -} - -// This test reproduces an issue where unexpected deliveries would -// block indefinitely if they arrived at the right time. -func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FullSync) } -func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FastSync) } -func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) } - -func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - master := newTester() - defer master.terminate() - chain := testChainBase.shorten(15) - - for i := 0; i < 200; i++ { - tester := newTester() - tester.peerDb = master.peerDb - tester.newPeer("peer", protocol, chain) - - // Whenever the downloader requests headers, flood it with - // a lot of unrequested header deliveries. 
- tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{ - peer: tester.downloader.peers.peers["peer"].peer, - tester: tester, - } - if err := tester.sync("peer", nil, mode); err != nil { - t.Errorf("test %d: sync failed: %v", i, err) - } - tester.terminate() - } -} - -type floodingTestPeer struct { - peer Peer - tester *downloadTester -} - -func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() } -func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error { - return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse) -} -func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error { - return ftp.peer.RequestBodies(hashes) -} -func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error { - return ftp.peer.RequestReceipts(hashes) -} -func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error { - return ftp.peer.RequestNodeData(hashes) -} - -func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error { - deliveriesDone := make(chan struct{}, 500) - for i := 0; i < cap(deliveriesDone)-1; i++ { - peer := fmt.Sprintf("fake-peer%d", i) - go func() { - ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}}) - deliveriesDone <- struct{}{} - }() - } - - // None of the extra deliveries should block. - timeout := time.After(60 * time.Second) - launched := false - for i := 0; i < cap(deliveriesDone); i++ { - select { - case <-deliveriesDone: - if !launched { - // Start delivering the requested headers - // after one of the flooding responses has arrived. - go func() { - ftp.peer.RequestHeadersByNumber(from, count, skip, reverse) - deliveriesDone <- struct{}{} - }() - launched = true - } - case <-timeout: - panic("blocked") - } - } - return nil -} - -func TestRemoteHeaderRequestSpan(t *testing.T) { - testCases := []struct { - remoteHeight uint64 - localHeight uint64 - expected []int - }{ - // Remote is way higher. We should ask for the remote head and go backwards - {1500, 1000, - []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499}, - }, - {15000, 13006, - []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999}, - }, - // Remote is pretty close to us. We don't have to fetch as many - {1200, 1150, - []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199}, - }, - // Remote is equal to us (so on a fork with higher td) - // We should get the closest couple of ancestors - {1500, 1500, - []int{1497, 1499}, - }, - // We're higher than the remote! 
Odd - {1000, 1500, - []int{997, 999}, - }, - // Check some weird edge cases to make sure it behaves somewhat rationally - {0, 1500, - []int{0, 2}, - }, - {6000000, 0, - []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999}, - }, - {0, 0, - []int{0, 2}, - }, - } - reqs := func(from, count, span int) []int { - var r []int - num := from - for len(r) < count { - r = append(r, num) - num += span + 1 - } - return r - } - for i, tt := range testCases { - from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) - data := reqs(int(from), count, span) - - if max != uint64(data[len(data)-1]) { - t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) - } - failed := false - if len(data) != len(tt.expected) { - failed = true - t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) - } else { - for j, n := range data { - if n != tt.expected[j] { - failed = true - break - } - } - } - if failed { - res := strings.ReplaceAll(fmt.Sprint(data), " ", ",") - exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",") - t.Logf("got: %v\n", res) - t.Logf("exp: %v\n", exp) - t.Errorf("test %d: wrong values", i) - } - } -} - -// Tests that peers below a pre-configured checkpoint block are prevented from -// being fast-synced from, avoiding potential cheap eclipse attacks. -func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) } -func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) } -func TestCheckpointEnforcement66Light(t *testing.T) { - testCheckpointEnforcement(t, eth.ETH66, LightSync) -} - -func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - // Create a new tester with a particular hard-coded checkpoint block - tester := newTester() - defer tester.terminate() - - tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256 - chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1) - - // Attempt to sync with the peer and validate the result - tester.newPeer("peer", protocol, chain) - - var expect error - if mode == FastSync || mode == LightSync { - expect = errUnsyncedPeer - } - if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) { - t.Fatalf("block sync error mismatch: have %v, want %v", err, expect) - } - if mode == FastSync || mode == LightSync { - assertOwnChain(t, tester, 1) - } else { - assertOwnChain(t, tester, chain.len()) - } -} diff --git a/les/downloader/metrics.go b/les/downloader/metrics.go deleted file mode 100644 index c38732043..000000000 --- a/les/downloader/metrics.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
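// Editorial worked example for TestRemoteHeaderRequestSpan above: with
// remoteHeight=1500 and localHeight=1000, calculateRequestSpan yields
// from=1323, count=12 and span=15, so the reqs helper steps by span+1=16:
// 1323, 1339, ..., 1323+11*16 = 1499, i.e. twelve sample points ending
// exactly one block below the advertised remote head.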
- -// Contains the metrics collected by the downloader. - -package downloader - -import ( - "github.com/ethereum/go-ethereum/metrics" -) - -var ( - headerInMeter = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil) - headerReqTimer = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil) - headerDropMeter = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil) - headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil) - - bodyInMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil) - bodyReqTimer = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil) - bodyDropMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil) - bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil) - - receiptInMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/in", nil) - receiptReqTimer = metrics.NewRegisteredTimer("eth/downloader/receipts/req", nil) - receiptDropMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil) - receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil) - - stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil) - stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil) - - throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil) -) diff --git a/les/downloader/modes.go b/les/downloader/modes.go deleted file mode 100644 index 3ea14d22d..000000000 --- a/les/downloader/modes.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package downloader - -import "fmt" - -// SyncMode represents the synchronisation mode of the downloader. -// It is a uint32 as it is used with atomic operations. -type SyncMode uint32 - -const ( - FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks - FastSync // Quickly download the headers, full sync only at the chain head - SnapSync // Download the chain and the state via compact snapshots - LightSync // Download only the headers and terminate afterwards -) - -func (mode SyncMode) IsValid() bool { - return mode >= FullSync && mode <= LightSync -} - -// String implements the stringer interface. 
-func (mode SyncMode) String() string { - switch mode { - case FullSync: - return "full" - case FastSync: - return "fast" - case SnapSync: - return "snap" - case LightSync: - return "light" - default: - return "unknown" - } -} - -func (mode SyncMode) MarshalText() ([]byte, error) { - switch mode { - case FullSync: - return []byte("full"), nil - case FastSync: - return []byte("fast"), nil - case SnapSync: - return []byte("snap"), nil - case LightSync: - return []byte("light"), nil - default: - return nil, fmt.Errorf("unknown sync mode %d", mode) - } -} - -func (mode *SyncMode) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *mode = FullSync - case "fast": - *mode = FastSync - case "snap": - *mode = SnapSync - case "light": - *mode = LightSync - default: - return fmt.Errorf(`unknown sync mode %q, want "full", "fast", "snap" or "light"`, text) - } - return nil -} diff --git a/les/downloader/peer.go b/les/downloader/peer.go deleted file mode 100644 index c2161e2da..000000000 --- a/les/downloader/peer.go +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the active peer-set of the downloader, maintaining both failures -// as well as reputation metrics to prioritize the block retrievals. - -package downloader - -import ( - "errors" - "math/big" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/msgrate" -) - -const ( - maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items -) - -var ( - errAlreadyFetching = errors.New("already fetching blocks from peer") - errAlreadyRegistered = errors.New("peer is already registered") - errNotRegistered = errors.New("peer is not registered") -) - -// peerConnection represents an active peer from which hashes and blocks are retrieved. 
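// Editorial usage sketch for the SyncMode text codec above; ExampleSyncMode is
// a hypothetical test helper, not part of this change (fmt is already imported
// by modes.go):
func ExampleSyncMode() {
	var mode SyncMode
	if err := mode.UnmarshalText([]byte("snap")); err != nil {
		panic(err)
	}
	text, _ := mode.MarshalText() // round-trips through the same table
	fmt.Printf("%v (valid=%v) -> %s\n", mode, mode.IsValid(), text)
	// Output: snap (valid=true) -> snap
}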
-type peerConnection struct { - id string // Unique identifier of the peer - - headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1) - blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) - receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) - stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) - - headerStarted time.Time // Time instance when the last header fetch was started - blockStarted time.Time // Time instance when the last block (body) fetch was started - receiptStarted time.Time // Time instance when the last receipt fetch was started - stateStarted time.Time // Time instance when the last node data fetch was started - - rates *msgrate.Tracker // Tracker to hone in on the number of items retrievable per second - lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously) - - peer Peer - - version uint // Eth protocol version number to switch strategies - log log.Logger // Contextual logger to add extra infos to peer logs - lock sync.RWMutex -} - -// LightPeer encapsulates the methods required to synchronise with a remote light peer. -type LightPeer interface { - Head() (common.Hash, *big.Int) - RequestHeadersByHash(common.Hash, int, int, bool) error - RequestHeadersByNumber(uint64, int, int, bool) error -} - -// Peer encapsulates the methods required to synchronise with a remote full peer. -type Peer interface { - LightPeer - RequestBodies([]common.Hash) error - RequestReceipts([]common.Hash) error - RequestNodeData([]common.Hash) error -} - -// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. -type lightPeerWrapper struct { - peer LightPeer -} - -func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } -func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error { - return w.peer.RequestHeadersByHash(h, amount, skip, reverse) -} -func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error { - return w.peer.RequestHeadersByNumber(i, amount, skip, reverse) -} -func (w *lightPeerWrapper) RequestBodies([]common.Hash) error { - panic("RequestBodies not supported in light client mode sync") -} -func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error { - panic("RequestReceipts not supported in light client mode sync") -} -func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { - panic("RequestNodeData not supported in light client mode sync") -} - -// newPeerConnection creates a new downloader peer. -func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { - return &peerConnection{ - id: id, - lacking: make(map[common.Hash]struct{}), - peer: peer, - version: version, - log: logger, - } -} - -// Reset clears the internal state of a peer entity. -func (p *peerConnection) Reset() { - p.lock.Lock() - defer p.lock.Unlock() - - atomic.StoreInt32(&p.headerIdle, 0) - atomic.StoreInt32(&p.blockIdle, 0) - atomic.StoreInt32(&p.receiptIdle, 0) - atomic.StoreInt32(&p.stateIdle, 0) - - p.lacking = make(map[common.Hash]struct{}) -} - -// FetchHeaders sends a header retrieval request to the remote peer. 
-func (p *peerConnection) FetchHeaders(from uint64, count int) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { - return errAlreadyFetching - } - p.headerStarted = time.Now() - - // Issue the header retrieval request (absolute upwards without gaps) - go p.peer.RequestHeadersByNumber(from, count, 0, false) - - return nil -} - -// FetchBodies sends a block body retrieval request to the remote peer. -func (p *peerConnection) FetchBodies(request *fetchRequest) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { - return errAlreadyFetching - } - p.blockStarted = time.Now() - - go func() { - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - p.peer.RequestBodies(hashes) - }() - - return nil -} - -// FetchReceipts sends a receipt retrieval request to the remote peer. -func (p *peerConnection) FetchReceipts(request *fetchRequest) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { - return errAlreadyFetching - } - p.receiptStarted = time.Now() - - go func() { - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - p.peer.RequestReceipts(hashes) - }() - - return nil -} - -// FetchNodeData sends a node state data retrieval request to the remote peer. -func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { - return errAlreadyFetching - } - p.stateStarted = time.Now() - - go p.peer.RequestNodeData(hashes) - - return nil -} - -// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval -// requests. Its estimated header retrieval throughput is updated with that measured -// just now. -func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered) - atomic.StoreInt32(&p.headerIdle, 0) -} - -// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval -// requests. Its estimated body retrieval throughput is updated with that measured -// just now. -func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered) - atomic.StoreInt32(&p.blockIdle, 0) -} - -// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt -// retrieval requests. Its estimated receipt retrieval throughput is updated -// with that measured just now. -func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered) - atomic.StoreInt32(&p.receiptIdle, 0) -} - -// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie -// data retrieval requests. Its estimated state retrieval throughput is updated -// with that measured just now. 
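// Editorial sketch of the idle-latch pattern shared by the Fetch* methods
// above: a 0/1 int32 flipped with compare-and-swap acts as a non-blocking
// "busy" marker, so concurrent dispatchers can never double-book one peer.
// tryDispatch is a hypothetical helper, not part of this package:
func tryDispatch(idle *int32, send func()) error {
	if !atomic.CompareAndSwapInt32(idle, 0, 1) {
		return errAlreadyFetching // lost the race: a fetch is already in flight
	}
	go send() // issue the network request asynchronously, as FetchHeaders does
	return nil
}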
-func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered) - atomic.StoreInt32(&p.stateIdle, 0) -} - -// HeaderCapacity retrieves the peer's header download allowance based on its -// previously discovered throughput. -func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { - cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT) - if cap > MaxHeaderFetch { - cap = MaxHeaderFetch - } - return cap -} - -// BlockCapacity retrieves the peer's block download allowance based on its -// previously discovered throughput. -func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { - cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT) - if cap > MaxBlockFetch { - cap = MaxBlockFetch - } - return cap -} - -// ReceiptCapacity retrieves the peer's receipt download allowance based on its -// previously discovered throughput. -func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { - cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT) - if cap > MaxReceiptFetch { - cap = MaxReceiptFetch - } - return cap -} - -// NodeDataCapacity retrieves the peer's state download allowance based on its -// previously discovered throughput. -func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { - cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT) - if cap > MaxStateFetch { - cap = MaxStateFetch - } - return cap -} - -// MarkLacking appends a new entity to the set of items (blocks, receipts, states) -// that a peer is known not to have (i.e. have been requested before). If the -// set reaches its maximum allowed capacity, items are randomly dropped off. -func (p *peerConnection) MarkLacking(hash common.Hash) { - p.lock.Lock() - defer p.lock.Unlock() - - for len(p.lacking) >= maxLackingHashes { - for drop := range p.lacking { - delete(p.lacking, drop) - break - } - } - p.lacking[hash] = struct{}{} -} - -// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking -// list (i.e. whether we know that the peer does not have it). -func (p *peerConnection) Lacks(hash common.Hash) bool { - p.lock.RLock() - defer p.lock.RUnlock() - - _, ok := p.lacking[hash] - return ok -} - -// peerSet represents the collection of active peers participating in the chain -// download procedure. -type peerSet struct { - peers map[string]*peerConnection - rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat - - newPeerFeed event.Feed - peerDropFeed event.Feed - - lock sync.RWMutex -} - -// newPeerSet creates a new peer set to track the active download sources. -func newPeerSet() *peerSet { - return &peerSet{ - peers: make(map[string]*peerConnection), - rates: msgrate.NewTrackers(log.New("proto", "eth")), - } -} - -// SubscribeNewPeers subscribes to peer arrival events. -func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription { - return ps.newPeerFeed.Subscribe(ch) -} - -// SubscribePeerDrops subscribes to peer departure events. -func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription { - return ps.peerDropFeed.Subscribe(ch) -} - -// Reset iterates over the current peer set, and resets each of the known peers -// to prepare for a next batch of block retrieval. 
-func (ps *peerSet) Reset() { - ps.lock.RLock() - defer ps.lock.RUnlock() - - for _, peer := range ps.peers { - peer.Reset() - } -} - -// Register injects a new peer into the working set, or returns an error if the -// peer is already known. -// -// The method also sets the starting throughput values of the new peer to the -// average of all existing peers, to give it a realistic chance of being used -// for data retrievals. -func (ps *peerSet) Register(p *peerConnection) error { - // Register the new peer with some meaningful defaults - ps.lock.Lock() - if _, ok := ps.peers[p.id]; ok { - ps.lock.Unlock() - return errAlreadyRegistered - } - p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip()) - if err := ps.rates.Track(p.id, p.rates); err != nil { - ps.lock.Unlock() - return err - } - ps.peers[p.id] = p - ps.lock.Unlock() - - ps.newPeerFeed.Send(p) - return nil -} - -// Unregister removes a remote peer from the active set, disabling any further -// actions to/from that particular entity. -func (ps *peerSet) Unregister(id string) error { - ps.lock.Lock() - p, ok := ps.peers[id] - if !ok { - ps.lock.Unlock() - return errNotRegistered - } - delete(ps.peers, id) - ps.rates.Untrack(id) - ps.lock.Unlock() - - ps.peerDropFeed.Send(p) - return nil -} - -// Peer retrieves the registered peer with the given id. -func (ps *peerSet) Peer(id string) *peerConnection { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return ps.peers[id] -} - -// Len returns the current number of peers in the set. -func (ps *peerSet) Len() int { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return len(ps.peers) -} - -// AllPeers retrieves a flat list of all the peers within the set. -func (ps *peerSet) AllPeers() []*peerConnection { - ps.lock.RLock() - defer ps.lock.RUnlock() - - list := make([]*peerConnection, 0, len(ps.peers)) - for _, p := range ps.peers { - list = append(list, p) - } - return list -} - -// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.headerIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.BlockHeadersMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput) -} - -// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within -// the active peer set, ordered by their reputation. -func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.blockIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.BlockBodiesMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput) -} - -// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.receiptIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.ReceiptsMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput) -} - -// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle -// peers within the active peer set, ordered by their reputation. 
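// Editorial demo of the descending-capacity ordering implemented by the
// peerCapacitySort type below; exampleCapacityOrder is hypothetical:
func exampleCapacityOrder() []int {
	s := &peerCapacitySort{
		p:  make([]*peerConnection, 3), // placeholder peers
		tp: []int{10, 30, 20},          // their measured capacities
	}
	sort.Sort(s) // Less compares tp[i] > tp[j], so higher capacity sorts first
	return s.tp  // [30 20 10]: the fastest idle peer is always tried first
}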
-func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.stateIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.NodeDataMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput) -} - -// idlePeers retrieves a flat list of all currently idle peers satisfying the -// protocol version constraints, using the provided function to check idleness. -// The resulting set of peers is sorted by their capacity. -func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) { - ps.lock.RLock() - defer ps.lock.RUnlock() - - var ( - total = 0 - idle = make([]*peerConnection, 0, len(ps.peers)) - tps = make([]int, 0, len(ps.peers)) - ) - for _, p := range ps.peers { - if p.version >= minProtocol && p.version <= maxProtocol { - if idleCheck(p) { - idle = append(idle, p) - tps = append(tps, capacity(p)) - } - total++ - } - } - - // And sort them - sortPeers := &peerCapacitySort{idle, tps} - sort.Sort(sortPeers) - return sortPeers.p, total -} - -// peerCapacitySort implements sort.Interface. -// It sorts peer connections by capacity (descending). -type peerCapacitySort struct { - p []*peerConnection - tp []int -} - -func (ps *peerCapacitySort) Len() int { - return len(ps.p) -} - -func (ps *peerCapacitySort) Less(i, j int) bool { - return ps.tp[i] > ps.tp[j] -} - -func (ps *peerCapacitySort) Swap(i, j int) { - ps.p[i], ps.p[j] = ps.p[j], ps.p[i] - ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i] -} diff --git a/les/downloader/queue.go b/les/downloader/queue.go deleted file mode 100644 index 6896b09b3..000000000 --- a/les/downloader/queue.go +++ /dev/null @@ -1,913 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Contains the block download scheduler to collect download tasks and schedule -// them in an ordered and throttled way. 
- -package downloader - -import ( - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/trie" -) - -const ( - bodyType = uint(0) - receiptType = uint(1) -) - -var ( - blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download - blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks - blockCacheMemory = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching - blockCacheSizeWeight = 0.1 // Multiplier to approximate the average block size based on past ones -) - -var ( - errNoFetchesPending = errors.New("no fetches pending") - errStaleDelivery = errors.New("stale delivery") -) - -// fetchRequest is a currently running data retrieval operation. -type fetchRequest struct { - Peer *peerConnection // Peer to which the request was sent - From uint64 // [eth/62] Requested chain element index (used for skeleton fills only) - Headers []*types.Header // [eth/62] Requested headers, sorted by request order - Time time.Time // Time when the request was made -} - -// fetchResult is a struct collecting partial results from data fetchers until -// all outstanding pieces complete and the result as a whole can be processed. -type fetchResult struct { - pending int32 // Flag telling what deliveries are outstanding - - Header *types.Header - Uncles []*types.Header - Transactions types.Transactions - Receipts types.Receipts -} - -func newFetchResult(header *types.Header, fastSync bool) *fetchResult { - item := &fetchResult{ - Header: header, - } - if !header.EmptyBody() { - item.pending |= (1 << bodyType) - } - if fastSync && !header.EmptyReceipts() { - item.pending |= (1 << receiptType) - } - return item -} - -// SetBodyDone flags the body as finished. -func (f *fetchResult) SetBodyDone() { - if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 { - atomic.AddInt32(&f.pending, -1) - } -} - -// AllDone checks if item is done. -func (f *fetchResult) AllDone() bool { - return atomic.LoadInt32(&f.pending) == 0 -} - -// SetReceiptsDone flags the receipts as finished. -func (f *fetchResult) SetReceiptsDone() { - if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 { - atomic.AddInt32(&f.pending, -2) - } -} - -// Done checks if the given type is done already -func (f *fetchResult) Done(kind uint) bool { - v := atomic.LoadInt32(&f.pending) - return v&(1<<kind) == 0 -} - -// InFlightHeaders retrieves whether there are header fetch requests currently -// in flight. -func (q *queue) InFlightHeaders() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.headerPendPool) > 0 -} - -// InFlightBlocks retrieves whether there are block fetch requests currently in -// flight. -func (q *queue) InFlightBlocks() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.blockPendPool) > 0 -} - -// InFlightReceipts retrieves whether there are receipt fetch requests currently -// in flight. -func (q *queue) InFlightReceipts() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.receiptPendPool) > 0 -} - -// Idle returns whether the queue is fully idle or has some data still inside. -func (q *queue) Idle() bool { - q.lock.Lock() - defer q.lock.Unlock() - - queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size() - pending := len(q.blockPendPool) + len(q.receiptPendPool) - - return (queued + pending) == 0 -} - -// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill -// up an already retrieved header skeleton. 
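// Editorial sketch of the pending-bitmask lifecycle of the fetchResult type
// above, for a fast-sync header whose body and receipts are both non-empty;
// exampleFetchResultLifecycle is hypothetical:
func exampleFetchResultLifecycle(header *types.Header) bool {
	res := newFetchResult(header, true) // pending = 1<<bodyType | 1<<receiptType
	res.SetBodyDone()                   // subtracts 1, clearing the body bit
	res.SetReceiptsDone()               // subtracts 2, clearing the receipt bit
	return res.AllDone()                // true: no deliveries left outstanding
}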
-func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { - q.lock.Lock() - defer q.lock.Unlock() - - // No skeleton retrieval can be in progress, fail hard if so (huge implementation bug) - if q.headerResults != nil { - panic("skeleton assembly already in progress") - } - // Schedule all the header retrieval tasks for the skeleton assembly - q.headerTaskPool = make(map[uint64]*types.Header) - q.headerTaskQueue = prque.New[int64, uint64](nil) - q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains - q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) - q.headerProced = 0 - q.headerOffset = from - q.headerContCh = make(chan bool, 1) - - for i, header := range skeleton { - index := from + uint64(i*MaxHeaderFetch) - - q.headerTaskPool[index] = header - q.headerTaskQueue.Push(index, -int64(index)) - } -} - -// RetrieveHeaders retrieves the header chain assembled based on the scheduled -// skeleton. -func (q *queue) RetrieveHeaders() ([]*types.Header, int) { - q.lock.Lock() - defer q.lock.Unlock() - - headers, proced := q.headerResults, q.headerProced - q.headerResults, q.headerProced = nil, 0 - - return headers, proced -} - -// Schedule adds a set of headers to the download queue for scheduling, returning -// the new headers encountered. -func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { - q.lock.Lock() - defer q.lock.Unlock() - - // Insert all the headers prioritised by the contained block number - inserts := make([]*types.Header, 0, len(headers)) - for _, header := range headers { - // Make sure chain order is honoured and preserved throughout - hash := header.Hash() - if header.Number == nil || header.Number.Uint64() != from { - log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from) - break - } - if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash { - log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash) - break - } - // Make sure no duplicate requests are executed - // We cannot skip this, even if the block is empty, since this is - // what triggers the fetchResult creation. - if _, ok := q.blockTaskPool[hash]; ok { - log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash) - } else { - q.blockTaskPool[hash] = header - q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) - } - // Queue for receipt retrieval - if q.mode == FastSync && !header.EmptyReceipts() { - if _, ok := q.receiptTaskPool[hash]; ok { - log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash) - } else { - q.receiptTaskPool[hash] = header - q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) - } - } - inserts = append(inserts, header) - q.headerHead = hash - from++ - } - return inserts -} - -// Results retrieves and permanently removes a batch of fetch results from -// the cache. The result slice will be empty if the queue has been closed. -// Results can be called concurrently with Deliver and Schedule, -// but assumes that there are not two simultaneous callers to Results. -func (q *queue) Results(block bool) []*fetchResult { - // Abort early if there are no items and non-blocking requested - if !block && !q.resultCache.HasCompletedItems() { - return nil - } - closed := false - for !closed && !q.resultCache.HasCompletedItems() { - // In order to wait on 'active', we need to obtain the lock. 
- // That may take a while if someone is delivering at the same - time, so after obtaining the lock, we check again whether there - are any results to fetch. - Also, between asking for the lock and obtaining it, someone - may have closed the queue. In that case, we should - return the available results and stop blocking - q.lock.Lock() - if q.resultCache.HasCompletedItems() || q.closed { - q.lock.Unlock() - break - } - // No items available, and not closed - q.active.Wait() - closed = q.closed - q.lock.Unlock() - } - // Regardless of whether the queue is closed or not, we can still deliver whatever we have - results := q.resultCache.GetCompleted(maxResultsProcess) - for _, result := range results { - // Recalculate the result item weights to prevent memory exhaustion - size := result.Header.Size() - for _, uncle := range result.Uncles { - size += uncle.Size() - } - for _, receipt := range result.Receipts { - size += receipt.Size() - } - for _, tx := range result.Transactions { - size += common.StorageSize(tx.Size()) - } - q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + - (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize - } - // Using the newly calibrated result size, figure out the new throttle limit - // on the result cache - throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize) - throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold) - - // Log some info at certain times - if time.Since(q.lastStatLog) > 60*time.Second { - q.lastStatLog = time.Now() - info := q.Stats() - info = append(info, "throttle", throttleThreshold) - log.Info("Downloader queue stats", info...) - } - return results -} - -func (q *queue) Stats() []interface{} { - q.lock.RLock() - defer q.lock.RUnlock() - - return q.stats() -} - -func (q *queue) stats() []interface{} { - return []interface{}{ - "receiptTasks", q.receiptTaskQueue.Size(), - "blockTasks", q.blockTaskQueue.Size(), - "itemSize", q.resultSize, - } -} - -// ReserveHeaders reserves a set of headers for the given peer, skipping any -// previously failed batches. -func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest { - q.lock.Lock() - defer q.lock.Unlock() - - // Short circuit if the peer's already downloading something (sanity check to - // not corrupt state) - if _, ok := q.headerPendPool[p.id]; ok { - return nil - } - // Retrieve a batch of hashes, skipping previously failed ones - send, skip := uint64(0), []uint64{} - for send == 0 && !q.headerTaskQueue.Empty() { - from, _ := q.headerTaskQueue.Pop() - if q.headerPeerMiss[p.id] != nil { - if _, ok := q.headerPeerMiss[p.id][from]; ok { - skip = append(skip, from) - continue - } - } - send = from - } - // Merge all the skipped batches back - for _, from := range skip { - q.headerTaskQueue.Push(from, -int64(from)) - } - // Assemble and return the block download request - if send == 0 { - return nil - } - request := &fetchRequest{ - Peer: p, - From: send, - Time: time.Now(), - } - q.headerPendPool[p.id] = request - return request -} - -// ReserveBodies reserves a set of body fetches for the given peer, skipping any -// previously failed downloads. Besides the next batch of needed fetches, it also -// returns a flag indicating whether empty blocks were queued requiring processing. 
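// Editorial worked example for the throttle threshold computed in Results
// above: with blockCacheMemory = 256 MiB and a calibrated resultSize of
// 64 KiB, the ceiling division (268435456 + 65536 - 1) / 65536 yields 4096
// result slots, well below the blockCacheMaxItems cap of 8192 on the cache.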
-func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) { - q.lock.Lock() - defer q.lock.Unlock() - - return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType) -} - -// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping -// any previously failed downloads. Besides the next batch of needed fetches, it -// also returns a flag indicating whether empty receipts were queued requiring importing. -func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) { - q.lock.Lock() - defer q.lock.Unlock() - - return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType) -} - -// reserveHeaders reserves a set of data download operations for a given peer, -// skipping any previously failed ones. This method is a generic version used -// by the individual special reservation functions. -// -// Note, this method expects the queue lock to be already held for writing. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. -// -// Returns: -// -// item - the fetchRequest -// progress - whether any progress was made -// throttle - whether the caller should throttle for a while -func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header], - pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) { - // Short circuit if the pool has been depleted, or if the peer's already - // downloading something (sanity check not to corrupt state) - if taskQueue.Empty() { - return nil, false, true - } - if _, ok := pendPool[p.id]; ok { - return nil, false, false - } - // Retrieve a batch of tasks, skipping previously failed ones - send := make([]*types.Header, 0, count) - skip := make([]*types.Header, 0) - progress := false - throttled := false - for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ { - // The task queue will pop items in order, so the highest-priority block - // is also the lowest block number. - header, _ := taskQueue.Peek() - - // We can ask the resultcache whether this header is within the - "prioritized" segment of blocks. If it is not, we need to throttle - - stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync) - if stale { - // Don't put back in the task queue, this item has already been - // delivered upstream - taskQueue.PopItem() - progress = true - delete(taskPool, header.Hash()) - proc = proc - 1 - log.Error("Fetch reservation already delivered", "number", header.Number.Uint64()) - continue - } - if throttle { - // There are no result slots available. Leave it in the task queue - // However, if there are any left as 'skipped', we should not tell - // the caller to throttle, since we still want some other - // peer to fetch those for us - throttled = len(skip) == 0 - break - } - if err != nil { - // This most definitely should _not_ happen - log.Warn("Failed to reserve headers", "err", err) - // There are no result slots available. 
Leave it in the task queue - break - } - if item.Done(kind) { - // If it's a noop, we can skip this task - delete(taskPool, header.Hash()) - taskQueue.PopItem() - proc = proc - 1 - progress = true - continue - } - // Remove it from the task queue - taskQueue.PopItem() - // Otherwise unless the peer is known not to have the data, add to the retrieve list - if p.Lacks(header.Hash()) { - skip = append(skip, header) - } else { - send = append(send, header) - } - } - // Merge all the skipped headers back - for _, header := range skip { - taskQueue.Push(header, -int64(header.Number.Uint64())) - } - if q.resultCache.HasCompletedItems() { - // Wake Results, resultCache was modified - q.active.Signal() - } - // Assemble and return the block download request - if len(send) == 0 { - return nil, progress, throttled - } - request := &fetchRequest{ - Peer: p, - Headers: send, - Time: time.Now(), - } - pendPool[p.id] = request - return request, progress, throttled -} - -// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue. -func (q *queue) CancelHeaders(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.headerTaskQueue, q.headerPendPool) -} - -// CancelBodies aborts a body fetch request, returning all pending headers to the -// task queue. -func (q *queue) CancelBodies(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.blockTaskQueue, q.blockPendPool) -} - -// CancelReceipts aborts a body fetch request, returning all pending headers to -// the task queue. -func (q *queue) CancelReceipts(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) -} - -// Cancel aborts a fetch request, returning all pending hashes to the task queue. -func (q *queue) cancel(request *fetchRequest, taskQueue interface{}, pendPool map[string]*fetchRequest) { - if request.From > 0 { - taskQueue.(*prque.Prque[int64, uint64]).Push(request.From, -int64(request.From)) - } - for _, header := range request.Headers { - taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64())) - } - delete(pendPool, request.Peer.id) -} - -// Revoke cancels all pending requests belonging to a given peer. This method is -// meant to be called during a peer drop to quickly reassign owned data fetches -// to remaining nodes. -func (q *queue) Revoke(peerID string) { - q.lock.Lock() - defer q.lock.Unlock() - - if request, ok := q.blockPendPool[peerID]; ok { - for _, header := range request.Headers { - q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) - } - delete(q.blockPendPool, peerID) - } - if request, ok := q.receiptPendPool[peerID]; ok { - for _, header := range request.Headers { - q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) - } - delete(q.receiptPendPool, peerID) - } -} - -// ExpireHeaders checks for in flight requests that exceeded a timeout allowance, -// canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter) -} - -// ExpireBodies checks for in flight block body requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalisation. 
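// Editorial note on the -int64(...) priorities used when cancel, Revoke and
// expire above push tasks back: prque pops the highest priority first, so
// negating the block number makes the lowest (oldest) block pop first,
// preserving chain order on retry. examplePriorityOrder is hypothetical:
func examplePriorityOrder() []uint64 {
	q := prque.New[int64, uint64](nil)
	for _, n := range []uint64{5, 3, 9} {
		q.Push(n, -int64(n)) // block 3 gets priority -3, the highest of the three
	}
	var order []uint64
	for !q.Empty() {
		n, _ := q.Pop()
		order = append(order, n)
	}
	return order // [3 5 9]: ascending block numbers
}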
-func (q *queue) ExpireBodies(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) -} - -// ExpireReceipts checks for in flight receipt requests that exceeded a timeout -// allowance, canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int { - q.lock.Lock() - defer q.lock.Unlock() - - return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) -} - -// expire is the generic check that moves expired tasks from a pending pool back -// into a task pool, returning all entities caught with expired tasks. -// -// Note, this method expects the queue lock to be already held. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. -func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue interface{}, timeoutMeter metrics.Meter) map[string]int { - // Iterate over the expired requests and return each to the queue - expiries := make(map[string]int) - for id, request := range pendPool { - if time.Since(request.Time) > timeout { - // Update the metrics with the timeout - timeoutMeter.Mark(1) - - // Return any unsatisfied requests to the pool - if request.From > 0 { - taskQueue.(*prque.Prque[int64, uint64]).Push(request.From, -int64(request.From)) - } - for _, header := range request.Headers { - taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64())) - } - // Add the peer to the expiry report along with the number of failed requests - expiries[id] = len(request.Headers) - - // Remove the expired requests from the pending pool directly - delete(pendPool, id) - } - } - return expiries -} - -// DeliverHeaders injects a header retrieval response into the header results -// cache. This method either accepts all headers it received, or none of them -// if they do not map correctly to the skeleton. -// -// If the headers are accepted, the method makes an attempt to deliver the set -// of ready headers to the processor to keep the pipeline full. However, it will -// not block to prevent stalling other pending deliveries. 
-func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - - var logger log.Logger - if len(id) < 16 { - // Tests use short IDs, don't choke on them - logger = log.New("peer", id) - } else { - logger = log.New("peer", id[:16]) - } - // Short circuit if the data was never requested - request := q.headerPendPool[id] - if request == nil { - return 0, errNoFetchesPending - } - headerReqTimer.UpdateSince(request.Time) - delete(q.headerPendPool, id) - - // Ensure headers can be mapped onto the skeleton chain - target := q.headerTaskPool[request.From].Hash() - - accepted := len(headers) == MaxHeaderFetch - if accepted { - if headers[0].Number.Uint64() != request.From { - logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From) - accepted = false - } else if headers[len(headers)-1].Hash() != target { - logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target) - accepted = false - } - } - if accepted { - parentHash := headers[0].Hash() - for i, header := range headers[1:] { - hash := header.Hash() - if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { - logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want) - accepted = false - break - } - if parentHash != header.ParentHash { - logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash) - accepted = false - break - } - // Set-up parent hash for next round - parentHash = hash - } - } - // If the batch of headers wasn't accepted, mark as unavailable - if !accepted { - logger.Trace("Skeleton filling not accepted", "from", request.From) - - miss := q.headerPeerMiss[id] - if miss == nil { - q.headerPeerMiss[id] = make(map[uint64]struct{}) - miss = q.headerPeerMiss[id] - } - miss[request.From] = struct{}{} - - q.headerTaskQueue.Push(request.From, -int64(request.From)) - return 0, errors.New("delivery not accepted") - } - // Clean up a successful fetch and try to deliver any sub-results - copy(q.headerResults[request.From-q.headerOffset:], headers) - delete(q.headerTaskPool, request.From) - - ready := 0 - for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil { - ready += MaxHeaderFetch - } - if ready > 0 { - // Headers are ready for delivery, gather them and push forward (non blocking) - process := make([]*types.Header, ready) - copy(process, q.headerResults[q.headerProced:q.headerProced+ready]) - - select { - case headerProcCh <- process: - logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number) - q.headerProced += len(process) - default: - } - } - // Check for termination and return - if len(q.headerTaskPool) == 0 { - q.headerContCh <- false - } - return len(headers), nil -} - -// DeliverBodies injects a block body retrieval response into the results queue. -// The method returns the number of blocks bodies accepted from the delivery and -// also wakes any threads waiting for data delivery. 
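DeliverHeaders above applies three structural checks before accepting a skeleton fill: the first header must carry the requested origin number, the last must hash to the skeleton anchor, and every header in between must be numbered consecutively and chain to its predecessor. A toy illustration with assumed types (not the real ones):

```go
package main

import "fmt"

type header struct {
	number     uint64
	hash       string
	parentHash string
}

// accept mirrors the all-or-nothing acceptance rule: one broken link
// rejects the entire batch.
func accept(batch []header, from uint64, target string) bool {
	if len(batch) == 0 || batch[0].number != from {
		return false // first header must match the requested origin
	}
	if batch[len(batch)-1].hash != target {
		return false // last header must land on the skeleton anchor
	}
	for i := 1; i < len(batch); i++ {
		if batch[i].number != from+uint64(i) || batch[i].parentHash != batch[i-1].hash {
			return false // numbers must be consecutive, hashes must chain
		}
	}
	return true
}

func main() {
	batch := []header{{1, "a", "genesis"}, {2, "b", "a"}}
	fmt.Println(accept(batch, 1, "b")) // true
}
```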
-func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - trieHasher := trie.NewStackTrie(nil) - validate := func(index int, header *types.Header) error { - if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash { - return errInvalidBody - } - if types.CalcUncleHash(uncleLists[index]) != header.UncleHash { - return errInvalidBody - } - return nil - } - - reconstruct := func(index int, result *fetchResult) { - result.Transactions = txLists[index] - result.Uncles = uncleLists[index] - result.SetBodyDone() - } - return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, - bodyReqTimer, len(txLists), validate, reconstruct) -} - -// DeliverReceipts injects a receipt retrieval response into the results queue. -// The method returns the number of transaction receipts accepted from the delivery -// and also wakes any threads waiting for data delivery. -func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) { - q.lock.Lock() - defer q.lock.Unlock() - trieHasher := trie.NewStackTrie(nil) - validate := func(index int, header *types.Header) error { - if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash { - return errInvalidReceipt - } - return nil - } - reconstruct := func(index int, result *fetchResult) { - result.Receipts = receiptList[index] - result.SetReceiptsDone() - } - return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, - receiptReqTimer, len(receiptList), validate, reconstruct) -} - -// deliver injects a data retrieval response into the results queue. -// -// Note, this method expects the queue lock to be already held for writing. The -// reason this lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. -func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, - taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest, reqTimer metrics.Timer, - results int, validate func(index int, header *types.Header) error, - reconstruct func(index int, result *fetchResult)) (int, error) { - // Short circuit if the data was never requested - request := pendPool[id] - if request == nil { - return 0, errNoFetchesPending - } - reqTimer.UpdateSince(request.Time) - delete(pendPool, id) - - // If no data items were retrieved, mark them as unavailable for the origin peer - if results == 0 { - for _, header := range request.Headers { - request.Peer.MarkLacking(header.Hash()) - } - } - // Assemble each of the results with their headers and retrieved data parts - var ( - accepted int - failure error - i int - hashes []common.Hash - ) - for _, header := range request.Headers { - // Short circuit assembly if no more fetch results are found - if i >= results { - break - } - // Validate the fields - if err := validate(i, header); err != nil { - failure = err - break - } - hashes = append(hashes, header.Hash()) - i++ - } - - for _, header := range request.Headers[:i] { - if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale { - reconstruct(accepted, res) - } else { - // else: between here and above, some other peer filled this result, - // or it was indeed a no-op. 
This should not happen, but if it does it's - // not something to panic about - log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err) - failure = errStaleDelivery - } - // Clean up a successful fetch - delete(taskPool, hashes[accepted]) - accepted++ - } - // Return all failed or missing fetches to the queue - for _, header := range request.Headers[accepted:] { - taskQueue.Push(header, -int64(header.Number.Uint64())) - } - // Wake up Results - if accepted > 0 { - q.active.Signal() - } - if failure == nil { - return accepted, nil - } - // If none of the data was good, it's a stale delivery - if accepted > 0 { - return accepted, fmt.Errorf("partial failure: %v", failure) - } - return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery) -} - -// Prepare configures the result cache to allow accepting and caching inbound -// fetch results. -func (q *queue) Prepare(offset uint64, mode SyncMode) { - q.lock.Lock() - defer q.lock.Unlock() - - // Prepare the queue for sync results - q.resultCache.Prepare(offset) - q.mode = mode -} diff --git a/les/downloader/queue_test.go b/les/downloader/queue_test.go deleted file mode 100644 index 6bffa9021..000000000 --- a/les/downloader/queue_test.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - "math/big" - "math/rand" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" -) - -// makeChain creates a chain of n blocks starting at and including parent. -// the returned hash chain is ordered head->parent. In addition, every 3rd block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly. 
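The validate callbacks in DeliverBodies and DeliverReceipts above boil down to re-deriving trie roots from the delivered contents and comparing them against the header commitments. A condensed sketch using the same go-ethereum helpers the deleted code uses (types.DeriveSha, trie.NewStackTrie, types.CalcUncleHash):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

// validBody accepts a body only if the roots derived from its contents
// match the commitments in the header.
func validBody(h *types.Header, txs []*types.Transaction, uncles []*types.Header) bool {
	if types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)) != h.TxHash {
		return false // transaction trie root mismatch
	}
	if types.CalcUncleHash(uncles) != h.UncleHash {
		return false // uncle hash mismatch
	}
	return true
}

func main() {
	// Build a header committing to an empty body, then validate it.
	h := &types.Header{
		TxHash:    types.DeriveSha(types.Transactions{}, trie.NewStackTrie(nil)),
		UncleHash: types.CalcUncleHash(nil),
	}
	fmt.Println(validBody(h, nil, nil)) // true
}
```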
-func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) { - blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{seed}) - // Add one tx to every secondblock - if !empty && i%2 == 0 { - signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp()) - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - }) - return blocks, receipts -} - -type chainData struct { - blocks []*types.Block - offset int -} - -var chain *chainData -var emptyChain *chainData - -func init() { - // Create a chain of blocks to import - targetBlocks := 128 - blocks, _ := makeChain(targetBlocks, 0, testGenesis, false) - chain = &chainData{blocks, 0} - - blocks, _ = makeChain(targetBlocks, 0, testGenesis, true) - emptyChain = &chainData{blocks, 0} -} - -func (chain *chainData) headers() []*types.Header { - hdrs := make([]*types.Header, len(chain.blocks)) - for i, b := range chain.blocks { - hdrs[i] = b.Header() - } - return hdrs -} - -func (chain *chainData) Len() int { - return len(chain.blocks) -} - -func dummyPeer(id string) *peerConnection { - p := &peerConnection{ - id: id, - lacking: make(map[common.Hash]struct{}), - } - return p -} - -func TestBasics(t *testing.T) { - numOfBlocks := len(emptyChain.blocks) - numOfReceipts := len(emptyChain.blocks) / 2 - - q := newQueue(10, 10) - if !q.Idle() { - t.Errorf("new queue should be idle") - } - q.Prepare(1, FastSync) - if res := q.Results(false); len(res) != 0 { - t.Fatal("new queue should have 0 results") - } - - // Schedule a batch of headers - q.Schedule(chain.headers(), 1) - if q.Idle() { - t.Errorf("queue should not be idle") - } - if got, exp := q.PendingBlocks(), chain.Len(); got != exp { - t.Errorf("wrong pending block count, got %d, exp %d", got, exp) - } - // Only non-empty receipts get added to task-queue - if got, exp := q.PendingReceipts(), 64; got != exp { - t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) - } - // Items are now queued for downloading, next step is that we tell the - // queue that a certain peer will deliver them for us - { - peer := dummyPeer("peer-1") - fetchReq, _, throttle := q.ReserveBodies(peer, 50) - if !throttle { - // queue size is only 10, so throttling should occur - t.Fatal("should throttle") - } - // But we should still get the first things to fetch - if got, exp := len(fetchReq.Headers), 5; got != exp { - t.Fatalf("expected %d requests, got %d", exp, got) - } - if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { - t.Fatalf("expected header %d, got %d", exp, got) - } - } - if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { - t.Errorf("expected block task queue to be %d, got %d", exp, got) - } - if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { - t.Errorf("expected receipt task queue to be %d, got %d", exp, got) - } - { - peer := dummyPeer("peer-2") - fetchReq, _, throttle := q.ReserveBodies(peer, 50) - - // The second peer should hit throttling - if !throttle { - t.Fatalf("should throttle") - } - // And not get any fetches at all, since it was throttled to begin with - if fetchReq != nil { - t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers)) - } - } - if exp, got := 
q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { - t.Errorf("expected block task queue to be %d, got %d", exp, got) - } - if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { - t.Errorf("expected receipt task queue to be %d, got %d", exp, got) - } - { - // The receipt delivering peer should not be affected - // by the throttling of body deliveries - peer := dummyPeer("peer-3") - fetchReq, _, throttle := q.ReserveReceipts(peer, 50) - if !throttle { - // queue size is only 10, so throttling should occur - t.Fatal("should throttle") - } - // But we should still get the first things to fetch - if got, exp := len(fetchReq.Headers), 5; got != exp { - t.Fatalf("expected %d requests, got %d", exp, got) - } - if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { - t.Fatalf("expected header %d, got %d", exp, got) - } - } - if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { - t.Errorf("expected block task queue to be %d, got %d", exp, got) - } - if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got { - t.Errorf("expected receipt task queue to be %d, got %d", exp, got) - } - if got, exp := q.resultCache.countCompleted(), 0; got != exp { - t.Errorf("wrong processable count, got %d, exp %d", got, exp) - } -} - -func TestEmptyBlocks(t *testing.T) { - numOfBlocks := len(emptyChain.blocks) - - q := newQueue(10, 10) - - q.Prepare(1, FastSync) - // Schedule a batch of headers - q.Schedule(emptyChain.headers(), 1) - if q.Idle() { - t.Errorf("queue should not be idle") - } - if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp { - t.Errorf("wrong pending block count, got %d, exp %d", got, exp) - } - if got, exp := q.PendingReceipts(), 0; got != exp { - t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) - } - // They won't be processable, because the fetchresults haven't been - // created yet - if got, exp := q.resultCache.countCompleted(), 0; got != exp { - t.Errorf("wrong processable count, got %d, exp %d", got, exp) - } - - // Items are now queued for downloading, next step is that we tell the - // queue that a certain peer will deliver them for us - // That should trigger all of them to suddenly become 'done' - { - // Reserve blocks - peer := dummyPeer("peer-1") - fetchReq, _, _ := q.ReserveBodies(peer, 50) - - // there should be nothing to fetch, blocks are empty - if fetchReq != nil { - t.Fatal("there should be no body fetch tasks remaining") - } - } - if q.blockTaskQueue.Size() != numOfBlocks-10 { - t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) - } - if q.receiptTaskQueue.Size() != 0 { - t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) - } - { - peer := dummyPeer("peer-3") - fetchReq, _, _ := q.ReserveReceipts(peer, 50) - - // there should be nothing to fetch, blocks are empty - if fetchReq != nil { - t.Fatal("there should be no receipt fetch tasks remaining") - } - } - if q.blockTaskQueue.Size() != numOfBlocks-10 { - t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) - } - if q.receiptTaskQueue.Size() != 0 { - t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) - } - if got, exp := q.resultCache.countCompleted(), 10; got != exp { - t.Errorf("wrong processable count, got %d, exp %d", got, exp) - } -} - -// XTestDelivery does some more extensive testing of events that happen, -// blocks that become known and peers that make 
reservations and deliveries. -// disabled since it's not really a unit-test, but can be executed to test -// some more advanced scenarios -func XTestDelivery(t *testing.T) { - // the outside network, holding blocks - blo, rec := makeChain(128, 0, testGenesis, false) - world := newNetwork() - world.receipts = rec - world.chain = blo - world.progress(10) - if false { - log.Root().SetHandler(log.StdoutHandler) - } - q := newQueue(10, 10) - var wg sync.WaitGroup - q.Prepare(1, FastSync) - wg.Add(1) - go func() { - // deliver headers - defer wg.Done() - c := 1 - for { - //fmt.Printf("getting headers from %d\n", c) - hdrs := world.headers(c) - l := len(hdrs) - //fmt.Printf("scheduling %d headers, first %d last %d\n", - // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64()) - q.Schedule(hdrs, uint64(c)) - c += l - } - }() - wg.Add(1) - go func() { - // collect results - defer wg.Done() - tot := 0 - for { - res := q.Results(true) - tot += len(res) - fmt.Printf("got %d results, %d tot\n", len(res), tot) - // Now we can forget about these - world.forget(res[len(res)-1].Header.Number.Uint64()) - } - }() - wg.Add(1) - go func() { - defer wg.Done() - // reserve body fetch - i := 4 - for { - peer := dummyPeer(fmt.Sprintf("peer-%d", i)) - f, _, _ := q.ReserveBodies(peer, rand.Intn(30)) - if f != nil { - var emptyList []*types.Header - var txs [][]*types.Transaction - var uncles [][]*types.Header - numToSkip := rand.Intn(len(f.Headers)) - for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { - txs = append(txs, world.getTransactions(hdr.Number.Uint64())) - uncles = append(uncles, emptyList) - } - time.Sleep(100 * time.Millisecond) - _, err := q.DeliverBodies(peer.id, txs, uncles) - if err != nil { - fmt.Printf("delivered %d bodies %v\n", len(txs), err) - } - } else { - i++ - time.Sleep(200 * time.Millisecond) - } - } - }() - go func() { - defer wg.Done() - // reserve receiptfetch - peer := dummyPeer("peer-3") - for { - f, _, _ := q.ReserveReceipts(peer, rand.Intn(50)) - if f != nil { - var rcs [][]*types.Receipt - for _, hdr := range f.Headers { - rcs = append(rcs, world.getReceipts(hdr.Number.Uint64())) - } - _, err := q.DeliverReceipts(peer.id, rcs) - if err != nil { - fmt.Printf("delivered %d receipts %v\n", len(rcs), err) - } - time.Sleep(100 * time.Millisecond) - } else { - time.Sleep(200 * time.Millisecond) - } - } - }() - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - time.Sleep(300 * time.Millisecond) - //world.tick() - //fmt.Printf("trying to progress\n") - world.progress(rand.Intn(100)) - } - for i := 0; i < 50; i++ { - time.Sleep(2990 * time.Millisecond) - } - }() - wg.Add(1) - go func() { - defer wg.Done() - for { - time.Sleep(990 * time.Millisecond) - fmt.Printf("world block tip is %d\n", - world.chain[len(world.chain)-1].Header().Number.Uint64()) - fmt.Println(q.Stats()) - } - }() - wg.Wait() -} - -func newNetwork() *network { - var l sync.RWMutex - return &network{ - cond: sync.NewCond(&l), - offset: 1, // block 1 is at blocks[0] - } -} - -// represents the network -type network struct { - offset int - chain []*types.Block - receipts []types.Receipts - lock sync.RWMutex - cond *sync.Cond -} - -func (n *network) getTransactions(blocknum uint64) types.Transactions { - index := blocknum - uint64(n.offset) - return n.chain[index].Transactions() -} -func (n *network) getReceipts(blocknum uint64) types.Receipts { - index := blocknum - uint64(n.offset) - if got := n.chain[index].Header().Number.Uint64(); got != blocknum { - fmt.Printf("Err, got %d exp 
%d\n", got, blocknum) - panic("sd") - } - return n.receipts[index] -} - -func (n *network) forget(blocknum uint64) { - index := blocknum - uint64(n.offset) - n.chain = n.chain[index:] - n.receipts = n.receipts[index:] - n.offset = int(blocknum) -} -func (n *network) progress(numBlocks int) { - n.lock.Lock() - defer n.lock.Unlock() - //fmt.Printf("progressing...\n") - newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false) - n.chain = append(n.chain, newBlocks...) - n.receipts = append(n.receipts, newR...) - n.cond.Broadcast() -} - -func (n *network) headers(from int) []*types.Header { - numHeaders := 128 - var hdrs []*types.Header - index := from - n.offset - - for index >= len(n.chain) { - // wait for progress - n.cond.L.Lock() - //fmt.Printf("header going into wait\n") - n.cond.Wait() - index = from - n.offset - n.cond.L.Unlock() - } - n.lock.RLock() - defer n.lock.RUnlock() - for i, b := range n.chain[index:] { - hdrs = append(hdrs, b.Header()) - if i >= numHeaders { - break - } - } - return hdrs -} diff --git a/les/downloader/resultstore.go b/les/downloader/resultstore.go deleted file mode 100644 index 7fcade294..000000000 --- a/les/downloader/resultstore.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - "sync" - "sync/atomic" - - "github.com/ethereum/go-ethereum/core/types" -) - -// resultStore implements a structure for maintaining fetchResults, tracking their -// download-progress and delivering (finished) results. -type resultStore struct { - items []*fetchResult // Downloaded but not yet delivered fetch results - resultOffset uint64 // Offset of the first cached fetch result in the block chain - - // Internal index of first non-completed entry, updated atomically when needed. - // If all items are complete, this will equal length(items), so - // *important* : is not safe to use for indexing without checking against length - indexIncomplete int32 // atomic access - - // throttleThreshold is the limit up to which we _want_ to fill the - // results. If blocks are large, we want to limit the results to less - // than the number of available slots, and maybe only fill 1024 out of - // 8192 possible places. The queue will, at certain times, recalibrate - // this index. - throttleThreshold uint64 - - lock sync.RWMutex -} - -func newResultStore(size int) *resultStore { - return &resultStore{ - resultOffset: 0, - items: make([]*fetchResult, size), - throttleThreshold: uint64(size), - } -} - -// SetThrottleThreshold updates the throttling threshold based on the requested -// limit and the total queue capacity. 
It returns the (possibly capped) threshold -func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 { - r.lock.Lock() - defer r.lock.Unlock() - - limit := uint64(len(r.items)) - if threshold >= limit { - threshold = limit - } - r.throttleThreshold = threshold - return r.throttleThreshold -} - -// AddFetch adds a header for body/receipt fetching. This is used when the queue -// wants to reserve headers for fetching. -// -// It returns the following: -// -// stale - if true, this item is already passed, and should not be requested again -// throttled - if true, the store is at capacity, this particular header is not prio now -// item - the result to store data into -// err - any error that occurred -func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) { - r.lock.Lock() - defer r.lock.Unlock() - - var index int - item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64()) - if err != nil || stale || throttled { - return stale, throttled, item, err - } - if item == nil { - item = newFetchResult(header, fastSync) - r.items[index] = item - } - return stale, throttled, item, err -} - -// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag -// is true, that means the header has already been delivered 'upstream'. This method -// does not bubble up the 'throttle' flag, since it's moot at the point in time when -// the item is downloaded and ready for delivery -func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - res, _, stale, _, err := r.getFetchResult(headerNumber) - return res, stale, err -} - -// getFetchResult returns the fetchResult corresponding to the given item, and -// the index where the result is stored. -func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) { - index = int(int64(headerNumber) - int64(r.resultOffset)) - throttle = index >= int(r.throttleThreshold) - stale = index < 0 - - if index >= len(r.items) { - err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+ - "(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d", errInvalidChain, - index, headerNumber, r.resultOffset, len(r.items)) - return nil, index, stale, throttle, err - } - if stale { - return nil, index, stale, throttle, nil - } - item = r.items[index] - return item, index, stale, throttle, nil -} - -// HasCompletedItems returns true if there are processable items available -// this method is cheaper than countCompleted -func (r *resultStore) HasCompletedItems() bool { - r.lock.RLock() - defer r.lock.RUnlock() - - if len(r.items) == 0 { - return false - } - if item := r.items[0]; item != nil && item.AllDone() { - return true - } - return false -} - -// countCompleted returns the number of items ready for delivery, stopping at -// the first non-complete item. -// -// The method assumes (at least) rlock is held. 
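The index arithmetic in getFetchResult above is the heart of the result cache: a header maps into the cache by subtracting the offset, a negative index means the result was already delivered upstream (stale), and anything at or past the threshold is throttled. A distilled sketch with a hypothetical slot helper (the real code additionally errors with errInvalidChain when the index runs past the cache end):

```go
package main

import "fmt"

// slot reproduces the stale/throttle classification of getFetchResult.
func slot(headerNumber, resultOffset, throttleThreshold uint64) (index int, stale, throttled bool) {
	index = int(int64(headerNumber) - int64(resultOffset))
	stale = index < 0                           // before the window: already delivered
	throttled = index >= int(throttleThreshold) // past the soft cap: defer for now
	return index, stale, throttled
}

func main() {
	fmt.Println(slot(105, 100, 3)) // 5 false true
	fmt.Println(slot(99, 100, 3))  // -1 true false
}
```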
-func (r *resultStore) countCompleted() int { - // We iterate from the already known complete point, and see - // if any more has completed since last count - index := atomic.LoadInt32(&r.indexIncomplete) - for ; ; index++ { - if index >= int32(len(r.items)) { - break - } - result := r.items[index] - if result == nil || !result.AllDone() { - break - } - } - atomic.StoreInt32(&r.indexIncomplete, index) - return int(index) -} - -// GetCompleted returns the next batch of completed fetchResults -func (r *resultStore) GetCompleted(limit int) []*fetchResult { - r.lock.Lock() - defer r.lock.Unlock() - - completed := r.countCompleted() - if limit > completed { - limit = completed - } - results := make([]*fetchResult, limit) - copy(results, r.items[:limit]) - - // Delete the results from the cache and clear the tail. - copy(r.items, r.items[limit:]) - for i := len(r.items) - limit; i < len(r.items); i++ { - r.items[i] = nil - } - // Advance the expected block number of the first cache entry - r.resultOffset += uint64(limit) - atomic.AddInt32(&r.indexIncomplete, int32(-limit)) - - return results -} - -// Prepare initialises the offset with the given block number -func (r *resultStore) Prepare(offset uint64) { - r.lock.Lock() - defer r.lock.Unlock() - - if r.resultOffset < offset { - r.resultOffset = offset - } -} diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go deleted file mode 100644 index 4dacade3f..000000000 --- a/les/downloader/statesync.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/trie" - "golang.org/x/crypto/sha3" -) - -// stateReq represents a batch of state fetch requests grouped together into -// a single data retrieval network packet. 
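GetCompleted above hands out the completed prefix and then slides the cache window forward, clearing the vacated tail so delivered results can be garbage collected. A simplified sketch of just that shift-and-clear step:

```go
package main

import "fmt"

// getCompleted removes the first limit items, compacts the rest to the
// front, nils out the tail for GC and advances the window offset.
func getCompleted(items []*int, limit int, offset *uint64) []*int {
	results := make([]*int, limit)
	copy(results, items[:limit])

	copy(items, items[limit:]) // slide the remainder forward
	for i := len(items) - limit; i < len(items); i++ {
		items[i] = nil // clear stale pointers
	}
	*offset += uint64(limit)
	return results
}

func main() {
	a, b, c := 1, 2, 3
	items := []*int{&a, &b, &c, nil}
	offset := uint64(10)
	res := getCompleted(items, 2, &offset)
	fmt.Println(*res[0], *res[1], offset) // 1 2 12
}
```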
-type stateReq struct { - nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient) - trieTasks map[string]*trieTask // Trie node download tasks to track previous attempts - codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts - timeout time.Duration // Maximum round trip time for this to complete - timer *time.Timer // Timer to fire when the RTT timeout expires - peer *peerConnection // Peer that we're requesting from - delivered time.Time // Time when the packet was delivered (independent when we process it) - response [][]byte // Response data of the peer (nil for timeouts) - dropped bool // Flag whether the peer dropped off early -} - -// timedOut returns if this request timed out. -func (req *stateReq) timedOut() bool { - return req.response == nil -} - -// stateSyncStats is a collection of progress stats to report during a state trie -// sync to RPC requests as well as to display in user logs. -type stateSyncStats struct { - processed uint64 // Number of state entries processed - duplicate uint64 // Number of state entries downloaded twice - unexpected uint64 // Number of non-requested state entries received - pending uint64 // Number of still pending state entries -} - -// syncState starts downloading state with the given root hash. -func (d *Downloader) syncState(root common.Hash) *stateSync { - // Create the state sync - s := newStateSync(d, root) - select { - case d.stateSyncStart <- s: - // If we tell the statesync to restart with a new root, we also need - // to wait for it to actually also start -- when old requests have timed - // out or been delivered - <-s.started - case <-d.quitCh: - s.err = errCancelStateFetch - close(s.done) - } - return s -} - -// stateFetcher manages the active state sync and accepts requests -// on its behalf. -func (d *Downloader) stateFetcher() { - for { - select { - case s := <-d.stateSyncStart: - for next := s; next != nil; { - next = d.runStateSync(next) - } - case <-d.stateCh: - // Ignore state responses while no sync is running. - case <-d.quitCh: - return - } - } -} - -// runStateSync runs a state synchronisation until it completes or another root -// hash is requested to be switched over to. -func (d *Downloader) runStateSync(s *stateSync) *stateSync { - var ( - active = make(map[string]*stateReq) // Currently in-flight requests - finished []*stateReq // Completed or failed requests - timeout = make(chan *stateReq) // Timed out active requests - ) - log.Trace("State sync starting", "root", s.root) - - defer func() { - // Cancel active request timers on exit. Also set peers to idle so they're - // available for the next sync. - for _, req := range active { - req.timer.Stop() - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) - } - }() - go s.run() - defer s.Cancel() - - // Listen for peer departure events to cancel assigned tasks - peerDrop := make(chan *peerConnection, 1024) - peerSub := s.d.peers.SubscribePeerDrops(peerDrop) - defer peerSub.Unsubscribe() - - for { - // Enable sending of the first buffered element if there is one. 
- var ( - deliverReq *stateReq - deliverReqCh chan *stateReq - ) - if len(finished) > 0 { - deliverReq = finished[0] - deliverReqCh = s.deliver - } - - select { - // The stateSync lifecycle: - case next := <-d.stateSyncStart: - d.spindownStateSync(active, finished, timeout, peerDrop) - return next - - case <-s.done: - d.spindownStateSync(active, finished, timeout, peerDrop) - return nil - - // Send the next finished request to the current sync: - case deliverReqCh <- deliverReq: - // Shift out the first request, but also set the emptied slot to nil for GC - copy(finished, finished[1:]) - finished[len(finished)-1] = nil - finished = finished[:len(finished)-1] - - // Handle incoming state packs: - case pack := <-d.stateCh: - // Discard any data not requested (or previously timed out) - req := active[pack.PeerId()] - if req == nil { - log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items()) - continue - } - // Finalize the request and queue up for processing - req.timer.Stop() - req.response = pack.(*statePack).states - req.delivered = time.Now() - - finished = append(finished, req) - delete(active, pack.PeerId()) - - // Handle dropped peer connections: - case p := <-peerDrop: - // Skip if no request is currently pending - req := active[p.id] - if req == nil { - continue - } - // Finalize the request and queue up for processing - req.timer.Stop() - req.dropped = true - req.delivered = time.Now() - - finished = append(finished, req) - delete(active, p.id) - - // Handle timed-out requests: - case req := <-timeout: - // If the peer is already requesting something else, ignore the stale timeout. - // This can happen when the timeout and the delivery happens simultaneously, - // causing both pathways to trigger. - if active[req.peer.id] != req { - continue - } - req.delivered = time.Now() - // Move the timed out data back into the download queue - finished = append(finished, req) - delete(active, req.peer.id) - - // Track outgoing state requests: - case req := <-d.trackStateReq: - // If an active request already exists for this peer, we have a problem. In - // theory the trie node schedule must never assign two requests to the same - // peer. In practice however, a peer might receive a request, disconnect and - // immediately reconnect before the previous times out. In this case the first - // request is never honored, alas we must not silently overwrite it, as that - // causes valid requests to go missing and sync to get stuck. - if old := active[req.peer.id]; old != nil { - log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id) - // Move the previous request to the finished set - old.timer.Stop() - old.dropped = true - old.delivered = time.Now() - finished = append(finished, old) - } - // Start a timer to notify the sync loop if the peer stalled. - req.timer = time.AfterFunc(req.timeout, func() { - timeout <- req - }) - active[req.peer.id] = req - } - } -} - -// spindownStateSync 'drains' the outstanding requests; some will be delivered and other -// will time out. This is to ensure that when the next stateSync starts working, all peers -// are marked as idle and de facto _are_ idle. 
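The "enable sending of the first buffered element" trick at the top of the loop above leans on a Go rule: a send on a nil channel blocks forever, so leaving deliverReqCh nil when nothing is finished silently disables that select case. A minimal standalone demo (it uses a default case to stay runnable; the real loop has the other event cases instead):

```go
package main

import "fmt"

func main() {
	queue := []string{"req-1"}
	out := make(chan string, 1)

	for i := 0; i < 2; i++ {
		var sendCh chan string // nil channel: the send case is disabled
		var next string
		if len(queue) > 0 {
			sendCh, next = out, queue[0] // only arm the case when there is work
		}
		select {
		case sendCh <- next:
			queue = queue[1:]
			fmt.Println("delivered", <-out)
		default:
			fmt.Println("nothing to deliver")
		}
	}
}
```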
-func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
-	log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
-	for len(active) > 0 {
-		var (
-			req    *stateReq
-			reason string
-		)
-		select {
-		// Handle (drop) incoming state packs:
-		case pack := <-d.stateCh:
-			req = active[pack.PeerId()]
-			reason = "delivered"
-		// Handle dropped peer connections:
-		case p := <-peerDrop:
-			req = active[p.id]
-			reason = "peerdrop"
-		// Handle timed-out requests:
-		case req = <-timeout:
-			reason = "timeout"
-		}
-		if req == nil {
-			continue
-		}
-		req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
-		req.timer.Stop()
-		delete(active, req.peer.id)
-		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
-	}
-	// The 'finished' set contains deliveries that we were going to pass to processing.
-	// Those are now moot, but we still need to set those peers as idle, which would
-	// otherwise have been done after processing.
-	for _, req := range finished {
-		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
-	}
-}
-
-// stateSync schedules requests for downloading a particular state trie defined
-// by a given state root.
-type stateSync struct {
-	d *Downloader // Downloader instance to access and manage current peerset
-
-	root   common.Hash        // State root currently being synced
-	sched  *trie.Sync         // State trie sync scheduler defining the tasks
-	keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
-
-	trieTasks map[string]*trieTask      // Set of trie node tasks currently queued for retrieval, indexed by path
-	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash
-
-	numUncommitted   int
-	bytesUncommitted int
-
-	started chan struct{} // Started is signalled once the sync loop starts
-
-	deliver    chan *stateReq // Delivery channel multiplexing peer responses
-	cancel     chan struct{}  // Channel to signal a termination request
-	cancelOnce sync.Once      // Ensures cancel only ever gets called once
-	done       chan struct{}  // Channel to signal termination completion
-	err        error          // Any error hit during sync (set before completion)
-}
-
-// trieTask represents a single trie node download task, containing the set of
-// peers it was already attempted from, used to detect stalled syncs and abort.
-type trieTask struct {
-	hash     common.Hash
-	path     [][]byte
-	attempts map[string]struct{}
-}
-
-// codeTask represents a single byte code download task, containing the set of
-// peers it was already attempted from, used to detect stalled syncs and abort.
-type codeTask struct {
-	attempts map[string]struct{}
-}
-
-// newStateSync creates a new state trie download scheduler. This method does not
-// yet start the sync. The user needs to call run to initiate.
-func newStateSync(d *Downloader, root common.Hash) *stateSync {
-	// The node scheme is hacked in here. This is dead code, not used by the
-	// light client at all; it only aims to keep the tests passing.
- return &stateSync{ - d: d, - root: root, - sched: state.NewStateSync(root, d.stateDB, nil, rawdb.HashScheme), - keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), - trieTasks: make(map[string]*trieTask), - codeTasks: make(map[common.Hash]*codeTask), - deliver: make(chan *stateReq), - cancel: make(chan struct{}), - done: make(chan struct{}), - started: make(chan struct{}), - } -} - -// run starts the task assignment and response processing loop, blocking until -// it finishes, and finally notifying any goroutines waiting for the loop to -// finish. -func (s *stateSync) run() { - close(s.started) - if s.d.snapSync { - s.err = s.d.SnapSyncer.Sync(s.root, s.cancel) - } else { - s.err = s.loop() - } - close(s.done) -} - -// Wait blocks until the sync is done or canceled. -func (s *stateSync) Wait() error { - <-s.done - return s.err -} - -// Cancel cancels the sync and waits until it has shut down. -func (s *stateSync) Cancel() error { - s.cancelOnce.Do(func() { - close(s.cancel) - }) - return s.Wait() -} - -// loop is the main event loop of a state trie sync. It it responsible for the -// assignment of new tasks to peers (including sending it to them) as well as -// for the processing of inbound data. Note, that the loop does not directly -// receive data from peers, rather those are buffered up in the downloader and -// pushed here async. The reason is to decouple processing from data receipt -// and timeouts. -func (s *stateSync) loop() (err error) { - // Listen for new peer events to assign tasks to them - newPeer := make(chan *peerConnection, 1024) - peerSub := s.d.peers.SubscribeNewPeers(newPeer) - defer peerSub.Unsubscribe() - defer func() { - cerr := s.commit(true) - if err == nil { - err = cerr - } - }() - - // Keep assigning new tasks until the sync completes or aborts - for s.sched.Pending() > 0 { - if err = s.commit(false); err != nil { - return err - } - s.assignTasks() - // Tasks assigned, wait for something to happen - select { - case <-newPeer: - // New peer arrived, try to assign it download tasks - - case <-s.cancel: - return errCancelStateFetch - - case <-s.d.cancelCh: - return errCanceled - - case req := <-s.deliver: - // Response, disconnect or timeout triggered, drop the peer if stalling - log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut()) - if req.nItems <= 2 && !req.dropped && req.timedOut() { - // 2 items are the minimum requested, if even that times out, we've no use of - // this peer at the moment. - log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id) - if s.d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id) - } else { - s.d.dropPeer(req.peer.id) - - // If this peer was the master peer, abort sync immediately - s.d.cancelLock.RLock() - master := req.peer.id == s.d.cancelPeer - s.d.cancelLock.RUnlock() - - if master { - s.d.cancel() - return errTimeout - } - } - } - // Process all the received blobs and check for stale delivery - delivered, err := s.process(req) - req.peer.SetNodeDataIdle(delivered, req.delivered) - if err != nil { - log.Warn("Node data write error", "err", err) - return err - } - } - } - return nil -} - -func (s *stateSync) commit(force bool) error { - if !force && s.bytesUncommitted < ethdb.IdealBatchSize { - return nil - } - start := time.Now() - b := s.d.stateDB.NewBatch() - if err := s.sched.Commit(b); err != nil { - return err - } - if err := b.Write(); err != nil { - return fmt.Errorf("DB write error: %v", err) - } - s.updateStats(s.numUncommitted, 0, 0, time.Since(start)) - s.numUncommitted = 0 - s.bytesUncommitted = 0 - return nil -} - -// assignTasks attempts to assign new tasks to all idle peers, either from the -// batch currently being retried, or fetching new data from the trie sync itself. -func (s *stateSync) assignTasks() { - // Iterate over all idle peers and try to assign them state fetches - peers, _ := s.d.peers.NodeDataIdlePeers() - for _, p := range peers { - // Assign a batch of fetches proportional to the estimated latency/bandwidth - cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip()) - req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()} - - nodes, _, codes := s.fillTasks(cap, req) - - // If the peer was assigned tasks to fetch, send the network request - if len(nodes)+len(codes) > 0 { - req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root) - select { - case s.d.trackStateReq <- req: - req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x - case <-s.cancel: - case <-s.d.cancelCh: - } - } - } -} - -// fillTasks fills the given request object with a maximum of n state download -// tasks to send to the remote peer. -func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { - // Refill available tasks from the scheduler. - if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 { - paths, hashes, codes := s.sched.Missing(fill) - for i, path := range paths { - s.trieTasks[path] = &trieTask{ - hash: hashes[i], - path: trie.NewSyncPath([]byte(path)), - attempts: make(map[string]struct{}), - } - } - for _, hash := range codes { - s.codeTasks[hash] = &codeTask{ - attempts: make(map[string]struct{}), - } - } - } - // Find tasks that haven't been tried with the request's peer. Prefer code - // over trie nodes as those can be written to disk and forgotten about. 
- nodes = make([]common.Hash, 0, n) - paths = make([]trie.SyncPath, 0, n) - codes = make([]common.Hash, 0, n) - - req.trieTasks = make(map[string]*trieTask, n) - req.codeTasks = make(map[common.Hash]*codeTask, n) - - for hash, t := range s.codeTasks { - // Stop when we've gathered enough requests - if len(nodes)+len(codes) == n { - break - } - // Skip any requests we've already tried from this peer - if _, ok := t.attempts[req.peer.id]; ok { - continue - } - // Assign the request to this peer - t.attempts[req.peer.id] = struct{}{} - codes = append(codes, hash) - req.codeTasks[hash] = t - delete(s.codeTasks, hash) - } - for path, t := range s.trieTasks { - // Stop when we've gathered enough requests - if len(nodes)+len(codes) == n { - break - } - // Skip any requests we've already tried from this peer - if _, ok := t.attempts[req.peer.id]; ok { - continue - } - // Assign the request to this peer - t.attempts[req.peer.id] = struct{}{} - - nodes = append(nodes, t.hash) - paths = append(paths, t.path) - - req.trieTasks[path] = t - delete(s.trieTasks, path) - } - req.nItems = uint16(len(nodes) + len(codes)) - return nodes, paths, codes -} - -// process iterates over a batch of delivered state data, injecting each item -// into a running state sync, re-queuing any items that were requested but not -// delivered. Returns whether the peer actually managed to deliver anything of -// value, and any error that occurred. -func (s *stateSync) process(req *stateReq) (int, error) { - // Collect processing stats and update progress if valid data was received - duplicate, unexpected, successful := 0, 0, 0 - - defer func(start time.Time) { - if duplicate > 0 || unexpected > 0 { - s.updateStats(0, duplicate, unexpected, time.Since(start)) - } - }(time.Now()) - - // Iterate over all the delivered data and inject one-by-one into the trie - for _, blob := range req.response { - hash, err := s.processNodeData(req.trieTasks, req.codeTasks, blob) - switch err { - case nil: - s.numUncommitted++ - s.bytesUncommitted += len(blob) - successful++ - case trie.ErrNotRequested: - unexpected++ - case trie.ErrAlreadyProcessed: - duplicate++ - default: - return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) - } - } - // Put unfulfilled tasks back into the retry queue - npeers := s.d.peers.Len() - for path, task := range req.trieTasks { - // If the node did deliver something, missing items may be due to a protocol - // limit or a previous timeout + delayed delivery. Both cases should permit - // the node to retry the missing items (to avoid single-peer stalls). - if len(req.response) > 0 || req.timedOut() { - delete(task.attempts, req.peer.id) - } - // If we've requested the node too many times already, it may be a malicious - // sync where nobody has the right data. Abort. - if len(task.attempts) >= npeers { - return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", task.hash.TerminalString(), len(task.attempts), npeers) - } - // Missing item, place into the retry queue. - s.trieTasks[path] = task - } - for hash, task := range req.codeTasks { - // If the node did deliver something, missing items may be due to a protocol - // limit or a previous timeout + delayed delivery. Both cases should permit - // the node to retry the missing items (to avoid single-peer stalls). 
- if len(req.response) > 0 || req.timedOut() { - delete(task.attempts, req.peer.id) - } - // If we've requested the node too many times already, it may be a malicious - // sync where nobody has the right data. Abort. - if len(task.attempts) >= npeers { - return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) - } - // Missing item, place into the retry queue. - s.codeTasks[hash] = task - } - return successful, nil -} - -// processNodeData tries to inject a trie node data blob delivered from a remote -// peer into the state trie, returning whether anything useful was written or any -// error occurred. -// -// If multiple requests correspond to the same hash, this method will inject the -// blob as a result for the first one only, leaving the remaining duplicates to -// be fetched again. -func (s *stateSync) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, blob []byte) (common.Hash, error) { - var hash common.Hash - s.keccak.Reset() - s.keccak.Write(blob) - s.keccak.Read(hash[:]) - - if _, present := codeTasks[hash]; present { - err := s.sched.ProcessCode(trie.CodeSyncResult{ - Hash: hash, - Data: blob, - }) - delete(codeTasks, hash) - return hash, err - } - for path, task := range nodeTasks { - if task.hash == hash { - err := s.sched.ProcessNode(trie.NodeSyncResult{ - Path: path, - Data: blob, - }) - delete(nodeTasks, path) - return hash, err - } - } - return common.Hash{}, trie.ErrNotRequested -} - -// updateStats bumps the various state sync progress counters and displays a log -// message for the user to see. -func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) { - s.d.syncStatsLock.Lock() - defer s.d.syncStatsLock.Unlock() - - s.d.syncStatsState.pending = uint64(s.sched.Pending()) - s.d.syncStatsState.processed += uint64(written) - s.d.syncStatsState.duplicate += uint64(duplicate) - s.d.syncStatsState.unexpected += uint64(unexpected) - - if written > 0 || duplicate > 0 || unexpected > 0 { - log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected) - } - //if written > 0 { - //rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed) - //} -} diff --git a/les/downloader/testchain_test.go b/les/downloader/testchain_test.go deleted file mode 100644 index dff0844cb..000000000 --- a/les/downloader/testchain_test.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
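processNodeData above matches a delivered blob to its download task purely by the blob's keccak256 hash; there is no other identifier on the wire. A standalone sketch of that identification step, using golang.org/x/crypto/sha3 as the deleted code effectively does through its keccak state:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	blob := []byte("some trie node rlp") // stand-in for a delivered blob
	h := sha3.NewLegacyKeccak256()
	h.Write(blob)
	// This digest is the key used to look the blob up in the task maps.
	fmt.Printf("%x\n", h.Sum(nil))
}
```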
- -package downloader - -import ( - "fmt" - "math/big" - "sync" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" -) - -// Test chain parameters. -var ( - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddress = crypto.PubkeyToAddress(testKey.PublicKey) - testDB = rawdb.NewMemoryDatabase() - - gspec = core.Genesis{ - Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - testGenesis = gspec.MustCommit(testDB) -) - -// The common prefix of all test chains: -var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) - -// Different forks on top of the base chain: -var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain - -func init() { - var forkLen = int(fullMaxForkAncestry + 50) - var wg sync.WaitGroup - wg.Add(3) - go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }() - go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }() - go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }() - wg.Wait() -} - -type testChain struct { - genesis *types.Block - chain []common.Hash - headerm map[common.Hash]*types.Header - blockm map[common.Hash]*types.Block - receiptm map[common.Hash][]*types.Receipt - tdm map[common.Hash]*big.Int -} - -// newTestChain creates a blockchain of the given length. -func newTestChain(length int, genesis *types.Block) *testChain { - tc := new(testChain).copy(length) - tc.genesis = genesis - tc.chain = append(tc.chain, genesis.Hash()) - tc.headerm[tc.genesis.Hash()] = tc.genesis.Header() - tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty() - tc.blockm[tc.genesis.Hash()] = tc.genesis - tc.generate(length-1, 0, genesis, false) - return tc -} - -// makeFork creates a fork on top of the test chain. -func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain { - fork := tc.copy(tc.len() + length) - fork.generate(length, seed, tc.headBlock(), heavy) - return fork -} - -// shorten creates a copy of the chain with the given length. It panics if the -// length is longer than the number of available blocks. -func (tc *testChain) shorten(length int) *testChain { - if length > tc.len() { - panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len())) - } - return tc.copy(length) -} - -func (tc *testChain) copy(newlen int) *testChain { - cpy := &testChain{ - genesis: tc.genesis, - headerm: make(map[common.Hash]*types.Header, newlen), - blockm: make(map[common.Hash]*types.Block, newlen), - receiptm: make(map[common.Hash][]*types.Receipt, newlen), - tdm: make(map[common.Hash]*big.Int, newlen), - } - for i := 0; i < len(tc.chain) && i < newlen; i++ { - hash := tc.chain[i] - cpy.chain = append(cpy.chain, tc.chain[i]) - cpy.tdm[hash] = tc.tdm[hash] - cpy.blockm[hash] = tc.blockm[hash] - cpy.headerm[hash] = tc.headerm[hash] - cpy.receiptm[hash] = tc.receiptm[hash] - } - return cpy -} - -// generate creates a chain of n blocks starting at and including parent. -// the returned hash chain is ordered head->parent. 
In addition, every 22th block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly. -func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) { - // start := time.Now() - // defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }() - - blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{seed}) - // If a heavy chain is requested, delay blocks to raise difficulty - if heavy { - block.OffsetTime(-1) - } - // Include transactions to the miner to make blocks more interesting. - if parent == tc.genesis && i%22 == 0 { - signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp()) - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - // if the block number is a multiple of 5, add a bonus uncle to the block - if i > 0 && i%5 == 0 { - block.AddUncle(&types.Header{ - ParentHash: block.PrevBlock(i - 1).Hash(), - Number: big.NewInt(block.Number().Int64() - 1), - }) - } - }) - - // Convert the block-chain into a hash-chain and header/block maps - td := new(big.Int).Set(tc.td(parent.Hash())) - for i, b := range blocks { - td := td.Add(td, b.Difficulty()) - hash := b.Hash() - tc.chain = append(tc.chain, hash) - tc.blockm[hash] = b - tc.headerm[hash] = b.Header() - tc.receiptm[hash] = receipts[i] - tc.tdm[hash] = new(big.Int).Set(td) - } -} - -// len returns the total number of blocks in the chain. -func (tc *testChain) len() int { - return len(tc.chain) -} - -// headBlock returns the head of the chain. -func (tc *testChain) headBlock() *types.Block { - return tc.blockm[tc.chain[len(tc.chain)-1]] -} - -// td returns the total difficulty of the given block. -func (tc *testChain) td(hash common.Hash) *big.Int { - return tc.tdm[hash] -} - -// headersByHash returns headers in order from the given hash. -func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header { - num, _ := tc.hashToNumber(origin) - return tc.headersByNumber(num, amount, skip, reverse) -} - -// headersByNumber returns headers from the given number. -func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header { - result := make([]*types.Header, 0, amount) - - if !reverse { - for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 { - if header, ok := tc.headerm[tc.chain[int(num)]]; ok { - result = append(result, header) - } - } - } else { - for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 { - if header, ok := tc.headerm[tc.chain[int(num)]]; ok { - result = append(result, header) - } - } - } - return result -} - -// receipts returns the receipts of the given block hashes. -func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt { - results := make([][]*types.Receipt, 0, len(hashes)) - for _, hash := range hashes { - if receipt, ok := tc.receiptm[hash]; ok { - results = append(results, receipt) - } - } - return results -} - -// bodies returns the block bodies of the given block hashes. 
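headersByNumber above walks the chain in strides of skip+1, forward or backward, stopping at the chain bounds or the requested amount. A cut-down version over a plain slice (toy data, not the real testChain):

```go
package main

import "fmt"

// headersByNumber returns up to amount entries starting at origin,
// stepping skip+1 positions per iteration, in either direction.
func headersByNumber(chain []string, origin uint64, amount, skip int, reverse bool) []string {
	result := make([]string, 0, amount)
	if !reverse {
		for num := origin; num < uint64(len(chain)) && len(result) < amount; num += uint64(skip) + 1 {
			result = append(result, chain[num])
		}
	} else {
		for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 {
			result = append(result, chain[num])
		}
	}
	return result
}

func main() {
	chain := []string{"h0", "h1", "h2", "h3", "h4", "h5"}
	fmt.Println(headersByNumber(chain, 1, 3, 1, false)) // [h1 h3 h5]
	fmt.Println(headersByNumber(chain, 4, 3, 1, true))  // [h4 h2 h0]
}
```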
-func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) { - transactions := make([][]*types.Transaction, 0, len(hashes)) - uncles := make([][]*types.Header, 0, len(hashes)) - for _, hash := range hashes { - if block, ok := tc.blockm[hash]; ok { - transactions = append(transactions, block.Transactions()) - uncles = append(uncles, block.Uncles()) - } - } - return transactions, uncles -} - -func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) { - for num, hash := range tc.chain { - if hash == target { - return uint64(num), true - } - } - return 0, false -} diff --git a/les/downloader/types.go b/les/downloader/types.go deleted file mode 100644 index ff70bfa0e..000000000 --- a/les/downloader/types.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/core/types" -) - -// peerDropFn is a callback type for dropping a peer detected as malicious. -type peerDropFn func(id string) - -// dataPack is a data message returned by a peer for some query. -type dataPack interface { - PeerId() string - Items() int - Stats() string -} - -// headerPack is a batch of block headers returned by a peer. -type headerPack struct { - peerID string - headers []*types.Header -} - -func (p *headerPack) PeerId() string { return p.peerID } -func (p *headerPack) Items() int { return len(p.headers) } -func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) } - -// bodyPack is a batch of block bodies returned by a peer. -type bodyPack struct { - peerID string - transactions [][]*types.Transaction - uncles [][]*types.Header -} - -func (p *bodyPack) PeerId() string { return p.peerID } -func (p *bodyPack) Items() int { - if len(p.transactions) <= len(p.uncles) { - return len(p.transactions) - } - return len(p.uncles) -} -func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } - -// receiptPack is a batch of receipts returned by a peer. -type receiptPack struct { - peerID string - receipts [][]*types.Receipt -} - -func (p *receiptPack) PeerId() string { return p.peerID } -func (p *receiptPack) Items() int { return len(p.receipts) } -func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) } - -// statePack is a batch of states returned by a peer. 
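Every pack type in this deleted file satisfies the small dataPack interface above, so delivery code can report on any of them uniformly. A minimal sketch of that use (logDelivery is a hypothetical helper, not part of the package):

// logDelivery summarizes any pack via the three dataPack methods.
func logDelivery(pack dataPack) string {
	return fmt.Sprintf("peer %s delivered %d items (%s)", pack.PeerId(), pack.Items(), pack.Stats())
}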
-type statePack struct { - peerID string - states [][]byte -} - -func (p *statePack) PeerId() string { return p.peerID } -func (p *statePack) Items() int { return len(p.states) } -func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) } diff --git a/les/fetcher.go b/les/fetcher.go deleted file mode 100644 index df81ca4d9..000000000 --- a/les/fetcher.go +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "math/big" - "math/rand" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/les/fetcher" - "github.com/ethereum/go-ethereum/light" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -const ( - blockDelayTimeout = 10 * time.Second // Timeout for retrieving the headers from the peer - gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired requests - cachedAnnosThreshold = 64 // The maximum queued announcements -) - -// announce represents an new block announcement from the les server. -type announce struct { - data *announceData - trust bool - peerid enode.ID -} - -// request represents a record when the header request is sent. -type request struct { - reqid uint64 - peerid enode.ID - sendAt time.Time - hash common.Hash -} - -// response represents a response packet from network as well as a channel -// to return all un-requested data. -type response struct { - reqid uint64 - headers []*types.Header - peerid enode.ID - remain chan []*types.Header -} - -// fetcherPeer holds the fetcher-specific information for each active peer -type fetcherPeer struct { - latest *announceData // The latest announcement sent from the peer - - // These following two fields can track the latest announces - // from the peer with limited size for caching. We hold the - // assumption that all enqueued announces are td-monotonic. - announces map[common.Hash]*announce // Announcement map - fifo []common.Hash // FIFO announces list -} - -// addAnno enqueues an new trusted announcement. If the queued announces overflow, -// evict from the oldest. -func (fp *fetcherPeer) addAnno(anno *announce) { - // Short circuit if the anno already exists. In normal case it should - // never happen since only monotonic anno is accepted. But the adversary - // may feed us fake announces with higher td but same hash. In this case, - // ignore the anno anyway. 
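	// The fifo list below, together with the announces map, forms a small
	// bounded cache: once more than cachedAnnosThreshold (64) hashes are
	// queued, the eviction step drops the oldest entries first.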
-	hash := anno.data.Hash
-	if _, exist := fp.announces[hash]; exist {
-		return
-	}
-	fp.announces[hash] = anno
-	fp.fifo = append(fp.fifo, hash)
-
-	// Evict oldest if the announces are oversized.
-	if len(fp.fifo)-cachedAnnosThreshold > 0 {
-		for i := 0; i < len(fp.fifo)-cachedAnnosThreshold; i++ {
-			delete(fp.announces, fp.fifo[i])
-		}
-		copy(fp.fifo, fp.fifo[len(fp.fifo)-cachedAnnosThreshold:])
-		fp.fifo = fp.fifo[:cachedAnnosThreshold]
-	}
-}
-
-// forwardAnno removes all announces from the map whose total difficulty does
-// not exceed the provided threshold.
-func (fp *fetcherPeer) forwardAnno(td *big.Int) []*announce {
-	var (
-		cutset  int
-		evicted []*announce
-	)
-	for ; cutset < len(fp.fifo); cutset++ {
-		anno := fp.announces[fp.fifo[cutset]]
-		if anno == nil {
-			continue // In theory it should never ever happen
-		}
-		if anno.data.Td.Cmp(td) > 0 {
-			break
-		}
-		evicted = append(evicted, anno)
-		delete(fp.announces, anno.data.Hash)
-	}
-	if cutset > 0 {
-		copy(fp.fifo, fp.fifo[cutset:])
-		fp.fifo = fp.fifo[:len(fp.fifo)-cutset]
-	}
-	return evicted
-}
-
-// lightFetcher implements retrieval of newly announced headers. It reuses
-// the eth.BlockFetcher as the underlying fetcher but adds additional
-// rules: e.g. evicting peers that time out.
-type lightFetcher struct {
-	// Various handlers
-	ulc     *ulc
-	chaindb ethdb.Database
-	reqDist *requestDistributor
-	peerset *serverPeerSet        // The global peerset of the light client, shared by all components
-	chain   *light.LightChain     // The local light chain which maintains the canonical header chain.
-	fetcher *fetcher.BlockFetcher // The underlying fetcher which takes care of block header retrieval.
-
-	// Peerset maintained by fetcher
-	plock sync.RWMutex
-	peers map[enode.ID]*fetcherPeer
-
-	// Various channels
-	announceCh chan *announce
-	requestCh  chan *request
-	deliverCh  chan *response
-	syncDone   chan *types.Header
-
-	closeCh chan struct{}
-	wg      sync.WaitGroup
-
-	// Callback
-	synchronise func(peer *serverPeer)
-
-	// Test fields or hooks
-	newHeadHook func(*types.Header)
-}
-
-// newLightFetcher creates a light fetcher instance.
-func newLightFetcher(chain *light.LightChain, engine consensus.Engine, peers *serverPeerSet, ulc *ulc, chaindb ethdb.Database, reqDist *requestDistributor, syncFn func(p *serverPeer)) *lightFetcher {
-	// Construct the fetcher by offering all necessary APIs
-	validator := func(header *types.Header) error {
-		// Disable seal verification explicitly if we are running in ulc mode.
-		return engine.VerifyHeader(chain, header)
-	}
-	heighter := func() uint64 { return chain.CurrentHeader().Number.Uint64() }
-	dropper := func(id string) { peers.unregister(id) }
-	inserter := func(headers []*types.Header) (int, error) { return chain.InsertHeaderChain(headers) }
-	f := &lightFetcher{
-		ulc:         ulc,
-		peerset:     peers,
-		chaindb:     chaindb,
-		chain:       chain,
-		reqDist:     reqDist,
-		fetcher:     fetcher.NewBlockFetcher(true, chain.GetHeaderByHash, nil, validator, nil, heighter, inserter, nil, dropper),
-		peers:       make(map[enode.ID]*fetcherPeer),
-		synchronise: syncFn,
-		announceCh:  make(chan *announce),
-		requestCh:   make(chan *request),
-		deliverCh:   make(chan *response),
-		syncDone:    make(chan *types.Header),
-		closeCh:     make(chan struct{}),
-	}
-	peers.subscribe(f)
-	return f
-}
-
-func (f *lightFetcher) start() {
-	f.wg.Add(1)
-	f.fetcher.Start()
-	go f.mainloop()
-}
-
-func (f *lightFetcher) stop() {
-	close(f.closeCh)
-	f.fetcher.Stop()
-	f.wg.Wait()
-}
-
-// registerPeer adds a new peer to the fetcher's peer set
-func (f *lightFetcher) registerPeer(p *serverPeer) {
-	f.plock.Lock()
-	defer f.plock.Unlock()
-
-	f.peers[p.ID()] = &fetcherPeer{announces: make(map[common.Hash]*announce)}
-}
-
-// unregisterPeer removes the specified peer from the fetcher's peer set
-func (f *lightFetcher) unregisterPeer(p *serverPeer) {
-	f.plock.Lock()
-	defer f.plock.Unlock()
-
-	delete(f.peers, p.ID())
-}
-
-// peer returns the peer from the fetcher peerset.
-func (f *lightFetcher) peer(id enode.ID) *fetcherPeer {
-	f.plock.RLock()
-	defer f.plock.RUnlock()
-
-	return f.peers[id]
-}
-
-// forEachPeer iterates the fetcher peerset, aborting the iteration if the
-// callback returns false.
-func (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool) {
-	f.plock.RLock()
-	defer f.plock.RUnlock()
-
-	for id, peer := range f.peers {
-		if !check(id, peer) {
-			return
-		}
-	}
-}
-
-// mainloop is the main event loop of the light fetcher, which is responsible for
-//
//   - announcement maintenance (ulc)
-//
-//     If we are running in ultra light client mode, then all announcements from
-//     the trusted servers are maintained. If the same announcements from trusted
-//     servers reach the threshold, then the relevant header is requested for retrieval.
-//
-//   - block header retrieval
-//     Whenever we receive an announce with a higher td than the local chain, a
-//     request will be made for header retrieval.
-//
-//   - re-sync trigger
-//     If the local chain lags too much, then the fetcher will enter "synchronise"
-//     mode to retrieve missing headers in batch.
-func (f *lightFetcher) mainloop() {
-	defer f.wg.Done()
-
-	var (
-		syncInterval = uint64(1) // Interval used to trigger a light resync.
-		syncing      bool        // Indicator whether the client is syncing
-
-		ulc          = f.ulc != nil
-		headCh       = make(chan core.ChainHeadEvent, 100)
-		fetching     = make(map[uint64]*request)
-		requestTimer = time.NewTimer(0)
-
-		// Local status
-		localHead = f.chain.CurrentHeader()
-		localTd   = f.chain.GetTd(localHead.Hash(), localHead.Number.Uint64())
-	)
-	defer requestTimer.Stop()
-	sub := f.chain.SubscribeChainHeadEvent(headCh)
-	defer sub.Unsubscribe()
-
-	// reset updates the local status with given header.
-	reset := func(header *types.Header) {
-		localHead = header
-		localTd = f.chain.GetTd(header.Hash(), header.Number.Uint64())
-	}
-	// trustedHeader returns an indicator whether the header is regarded as
-	// trusted.
If we are running in ulc mode, a header is regarded as
-	// trusted only after enough trusted servers have announced the same header.
-	trustedHeader := func(hash common.Hash, number uint64) (bool, []enode.ID) {
-		var (
-			agreed  []enode.ID
-			trusted bool
-		)
-		f.forEachPeer(func(id enode.ID, p *fetcherPeer) bool {
-			if anno := p.announces[hash]; anno != nil && anno.trust && anno.data.Number == number {
-				agreed = append(agreed, id)
-				if 100*len(agreed)/len(f.ulc.keys) >= f.ulc.fraction {
-					trusted = true
-					return false // abort iteration
-				}
-			}
-			return true
-		})
-		return trusted, agreed
-	}
-	for {
-		select {
-		case anno := <-f.announceCh:
-			peerid, data := anno.peerid, anno.data
-			log.Debug("Received new announce", "peer", peerid, "number", data.Number, "hash", data.Hash, "reorg", data.ReorgDepth)
-
-			peer := f.peer(peerid)
-			if peer == nil {
-				log.Debug("Receive announce from unknown peer", "peer", peerid)
-				continue
-			}
-			// Announced tds should be strictly monotonic, drop the peer if
-			// the announce is out-of-order.
-			if peer.latest != nil && data.Td.Cmp(peer.latest.Td) <= 0 {
-				f.peerset.unregister(peerid.String())
-				log.Debug("Non-monotonic td", "peer", peerid, "current", data.Td, "previous", peer.latest.Td)
-				continue
-			}
-			peer.latest = data
-
-			// Filter out any stale announce; the local chain is ahead of the announce
-			if localTd != nil && data.Td.Cmp(localTd) <= 0 {
-				continue
-			}
-			peer.addAnno(anno)
-
-			// If we are not syncing, try to trigger a single retrieval or re-sync
-			if !ulc && !syncing {
-				// Two scenarios lead to re-sync:
-				// - reorg happens
-				// - local chain lags
-				// We can't retrieve the parent of the announce by single retrieval
-				// in both cases, so resync is necessary.
-				if data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 {
-					syncing = true
-					go f.startSync(peerid)
-					log.Debug("Trigger light sync", "peer", peerid, "local", localHead.Number, "localhash", localHead.Hash(), "remote", data.Number, "remotehash", data.Hash)
-					continue
-				}
-				f.fetcher.Notify(peerid.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(peerid), nil)
-				log.Debug("Trigger header retrieval", "peer", peerid, "number", data.Number, "hash", data.Hash)
-			}
-			// Keep collecting announces from trusted servers even if we are syncing.
-			if ulc && anno.trust {
-				// Notify the underlying fetcher to retrieve the header, or trigger a resync
-				// if we have received enough announcements from trusted servers.
-				trusted, agreed := trustedHeader(data.Hash, data.Number)
-				if trusted && !syncing {
-					if data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 {
-						syncing = true
-						go f.startSync(peerid)
-						log.Debug("Trigger trusted light sync", "local", localHead.Number, "localhash", localHead.Hash(), "remote", data.Number, "remotehash", data.Hash)
-						continue
-					}
-					p := agreed[rand.Intn(len(agreed))]
-					f.fetcher.Notify(p.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(p), nil)
-					log.Debug("Trigger trusted header retrieval", "number", data.Number, "hash", data.Hash)
-				}
-			}
-
-		case req := <-f.requestCh:
-			fetching[req.reqid] = req // Track all in-flight requests for response latency statistics.
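			// When this is the only in-flight request the timeout timer is idle
			// and must be armed here; for later requests the timer is already
			// scheduled and is re-armed as requests complete or time out.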
-			if len(fetching) == 1 {
-				f.rescheduleTimer(fetching, requestTimer)
-			}
-
-		case <-requestTimer.C:
-			for reqid, request := range fetching {
-				if time.Since(request.sendAt) > blockDelayTimeout-gatherSlack {
-					delete(fetching, reqid)
-					f.peerset.unregister(request.peerid.String())
-					log.Debug("Request timeout", "peer", request.peerid, "reqid", reqid)
-				}
-			}
-			f.rescheduleTimer(fetching, requestTimer)
-
-		case resp := <-f.deliverCh:
-			if req := fetching[resp.reqid]; req != nil {
-				delete(fetching, resp.reqid)
-				f.rescheduleTimer(fetching, requestTimer)
-
-				// The underlying fetcher does not check the consistency of request and response.
-				// An adversary can send fake announces with an invalid hash and number but always
-				// deliver some mismatched header, so it can't be punished by the underlying fetcher.
-				// We have to add two more rules here to detect this.
-				if len(resp.headers) != 1 {
-					f.peerset.unregister(req.peerid.String())
-					log.Debug("Deliver more than requested", "peer", req.peerid, "reqid", req.reqid)
-					continue
-				}
-				if resp.headers[0].Hash() != req.hash {
-					f.peerset.unregister(req.peerid.String())
-					log.Debug("Deliver invalid header", "peer", req.peerid, "reqid", req.reqid)
-					continue
-				}
-				resp.remain <- f.fetcher.FilterHeaders(resp.peerid.String(), resp.headers, time.Now())
-			} else {
-				// Discard the entire packet, whether it's a timed-out response or an unexpected one.
-				resp.remain <- resp.headers
-			}
-
-		case ev := <-headCh:
-			// Short circuit if we are still syncing.
-			if syncing {
-				continue
-			}
-			reset(ev.Block.Header())
-
-			// Clean stale announcements from les-servers.
-			var droplist []enode.ID
-			f.forEachPeer(func(id enode.ID, p *fetcherPeer) bool {
-				removed := p.forwardAnno(localTd)
-				for _, anno := range removed {
-					if header := f.chain.GetHeaderByHash(anno.data.Hash); header != nil {
-						if header.Number.Uint64() != anno.data.Number {
-							droplist = append(droplist, id)
-							break
-						}
-						// In theory the td should exist.
-						td := f.chain.GetTd(anno.data.Hash, anno.data.Number)
-						if td != nil && td.Cmp(anno.data.Td) != 0 {
-							droplist = append(droplist, id)
-							break
-						}
-					}
-				}
-				return true
-			})
-			for _, id := range droplist {
-				f.peerset.unregister(id.String())
-				log.Debug("Kicked out peer for invalid announcement")
-			}
-			if f.newHeadHook != nil {
-				f.newHeadHook(localHead)
-			}
-
-		case origin := <-f.syncDone:
-			syncing = false // Reset the status
-
-			// Rewind all untrusted headers for ulc mode.
-			if ulc {
-				head := f.chain.CurrentHeader()
-				ancestor := rawdb.FindCommonAncestor(f.chaindb, origin, head)
-
-				// Fall back to the genesis header in case no common ancestor
-				// is found. It can happen that the original head is before the
-				// checkpoint while the synced headers are after it; in this
-				// case there is no ancestor between them.
-				if ancestor == nil {
-					ancestor = f.chain.Genesis().Header()
-				}
-				var untrusted []common.Hash
-				for head.Number.Cmp(ancestor.Number) > 0 {
-					hash, number := head.Hash(), head.Number.Uint64()
-					if trusted, _ := trustedHeader(hash, number); trusted {
-						break
-					}
-					untrusted = append(untrusted, hash)
-					head = f.chain.GetHeader(head.ParentHash, number-1)
-					if head == nil {
-						break // all the synced headers will be dropped
-					}
-				}
-				if len(untrusted) > 0 {
-					for i, j := 0, len(untrusted)-1; i < j; i, j = i+1, j-1 {
-						untrusted[i], untrusted[j] = untrusted[j], untrusted[i]
-					}
-					f.chain.Rollback(untrusted)
-				}
-			}
-			// Reset local status.
- reset(f.chain.CurrentHeader()) - if f.newHeadHook != nil { - f.newHeadHook(localHead) - } - log.Debug("light sync finished", "number", localHead.Number, "hash", localHead.Hash()) - - case <-f.closeCh: - return - } - } -} - -// announce processes a new announcement message received from a peer. -func (f *lightFetcher) announce(p *serverPeer, head *announceData) { - select { - case f.announceCh <- &announce{peerid: p.ID(), trust: p.trusted, data: head}: - case <-f.closeCh: - return - } -} - -// trackRequest sends a reqID to main loop for in-flight request tracking. -func (f *lightFetcher) trackRequest(peerid enode.ID, reqid uint64, hash common.Hash) { - select { - case f.requestCh <- &request{reqid: reqid, peerid: peerid, sendAt: time.Now(), hash: hash}: - case <-f.closeCh: - } -} - -// requestHeaderByHash constructs a header retrieval request and sends it to -// local request distributor. -// -// Note, we rely on the underlying eth/fetcher to retrieve and validate the -// response, so that we have to obey the rule of eth/fetcher which only accepts -// the response from given peer. -func (f *lightFetcher) requestHeaderByHash(peerid enode.ID) func(common.Hash) error { - return func(hash common.Hash) error { - req := &distReq{ - getCost: func(dp distPeer) uint64 { return dp.(*serverPeer).getRequestCost(GetBlockHeadersMsg, 1) }, - canSend: func(dp distPeer) bool { return dp.(*serverPeer).ID() == peerid }, - request: func(dp distPeer) func() { - peer, id := dp.(*serverPeer), rand.Uint64() - cost := peer.getRequestCost(GetBlockHeadersMsg, 1) - peer.fcServer.QueuedRequest(id, cost) - - return func() { - f.trackRequest(peer.ID(), id, hash) - peer.requestHeadersByHash(id, hash, 1, 0, false) - } - }, - } - f.reqDist.queue(req) - return nil - } -} - -// startSync invokes synchronisation callback to start syncing. -func (f *lightFetcher) startSync(id enode.ID) { - defer func(header *types.Header) { - f.syncDone <- header - }(f.chain.CurrentHeader()) - - peer := f.peerset.peer(id.String()) - if peer == nil || peer.onlyAnnounce { - return - } - f.synchronise(peer) -} - -// deliverHeaders delivers header download request responses for processing -func (f *lightFetcher) deliverHeaders(peer *serverPeer, reqid uint64, headers []*types.Header) []*types.Header { - remain := make(chan []*types.Header, 1) - select { - case f.deliverCh <- &response{reqid: reqid, headers: headers, peerid: peer.ID(), remain: remain}: - case <-f.closeCh: - return nil - } - return <-remain -} - -// rescheduleTimer resets the specified timeout timer to the next request timeout. -func (f *lightFetcher) rescheduleTimer(requests map[uint64]*request, timer *time.Timer) { - // Short circuit if no inflight requests - if len(requests) == 0 { - timer.Stop() - return - } - // Otherwise find the earliest expiring request - earliest := time.Now() - for _, req := range requests { - if earliest.After(req.sendAt) { - earliest = req.sendAt - } - } - timer.Reset(blockDelayTimeout - time.Since(earliest)) -} diff --git a/les/fetcher/block_fetcher.go b/les/fetcher/block_fetcher.go deleted file mode 100644 index 085ecb2d6..000000000 --- a/les/fetcher/block_fetcher.go +++ /dev/null @@ -1,888 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package fetcher is a temporary package whilst working on the eth/66 blocking refactors. -// After that work is done, les needs to be refactored to use the new package, -// or alternatively use a stripped down version of it. Either way, we need to -// keep the changes scoped so duplicating temporarily seems the sanest. -package fetcher - -import ( - "errors" - "math/rand" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/trie" -) - -const ( - lightTimeout = time.Millisecond // Time allowance before an announced header is explicitly requested - arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested - gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches - fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction -) - -const ( - maxUncleDist = 7 // Maximum allowed backward distance from the chain head - maxQueueDist = 32 // Maximum allowed distance from the chain head to queue - hashLimit = 256 // Maximum number of unique blocks or headers a peer may have announced - blockLimit = 64 // Maximum number of unique blocks a peer may have delivered -) - -var ( - blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil) - blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil) - blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil) - blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil) - - blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil) - blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil) - blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil) - blockBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil) - - headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil) - bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil) - - headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil) - headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil) - bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil) - bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil) -) - -var errTerminated = errors.New("terminated") - -// 
HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
-type HeaderRetrievalFn func(common.Hash) *types.Header
-
-// blockRetrievalFn is a callback type for retrieving a block from the local chain.
-type blockRetrievalFn func(common.Hash) *types.Block
-
-// headerRequesterFn is a callback type for sending a header retrieval request.
-type headerRequesterFn func(common.Hash) error
-
-// bodyRequesterFn is a callback type for sending a body retrieval request.
-type bodyRequesterFn func([]common.Hash) error
-
-// headerVerifierFn is a callback type to verify a block's header for fast propagation.
-type headerVerifierFn func(header *types.Header) error
-
-// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
-type blockBroadcasterFn func(block *types.Block, propagate bool)
-
-// chainHeightFn is a callback type to retrieve the current chain height.
-type chainHeightFn func() uint64
-
-// headersInsertFn is a callback type to insert a batch of headers into the local chain.
-type headersInsertFn func(headers []*types.Header) (int, error)
-
-// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
-type chainInsertFn func(types.Blocks) (int, error)
-
-// peerDropFn is a callback type for dropping a peer detected as malicious.
-type peerDropFn func(id string)
-
-// blockAnnounce is the hash notification of the availability of a new block in the
-// network.
-type blockAnnounce struct {
-	hash   common.Hash   // Hash of the block being announced
-	number uint64        // Number of the block being announced (0 = unknown | old protocol)
-	header *types.Header // Header of the block partially reassembled (new protocol)
-	time   time.Time     // Timestamp of the announcement
-
-	origin string // Identifier of the peer originating the notification
-
-	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
-	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
-}
-
-// headerFilterTask represents a batch of headers needing fetcher filtering.
-type headerFilterTask struct {
-	peer    string          // The source peer of block headers
-	headers []*types.Header // Collection of headers to filter
-	time    time.Time       // Arrival time of the headers
-}
-
-// bodyFilterTask represents a batch of block bodies (transactions and uncles)
-// needing fetcher filtering.
-type bodyFilterTask struct {
-	peer         string                 // The source peer of block bodies
-	transactions [][]*types.Transaction // Collection of transactions per block bodies
-	uncles       [][]*types.Header      // Collection of uncles per block bodies
-	time         time.Time              // Arrival time of the blocks' contents
-}
-
-// blockOrHeaderInject represents a scheduled import operation.
-type blockOrHeaderInject struct {
-	origin string
-
-	header *types.Header // Used for light mode fetcher which only cares about header.
-	block  *types.Block  // Used for normal mode fetcher which imports full block.
-}
-
-// number returns the block number of the injected object.
-func (inject *blockOrHeaderInject) number() uint64 {
-	if inject.header != nil {
-		return inject.header.Number.Uint64()
-	}
-	return inject.block.NumberU64()
-}
-
-// hash returns the block hash of the injected object.
-func (inject *blockOrHeaderInject) hash() common.Hash { - if inject.header != nil { - return inject.header.Hash() - } - return inject.block.Hash() -} - -// BlockFetcher is responsible for accumulating block announcements from various peers -// and scheduling them for retrieval. -type BlockFetcher struct { - light bool // The indicator whether it's a light fetcher or normal one. - - // Various event channels - notify chan *blockAnnounce - inject chan *blockOrHeaderInject - - headerFilter chan chan *headerFilterTask - bodyFilter chan chan *bodyFilterTask - - done chan common.Hash - quit chan struct{} - - // Announce states - announces map[string]int // Per peer blockAnnounce counts to prevent memory exhaustion - announced map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching - fetching map[common.Hash]*blockAnnounce // Announced blocks, currently fetching - fetched map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval - completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing - - // Block cache - queue *prque.Prque[int64, *blockOrHeaderInject] // Queue containing the import operations (block number sorted) - queues map[string]int // Per peer block counts to prevent memory exhaustion - queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports) - - // Callbacks - getHeader HeaderRetrievalFn // Retrieves a header from the local chain - getBlock blockRetrievalFn // Retrieves a block from the local chain - verifyHeader headerVerifierFn // Checks if a block's headers have a valid proof of work - broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers - chainHeight chainHeightFn // Retrieves the current chain's height - insertHeaders headersInsertFn // Injects a batch of headers into the chain - insertChain chainInsertFn // Injects a batch of blocks into the chain - dropPeer peerDropFn // Drops a peer for misbehaving - - // Testing hooks - announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list - queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue - fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch - completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62) - importedHook func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62) -} - -// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements. 
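For orientation, the light-client side of this same diff wires this constructor with nil for every block-related callback; condensed from the deleted newLightFetcher above:

// Light mode: only header callbacks are supplied; getBlock, broadcastBlock
// and insertChain stay nil because a light client never handles full blocks.
f := fetcher.NewBlockFetcher(true, chain.GetHeaderByHash, nil, validator, nil, heighter, inserter, nil, dropper)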
-func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher { - return &BlockFetcher{ - light: light, - notify: make(chan *blockAnnounce), - inject: make(chan *blockOrHeaderInject), - headerFilter: make(chan chan *headerFilterTask), - bodyFilter: make(chan chan *bodyFilterTask), - done: make(chan common.Hash), - quit: make(chan struct{}), - announces: make(map[string]int), - announced: make(map[common.Hash][]*blockAnnounce), - fetching: make(map[common.Hash]*blockAnnounce), - fetched: make(map[common.Hash][]*blockAnnounce), - completing: make(map[common.Hash]*blockAnnounce), - queue: prque.New[int64, *blockOrHeaderInject](nil), - queues: make(map[string]int), - queued: make(map[common.Hash]*blockOrHeaderInject), - getHeader: getHeader, - getBlock: getBlock, - verifyHeader: verifyHeader, - broadcastBlock: broadcastBlock, - chainHeight: chainHeight, - insertHeaders: insertHeaders, - insertChain: insertChain, - dropPeer: dropPeer, - } -} - -// Start boots up the announcement based synchroniser, accepting and processing -// hash notifications and block fetches until termination requested. -func (f *BlockFetcher) Start() { - go f.loop() -} - -// Stop terminates the announcement based synchroniser, canceling all pending -// operations. -func (f *BlockFetcher) Stop() { - close(f.quit) -} - -// Notify announces the fetcher of the potential availability of a new block in -// the network. -func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, - headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { - block := &blockAnnounce{ - hash: hash, - number: number, - time: time, - origin: peer, - fetchHeader: headerFetcher, - fetchBodies: bodyFetcher, - } - select { - case f.notify <- block: - return nil - case <-f.quit: - return errTerminated - } -} - -// Enqueue tries to fill gaps the fetcher's future import queue. -func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error { - op := &blockOrHeaderInject{ - origin: peer, - block: block, - } - select { - case f.inject <- op: - return nil - case <-f.quit: - return errTerminated - } -} - -// FilterHeaders extracts all the headers that were explicitly requested by the fetcher, -// returning those that should be handled differently. -func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header { - log.Trace("Filtering headers", "peer", peer, "headers", len(headers)) - - // Send the filter channel to the fetcher - filter := make(chan *headerFilterTask) - - select { - case f.headerFilter <- filter: - case <-f.quit: - return nil - } - // Request the filtering of the header list - select { - case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}: - case <-f.quit: - return nil - } - // Retrieve the headers remaining after filtering - select { - case task := <-filter: - return task.headers - case <-f.quit: - return nil - } -} - -// FilterBodies extracts all the block bodies that were explicitly requested by -// the fetcher, returning those that should be handled differently. 
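To sketch how the notification API above is driven (the peer handler and its method names here are hypothetical, not from this diff):

// A peer handler feeds announcements in, handing the fetcher the callbacks
// for any follow-up retrievals it may decide to issue.
f.Notify(peer.ID(), hash, number, time.Now(), peer.RequestHeaderByHash, peer.RequestBodies)

// Inbound header batches are then filtered: whatever the fetcher did not
// explicitly request is returned and belongs to other subsystems.
rest := f.FilterHeaders(peer.ID(), headers, time.Now())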
-func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { - log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles)) - - // Send the filter channel to the fetcher - filter := make(chan *bodyFilterTask) - - select { - case f.bodyFilter <- filter: - case <-f.quit: - return nil, nil - } - // Request the filtering of the body list - select { - case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}: - case <-f.quit: - return nil, nil - } - // Retrieve the bodies remaining after filtering - select { - case task := <-filter: - return task.transactions, task.uncles - case <-f.quit: - return nil, nil - } -} - -// Loop is the main fetcher loop, checking and processing various notification -// events. -func (f *BlockFetcher) loop() { - // Iterate the block fetching until a quit is requested - var ( - fetchTimer = time.NewTimer(0) - completeTimer = time.NewTimer(0) - ) - <-fetchTimer.C // clear out the channel - <-completeTimer.C - defer fetchTimer.Stop() - defer completeTimer.Stop() - - for { - // Clean up any expired block fetches - for hash, announce := range f.fetching { - if time.Since(announce.time) > fetchTimeout { - f.forgetHash(hash) - } - } - // Import any queued blocks that could potentially fit - height := f.chainHeight() - for !f.queue.Empty() { - op := f.queue.PopItem() - hash := op.hash() - if f.queueChangeHook != nil { - f.queueChangeHook(hash, false) - } - // If too high up the chain or phase, continue later - number := op.number() - if number > height+1 { - f.queue.Push(op, -int64(number)) - if f.queueChangeHook != nil { - f.queueChangeHook(hash, true) - } - break - } - // Otherwise if fresh and still unknown, try and import - if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) { - f.forgetBlock(hash) - continue - } - if f.light { - f.importHeaders(op.origin, op.header) - } else { - f.importBlocks(op.origin, op.block) - } - } - // Wait for an outside event to occur - select { - case <-f.quit: - // BlockFetcher terminating, abort all operations - return - - case notification := <-f.notify: - // A block was announced, make sure the peer isn't DOSing us - blockAnnounceInMeter.Mark(1) - - count := f.announces[notification.origin] + 1 - if count > hashLimit { - log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit) - blockAnnounceDOSMeter.Mark(1) - break - } - // If we have a valid block number, check that it's potentially useful - if notification.number > 0 { - if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { - log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist) - blockAnnounceDropMeter.Mark(1) - break - } - } - // All is well, schedule the announce if block's not yet downloading - if _, ok := f.fetching[notification.hash]; ok { - break - } - if _, ok := f.completing[notification.hash]; ok { - break - } - f.announces[notification.origin] = count - f.announced[notification.hash] = append(f.announced[notification.hash], notification) - if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 { - f.announceChangeHook(notification.hash, true) - } - if len(f.announced) == 1 { - f.rescheduleFetch(fetchTimer) - } - - case op 
:= <-f.inject: - // A direct block insertion was requested, try and fill any pending gaps - blockBroadcastInMeter.Mark(1) - - // Now only direct block injection is allowed, drop the header injection - // here silently if we receive. - if f.light { - continue - } - f.enqueue(op.origin, nil, op.block) - - case hash := <-f.done: - // A pending import finished, remove all traces of the notification - f.forgetHash(hash) - f.forgetBlock(hash) - - case <-fetchTimer.C: - // At least one block's timer ran out, check for needing retrieval - request := make(map[string][]common.Hash) - - for hash, announces := range f.announced { - // In current LES protocol(les2/les3), only header announce is - // available, no need to wait too much time for header broadcast. - timeout := arriveTimeout - gatherSlack - if f.light { - timeout = 0 - } - if time.Since(announces[0].time) > timeout { - // Pick a random peer to retrieve from, reset all others - announce := announces[rand.Intn(len(announces))] - f.forgetHash(hash) - - // If the block still didn't arrive, queue for fetching - if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) { - request[announce.origin] = append(request[announce.origin], hash) - f.fetching[hash] = announce - } - } - } - // Send out all block header requests - for peer, hashes := range request { - log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes) - - // Create a closure of the fetch and schedule in on a new thread - fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes - go func() { - if f.fetchingHook != nil { - f.fetchingHook(hashes) - } - for _, hash := range hashes { - headerFetchMeter.Mark(1) - fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals - } - }() - } - // Schedule the next fetch if blocks are still pending - f.rescheduleFetch(fetchTimer) - - case <-completeTimer.C: - // At least one header's timer ran out, retrieve everything - request := make(map[string][]common.Hash) - - for hash, announces := range f.fetched { - // Pick a random peer to retrieve from, reset all others - announce := announces[rand.Intn(len(announces))] - f.forgetHash(hash) - - // If the block still didn't arrive, queue for completion - if f.getBlock(hash) == nil { - request[announce.origin] = append(request[announce.origin], hash) - f.completing[hash] = announce - } - } - // Send out all block body requests - for peer, hashes := range request { - log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes) - - // Create a closure of the fetch and schedule in on a new thread - if f.completingHook != nil { - f.completingHook(hashes) - } - bodyFetchMeter.Mark(int64(len(hashes))) - go f.completing[hashes[0]].fetchBodies(hashes) - } - // Schedule the next fetch if blocks are still pending - f.rescheduleComplete(completeTimer) - - case filter := <-f.headerFilter: - // Headers arrived from a remote peer. Extract those that were explicitly - // requested by the fetcher, and return everything else so it's delivered - // to other parts of the system. - var task *headerFilterTask - select { - case task = <-filter: - case <-f.quit: - return - } - headerFilterInMeter.Mark(int64(len(task.headers))) - - // Split the batch of headers into unknown ones (to return to the caller), - // known incomplete ones (requiring body retrievals) and completed blocks. 
- unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{} - for _, header := range task.headers { - hash := header.Hash() - - // Filter fetcher-requested headers from other synchronisation algorithms - if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil { - // If the delivered header does not match the promised number, drop the announcer - if header.Number.Uint64() != announce.number { - log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number) - f.dropPeer(announce.origin) - f.forgetHash(hash) - continue - } - // Collect all headers only if we are running in light - // mode and the headers are not imported by other means. - if f.light { - if f.getHeader(hash) == nil { - announce.header = header - lightHeaders = append(lightHeaders, announce) - } - f.forgetHash(hash) - continue - } - // Only keep if not imported by other means - if f.getBlock(hash) == nil { - announce.header = header - announce.time = task.time - - // If the block is empty (header only), short circuit into the final import queue - if header.TxHash == types.EmptyTxsHash && header.UncleHash == types.EmptyUncleHash { - log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) - - block := types.NewBlockWithHeader(header) - block.ReceivedAt = task.time - - complete = append(complete, block) - f.completing[hash] = announce - continue - } - // Otherwise add to the list of blocks needing completion - incomplete = append(incomplete, announce) - } else { - log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) - f.forgetHash(hash) - } - } else { - // BlockFetcher doesn't know about it, add to the return list - unknown = append(unknown, header) - } - } - headerFilterOutMeter.Mark(int64(len(unknown))) - select { - case filter <- &headerFilterTask{headers: unknown, time: task.time}: - case <-f.quit: - return - } - // Schedule the retrieved headers for body completion - for _, announce := range incomplete { - hash := announce.header.Hash() - if _, ok := f.completing[hash]; ok { - continue - } - f.fetched[hash] = append(f.fetched[hash], announce) - if len(f.fetched) == 1 { - f.rescheduleComplete(completeTimer) - } - } - // Schedule the header for light fetcher import - for _, announce := range lightHeaders { - f.enqueue(announce.origin, announce.header, nil) - } - // Schedule the header-only blocks for import - for _, block := range complete { - if announce := f.completing[block.Hash()]; announce != nil { - f.enqueue(announce.origin, nil, block) - } - } - - case filter := <-f.bodyFilter: - // Block bodies arrived, extract any explicitly requested blocks, return the rest - var task *bodyFilterTask - select { - case task = <-filter: - case <-f.quit: - return - } - bodyFilterInMeter.Mark(int64(len(task.transactions))) - blocks := []*types.Block{} - // abort early if there's nothing explicitly requested - if len(f.completing) > 0 { - for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ { - // Match up a body to any possible completion request - var ( - matched = false - uncleHash common.Hash // calculated lazily and reused - txnHash common.Hash // calculated lazily and reused - ) - for hash, announce := range f.completing { - if 
f.queued[hash] != nil || announce.origin != task.peer { - continue - } - if uncleHash == (common.Hash{}) { - uncleHash = types.CalcUncleHash(task.uncles[i]) - } - if uncleHash != announce.header.UncleHash { - continue - } - if txnHash == (common.Hash{}) { - txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil)) - } - if txnHash != announce.header.TxHash { - continue - } - // Mark the body matched, reassemble if still unknown - matched = true - if f.getBlock(hash) == nil { - block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i]) - block.ReceivedAt = task.time - blocks = append(blocks, block) - } else { - f.forgetHash(hash) - } - } - if matched { - task.transactions = append(task.transactions[:i], task.transactions[i+1:]...) - task.uncles = append(task.uncles[:i], task.uncles[i+1:]...) - i-- - continue - } - } - } - bodyFilterOutMeter.Mark(int64(len(task.transactions))) - select { - case filter <- task: - case <-f.quit: - return - } - // Schedule the retrieved blocks for ordered import - for _, block := range blocks { - if announce := f.completing[block.Hash()]; announce != nil { - f.enqueue(announce.origin, nil, block) - } - } - } - } -} - -// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout. -func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) { - // Short circuit if no blocks are announced - if len(f.announced) == 0 { - return - } - // Schedule announcement retrieval quickly for light mode - // since server won't send any headers to client. - if f.light { - fetch.Reset(lightTimeout) - return - } - // Otherwise find the earliest expiring announcement - earliest := time.Now() - for _, announces := range f.announced { - if earliest.After(announces[0].time) { - earliest = announces[0].time - } - } - fetch.Reset(arriveTimeout - time.Since(earliest)) -} - -// rescheduleComplete resets the specified completion timer to the next fetch timeout. -func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) { - // Short circuit if no headers are fetched - if len(f.fetched) == 0 { - return - } - // Otherwise find the earliest expiring announcement - earliest := time.Now() - for _, announces := range f.fetched { - if earliest.After(announces[0].time) { - earliest = announces[0].time - } - } - complete.Reset(gatherSlack - time.Since(earliest)) -} - -// enqueue schedules a new header or block import operation, if the component -// to be imported has not yet been seen. 
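The body-matching loop above keys deliveries on recomputed derived hashes rather than trusting the peer. Isolated, the check amounts to this (a sketch using the same library calls as the deleted code):

// A delivered body (txs, uncles) completes a pending header only if both
// derived hashes match what the header committed to.
txnHash := types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil))
uncleHash := types.CalcUncleHash(uncles)
matched := txnHash == header.TxHash && uncleHash == header.UncleHash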
-func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) { - var ( - hash common.Hash - number uint64 - ) - if header != nil { - hash, number = header.Hash(), header.Number.Uint64() - } else { - hash, number = block.Hash(), block.NumberU64() - } - // Ensure the peer isn't DOSing us - count := f.queues[peer] + 1 - if count > blockLimit { - log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit) - blockBroadcastDOSMeter.Mark(1) - f.forgetHash(hash) - return - } - // Discard any past or too distant blocks - if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { - log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist) - blockBroadcastDropMeter.Mark(1) - f.forgetHash(hash) - return - } - // Schedule the block for future importing - if _, ok := f.queued[hash]; !ok { - op := &blockOrHeaderInject{origin: peer} - if header != nil { - op.header = header - } else { - op.block = block - } - f.queues[peer] = count - f.queued[hash] = op - f.queue.Push(op, -int64(number)) - if f.queueChangeHook != nil { - f.queueChangeHook(hash, true) - } - log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size()) - } -} - -// importHeaders spawns a new goroutine to run a header insertion into the chain. -// If the header's number is at the same height as the current import phase, it -// updates the phase states accordingly. -func (f *BlockFetcher) importHeaders(peer string, header *types.Header) { - hash := header.Hash() - log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash) - - go func() { - defer func() { f.done <- hash }() - // If the parent's unknown, abort insertion - parent := f.getHeader(header.ParentHash) - if parent == nil { - log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash) - return - } - // Validate the header and if something went wrong, drop the peer - if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock { - log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) - f.dropPeer(peer) - return - } - // Run the actual import and log any issues - if _, err := f.insertHeaders([]*types.Header{header}); err != nil { - log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) - return - } - // Invoke the testing hook if needed - if f.importedHook != nil { - f.importedHook(header, nil) - } - }() -} - -// importBlocks spawns a new goroutine to run a block insertion into the chain. If the -// block's number is at the same height as the current import phase, it updates -// the phase states accordingly. 
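The function below hinges on a three-way verification outcome; schematically:

// Sketch of the decision importBlocks makes on header verification.
switch err := f.verifyHeader(block.Header()); err {
case nil: // valid: propagate to peers early, then import
case consensus.ErrFutureBlock: // not verifiable yet: keep, but don't propagate
default: // invalid: drop the misbehaving peer and abort the import
}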
-func (f *BlockFetcher) importBlocks(peer string, block *types.Block) { - hash := block.Hash() - - // Run the import on a new thread - log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash) - go func() { - defer func() { f.done <- hash }() - - // If the parent's unknown, abort insertion - parent := f.getBlock(block.ParentHash()) - if parent == nil { - log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash()) - return - } - // Quickly validate the header and propagate the block if it passes - switch err := f.verifyHeader(block.Header()); err { - case nil: - // All ok, quickly propagate to our peers - blockBroadcastOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, true) - - case consensus.ErrFutureBlock: - // Weird future block, don't fail, but neither propagate - - default: - // Something went very wrong, drop the peer - log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) - f.dropPeer(peer) - return - } - // Run the actual import and log any issues - if _, err := f.insertChain(types.Blocks{block}); err != nil { - log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) - return - } - // If import succeeded, broadcast the block - blockAnnounceOutTimer.UpdateSince(block.ReceivedAt) - go f.broadcastBlock(block, false) - - // Invoke the testing hook if needed - if f.importedHook != nil { - f.importedHook(nil, block) - } - }() -} - -// forgetHash removes all traces of a block announcement from the fetcher's -// internal state. -func (f *BlockFetcher) forgetHash(hash common.Hash) { - // Remove all pending announces and decrement DOS counters - if announceMap, ok := f.announced[hash]; ok { - for _, announce := range announceMap { - f.announces[announce.origin]-- - if f.announces[announce.origin] <= 0 { - delete(f.announces, announce.origin) - } - } - delete(f.announced, hash) - if f.announceChangeHook != nil { - f.announceChangeHook(hash, false) - } - } - // Remove any pending fetches and decrement the DOS counters - if announce := f.fetching[hash]; announce != nil { - f.announces[announce.origin]-- - if f.announces[announce.origin] <= 0 { - delete(f.announces, announce.origin) - } - delete(f.fetching, hash) - } - - // Remove any pending completion requests and decrement the DOS counters - for _, announce := range f.fetched[hash] { - f.announces[announce.origin]-- - if f.announces[announce.origin] <= 0 { - delete(f.announces, announce.origin) - } - } - delete(f.fetched, hash) - - // Remove any pending completions and decrement the DOS counters - if announce := f.completing[hash]; announce != nil { - f.announces[announce.origin]-- - if f.announces[announce.origin] <= 0 { - delete(f.announces, announce.origin) - } - delete(f.completing, hash) - } -} - -// forgetBlock removes all traces of a queued block from the fetcher's internal -// state. 
-func (f *BlockFetcher) forgetBlock(hash common.Hash) { - if insert := f.queued[hash]; insert != nil { - f.queues[insert.origin]-- - if f.queues[insert.origin] == 0 { - delete(f.queues, insert.origin) - } - delete(f.queued, hash) - } -} diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go deleted file mode 100644 index f29cf5880..000000000 --- a/les/fetcher/block_fetcher_test.go +++ /dev/null @@ -1,901 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package fetcher - -import ( - "errors" - "math/big" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" -) - -var ( - testdb = rawdb.NewMemoryDatabase() - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddress = crypto.PubkeyToAddress(testKey.PublicKey) - - gspec = core.Genesis{ - Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - genesis = gspec.MustCommit(testdb) - unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil)) -) - -// makeChain creates a chain of n blocks starting at and including parent. -// the returned hash chain is ordered head->parent. In addition, every 3rd block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly. 
-func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
-	blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
-		block.SetCoinbase(common.Address{seed})
-
-		// If the block number is a multiple of 3, send a bonus transaction to the miner
-		if parent == genesis && i%3 == 0 {
-			signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp())
-			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
-			if err != nil {
-				panic(err)
-			}
-			block.AddTx(tx)
-		}
-		// If the block number is a multiple of 5, add a bonus uncle to the block
-		if i > 0 && i%5 == 0 {
-			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))})
-		}
-	})
-	hashes := make([]common.Hash, n+1)
-	hashes[len(hashes)-1] = parent.Hash()
-	blockm := make(map[common.Hash]*types.Block, n+1)
-	blockm[parent.Hash()] = parent
-	for i, b := range blocks {
-		hashes[len(hashes)-i-2] = b.Hash()
-		blockm[b.Hash()] = b
-	}
-	return hashes, blockm
-}
-
-// fetcherTester is a test simulator for mocking out the local block chain.
-type fetcherTester struct {
-	fetcher *BlockFetcher
-
-	hashes  []common.Hash                 // Hash chain belonging to the tester
-	headers map[common.Hash]*types.Header // Headers belonging to the tester
-	blocks  map[common.Hash]*types.Block  // Blocks belonging to the tester
-	drops   map[string]bool               // Map of peers dropped by the fetcher
-
-	lock sync.RWMutex
-}
-
-// newTester creates a new fetcher test mocker.
-func newTester(light bool) *fetcherTester {
-	tester := &fetcherTester{
-		hashes:  []common.Hash{genesis.Hash()},
-		headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
-		blocks:  map[common.Hash]*types.Block{genesis.Hash(): genesis},
-		drops:   make(map[string]bool),
-	}
-	tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)
-	tester.fetcher.Start()
-
-	return tester
-}
-
-// getHeader retrieves a header from the tester's block chain.
-func (f *fetcherTester) getHeader(hash common.Hash) *types.Header {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-
-	return f.headers[hash]
-}
-
-// getBlock retrieves a block from the tester's block chain.
-func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-
-	return f.blocks[hash]
-}
-
-// verifyHeader is a nop placeholder for the block header verification.
-func (f *fetcherTester) verifyHeader(header *types.Header) error {
-	return nil
-}
-
-// broadcastBlock is a nop placeholder for the block broadcasting.
-func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) {
-}
-
-// chainHeight retrieves the current height (block number) of the chain.
-func (f *fetcherTester) chainHeight() uint64 {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-
-	if f.fetcher.light {
-		return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64()
-	}
-	return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
-}
-
-// insertHeaders injects new headers into the simulated chain.
-func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {
-	f.lock.Lock()
-	defer f.lock.Unlock()
-
-	for i, header := range headers {
-		// Make sure the parent is known
-		if _, ok := f.headers[header.ParentHash]; !ok {
-			return i, errors.New("unknown parent")
-		}
-		// Discard any new blocks if the same height already exists
-		if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() {
-			return i, nil
-		}
-		// Otherwise build our current chain
-		f.hashes = append(f.hashes, header.Hash())
-		f.headers[header.Hash()] = header
-	}
-	return 0, nil
-}
-
-// insertChain injects new blocks into the simulated chain.
-func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
-	f.lock.Lock()
-	defer f.lock.Unlock()
-
-	for i, block := range blocks {
-		// Make sure the parent is known
-		if _, ok := f.blocks[block.ParentHash()]; !ok {
-			return i, errors.New("unknown parent")
-		}
-		// Discard any new blocks if the same height already exists
-		if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() {
-			return i, nil
-		}
-		// Otherwise build our current chain
-		f.hashes = append(f.hashes, block.Hash())
-		f.blocks[block.Hash()] = block
-	}
-	return 0, nil
-}
-
-// dropPeer is an emulator for the peer removal, simply accumulating the various
-// peers dropped by the fetcher.
-func (f *fetcherTester) dropPeer(peer string) {
-	f.lock.Lock()
-	defer f.lock.Unlock()
-
-	f.drops[peer] = true
-}
-
-// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
-func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
-	closure := make(map[common.Hash]*types.Block)
-	for hash, block := range blocks {
-		closure[hash] = block
-	}
-	// Create a function that returns a header from the closure
-	return func(hash common.Hash) error {
-		// Gather the blocks to return
-		headers := make([]*types.Header, 0, 1)
-		if block, ok := closure[hash]; ok {
-			headers = append(headers, block.Header())
-		}
-		// Return on a new thread
-		go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift))
-
-		return nil
-	}
-}
-
-// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
-func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
-	closure := make(map[common.Hash]*types.Block)
-	for hash, block := range blocks {
-		closure[hash] = block
-	}
-	// Create a function that returns blocks from the closure
-	return func(hashes []common.Hash) error {
-		// Gather the block bodies to return
-		transactions := make([][]*types.Transaction, 0, len(hashes))
-		uncles := make([][]*types.Header, 0, len(hashes))
-
-		for _, hash := range hashes {
-			if block, ok := closure[hash]; ok {
-				transactions = append(transactions, block.Transactions())
-				uncles = append(uncles, block.Uncles())
-			}
-		}
-		// Return on a new thread
-		go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift))
-
-		return nil
-	}
-}
-
-// verifyFetchingEvent verifies that one single event arrives on a fetching channel.
-func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {
-	if arrive {
-		select {
-		case <-fetching:
-		case <-time.After(time.Second):
-			t.Fatalf("fetching timeout")
-		}
-	} else {
-		select {
-		case <-fetching:
-			t.Fatalf("fetching invoked")
-		case <-time.After(10 * time.Millisecond):
-		}
-	}
-}
-
-// verifyCompletingEvent verifies that one single event arrives on a completing channel.
-func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {
-	if arrive {
-		select {
-		case <-completing:
-		case <-time.After(time.Second):
-			t.Fatalf("completing timeout")
-		}
-	} else {
-		select {
-		case <-completing:
-			t.Fatalf("completing invoked")
-		case <-time.After(10 * time.Millisecond):
-		}
-	}
-}
-
-// verifyImportEvent verifies that one single event arrives on an import channel.
-func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
-	if arrive {
-		select {
-		case <-imported:
-		case <-time.After(time.Second):
-			t.Fatalf("import timeout")
-		}
-	} else {
-		select {
-		case <-imported:
-			t.Fatalf("import invoked")
-		case <-time.After(20 * time.Millisecond):
-		}
-	}
-}
-
-// verifyImportCount verifies that exactly count number of events arrive on an
-// import hook channel.
-func verifyImportCount(t *testing.T, imported chan interface{}, count int) {
-	for i := 0; i < count; i++ {
-		select {
-		case <-imported:
-		case <-time.After(time.Second):
-			t.Fatalf("block %d: import timeout", i+1)
-		}
-	}
-	verifyImportDone(t, imported)
-}
-
-// verifyImportDone verifies that no more events are arriving on an import channel.
-func verifyImportDone(t *testing.T, imported chan interface{}) {
-	select {
-	case <-imported:
-		t.Fatalf("extra block imported")
-	case <-time.After(50 * time.Millisecond):
-	}
-}
-
-// verifyChainHeight verifies the chain height is as expected.
-func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {
-	if fetcher.chainHeight() != height {
-		t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height)
-	}
-}
-
-// Tests that a fetcher accepts block/header announcements and initiates retrievals
-// for them, successfully importing into the local chain.
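All of the verify helpers above share one shape: assert that an event either arrives on a channel before a deadline or does not arrive at all. A condensed sketch of the pattern, with the helper name and timeouts chosen here for illustration:

	import (
		"testing"
		"time"
	)

	// expectEvent fails the test if the channel behaviour does not match want.
	func expectEvent(t *testing.T, ch chan interface{}, want bool, deadline time.Duration) {
		t.Helper()
		select {
		case <-ch:
			if !want {
				t.Fatalf("unexpected event")
			}
		case <-time.After(deadline):
			if want {
				t.Fatalf("event timeout")
			}
		}
	}

The announcement tests below wire this pattern into the fetcher's importedHook.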
-func TestFullSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, false) } -func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) } - -func testSequentialAnnouncements(t *testing.T, light bool) { - // Create a chain of blocks to import - targetBlocks := 4 * hashLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) - - tester := newTester(light) - headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) - - // Iteratively announce blocks until all are imported - imported := make(chan interface{}) - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { - if light { - if header == nil { - t.Fatalf("Fetcher try to import empty header") - } - imported <- header - } else { - if block == nil { - t.Fatalf("Fetcher try to import empty block") - } - imported <- block - } - } - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) - verifyChainHeight(t, tester, uint64(len(hashes)-1)) -} - -// Tests that if blocks are announced by multiple peers (or even the same buggy -// peer), they will only get downloaded at most once. -func TestFullConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, false) } -func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) } - -func testConcurrentAnnouncements(t *testing.T, light bool) { - // Create a chain of blocks to import - targetBlocks := 4 * hashLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) - - // Assemble a tester with a built in counter for the requests - tester := newTester(light) - firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack) - firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0) - secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack) - secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) - - counter := uint32(0) - firstHeaderWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) - return firstHeaderFetcher(hash) - } - secondHeaderWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) - return secondHeaderFetcher(hash) - } - // Iteratively announce blocks until all are imported - imported := make(chan interface{}) - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { - if light { - if header == nil { - t.Fatalf("Fetcher try to import empty header") - } - imported <- header - } else { - if block == nil { - t.Fatalf("Fetcher try to import empty block") - } - imported <- block - } - } - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher) - tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher) - tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), secondHeaderWrapper, secondBodyFetcher) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) - - // Make sure no blocks were retrieved twice - if int(counter) != targetBlocks { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 
targetBlocks) - } - verifyChainHeight(t, tester, uint64(len(hashes)-1)) -} - -// Tests that announcements arriving while a previous is being fetched still -// results in a valid import. -func TestFullOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, false) } -func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) } - -func testOverlappingAnnouncements(t *testing.T, light bool) { - // Create a chain of blocks to import - targetBlocks := 4 * hashLimit - hashes, blocks := makeChain(targetBlocks, 0, genesis) - - tester := newTester(light) - headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) - - // Iteratively announce blocks, but overlap them continuously - overlap := 16 - imported := make(chan interface{}, len(hashes)-1) - for i := 0; i < overlap; i++ { - imported <- nil - } - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { - if light { - if header == nil { - t.Fatalf("Fetcher try to import empty header") - } - imported <- header - } else { - if block == nil { - t.Fatalf("Fetcher try to import empty block") - } - imported <- block - } - } - - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - select { - case <-imported: - case <-time.After(time.Second): - t.Fatalf("block %d: import timeout", len(hashes)-i) - } - } - // Wait for all the imports to complete and check count - verifyImportCount(t, imported, overlap) - verifyChainHeight(t, tester, uint64(len(hashes)-1)) -} - -// Tests that announces already being retrieved will not be duplicated. -func TestFullPendingDeduplication(t *testing.T) { testPendingDeduplication(t, false) } -func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) } - -func testPendingDeduplication(t *testing.T, light bool) { - // Create a hash and corresponding block - hashes, blocks := makeChain(1, 0, genesis) - - // Assemble a tester with a built in counter and delayed fetcher - tester := newTester(light) - headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0) - - delay := 50 * time.Millisecond - counter := uint32(0) - headerWrapper := func(hash common.Hash) error { - atomic.AddUint32(&counter, 1) - - // Simulate a long running fetch - go func() { - time.Sleep(delay) - headerFetcher(hash) - }() - return nil - } - checkNonExist := func() bool { - return tester.getBlock(hashes[0]) == nil - } - if light { - checkNonExist = func() bool { - return tester.getHeader(hashes[0]) == nil - } - } - // Announce the same block many times until it's fetched (wait for any pending ops) - for checkNonExist() { - tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher) - time.Sleep(time.Millisecond) - } - time.Sleep(delay) - - // Check that all blocks were imported and none fetched twice - if int(counter) != 1 { - t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) - } - verifyChainHeight(t, tester, 1) -} - -// Tests that announcements retrieved in a random order are cached and eventually -// imported when all the gaps are filled in. 
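The concurrency and deduplication tests above all rely on the same instrumentation trick: the requester callback handed to Notify is wrapped so that every simulated network round-trip is counted, and the test then asserts the final count equals the number of unique blocks. A fragment of that wrapper, reusing the identifiers from the tests above:

	counter := uint32(0)
	countingFetcher := func(hash common.Hash) error {
		atomic.AddUint32(&counter, 1) // one increment per header request on the wire
		return headerFetcher(hash)
	}

The random-arrival tests below shift the focus from how often blocks are fetched to when they become importable.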
-func TestFullRandomArrivalImport(t *testing.T)  { testRandomArrivalImport(t, false) }
-func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) }
-
-func testRandomArrivalImport(t *testing.T, light bool) {
-	// Create a chain of blocks to import, and choose one to delay
-	targetBlocks := maxQueueDist
-	hashes, blocks := makeChain(targetBlocks, 0, genesis)
-	skip := targetBlocks / 2
-
-	tester := newTester(light)
-	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
-	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
-
-	// Iteratively announce blocks, skipping one entry
-	imported := make(chan interface{}, len(hashes)-1)
-	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
-		if light {
-			if header == nil {
-				t.Fatalf("Fetcher try to import empty header")
-			}
-			imported <- header
-		} else {
-			if block == nil {
-				t.Fatalf("Fetcher try to import empty block")
-			}
-			imported <- block
-		}
-	}
-	for i := len(hashes) - 1; i >= 0; i-- {
-		if i != skip {
-			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
-			time.Sleep(time.Millisecond)
-		}
-	}
-	// Finally announce the skipped entry and check full import
-	tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
-	verifyImportCount(t, imported, len(hashes)-1)
-	verifyChainHeight(t, tester, uint64(len(hashes)-1))
-}
-
-// Tests that direct block enqueues (due to block propagation vs. hash announce)
-// are correctly scheduled, filling in import queue gaps.
-func TestQueueGapFill(t *testing.T) {
-	// Create a chain of blocks to import, and choose one to not announce at all
-	targetBlocks := maxQueueDist
-	hashes, blocks := makeChain(targetBlocks, 0, genesis)
-	skip := targetBlocks / 2
-
-	tester := newTester(false)
-	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
-	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
-
-	// Iteratively announce blocks, skipping one entry
-	imported := make(chan interface{}, len(hashes)-1)
-	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
-
-	for i := len(hashes) - 1; i >= 0; i-- {
-		if i != skip {
-			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
-			time.Sleep(time.Millisecond)
-		}
-	}
-	// Fill the missing block directly as if propagated
-	tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
-	verifyImportCount(t, imported, len(hashes)-1)
-	verifyChainHeight(t, tester, uint64(len(hashes)-1))
-}
-
-// Tests that blocks arriving from various sources (multiple propagations, hash
-// announces, etc) do not get scheduled for import multiple times.
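The import-deduplication test below drives both entry points into the fetcher, shown here side by side with the arguments these tests use:

	// Hash announcement: triggers a header and, if needed, a body retrieval.
	tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)

	// Direct propagation: the complete block is handed over, nothing to retrieve.
	tester.fetcher.Enqueue("valid", blocks[hashes[0]])

However many times a block arrives over either path, it must be scheduled for import exactly once.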
-func TestImportDeduplication(t *testing.T) {
-	// Create two blocks to import (one for duplication, the other for stalling)
-	hashes, blocks := makeChain(2, 0, genesis)
-
-	// Create the tester and wrap the importer with a counter
-	tester := newTester(false)
-	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
-	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
-
-	counter := uint32(0)
-	tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
-		atomic.AddUint32(&counter, uint32(len(blocks)))
-		return tester.insertChain(blocks)
-	}
-	// Instrument the fetching and imported events
-	fetching := make(chan []common.Hash)
-	imported := make(chan interface{}, len(hashes)-1)
-	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
-	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
-
-	// Announce the duplicating block, wait for retrieval, and also propagate directly
-	tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
-	<-fetching
-
-	tester.fetcher.Enqueue("valid", blocks[hashes[0]])
-	tester.fetcher.Enqueue("valid", blocks[hashes[0]])
-	tester.fetcher.Enqueue("valid", blocks[hashes[0]])
-
-	// Fill the missing block directly as if propagated, and check import uniqueness
-	tester.fetcher.Enqueue("valid", blocks[hashes[1]])
-	verifyImportCount(t, imported, 2)
-
-	if counter != 2 {
-		t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2)
-	}
-}
-
-// Tests that blocks with numbers much lower or higher than our current head get
-// discarded to prevent wasting resources on useless blocks from faulty peers.
-func TestDistantPropagationDiscarding(t *testing.T) {
-	// Create a long chain to import and define the discard boundaries
-	hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
-	head := hashes[len(hashes)/2]
-
-	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
-
-	// Create a tester and simulate a head block being the middle of the above chain
-	tester := newTester(false)
-
-	tester.lock.Lock()
-	tester.hashes = []common.Hash{head}
-	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
-	tester.lock.Unlock()
-
-	// Ensure that a block with a lower number than the threshold is discarded
-	tester.fetcher.Enqueue("lower", blocks[hashes[low]])
-	time.Sleep(10 * time.Millisecond)
-	if !tester.fetcher.queue.Empty() {
-		t.Fatalf("fetcher queued stale block")
-	}
-	// Ensure that a block with a higher number than the threshold is discarded
-	tester.fetcher.Enqueue("higher", blocks[hashes[high]])
-	time.Sleep(10 * time.Millisecond)
-	if !tester.fetcher.queue.Empty() {
-		t.Fatalf("fetcher queued future block")
-	}
-}
-
-// Tests that announcements with numbers much lower or higher than our current
-// head get discarded to prevent wasting resources on useless blocks from faulty
-// peers.
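Both discard tests pin the same acceptance window around the local head: blocks more than maxUncleDist behind or more than maxQueueDist ahead are dropped on arrival. A sketch of the predicate implied by the low/high boundaries computed above (the fetcher's actual implementation may phrase the check differently):

	// tooDistant reports whether a block number falls outside the window
	// [head-maxUncleDist, head+maxQueueDist] that the fetcher will accept.
	func tooDistant(head, number uint64) bool {
		return number+maxUncleDist < head || number > head+maxQueueDist
	}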
-func TestFullDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, false) } -func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) } - -func testDistantAnnouncementDiscarding(t *testing.T, light bool) { - // Create a long chain to import and define the discard boundaries - hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) - head := hashes[len(hashes)/2] - - low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 - - // Create a tester and simulate a head block being the middle of the above chain - tester := newTester(light) - - tester.lock.Lock() - tester.hashes = []common.Hash{head} - tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()} - tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} - tester.lock.Unlock() - - headerFetcher := tester.makeHeaderFetcher("lower", blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher("lower", blocks, 0) - - fetching := make(chan struct{}, 2) - tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} } - - // Ensure that a block with a lower number than the threshold is discarded - tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - select { - case <-time.After(50 * time.Millisecond): - case <-fetching: - t.Fatalf("fetcher requested stale header") - } - // Ensure that a block with a higher number than the threshold is discarded - tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - select { - case <-time.After(50 * time.Millisecond): - case <-fetching: - t.Fatalf("fetcher requested future header") - } -} - -// Tests that peers announcing blocks with invalid numbers (i.e. not matching -// the headers provided afterwards) get dropped as malicious. 
-func TestFullInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, false) } -func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) } - -func testInvalidNumberAnnouncement(t *testing.T, light bool) { - // Create a single block to import and check numbers against - hashes, blocks := makeChain(1, 0, genesis) - - tester := newTester(light) - badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack) - badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0) - - imported := make(chan interface{}) - announced := make(chan interface{}) - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { - if light { - if header == nil { - t.Fatalf("Fetcher try to import empty header") - } - imported <- header - } else { - if block == nil { - t.Fatalf("Fetcher try to import empty block") - } - imported <- block - } - } - // Announce a block with a bad number, check for immediate drop - tester.fetcher.announceChangeHook = func(hash common.Hash, b bool) { - announced <- nil - } - tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher) - verifyAnnounce := func() { - for i := 0; i < 2; i++ { - select { - case <-announced: - continue - case <-time.After(1 * time.Second): - t.Fatal("announce timeout") - return - } - } - } - verifyAnnounce() - verifyImportEvent(t, imported, false) - tester.lock.RLock() - dropped := tester.drops["bad"] - tester.lock.RUnlock() - - if !dropped { - t.Fatalf("peer with invalid numbered announcement not dropped") - } - goodHeaderFetcher := tester.makeHeaderFetcher("good", blocks, -gatherSlack) - goodBodyFetcher := tester.makeBodyFetcher("good", blocks, 0) - // Make sure a good announcement passes without a drop - tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), goodHeaderFetcher, goodBodyFetcher) - verifyAnnounce() - verifyImportEvent(t, imported, true) - - tester.lock.RLock() - dropped = tester.drops["good"] - tester.lock.RUnlock() - - if dropped { - t.Fatalf("peer with valid numbered announcement dropped") - } - verifyImportDone(t, imported) -} - -// Tests that if a block is empty (i.e. header only), no body request should be -// made, and instead the header should be assembled into a whole block in itself. 
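The short-circuit test below asserts that decision at the completing hook: a body is requested only when the header commits to actual content. The condition it checks, lifted into a helper whose name is illustrative:

	// needsBody reports whether a fetched header requires a follow-up body
	// retrieval, i.e. whether the block contains transactions or uncles.
	func needsBody(block *types.Block) bool {
		return len(block.Transactions()) > 0 || len(block.Uncles()) > 0
	}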
-func TestEmptyBlockShortCircuit(t *testing.T) { - // Create a chain of blocks to import - hashes, blocks := makeChain(32, 0, genesis) - - tester := newTester(false) - headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) - bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) - - // Add a monitoring hook for all internal events - fetching := make(chan []common.Hash) - tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } - - completing := make(chan []common.Hash) - tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes } - - imported := make(chan interface{}) - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { - if block == nil { - t.Fatalf("Fetcher try to import empty block") - } - imported <- block - } - // Iteratively announce blocks until all are imported - for i := len(hashes) - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) - - // All announces should fetch the header - verifyFetchingEvent(t, fetching, true) - - // Only blocks with data contents should request bodies - verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0) - - // Irrelevant of the construct, import should succeed - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} - -// Tests that a peer is unable to use unbounded memory with sending infinite -// block announcements to a node, but that even in the face of such an attack, -// the fetcher remains operational. -func TestHashMemoryExhaustionAttack(t *testing.T) { - // Create a tester with instrumented import hooks - tester := newTester(false) - - imported, announces := make(chan interface{}), int32(0) - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } - tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { - if added { - atomic.AddInt32(&announces, 1) - } else { - atomic.AddInt32(&announces, -1) - } - } - // Create a valid chain and an infinite junk chain - targetBlocks := hashLimit + 2*maxQueueDist - hashes, blocks := makeChain(targetBlocks, 0, genesis) - validHeaderFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) - validBodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) - - attack, _ := makeChain(targetBlocks, 0, unknownBlock) - attackerHeaderFetcher := tester.makeHeaderFetcher("attacker", nil, -gatherSlack) - attackerBodyFetcher := tester.makeBodyFetcher("attacker", nil, 0) - - // Feed the tester a huge hashset from the attacker, and a limited from the valid peer - for i := 0; i < len(attack); i++ { - if i < maxQueueDist { - tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher) - } - tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher) - } - if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { - t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) - } - // Wait for fetches to complete - verifyImportCount(t, imported, maxQueueDist) - - // Feed the remaining valid hashes to ensure DOS protection state remains clean - for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- { - tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), 
validHeaderFetcher, validBodyFetcher) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} - -// Tests that blocks sent to the fetcher (either through propagation or via hash -// announces and retrievals) don't pile up indefinitely, exhausting available -// system memory. -func TestBlockMemoryExhaustionAttack(t *testing.T) { - // Create a tester with instrumented import hooks - tester := newTester(false) - - imported, enqueued := make(chan interface{}), int32(0) - tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } - tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { - if added { - atomic.AddInt32(&enqueued, 1) - } else { - atomic.AddInt32(&enqueued, -1) - } - } - // Create a valid chain and a batch of dangling (but in range) blocks - targetBlocks := hashLimit + 2*maxQueueDist - hashes, blocks := makeChain(targetBlocks, 0, genesis) - attack := make(map[common.Hash]*types.Block) - for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ { - hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock) - for _, hash := range hashes[:maxQueueDist-2] { - attack[hash] = blocks[hash] - } - } - // Try to feed all the attacker blocks make sure only a limited batch is accepted - for _, block := range attack { - tester.fetcher.Enqueue("attacker", block) - } - time.Sleep(200 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { - t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) - } - // Queue up a batch of valid blocks, and check that a new peer is allowed to do so - for i := 0; i < maxQueueDist-1; i++ { - tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) - } - time.Sleep(100 * time.Millisecond) - if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { - t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) - } - // Insert the missing piece (and sanity check the import) - tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]]) - verifyImportCount(t, imported, maxQueueDist) - - // Insert the remaining blocks in chunks to ensure clean DOS protection - for i := maxQueueDist; i < len(hashes)-1; i++ { - tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]]) - verifyImportEvent(t, imported, true) - } - verifyImportDone(t, imported) -} diff --git a/les/fetcher_test.go b/les/fetcher_test.go deleted file mode 100644 index 7c523f797..000000000 --- a/les/fetcher_test.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package les - -import ( - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" -) - -// verifyImportEvent verifies that one single event arrive on an import channel. -func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { - if arrive { - select { - case <-imported: - case <-time.After(time.Second): - t.Fatalf("import timeout") - } - } else { - select { - case <-imported: - t.Fatalf("import invoked") - case <-time.After(20 * time.Millisecond): - } - } -} - -// verifyImportDone verifies that no more events are arriving on an import channel. -func verifyImportDone(t *testing.T, imported chan interface{}) { - select { - case <-imported: - t.Fatalf("extra block imported") - case <-time.After(50 * time.Millisecond): - } -} - -// verifyChainHeight verifies the chain height is as expected. -func verifyChainHeight(t *testing.T, fetcher *lightFetcher, height uint64) { - local := fetcher.chain.CurrentHeader().Number.Uint64() - if local != height { - t.Fatalf("chain height mismatch, got %d, want %d", local, height) - } -} - -func TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements(t, 2) } -func TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) } - -func testSequentialAnnouncements(t *testing.T, protocol int) { - netconfig := testnetConfig{ - blocks: 4, - protocol: protocol, - nopruning: true, - } - s, c, teardown := newClientServerEnv(t, netconfig) - defer teardown() - - // Create connected peer pair, the initial signal from LES server - // is discarded to prevent syncing. - p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler, true) - if err != nil { - t.Fatalf("Failed to create peer pair %v", err) - } - importCh := make(chan interface{}) - c.handler.fetcher.newHeadHook = func(header *types.Header) { - importCh <- header - } - for i := uint64(1); i <= s.backend.Blockchain().CurrentHeader().Number.Uint64(); i++ { - header := s.backend.Blockchain().GetHeaderByNumber(i) - hash, number := header.Hash(), header.Number.Uint64() - td := rawdb.ReadTd(s.db, hash, number) - - announce := announceData{hash, number, td, 0, nil} - if p1.cpeer.announceType == announceTypeSigned { - announce.sign(s.handler.server.privateKey) - } - p1.cpeer.sendAnnounce(announce) - verifyImportEvent(t, importCh, true) - } - verifyImportDone(t, importCh) - verifyChainHeight(t, c.handler.fetcher, 4) -} - -func TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) } -func TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) } - -func testGappedAnnouncements(t *testing.T, protocol int) { - netconfig := testnetConfig{ - blocks: 4, - protocol: protocol, - nopruning: true, - } - s, c, teardown := newClientServerEnv(t, netconfig) - defer teardown() - - // Create connected peer pair, the initial signal from LES server - // is discarded to prevent syncing. - peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler, true) - if err != nil { - t.Fatalf("Failed to create peer pair %v", err) - } - done := make(chan *types.Header, 1) - c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header } - - // Prepare announcement by latest header. 
- latest := s.backend.Blockchain().CurrentHeader() - hash, number := latest.Hash(), latest.Number.Uint64() - td := rawdb.ReadTd(s.db, hash, number) - - // Sign the announcement if necessary. - announce := announceData{hash, number, td, 0, nil} - if peer.cpeer.announceType == announceTypeSigned { - announce.sign(s.handler.server.privateKey) - } - peer.cpeer.sendAnnounce(announce) - - <-done // Wait syncing - verifyChainHeight(t, c.handler.fetcher, 4) - - // Send a reorged announcement - blocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3), - ethash.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) { - gen.OffsetTime(-9) // higher block difficulty - }) - s.backend.Blockchain().InsertChain(blocks) - - <-done // Wait syncing - verifyChainHeight(t, c.handler.fetcher, 5) -} - -func TestInvalidAnnouncesLES2(t *testing.T) { testInvalidAnnounces(t, lpv2) } -func TestInvalidAnnouncesLES3(t *testing.T) { testInvalidAnnounces(t, lpv3) } -func TestInvalidAnnouncesLES4(t *testing.T) { testInvalidAnnounces(t, lpv4) } - -func testInvalidAnnounces(t *testing.T, protocol int) { - netconfig := testnetConfig{ - blocks: 4, - protocol: protocol, - nopruning: true, - } - s, c, teardown := newClientServerEnv(t, netconfig) - defer teardown() - - // Create connected peer pair, the initial signal from LES server - // is discarded to prevent syncing. - peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler, true) - if err != nil { - t.Fatalf("Failed to create peer pair %v", err) - } - done := make(chan *types.Header, 1) - c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header } - - // Prepare announcement by latest header. - headerOne := s.backend.Blockchain().GetHeaderByNumber(1) - hash, number := headerOne.Hash(), headerOne.Number.Uint64() - td := big.NewInt(params.GenesisDifficulty.Int64() + 200) // bad td - - // Sign the announcement if necessary. 
- announce := announceData{hash, number, td, 0, nil} - if peer.cpeer.announceType == announceTypeSigned { - announce.sign(s.handler.server.privateKey) - } - peer.cpeer.sendAnnounce(announce) - <-done // Wait syncing - - // Ensure the bad peer is evicted - if c.handler.backend.peers.len() != 0 { - t.Fatalf("Failed to evict invalid peer") - } -} diff --git a/les/handler_test.go b/les/handler_test.go index 6d0b17144..26a083f47 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -31,7 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/les/downloader" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" @@ -295,8 +295,8 @@ func testGetCode(t *testing.T, protocol int) { for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ { header := bc.GetHeaderByNumber(i) req := &CodeReq{ - BHash: header.Hash(), - AccKey: crypto.Keccak256(testContractAddr[:]), + BHash: header.Hash(), + AccountAddress: testContractAddr[:], } codereqs = append(codereqs, req) if i >= testContractDeployed { @@ -331,8 +331,8 @@ func testGetStaleCode(t *testing.T, protocol int) { check := func(number uint64, expected [][]byte) { req := &CodeReq{ - BHash: bc.GetHeaderByNumber(number).Hash(), - AccKey: crypto.Keccak256(testContractAddr[:]), + BHash: bc.GetHeaderByNumber(number).Hash(), + AccountAddress: testContractAddr[:], } sendRequest(rawPeer.app, GetCodeMsg, 42, []*CodeReq{req}) if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, expected); err != nil { @@ -406,7 +406,7 @@ func testGetProofs(t *testing.T, protocol int) { accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}} for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ { header := bc.GetHeaderByNumber(i) - trie, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db)) + trie, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB()) for _, acc := range accounts { req := ProofReq{ @@ -414,7 +414,7 @@ func testGetProofs(t *testing.T, protocol int) { Key: crypto.Keccak256(acc[:]), } proofreqs = append(proofreqs, req) - trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2) + trie.Prove(crypto.Keccak256(acc[:]), proofsV2) } } // Send the proof request and verify the response @@ -457,8 +457,8 @@ func testGetStaleProof(t *testing.T, protocol int) { var expected []rlp.RawValue if wantOK { proofsV2 := light.NewNodeSet() - t, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db)) - t.Prove(account, 0, proofsV2) + t, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB()) + t.Prove(account, proofsV2) expected = proofsV2.NodeList() } if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil { @@ -513,8 +513,8 @@ func testGetCHTProofs(t *testing.T, protocol int) { AuxData: [][]byte{rlp}, } root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash()) - trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.ChtTablePrefix)))) - trie.Prove(key, 0, &proofsV2.Proofs) + trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.ChtTablePrefix)), trie.HashDefaults)) + trie.Prove(key, &proofsV2.Proofs) // Assemble the requests for the different protocols requestsV2 := 
[]HelperTrieReq{{ Type: htCanonical, @@ -578,8 +578,8 @@ func testGetBloombitsProofs(t *testing.T, protocol int) { var proofs HelperTrieResps root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash()) - trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.BloomTrieTablePrefix)))) - trie.Prove(key, 0, &proofs.Proofs) + trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.BloomTrieTablePrefix)), trie.HashDefaults)) + trie.Prove(key, &proofs.Proofs) // Send the proof request and verify the response sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requests) @@ -611,6 +611,8 @@ func testTransactionStatus(t *testing.T, protocol int) { var reqID uint64 test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) { + t.Helper() + reqID++ if send { sendRequest(rawPeer.app, SendTxV2Msg, reqID, types.Transactions{tx}) @@ -618,14 +620,14 @@ func testTransactionStatus(t *testing.T, protocol int) { sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()}) } if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil { - t.Errorf("transaction status mismatch") + t.Errorf("transaction status mismatch: %v", err) } } signer := types.HomesteadSigner{} // test error status by sending an underpriced transaction tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey) - test(tx0, true, light.TxStatus{Status: txpool.TxStatusUnknown, Error: txpool.ErrUnderpriced.Error()}) + test(tx0, true, light.TxStatus{Status: txpool.TxStatusUnknown, Error: "transaction underpriced: tip needed 1, tip permitted 0"}) tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey) test(tx1, false, light.TxStatus{Status: txpool.TxStatusUnknown}) // query before sending, should be unknown diff --git a/les/odr_requests.go b/les/odr_requests.go index d8b094b72..2b23e0540 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -183,9 +183,9 @@ func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error { } type ProofReq struct { - BHash common.Hash - AccKey, Key []byte - FromLevel uint + BHash common.Hash + AccountAddress, Key []byte + FromLevel uint } // ODR request type for state/storage trie entries, see LesOdrRequest interface @@ -206,9 +206,9 @@ func (r *TrieRequest) CanSend(peer *serverPeer) bool { func (r *TrieRequest) Request(reqID uint64, peer *serverPeer) error { peer.Log().Debug("Requesting trie proof", "root", r.Id.Root, "key", r.Key) req := ProofReq{ - BHash: r.Id.BlockHash, - AccKey: r.Id.AccKey, - Key: r.Key, + BHash: r.Id.BlockHash, + AccountAddress: r.Id.AccountAddress, + Key: r.Key, } return peer.requestProofs(reqID, []ProofReq{req}) } @@ -238,8 +238,8 @@ func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error { } type CodeReq struct { - BHash common.Hash - AccKey []byte + BHash common.Hash + AccountAddress []byte } // CodeRequest is the ODR request type for node data (used for retrieving contract code), see LesOdrRequest interface @@ -260,8 +260,8 @@ func (r *CodeRequest) CanSend(peer *serverPeer) bool { func (r *CodeRequest) Request(reqID uint64, peer *serverPeer) error { peer.Log().Debug("Requesting code data", "hash", r.Hash) req := CodeReq{ - BHash: r.Id.BlockHash, - AccKey: r.Id.AccKey, + BHash: r.Id.BlockHash, + AccountAddress: 
r.Id.AccountAddress, } return peer.requestCode(reqID, []CodeReq{req}) } diff --git a/les/odr_test.go b/les/odr_test.go index 2db9acd43..69824a92d 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -16,6 +16,10 @@ package les +// Note: these tests are disabled now because they cannot work with the old sync +// mechanism removed but will be useful again once the PoS ultralight mode is implemented + +/* import ( "bytes" "context" @@ -100,7 +104,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon for _, addr := range acc { if bc != nil { header := bc.GetHeaderByHash(bhash) - st, err = state.New(header.Root, state.NewDatabase(db), nil) + st, err = state.New(header.Root, bc.StateCache(), nil) } else { header := lc.GetHeaderByHash(bhash) st = light.NewState(ctx, header, lc.Odr()) @@ -451,3 +455,4 @@ func randomHash() common.Hash { } return hash } +*/ diff --git a/les/peer.go b/les/peer.go index 909d912e9..48381689e 100644 --- a/les/peer.go +++ b/les/peer.go @@ -998,9 +998,6 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge recentTx -= blockSafetyMargin - txIndexRecentOffset } } - if server.config.UltraLightOnlyAnnounce { - recentTx = txIndexDisabled - } if recentTx != txIndexUnlimited && p.version < lpv4 { return errors.New("Cannot serve old clients without a complete tx index") } @@ -1009,20 +1006,18 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge p.headInfo = blockInfo{Hash: head, Number: headNum, Td: td} return p.handshake(td, head, headNum, genesis, forkID, forkFilter, func(lists *keyValueList) { // Add some information which services server can offer. - if !server.config.UltraLightOnlyAnnounce { - *lists = (*lists).add("serveHeaders", nil) - *lists = (*lists).add("serveChainSince", uint64(0)) - *lists = (*lists).add("serveStateSince", uint64(0)) + *lists = (*lists).add("serveHeaders", nil) + *lists = (*lists).add("serveChainSince", uint64(0)) + *lists = (*lists).add("serveStateSince", uint64(0)) - // If local ethereum node is running in archive mode, advertise ourselves we have - // all version state data. Otherwise only recent state is available. - stateRecent := uint64(core.TriesInMemory - blockSafetyMargin) - if server.archiveMode { - stateRecent = 0 - } - *lists = (*lists).add("serveRecentState", stateRecent) - *lists = (*lists).add("txRelay", nil) + // If local ethereum node is running in archive mode, advertise ourselves we have + // all version state data. Otherwise only recent state is available. 
+ stateRecent := uint64(core.TriesInMemory - blockSafetyMargin) + if server.archiveMode { + stateRecent = 0 } + *lists = (*lists).add("serveRecentState", stateRecent) + *lists = (*lists).add("txRelay", nil) if p.version >= lpv4 { *lists = (*lists).add("recentTxLookup", recentTx) } diff --git a/les/peer_test.go b/les/peer_test.go index 021d5cb59..0881dd292 100644 --- a/les/peer_test.go +++ b/les/peer_test.go @@ -124,8 +124,8 @@ func TestHandshake(t *testing.T) { genesis = common.HexToHash("cafebabe") chain1, chain2 = &fakeChain{}, &fakeChain{} - forkID1 = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64(), chain1.CurrentHeader().Time) - forkID2 = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64(), chain2.CurrentHeader().Time) + forkID1 = forkid.NewID(chain1.Config(), chain1.Genesis(), chain1.CurrentHeader().Number.Uint64(), chain1.CurrentHeader().Time) + forkID2 = forkid.NewID(chain2.Config(), chain2.Genesis(), chain2.CurrentHeader().Number.Uint64(), chain2.CurrentHeader().Time) filter1, filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2) ) diff --git a/les/protocol.go b/les/protocol.go index dced7039e..cfebdbfb9 100644 --- a/les/protocol.go +++ b/les/protocol.go @@ -40,9 +40,8 @@ const ( // Supported versions of the les protocol (first is primary) var ( - ClientProtocolVersions = []uint{lpv2, lpv3, lpv4} - ServerProtocolVersions = []uint{lpv2, lpv3, lpv4} - AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list + ClientProtocolVersions = []uint{lpv2, lpv3, lpv4} + ServerProtocolVersions = []uint{lpv2, lpv3, lpv4} ) // ProtocolLengths is the number of implemented message corresponding to different protocol versions. diff --git a/les/pruner.go b/les/pruner.go deleted file mode 100644 index d115a61a7..000000000 --- a/les/pruner.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "sync" - "time" - - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" -) - -// pruner is responsible for pruning historical light chain data. -type pruner struct { - db ethdb.Database - indexers []*core.ChainIndexer - closeCh chan struct{} - wg sync.WaitGroup -} - -// newPruner returns a light chain pruner instance. -func newPruner(db ethdb.Database, indexers ...*core.ChainIndexer) *pruner { - pruner := &pruner{ - db: db, - indexers: indexers, - closeCh: make(chan struct{}), - } - pruner.wg.Add(1) - go pruner.loop() - return pruner -} - -// close notifies all background goroutines belonging to pruner to exit. 
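newPruner is fire-and-forget: constructing it starts the pruning loop, which repeatedly prunes everything below min(sections)-2 across the supplied indexers, always keeping the two most recent sections; close, defined next, is the only other lifecycle call. Typical wiring, mirroring the deleted test further down (database and indexer names are illustrative):

	p := newPruner(db, chtIndexer, bloomTrieIndexer)
	defer p.close() // stops the loop and waits for the goroutine to exit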
-func (p *pruner) close() {
-	close(p.closeCh)
-	p.wg.Wait()
-}
-
-// loop periodically queries the status of chain indexers and prunes useless
-// historical chain data. Notably, whenever Geth restarts, it will iterate
-// all historical sections even if they don't exist at all (below the checkpoint),
-// so that the light client can prune cached chain data that was ODRed after
-// that section was pruned.
-func (p *pruner) loop() {
-	defer p.wg.Done()
-
-	// cleanTicker is the ticker used to trigger a history clean 2 times a day.
-	var cleanTicker = time.NewTicker(12 * time.Hour)
-	defer cleanTicker.Stop()
-
-	// pruning finds the sections that have been processed by all indexers
-	// and deletes all historical chain data.
-	// Note, if some indexers don't support pruning (e.g. eth.BloomIndexer),
-	// pruning operations can be silently ignored.
-	pruning := func() {
-		min := uint64(math.MaxUint64)
-		for _, indexer := range p.indexers {
-			sections, _, _ := indexer.Sections()
-			if sections < min {
-				min = sections
-			}
-		}
-		// Always keep the latest section data in database.
-		if min < 2 || len(p.indexers) == 0 {
-			return
-		}
-		for _, indexer := range p.indexers {
-			if err := indexer.Prune(min - 2); err != nil {
-				log.Debug("Failed to prune historical data", "err", err)
-				return
-			}
-		}
-		p.db.Compact(nil, nil) // Compact entire database, ensure all removed data are deleted.
-	}
-	for {
-		pruning()
-		select {
-		case <-cleanTicker.C:
-		case <-p.closeCh:
-			return
-		}
-	}
-}
diff --git a/les/pruner_test.go b/les/pruner_test.go
deleted file mode 100644
index 167241493..000000000
--- a/les/pruner_test.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package les
-
-import (
-	"bytes"
-	"context"
-	"encoding/binary"
-	"testing"
-	"time"
-
-	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/light"
-)
-
-func TestLightPruner(t *testing.T) {
-	var (
-		waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
-			for {
-				cs, _, _ := cIndexer.Sections()
-				bts, _, _ := btIndexer.Sections()
-				if cs >= 3 && bts >= 3 {
-					break
-				}
-				time.Sleep(10 * time.Millisecond)
-			}
-		}
-		config    = light.TestClientIndexerConfig
-		netconfig = testnetConfig{
-			blocks:   int(3*config.ChtSize + config.ChtConfirms),
-			protocol: 3,
-			indexFn:  waitIndexers,
-			connect:  true,
-		}
-	)
-	server, client, tearDown := newClientServerEnv(t, netconfig)
-	defer tearDown()
-
-	// checkDB iterates the chain with the given prefix, resolves the block number
-	// with the given callback and checks whether the entry should exist or not.
- checkDB := func(from, to uint64, prefix []byte, resolve func(key, value []byte) *uint64, exist bool) bool { - it := client.db.NewIterator(prefix, nil) - defer it.Release() - - var next = from - for it.Next() { - number := resolve(it.Key(), it.Value()) - if number == nil || *number < from { - continue - } else if *number > to { - return true - } - if exist { - if *number != next { - return false - } - next++ - } else { - return false - } - } - return true - } - // checkPruned checks and ensures the stale chain data has been pruned. - checkPruned := func(from, to uint64) { - // Iterate canonical hash - if !checkDB(from, to, []byte("h"), func(key, value []byte) *uint64 { - if len(key) == 1+8+1 && bytes.Equal(key[9:10], []byte("n")) { - n := binary.BigEndian.Uint64(key[1:9]) - return &n - } - return nil - }, false) { - t.Fatalf("canonical hash mappings are not properly pruned") - } - // Iterate header - if !checkDB(from, to, []byte("h"), func(key, value []byte) *uint64 { - if len(key) == 1+8+32 { - n := binary.BigEndian.Uint64(key[1:9]) - return &n - } - return nil - }, false) { - t.Fatalf("headers are not properly pruned") - } - // Iterate body - if !checkDB(from, to, []byte("b"), func(key, value []byte) *uint64 { - if len(key) == 1+8+32 { - n := binary.BigEndian.Uint64(key[1:9]) - return &n - } - return nil - }, false) { - t.Fatalf("block bodies are not properly pruned") - } - // Iterate receipts - if !checkDB(from, to, []byte("r"), func(key, value []byte) *uint64 { - if len(key) == 1+8+32 { - n := binary.BigEndian.Uint64(key[1:9]) - return &n - } - return nil - }, false) { - t.Fatalf("receipts are not properly pruned") - } - // Iterate td - if !checkDB(from, to, []byte("h"), func(key, value []byte) *uint64 { - if len(key) == 1+8+32+1 && bytes.Equal(key[41:42], []byte("t")) { - n := binary.BigEndian.Uint64(key[1:9]) - return &n - } - return nil - }, false) { - t.Fatalf("tds are not properly pruned") - } - } - // Start light pruner. - time.Sleep(1500 * time.Millisecond) // Ensure light client has finished the syncing and indexing - newPruner(client.db, client.chtIndexer, client.bloomTrieIndexer) - - time.Sleep(1500 * time.Millisecond) // Ensure pruner have enough time to prune data. - checkPruned(1, config.ChtSize-1) - - // Ensure all APIs still work after pruning. 
- var cases = []struct { - from, to uint64 - methodName string - method func(uint64) bool - }{ - { - 1, 10, "GetHeaderByNumber", - func(n uint64) bool { - _, err := light.GetHeaderByNumber(context.Background(), client.handler.backend.odr, n) - return err == nil - }, - }, - { - 11, 20, "GetCanonicalHash", - func(n uint64) bool { - _, err := light.GetCanonicalHash(context.Background(), client.handler.backend.odr, n) - return err == nil - }, - }, - { - 21, 30, "GetTd", - func(n uint64) bool { - _, err := light.GetTd(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) - return err == nil - }, - }, - { - 31, 40, "GetBodyRLP", - func(n uint64) bool { - _, err := light.GetBodyRLP(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) - return err == nil - }, - }, - { - 41, 50, "GetBlock", - func(n uint64) bool { - _, err := light.GetBlock(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) - return err == nil - }, - }, - { - 51, 60, "GetBlockReceipts", - func(n uint64) bool { - _, err := light.GetBlockReceipts(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) - return err == nil - }, - }, - } - for _, c := range cases { - for i := c.from; i <= c.to; i++ { - if !c.method(i) { - t.Fatalf("rpc method %s failed, number %d", c.methodName, i) - } - } - } - // Check GetBloombits - _, err := light.GetBloomBits(context.Background(), client.handler.backend.odr, 0, []uint64{0}) - if err != nil { - t.Fatalf("Failed to retrieve bloombits of pruned section: %v", err) - } - - // Ensure the ODR cached data can be cleaned by pruner. - newPruner(client.db, client.chtIndexer, client.bloomTrieIndexer) - time.Sleep(50 * time.Millisecond) // Ensure pruner have enough time to prune data. - checkPruned(1, config.ChtSize-1) // Ensure all cached data(by odr) is cleaned. -} diff --git a/les/request_test.go b/les/request_test.go index 9b52e6bd8..5e354b7ef 100644 --- a/les/request_test.go +++ b/les/request_test.go @@ -16,6 +16,10 @@ package les +// Note: these tests are disabled now because they cannot work with the old sync +// mechanism removed but will be useful again once the PoS ultralight mode is implemented + +/* import ( "context" "testing" @@ -23,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/light" @@ -77,7 +82,7 @@ func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrReq return nil } sti := light.StateTrieID(header) - ci := light.StorageTrieID(sti, crypto.Keccak256Hash(testContractAddr[:]), common.Hash{}) + ci := light.StorageTrieID(sti, testContractAddr, types.EmptyRootHash) return &light.CodeRequest{Id: ci, Hash: crypto.Keccak256Hash(testContractCodeDeployed)} } @@ -121,3 +126,4 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) { } test(5) } +*/ diff --git a/les/retrieve.go b/les/retrieve.go index 2b9e239e9..728f960a5 100644 --- a/les/retrieve.go +++ b/les/retrieve.go @@ -153,15 +153,6 @@ func (rm *retrieveManager) sendReq(reqID uint64, req *distReq, val validatorFunc return r } -// requested reports whether the request with given reqid is sent by the retriever. 
-func (rm *retrieveManager) requested(reqId uint64) bool { - rm.lock.RLock() - defer rm.lock.RUnlock() - - _, ok := rm.sentReqs[reqId] - return ok -} - // deliver is called by the LES protocol manager to deliver reply messages to waiting requests func (rm *retrieveManager) deliver(peer distPeer, msg *Msg) error { rm.lock.RLock() diff --git a/les/server_handler.go b/les/server_handler.go index 418974c37..5b3505064 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -18,6 +18,7 @@ package les import ( "errors" + "fmt" "sync" "time" @@ -34,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) @@ -116,7 +116,7 @@ func (h *serverHandler) handle(p *clientPeer) error { hash = head.Hash() number = head.Number.Uint64() td = h.blockchain.GetTd(hash, number) - forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), number, head.Time) + forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis(), number, head.Time) ) if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil { p.Log().Debug("Light Ethereum handshake failed", "err", err) @@ -358,20 +358,19 @@ func (h *serverHandler) AddTxsSync() bool { } // getAccount retrieves an account from the state based on root. -func getAccount(triedb *trie.Database, root, hash common.Hash) (types.StateAccount, error) { - trie, err := trie.New(trie.StateTrieID(root), triedb) +func getAccount(triedb *trie.Database, root common.Hash, addr common.Address) (types.StateAccount, error) { + trie, err := trie.NewStateTrie(trie.StateTrieID(root), triedb) if err != nil { return types.StateAccount{}, err } - blob, err := trie.Get(hash[:]) + acc, err := trie.GetAccount(addr) if err != nil { return types.StateAccount{}, err } - var acc types.StateAccount - if err = rlp.DecodeBytes(blob, &acc); err != nil { - return types.StateAccount{}, err + if acc == nil { + return types.StateAccount{}, fmt.Errorf("account %#x is not present", addr) } - return acc, nil + return *acc, nil } // GetHelperTrie returns the post-processed trie root for the given trie ID and section index @@ -391,7 +390,8 @@ func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie { if root == (common.Hash{}) { return nil } - trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix))) + triedb := trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix), trie.HashDefaults) + trie, _ := trie.New(trie.TrieID(root), triedb) return trie } diff --git a/les/server_requests.go b/les/server_requests.go index 033b11d79..485be6d9e 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -303,17 +303,16 @@ func handleGetCode(msg Decoder) (serveRequestFn, uint64, uint64, error) { p.bumpInvalid() continue } - triedb := bc.StateCache().TrieDB() - - account, err := getAccount(triedb, header.Root, common.BytesToHash(request.AccKey)) + address := common.BytesToAddress(request.AccountAddress) + account, err := getAccount(bc.TrieDB(), header.Root, address) if err != nil { - p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) + p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", address, "err", err) p.bumpInvalid() continue } - code, err := 
bc.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash)) + code, err := bc.StateCache().ContractCode(address, common.BytesToHash(account.CodeHash)) if err != nil { - p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err) + p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", address, "codehash", common.BytesToHash(account.CodeHash), "err", err) continue } // Accumulate the code and abort if enough data was retrieved @@ -413,7 +412,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { statedb := bc.StateCache() var trie state.Trie - switch len(request.AccKey) { + switch len(request.AccountAddress) { case 0: // No account key specified, open an account trie trie, err = statedb.OpenTrie(root) @@ -423,20 +422,21 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { } default: // Account key specified, open a storage trie - account, err := getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey)) + address := common.BytesToAddress(request.AccountAddress) + account, err := getAccount(bc.TrieDB(), root, address) if err != nil { - p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) + p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", address, "err", err) p.bumpInvalid() continue } - trie, err = statedb.OpenStorageTrie(root, common.BytesToHash(request.AccKey), account.Root) + trie, err = statedb.OpenStorageTrie(root, address, account.Root) if trie == nil || err != nil { - p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err) + p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", address, "root", account.Root, "err", err) continue } } // Prove the user's request from the account or storage trie - if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil { + if err := trie.Prove(request.Key, nodes); err != nil { p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err) continue } @@ -480,7 +480,7 @@ func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, err // the headers with no valid proof. Keep the compatibility for // legacy les protocol and drop this hack when the les2/3 are // not supported. 
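// The Prove calls below drop their fromLevel argument in this change. A hedged
// sketch of the new two-argument form on a standalone trie, assuming the APIs
// used elsewhere in this patch (trie.NewDatabase with trie.HashDefaults);
// memorydb stands in for any ethdb.KeyValueWriter, and key/value are made up:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults))
	tr.MustUpdate([]byte("key"), []byte("value"))
	root := tr.Hash()

	// Collect the proof nodes for "key"; there is no fromLevel parameter anymore.
	proofDb := memorydb.New()
	if err := tr.Prove([]byte("key"), proofDb); err != nil {
		panic(err)
	}
	// The proof can be verified against the root hash alone.
	val, err := trie.VerifyProof(root, []byte("key"), proofDb)
	fmt.Println(string(val), err) // value <nil>
}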
- err := auxTrie.Prove(request.Key, request.FromLevel, nodes) + err := auxTrie.Prove(request.Key, nodes) if p.version >= lpv4 && err != nil { return nil } @@ -518,12 +518,7 @@ func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) { hash := tx.Hash() stats[i] = txStatus(backend, hash) if stats[i].Status == txpool.TxStatusUnknown { - addFn := backend.TxPool().AddRemotes - // Add txs synchronously for testing purpose - if backend.AddTxsSync() { - addFn = backend.TxPool().AddRemotesSync - } - if errs := addFn([]*types.Transaction{tx}); errs[0] != nil { + if errs := backend.TxPool().Add([]*types.Transaction{tx}, false, backend.AddTxsSync()); errs[0] != nil { stats[i].Error = errs[0].Error() continue } @@ -556,7 +551,7 @@ func handleGetTxStatus(msg Decoder) (serveRequestFn, uint64, uint64, error) { func txStatus(b serverBackend, hash common.Hash) light.TxStatus { var stat light.TxStatus // Looking the transaction in txpool first. - stat.Status = b.TxPool().Status([]common.Hash{hash})[0] + stat.Status = b.TxPool().Status(hash) // If the transaction is unknown to the pool, try looking it up locally. if stat.Status == txpool.TxStatusUnknown { diff --git a/les/servingqueue.go b/les/servingqueue.go index b4b53d8df..b258fc3ca 100644 --- a/les/servingqueue.go +++ b/les/servingqueue.go @@ -17,21 +17,22 @@ package les import ( - "sort" "sync" "sync/atomic" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" + "golang.org/x/exp/slices" ) // servingQueue allows running tasks in a limited number of threads and puts the // waiting tasks in a priority queue type servingQueue struct { - recentTime, queuedTime, servingTimeDiff uint64 - burstLimit, burstDropLimit uint64 - burstDecRate float64 - lastUpdate mclock.AbsTime + recentTime, queuedTime uint64 + servingTimeDiff atomic.Uint64 + burstLimit, burstDropLimit uint64 + burstDecRate float64 + lastUpdate mclock.AbsTime queueAddCh, queueBestCh chan *servingTask stopThreadCh, quit chan struct{} @@ -100,7 +101,7 @@ func (t *servingTask) done() uint64 { t.timeAdded = t.servingTime if t.expTime > diff { t.expTime -= diff - atomic.AddUint64(&t.sq.servingTimeDiff, t.expTime) + t.sq.servingTimeDiff.Add(t.expTime) } else { t.expTime = 0 } @@ -180,35 +181,19 @@ func (sq *servingQueue) threadController() { } } -type ( - // peerTasks lists the tasks received from a given peer when selecting peers to freeze - peerTasks struct { - peer *clientPeer - list []*servingTask - sumTime uint64 - priority float64 - } - // peerList is a sortable list of peerTasks - peerList []*peerTasks -) - -func (l peerList) Len() int { - return len(l) -} - -func (l peerList) Less(i, j int) bool { - return l[i].priority < l[j].priority -} - -func (l peerList) Swap(i, j int) { - l[i], l[j] = l[j], l[i] +// peerTasks lists the tasks received from a given peer when selecting peers to freeze +type peerTasks struct { + peer *clientPeer + list []*servingTask + sumTime uint64 + priority float64 } // freezePeers selects the peers with the worst priority queued tasks and freezes // them until burstTime goes under burstDropLimit or all peers are frozen func (sq *servingQueue) freezePeers() { peerMap := make(map[*clientPeer]*peerTasks) - var peerList peerList + var peerList []*peerTasks if sq.best != nil { sq.queue.Push(sq.best, sq.best.priority) } @@ -231,7 +216,15 @@ func (sq *servingQueue) freezePeers() { tasks.list = append(tasks.list, task) tasks.sumTime += task.expTime } - sort.Sort(peerList) + slices.SortFunc(peerList, func(a, b *peerTasks) int { 
+ if a.priority < b.priority { + return -1 + } + if a.priority > b.priority { + return 1 + } + return 0 + }) drop := true for _, tasks := range peerList { if drop { @@ -257,7 +250,7 @@ func (sq *servingQueue) freezePeers() { // updateRecentTime recalculates the recent serving time value func (sq *servingQueue) updateRecentTime() { - subTime := atomic.SwapUint64(&sq.servingTimeDiff, 0) + subTime := sq.servingTimeDiff.Swap(0) now := mclock.Now() dt := now - sq.lastUpdate sq.lastUpdate = now diff --git a/les/sync.go b/les/sync.go deleted file mode 100644 index bd06077aa..000000000 --- a/les/sync.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/les/downloader" - "github.com/ethereum/go-ethereum/log" -) - -// synchronise tries to sync up our local chain with a remote peer. -func (h *clientHandler) synchronise(peer *serverPeer) { - // Short circuit if the peer is nil. - if peer == nil { - return - } - // Make sure the peer's TD is higher than our own. - latest := h.backend.blockchain.CurrentHeader() - currentTd := rawdb.ReadTd(h.backend.chainDb, latest.Hash(), latest.Number.Uint64()) - if currentTd != nil && peer.Td().Cmp(currentTd) < 0 { - return - } - // Notify testing framework if syncing has completed (for testing purpose). - defer func() { - if h.syncEnd != nil { - h.syncEnd(h.backend.blockchain.CurrentHeader()) - } - }() - start := time.Now() - if h.syncStart != nil { - h.syncStart(h.backend.blockchain.CurrentHeader()) - } - // Fetch the remaining block headers based on the current chain header. - if err := h.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync); err != nil { - log.Debug("Synchronise failed", "reason", err) - return - } - log.Debug("Synchronise finished", "elapsed", common.PrettyDuration(time.Since(start))) -} diff --git a/les/sync_test.go b/les/sync_test.go deleted file mode 100644 index 356a88ffb..000000000 --- a/les/sync_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "fmt" - "testing" - "time" - - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/light" -) - -// Test light syncing which will download all headers from genesis. -func TestLightSyncingLes3(t *testing.T) { testSyncing(t, lpv3) } - -func testSyncing(t *testing.T, protocol int) { - config := light.TestServerIndexerConfig - - waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { - for { - cs, _, _ := cIndexer.Sections() - bts, _, _ := btIndexer.Sections() - if cs >= 1 && bts >= 1 { - break - } - time.Sleep(10 * time.Millisecond) - } - } - // Generate 128+1 blocks (totally 1 CHT section) - netconfig := testnetConfig{ - blocks: int(config.ChtSize + config.ChtConfirms), - protocol: protocol, - indexFn: waitIndexers, - nopruning: true, - } - server, client, tearDown := newClientServerEnv(t, netconfig) - defer tearDown() - - expected := config.ChtSize + config.ChtConfirms - - done := make(chan error) - client.handler.syncEnd = func(header *types.Header) { - if header.Number.Uint64() == expected { - done <- nil - } else { - done <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expected, header.Number) - } - } - - // Create connected peer pair. - peer1, peer2, err := newTestPeerPair("peer", protocol, server.handler, client.handler, false) - if err != nil { - t.Fatalf("Failed to connect testing peers %v", err) - } - defer peer1.close() - defer peer2.close() - - select { - case err := <-done: - if err != nil { - t.Error("sync failed", err) - } - return - case <-time.NewTimer(10 * time.Second).C: - t.Error("checkpoint syncing timeout") - } -} diff --git a/les/test_helper.go b/les/test_helper.go index ead97ddd1..6be13eaec 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/ethconfig" @@ -48,6 +49,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -176,7 +178,7 @@ func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.Indexer return indexers[:] } -func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet, ulcServers []string, ulcFraction int) (*clientHandler, func()) { +func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet) (*clientHandler, func()) { var ( evmux = new(event.TypeMux) engine = ethash.NewFaker() @@ -187,7 +189,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index BaseFee: big.NewInt(params.InitialBaseFee), } ) - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) chain, _ := light.NewLightChain(odr, gspec.Config, engine) client := &LightEthereum{ @@ -209,9 +211,8 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index eventMux: evmux, merger: 
consensus.NewMerger(rawdb.NewMemoryDatabase()), } - client.handler = newClientHandler(ulcServers, ulcFraction, client) + client.handler = newClientHandler(client) - client.handler.start() return client.handler, func() { client.handler.stop() } @@ -226,15 +227,17 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da BaseFee: big.NewInt(params.InitialBaseFee), } ) - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) // create a simulation backend and pre-commit several customized block to the database. simulation := backends.NewSimulatedBackendWithDatabase(db, gspec.Alloc, 100000000) prepare(blocks, simulation) - txpoolConfig := txpool.DefaultConfig + txpoolConfig := legacypool.DefaultConfig txpoolConfig.Journal = "" - txpool := txpool.NewTxPool(txpoolConfig, gspec.Config, simulation.Blockchain()) + + pool := legacypool.New(txpoolConfig, simulation.Blockchain()) + txpool, _ := txpool.New(new(big.Int).SetUint64(txpoolConfig.PriceLimit), simulation.Blockchain(), []txpool.SubPool{pool}) server := &LesServer{ lesCommons: lesCommons{ @@ -304,7 +307,8 @@ func (p *testPeer) handshakeWithServer(t *testing.T, td *big.Int, head common.Ha } // handshakeWithClient executes the handshake with the remote client peer. -func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) { +// (used by temporarily disabled tests) +/*func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) { // It only works for the simulated client peer if p.speer == nil { t.Fatal("handshake for server peer only") @@ -334,7 +338,7 @@ func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Ha if err := p2p.Send(p.app, StatusMsg, &sendList); err != nil { t.Fatalf("status send: %v", err) } -} +}*/ // close terminates the local side of the peer, notifying the remote protocol // manager of termination. @@ -402,7 +406,8 @@ type testClient struct { } // newRawPeer creates a new server peer connects to the server and do the handshake. -func (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) { +// (used by temporarily disabled tests) +/*func (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) { // Create a message pipe to communicate through app, net := p2p.MsgPipe() @@ -450,7 +455,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec tp.close() } return tp, closePeer, errCh -} +}*/ // testServer represents a server object for testing with necessary auxiliary fields. 
type testServer struct { @@ -494,7 +499,7 @@ func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*t head = server.handler.blockchain.CurrentHeader() td = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64()) ) - forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time) + forkID := forkid.NewID(server.handler.blockchain.Config(), genesis, head.Number.Uint64(), head.Time) tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID) // Ensure the connection is established or exits when any error occurs @@ -518,14 +523,12 @@ func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*t // testnetConfig wraps all the configurations for testing network. type testnetConfig struct { - blocks int - protocol int - indexFn indexerCallback - ulcServers []string - ulcFraction int - simClock bool - connect bool - nopruning bool + blocks int + protocol int + indexFn indexerCallback + simClock bool + connect bool + nopruning bool } func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testClient, func()) { @@ -550,7 +553,7 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer) server, b, serverClose := newTestServerHandler(config.blocks, sindexers, sdb, clock) - client, clientClose := newTestClientHandler(b, odr, cIndexers, cdb, speers, config.ulcServers, config.ulcFraction) + client, clientClose := newTestClientHandler(b, odr, cIndexers, cdb, speers) scIndexer.Start(server.blockchain) sbIndexer.Start(server.blockchain) @@ -566,7 +569,6 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC ) if config.connect { done := make(chan struct{}) - client.syncEnd = func(_ *types.Header) { close(done) } cpeer, speer, err = newTestPeerPair("peer", config.protocol, server, client, false) if err != nil { t.Fatalf("Failed to connect testing peers %v", err) diff --git a/les/ulc.go b/les/ulc.go deleted file mode 100644 index b97217e79..000000000 --- a/les/ulc.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "errors" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -type ulc struct { - keys map[string]bool - fraction int -} - -// newULC creates and returns an ultra light client instance. 
-func newULC(servers []string, fraction int) (*ulc, error) { - keys := make(map[string]bool) - for _, id := range servers { - node, err := enode.Parse(enode.ValidSchemes, id) - if err != nil { - log.Warn("Failed to parse trusted server", "id", id, "err", err) - continue - } - keys[node.ID().String()] = true - } - if len(keys) == 0 { - return nil, errors.New("no trusted servers") - } - return &ulc{ - keys: keys, - fraction: fraction, - }, nil -} - -// trusted return an indicator that whether the specified peer is trusted. -func (u *ulc) trusted(p enode.ID) bool { - return u.keys[p.String()] -} diff --git a/les/ulc_test.go b/les/ulc_test.go deleted file mode 100644 index 791bc2885..000000000 --- a/les/ulc_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "crypto/rand" - "fmt" - "net" - "testing" - "time" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -func TestULCAnnounceThresholdLes2(t *testing.T) { testULCAnnounceThreshold(t, 2) } -func TestULCAnnounceThresholdLes3(t *testing.T) { testULCAnnounceThreshold(t, 3) } - -func testULCAnnounceThreshold(t *testing.T, protocol int) { - // todo figure out why it takes fetcher so longer to fetcher the announced header. - t.Skip("Sometimes it can failed") - - // newTestLightPeer creates node with light sync mode - newTestLightPeer := func(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) { - netconfig := testnetConfig{ - protocol: protocol, - ulcServers: ulcServers, - ulcFraction: ulcFraction, - nopruning: true, - } - _, c, teardown := newClientServerEnv(t, netconfig) - return c, teardown - } - - var cases = []struct { - height []int - threshold int - expect uint64 - }{ - {[]int{1}, 100, 1}, - {[]int{0, 0, 0}, 100, 0}, - {[]int{1, 2, 3}, 30, 3}, - {[]int{1, 2, 3}, 60, 2}, - {[]int{3, 2, 1}, 67, 1}, - {[]int{3, 2, 1}, 100, 1}, - } - for _, testcase := range cases { - var ( - servers []*testServer - teardowns []func() - nodes []*enode.Node - ids []string - ) - for i := 0; i < len(testcase.height); i++ { - s, n, teardown := newTestServerPeer(t, 0, protocol, nil) - - servers = append(servers, s) - nodes = append(nodes, n) - teardowns = append(teardowns, teardown) - ids = append(ids, n.String()) - } - c, teardown := newTestLightPeer(t, protocol, ids, testcase.threshold) - - // Connect all servers. - for i := 0; i < len(servers); i++ { - connect(servers[i].handler, nodes[i].ID(), c.handler, protocol, false) - } - for i := 0; i < len(servers); i++ { - for j := 0; j < testcase.height[i]; j++ { - servers[i].backend.Commit() - } - } - time.Sleep(1500 * time.Millisecond) // Ensure the fetcher has done its work. 
-	head := c.handler.backend.blockchain.CurrentHeader().Number.Uint64()
-		if head != testcase.expect {
-			t.Fatalf("chain height mismatch, want %d, got %d", testcase.expect, head)
-		}
-
-		// Release all servers and client resources.
-		teardown()
-		for i := 0; i < len(teardowns); i++ {
-			teardowns[i]()
-		}
-	}
-}
-
-func connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int, noInitAnnounce bool) (*serverPeer, *clientPeer, error) {
-	// Create a message pipe to communicate through
-	app, net := p2p.MsgPipe()
-
-	var id enode.ID
-	rand.Read(id[:])
-
-	peer1 := newServerPeer(protocol, NetworkId, true, p2p.NewPeer(serverId, "", nil), net) // Mark server as trusted
-	peer2 := newClientPeer(protocol, NetworkId, p2p.NewPeer(id, "", nil), app)
-
-	// Start the peerLight on a new thread
-	errc1 := make(chan error, 1)
-	errc2 := make(chan error, 1)
-	go func() {
-		select {
-		case <-server.closeCh:
-			errc1 <- p2p.DiscQuitting
-		case errc1 <- server.handle(peer2):
-		}
-	}()
-	go func() {
-		select {
-		case <-client.closeCh:
-			errc1 <- p2p.DiscQuitting
-		case errc1 <- client.handle(peer1, noInitAnnounce):
-		}
-	}()
-	// Ensure the connection is established or exits when any error occurs
-	for {
-		select {
-		case err := <-errc1:
-			return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
-		case err := <-errc2:
-			return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
-		default:
-		}
-		if peer1.serving.Load() && peer2.serving.Load() {
-			break
-		}
-		time.Sleep(50 * time.Millisecond)
-	}
-	return peer1, peer2, nil
-}
-
-// newTestServerPeer creates server peer.
-func newTestServerPeer(t *testing.T, blocks int, protocol int, indexFn indexerCallback) (*testServer, *enode.Node, func()) {
-	netconfig := testnetConfig{
-		blocks:    blocks,
-		protocol:  protocol,
-		indexFn:   indexFn,
-		nopruning: true,
-	}
-	s, _, teardown := newClientServerEnv(t, netconfig)
-	key, err := crypto.GenerateKey()
-	if err != nil {
-		t.Fatal("generate key err:", err)
-	}
-	s.handler.server.privateKey = key
-	n := enode.NewV4(&key.PublicKey, net.ParseIP("127.0.0.1"), 35000, 35000)
-	return s, n, teardown
-}
diff --git a/les/utils/limiter.go b/les/utils/limiter.go
index 84d186efd..70b7ff64f 100644
--- a/les/utils/limiter.go
+++ b/les/utils/limiter.go
@@ -17,10 +17,10 @@
 package utils
 
 import (
-	"sort"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/p2p/enode"
+	"golang.org/x/exp/slices"
 )
 
 const maxSelectionWeight = 1000000000 // maximum selection weight of each individual node/address group
@@ -340,24 +340,9 @@ func (l *Limiter) Stop() {
 	l.cond.Signal()
 }
 
-type (
-	dropList     []dropListItem
-	dropListItem struct {
-		nq       *nodeQueue
-		priority float64
-	}
-)
-
-func (l dropList) Len() int {
-	return len(l)
-}
-
-func (l dropList) Less(i, j int) bool {
-	return l[i].priority < l[j].priority
-}
-
-func (l dropList) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
+type dropListItem struct {
+	nq       *nodeQueue
+	priority float64
 }
 
 // dropRequests selects the nodes with the highest queued request cost to selection
@@ -366,7 +351,7 @@ func (l dropList) Swap(i, j int) {
 func (l *Limiter) dropRequests() {
 	var (
 		sumValue float64
-		list     dropList
+		list     []dropListItem
 	)
 	for _, nq := range l.nodes {
 		sumValue += nq.value
@@ -384,7 +369,15 @@ func (l dropList) Swap(i, j int) {
 			priority: w / float64(nq.sumCost),
 		})
 	}
-	sort.Sort(list)
+	slices.SortFunc(list, func(a, b dropListItem) int {
+		if a.priority < b.priority {
+			return -1
+		}
+		if a.priority > b.priority {
+			return 1
+		}
+		return 0
+	})
 	for _, item := range list {
 		for _, request := range item.nq.queue {
 			close(request.process)
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
index e3d756f80..5694ca72c 100644
--- a/light/lightchain_test.go
+++ b/light/lightchain_test.go
@@ -29,6 +29,7 @@
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/trie"
 )
 
 // So we can deterministically seed different blockchains
@@ -55,7 +56,7 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [
 func newCanonical(n int) (ethdb.Database, *LightChain, error) {
 	db := rawdb.NewMemoryDatabase()
 	gspec := core.Genesis{Config: params.TestChainConfig}
-	genesis := gspec.MustCommit(db)
+	genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
 	blockchain, _ := NewLightChain(&dummyOdr{db: db, indexerConfig: TestClientIndexerConfig}, gspec.Config, ethash.NewFaker())
 
 	// Create and inject the requested chain
@@ -75,7 +76,7 @@ func newTestLightChain() *LightChain {
 		Difficulty: big.NewInt(1),
 		Config:     params.TestChainConfig,
 	}
-	gspec.MustCommit(db)
+	gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
 	lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker())
 	if err != nil {
 		panic(err)
diff --git a/light/odr.go b/light/odr.go
index d331d3c9b..259702743 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -54,35 +54,35 @@ type OdrRequest interface {
 
 // TrieID identifies a state or account storage trie
 type TrieID struct {
-	BlockHash   common.Hash
-	BlockNumber uint64
-	StateRoot   common.Hash
-	Root        common.Hash
-	AccKey      []byte
+	BlockHash      common.Hash
+	BlockNumber    uint64
+	StateRoot      common.Hash
+	Root           common.Hash
+	AccountAddress []byte
 }
 
 // StateTrieID returns a TrieID for a state trie belonging to a certain block
 // header.
 func StateTrieID(header *types.Header) *TrieID {
 	return &TrieID{
-		BlockHash:   header.Hash(),
-		BlockNumber: header.Number.Uint64(),
-		StateRoot:   header.Root,
-		Root:        header.Root,
-		AccKey:      nil,
+		BlockHash:      header.Hash(),
+		BlockNumber:    header.Number.Uint64(),
+		StateRoot:      header.Root,
+		Root:           header.Root,
+		AccountAddress: nil,
 	}
 }
 
 // StorageTrieID returns a TrieID for a contract storage trie at a given account
 // of a given state trie. It also requires the root hash of the trie for
 // checking Merkle proofs.
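// As a usage sketch (a hypothetical helper; header, addr and storageRoot are
// assumed inputs), the two constructors compose as:
//
//	func storageID(header *types.Header, addr common.Address, storageRoot common.Hash) *TrieID {
//		state := StateTrieID(header)                   // ID for the account trie at header.Root
//		return StorageTrieID(state, addr, storageRoot) // ID for addr's storage trie
//	}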
-func StorageTrieID(state *TrieID, addrHash, root common.Hash) *TrieID { +func StorageTrieID(state *TrieID, address common.Address, root common.Hash) *TrieID { return &TrieID{ - BlockHash: state.BlockHash, - BlockNumber: state.BlockNumber, - StateRoot: state.StateRoot, - AccKey: addrHash[:], - Root: root, + BlockHash: state.BlockHash, + BlockNumber: state.BlockNumber, + StateRoot: state.StateRoot, + AccountAddress: address[:], + Root: root, } } diff --git a/light/odr_test.go b/light/odr_test.go index 0df1fbf2f..d8a7f1067 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -86,8 +87,8 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { err error t state.Trie ) - if len(req.Id.AccKey) > 0 { - t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root) + if len(req.Id.AccountAddress) > 0 { + t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToAddress(req.Id.AccountAddress), req.Id.Root) } else { t, err = odr.serverState.OpenTrie(req.Id.Root) } @@ -95,7 +96,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { panic(err) } nodes := NewNodeSet() - t.Prove(req.Key, 0, nodes) + t.Prove(req.Key, nodes) req.Proof = nodes case *CodeRequest: req.Data = rawdb.ReadCode(odr.sdb, req.Hash) @@ -282,7 +283,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) { t.Fatal(err) } - gspec.MustCommit(ldb) + gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults)) odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} lightchain, err := NewLightChain(odr, gspec.Config, ethash.NewFullFaker()) if err != nil { diff --git a/light/odr_util.go b/light/odr_util.go index 8e2047f00..9cac7df4f 100644 --- a/light/odr_util.go +++ b/light/odr_util.go @@ -17,12 +17,12 @@ package light import ( - "bytes" "context" "errors" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" @@ -125,7 +125,7 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6 return nil, err } body := new(types.Body) - if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + if err := rlp.DecodeBytes(data, body); err != nil { return nil, err } return body, nil @@ -175,7 +175,13 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num genesis := rawdb.ReadCanonicalHash(odr.Database(), 0) config := rawdb.ReadChainConfig(odr.Database(), genesis) - if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), block.Transactions()); err != nil { + var blobGasPrice *big.Int + excessBlobGas := block.ExcessBlobGas() + if excessBlobGas != nil { + blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas) + } + + if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, block.Transactions()); err != nil { return nil, err } rawdb.WriteReceipts(odr.Database(), hash, number, receipts) diff --git a/light/postprocess.go b/light/postprocess.go index 001df209b..13d75f861 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -145,7 
+145,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, dis diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down + triedb: trie.NewDatabase(trieTable, trie.HashDefaults), sectionSize: size, disablePruning: disablePruning, } @@ -214,10 +214,13 @@ func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) e // Commit implements core.ChainIndexerBackend func (c *ChtIndexerBackend) Commit() error { - root, nodes := c.trie.Commit(false) + root, nodes, err := c.trie.Commit(false) + if err != nil { + return err + } // Commit trie changes into trie database in case it's not nil. if nodes != nil { - if err := c.triedb.Update(root, c.originRoot, trienode.NewWithNodeSet(nodes)); err != nil { + if err := c.triedb.Update(root, c.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { return err } if err := c.triedb.Commit(root, false); err != nil { @@ -225,7 +228,6 @@ func (c *ChtIndexerBackend) Commit() error { } } // Re-create trie with newly generated root and updated database. - var err error c.trie, err = trie.New(trie.TrieID(root), c.triedb) if err != nil { return err @@ -346,7 +348,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down + triedb: trie.NewDatabase(trieTable, trie.HashDefaults), parentSize: parentSize, size: size, disablePruning: disablePruning, @@ -465,10 +467,13 @@ func (b *BloomTrieIndexerBackend) Commit() error { return terr } } - root, nodes := b.trie.Commit(false) + root, nodes, err := b.trie.Commit(false) + if err != nil { + return err + } // Commit trie changes into trie database in case it's not nil. if nodes != nil { - if err := b.triedb.Update(root, b.originRoot, trienode.NewWithNodeSet(nodes)); err != nil { + if err := b.triedb.Update(root, b.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { return err } if err := b.triedb.Commit(root, false); err != nil { @@ -476,7 +481,6 @@ func (b *BloomTrieIndexerBackend) Commit() error { } } // Re-create trie with newly generated root and updated database. 
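// Both indexer backends now share the same three-step commit shape. A hedged
// sketch of that flow as a standalone helper (the function is hypothetical;
// parent is the previous root, signatures as introduced by this patch):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// commitTrie commits the trie, folds the dirty nodes into the trie database,
// persists them, and reopens the trie at the new root.
func commitTrie(tr *trie.Trie, triedb *trie.Database, parent common.Hash) (*trie.Trie, common.Hash, error) {
	root, nodes, err := tr.Commit(false)
	if err != nil {
		return nil, common.Hash{}, err
	}
	if nodes != nil {
		if err := triedb.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
			return nil, common.Hash{}, err
		}
		if err := triedb.Commit(root, false); err != nil {
			return nil, common.Hash{}, err
		}
	}
	reopened, err := trie.New(trie.TrieID(root), triedb)
	return reopened, root, err
}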
- var err error b.trie, err = trie.New(trie.TrieID(root), b.triedb) if err != nil { return err diff --git a/light/trie.go b/light/trie.go index 6d802a5fb..1847f1e71 100644 --- a/light/trie.go +++ b/light/trie.go @@ -55,8 +55,8 @@ func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) { return &odrTrie{db: db, id: db.id}, nil } -func (db *odrDatabase) OpenStorageTrie(state, addrHash, root common.Hash) (state.Trie, error) { - return &odrTrie{db: db, id: StorageTrieID(db.id, addrHash, root)}, nil +func (db *odrDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (state.Trie, error) { + return &odrTrie{db: db, id: StorageTrieID(db.id, address, root)}, nil } func (db *odrDatabase) CopyTrie(t state.Trie) state.Trie { @@ -72,7 +72,7 @@ func (db *odrDatabase) CopyTrie(t state.Trie) state.Trie { } } -func (db *odrDatabase) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { +func (db *odrDatabase) ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error) { if codeHash == sha3Nil { return nil, nil } @@ -81,14 +81,14 @@ func (db *odrDatabase) ContractCode(addrHash, codeHash common.Hash) ([]byte, err return code, nil } id := *db.id - id.AccKey = addrHash[:] + id.AccountAddress = addr[:] req := &CodeRequest{Id: &id, Hash: codeHash} err := db.backend.Retrieve(db.ctx, req) return req.Data, err } -func (db *odrDatabase) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { - code, err := db.ContractCode(addrHash, codeHash) +func (db *odrDatabase) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) { + code, err := db.ContractCode(addr, codeHash) return len(code), err } @@ -108,28 +108,35 @@ type odrTrie struct { func (t *odrTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { key = crypto.Keccak256(key) - var res []byte + var enc []byte err := t.do(key, func() (err error) { - res, err = t.trie.Get(key) + enc, err = t.trie.Get(key) return err }) - return res, err + if err != nil || len(enc) == 0 { + return nil, err + } + _, content, _, err := rlp.Split(enc) + return content, err } func (t *odrTrie) GetAccount(address common.Address) (*types.StateAccount, error) { - var res types.StateAccount - key := crypto.Keccak256(address.Bytes()) + var ( + enc []byte + key = crypto.Keccak256(address.Bytes()) + ) err := t.do(key, func() (err error) { - value, err := t.trie.Get(key) - if err != nil { - return err - } - if value == nil { - return nil - } - return rlp.DecodeBytes(value, &res) + enc, err = t.trie.Get(key) + return err }) - return &res, err + if err != nil || len(enc) == 0 { + return nil, err + } + acct := new(types.StateAccount) + if err := rlp.DecodeBytes(enc, acct); err != nil { + return nil, err + } + return acct, nil } func (t *odrTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { @@ -143,10 +150,15 @@ func (t *odrTrie) UpdateAccount(address common.Address, acc *types.StateAccount) }) } +func (t *odrTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { + return nil +} + func (t *odrTrie) UpdateStorage(_ common.Address, key, value []byte) error { key = crypto.Keccak256(key) + v, _ := rlp.EncodeToBytes(value) return t.do(key, func() error { - return t.trie.Update(key, value) + return t.trie.Update(key, v) }) } @@ -165,9 +177,9 @@ func (t *odrTrie) DeleteAccount(address common.Address) error { }) } -func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { +func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, 
*trienode.NodeSet, error) { if t.trie == nil { - return t.id.Root, nil + return t.id.Root, nil, nil } return t.trie.Commit(collectLeaf) } @@ -179,15 +191,15 @@ func (t *odrTrie) Hash() common.Hash { return t.trie.Hash() } -func (t *odrTrie) NodeIterator(startkey []byte) trie.NodeIterator { - return newNodeIterator(t, startkey) +func (t *odrTrie) NodeIterator(startkey []byte) (trie.NodeIterator, error) { + return newNodeIterator(t, startkey), nil } func (t *odrTrie) GetKey(sha []byte) []byte { return nil } -func (t *odrTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { +func (t *odrTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { return errors.New("not implemented, needs client/server interface split") } @@ -198,12 +210,13 @@ func (t *odrTrie) do(key []byte, fn func() error) error { var err error if t.trie == nil { var id *trie.ID - if len(t.id.AccKey) > 0 { - id = trie.StorageTrieID(t.id.StateRoot, common.BytesToHash(t.id.AccKey), t.id.Root) + if len(t.id.AccountAddress) > 0 { + id = trie.StorageTrieID(t.id.StateRoot, crypto.Keccak256Hash(t.id.AccountAddress), t.id.Root) } else { id = trie.StateTrieID(t.id.StateRoot) } - t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database())) + triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults) + t.trie, err = trie.New(id, triedb) } if err == nil { err = fn() @@ -230,12 +243,13 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator { if t.trie == nil { it.do(func() error { var id *trie.ID - if len(t.id.AccKey) > 0 { - id = trie.StorageTrieID(t.id.StateRoot, common.BytesToHash(t.id.AccKey), t.id.Root) + if len(t.id.AccountAddress) > 0 { + id = trie.StorageTrieID(t.id.StateRoot, crypto.Keccak256Hash(t.id.AccountAddress), t.id.Root) } else { id = trie.StateTrieID(t.id.StateRoot) } - t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database())) + triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults) + t, err := trie.New(id, triedb) if err == nil { it.t.trie = t } @@ -243,7 +257,11 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator { }) } it.do(func() error { - it.NodeIterator = it.t.trie.NodeIterator(startkey) + var err error + it.NodeIterator, err = it.t.trie.NodeIterator(startkey) + if err != nil { + return err + } return it.NodeIterator.Error() }) return it diff --git a/light/trie_test.go b/light/trie_test.go index 0a24609cb..fe724e9ee 100644 --- a/light/trie_test.go +++ b/light/trie_test.go @@ -50,7 +50,7 @@ func TestNodeIterator(t *testing.T) { panic(err) } - gspec.MustCommit(lightdb) + gspec.MustCommit(lightdb, trie.NewDatabase(lightdb, trie.HashDefaults)) ctx := context.Background() odr := &testOdr{sdb: fulldb, ldb: lightdb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} head := blockchain.CurrentHeader() @@ -62,8 +62,16 @@ func TestNodeIterator(t *testing.T) { } func diffTries(t1, t2 state.Trie) error { - i1 := trie.NewIterator(t1.NodeIterator(nil)) - i2 := trie.NewIterator(t2.NodeIterator(nil)) + trieIt1, err := t1.NodeIterator(nil) + if err != nil { + return err + } + trieIt2, err := t2.NodeIterator(nil) + if err != nil { + return err + } + i1 := trie.NewIterator(trieIt1) + i2 := trie.NewIterator(trieIt2) for i1.Next() && i2.Next() { if !bytes.Equal(i1.Key, i2.Key) { spew.Dump(i2) diff --git a/light/txpool.go b/light/txpool.go index 8d2a189c0..b792d70b1 100644 --- a/light/txpool.go +++ b/light/txpool.go @@ -494,29 +494,29 @@ func (pool *TxPool) GetTransactions() (txs types.Transactions, err 
error) { // Content retrieves the data content of the transaction pool, returning all the // pending as well as queued transactions, grouped by account and nonce. -func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { +func (pool *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { pool.mu.RLock() defer pool.mu.RUnlock() // Retrieve all the pending transactions and sort by account and by nonce - pending := make(map[common.Address]types.Transactions) + pending := make(map[common.Address][]*types.Transaction) for _, tx := range pool.pending { account, _ := types.Sender(pool.signer, tx) pending[account] = append(pending[account], tx) } // There are no queued transactions in a light pool, just return an empty map - queued := make(map[common.Address]types.Transactions) + queued := make(map[common.Address][]*types.Transaction) return pending, queued } // ContentFrom retrieves the data content of the transaction pool, returning the // pending as well as queued transactions of this address, grouped by nonce. -func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { +func (pool *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { pool.mu.RLock() defer pool.mu.RUnlock() // Retrieve the pending transactions and sort by nonce - var pending types.Transactions + var pending []*types.Transaction for _, tx := range pool.pending { account, _ := types.Sender(pool.signer, tx) if account != addr { @@ -525,7 +525,7 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types. pending = append(pending, tx) } // There are no queued transactions in a light pool, just return an empty map - return pending, types.Transactions{} + return pending, []*types.Transaction{} } // RemoveTransactions removes all given transactions from the pool. diff --git a/light/txpool_test.go b/light/txpool_test.go index 1181e3889..1eec7bc42 100644 --- a/light/txpool_test.go +++ b/light/txpool_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) type testTxRelay struct { @@ -96,7 +97,7 @@ func TestTxPool(t *testing.T) { panic(err) } - gspec.MustCommit(ldb) + gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults)) odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} relay := &testTxRelay{ send: make(chan int, 1), diff --git a/log/format.go b/log/format.go index fb476b73e..1adf79c17 100644 --- a/log/format.go +++ b/log/format.go @@ -32,20 +32,24 @@ var locationTrims = []string{ // PrintOrigins sets or unsets log location (file:line) printing for terminal // format output. func PrintOrigins(print bool) { + locationEnabled.Store(print) if print { - atomic.StoreUint32(&locationEnabled, 1) - } else { - atomic.StoreUint32(&locationEnabled, 0) + stackEnabled.Store(true) } } +// stackEnabled is an atomic flag controlling whether the log handler needs +// to store the callsite stack. This is needed in case any handler wants to +// print locations (locationEnabled), use vmodule, or print full stacks (BacktraceAt). +var stackEnabled atomic.Bool + // locationEnabled is an atomic flag controlling whether the terminal formatter // should append the log locations too when printing entries. 
-var locationEnabled uint32 +var locationEnabled atomic.Bool // locationLength is the maxmimum path length encountered, which all logs are // padded to to aid in alignment. -var locationLength uint32 +var locationLength atomic.Uint32 // fieldPadding is a global map with maximum field value lengths seen until now // to allow padding log contexts in a bit smarter way. @@ -109,17 +113,17 @@ func TerminalFormat(usecolor bool) Format { b := &bytes.Buffer{} lvl := r.Lvl.AlignedString() - if atomic.LoadUint32(&locationEnabled) != 0 { + if locationEnabled.Load() { // Log origin printing was requested, format the location path and line number location := fmt.Sprintf("%+v", r.Call) for _, prefix := range locationTrims { location = strings.TrimPrefix(location, prefix) } // Maintain the maximum location length for fancyer alignment - align := int(atomic.LoadUint32(&locationLength)) + align := int(locationLength.Load()) if align < len(location) { align = len(location) - atomic.StoreUint32(&locationLength, uint32(align)) + locationLength.Store(uint32(align)) } padding := strings.Repeat(" ", align-len(location)) diff --git a/log/handler_glog.go b/log/handler_glog.go index b5186d4b2..afca0808b 100644 --- a/log/handler_glog.go +++ b/log/handler_glog.go @@ -39,9 +39,9 @@ var errTraceSyntax = errors.New("expect file.go:234") type GlogHandler struct { origin Handler // The origin handler this wraps - level uint32 // Current log level, atomically accessible - override uint32 // Flag whether overrides are used, atomically accessible - backtrace uint32 // Flag whether backtrace location is set + level atomic.Uint32 // Current log level, atomically accessible + override atomic.Bool // Flag whether overrides are used, atomically accessible + backtrace atomic.Bool // Flag whether backtrace location is set patterns []pattern // Current list of patterns to override with siteCache map[uintptr]Lvl // Cache of callsite pattern evaluations @@ -72,7 +72,7 @@ type pattern struct { // Verbosity sets the glog verbosity ceiling. The verbosity of individual packages // and source files can be raised using Vmodule. func (h *GlogHandler) Verbosity(level Lvl) { - atomic.StoreUint32(&h.level, uint32(level)) + h.level.Store(uint32(level)) } // Vmodule sets the glog verbosity pattern. @@ -138,8 +138,11 @@ func (h *GlogHandler) Vmodule(ruleset string) error { h.patterns = filter h.siteCache = make(map[uintptr]Lvl) - atomic.StoreUint32(&h.override, uint32(len(filter))) - + h.override.Store(len(filter) != 0) + // Enable location storage (globally) + if len(h.patterns) > 0 { + stackEnabled.Store(true) + } return nil } @@ -171,8 +174,9 @@ func (h *GlogHandler) BacktraceAt(location string) error { defer h.lock.Unlock() h.location = location - atomic.StoreUint32(&h.backtrace, uint32(len(location))) - + h.backtrace.Store(len(location) > 0) + // Enable location storage (globally) + stackEnabled.Store(true) return nil } @@ -180,7 +184,7 @@ func (h *GlogHandler) BacktraceAt(location string) error { // and backtrace filters, finally emitting it if either allow it through. func (h *GlogHandler) Log(r *Record) error { // If backtracing is requested, check whether this is the callsite - if atomic.LoadUint32(&h.backtrace) > 0 { + if h.backtrace.Load() { // Everything below here is slow. Although we could cache the call sites the // same way as for vmodule, backtracing is so rare it's not worth the extra // complexity. 
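// The flags here migrate from hand-rolled uint32 atomics to the typed atomics
// added in Go 1.19. A small self-contained sketch of the before/after pattern
// (the flag names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	legacyFlag uint32      // old style: 0/1 juggled via StoreUint32/LoadUint32
	typedFlag  atomic.Bool // new style: the boolean intent lives in the type
)

func main() {
	atomic.StoreUint32(&legacyFlag, 1)
	typedFlag.Store(true)
	fmt.Println(atomic.LoadUint32(&legacyFlag) != 0, typedFlag.Load()) // true true
}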
@@ -198,11 +202,11 @@ func (h *GlogHandler) Log(r *Record) error {
 		}
 	}
 	// If the global log level allows, fast track logging
-	if atomic.LoadUint32(&h.level) >= uint32(r.Lvl) {
+	if h.level.Load() >= uint32(r.Lvl) {
 		return h.origin.Log(r)
 	}
 	// If no local overrides are present, fast track skipping
-	if atomic.LoadUint32(&h.override) == 0 {
+	if !h.override.Load() {
 		return nil
 	}
 	// Check callsite cache for previously calculated log levels
diff --git a/log/logger.go b/log/logger.go
index 4e471a22d..42e7e375d 100644
--- a/log/logger.go
+++ b/log/logger.go
@@ -177,19 +177,22 @@ type logger struct {
 }
 
 func (l *logger) write(msg string, lvl Lvl, ctx []interface{}, skip int) {
-	l.h.Log(&Record{
+	record := &Record{
 		Time: time.Now(),
 		Lvl:  lvl,
 		Msg:  msg,
 		Ctx:  newContext(l.ctx, ctx),
-		Call: stack.Caller(skip),
 		KeyNames: RecordKeyNames{
 			Time: timeKey,
 			Msg:  msgKey,
 			Lvl:  lvlKey,
 			Ctx:  ctxKey,
 		},
-	})
+	}
+	if stackEnabled.Load() {
+		record.Call = stack.Caller(skip)
+	}
+	l.h.Log(record)
 }
 
 func (l *logger) New(ctx ...interface{}) Logger {
diff --git a/log/logger_test.go b/log/logger_test.go
new file mode 100644
index 000000000..2e59b3fdf
--- /dev/null
+++ b/log/logger_test.go
@@ -0,0 +1,67 @@
+package log
+
+import (
+	"bytes"
+	"os"
+	"strings"
+	"testing"
+)
+
+// TestLoggingWithTrace checks that if BacktraceAt is set, the GlogHandler
+// is capable of spitting out a stacktrace
+func TestLoggingWithTrace(t *testing.T) {
+	defer stackEnabled.Store(stackEnabled.Load())
+	out := new(bytes.Buffer)
+	logger := New()
+	{
+		glog := NewGlogHandler(StreamHandler(out, TerminalFormat(false)))
+		glog.Verbosity(LvlTrace)
+		if err := glog.BacktraceAt("logger_test.go:24"); err != nil {
+			t.Fatal(err)
+		}
+		logger.SetHandler(glog)
+	}
+	logger.Trace("a message", "foo", "bar") // Will be bumped to INFO
+	have := out.String()
+	if !strings.HasPrefix(have, "INFO") {
+		t.Fatalf("BacktraceAt should bump level to info: %s", have)
+	}
+	// The timestamp is locale-dependent, so we want to trim that off
+	// "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..."
+	have = strings.Split(have, "]")[1]
+	wantPrefix := " a message\n\ngoroutine"
+	if !strings.HasPrefix(have, wantPrefix) {
+		t.Errorf("\nhave: %q\nwant: %q\n", have, wantPrefix)
+	}
+}
+
+// TestLoggingWithVmodule checks that vmodule works.
+func TestLoggingWithVmodule(t *testing.T) {
+	defer stackEnabled.Store(stackEnabled.Load())
+	out := new(bytes.Buffer)
+	logger := New()
+	{
+		glog := NewGlogHandler(StreamHandler(out, TerminalFormat(false)))
+		glog.Verbosity(LvlCrit)
+		logger.SetHandler(glog)
+		logger.Warn("This should not be seen", "ignored", "true")
+		glog.Vmodule("logger_test.go=5")
+	}
+	logger.Trace("a message", "foo", "bar")
+	have := out.String()
+	// The timestamp is locale-dependent, so we want to trim that off
+	// "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..."
+	have = strings.Split(have, "]")[1]
+	want := " a message foo=bar\n"
+	if have != want {
+		t.Errorf("\nhave: %q\nwant: %q\n", have, want)
+	}
+}
+
+func BenchmarkTraceLogging(b *testing.B) {
+	Root().SetHandler(LvlFilterHandler(LvlInfo, StreamHandler(os.Stderr, TerminalFormat(true))))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Trace("a message", "v", i)
+	}
+}
diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go
index 2b04eeab2..9e850f96b 100644
--- a/metrics/exp/exp.go
+++ b/metrics/exp/exp.go
@@ -95,6 +95,20 @@ func (exp *exp) getFloat(name string) *expvar.Float {
 	return v
 }
 
+func (exp *exp) getInfo(name string) *expvar.String {
+	var v *expvar.String
+	exp.expvarLock.Lock()
+	p := expvar.Get(name)
+	if p != nil {
+		v = p.(*expvar.String)
+	} else {
+		v = new(expvar.String)
+		expvar.Publish(name, v)
+	}
+	exp.expvarLock.Unlock()
+	return v
+}
+
 func (exp *exp) publishCounter(name string, metric metrics.Counter) {
 	v := exp.getInt(name)
 	v.Set(metric.Count())
@@ -113,6 +127,10 @@ func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
 	exp.getFloat(name).Set(metric.Value())
 }
 
+func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfo) {
+	exp.getInfo(name).Set(metric.Value().String())
+}
+
 func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
 	h := metric.Snapshot()
 	ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@@ -178,6 +196,8 @@ func (exp *exp) syncToExpvar() {
 			exp.publishGauge(name, i)
 		case metrics.GaugeFloat64:
 			exp.publishGaugeFloat64(name, i)
+		case metrics.GaugeInfo:
+			exp.publishGaugeInfo(name, i)
 		case metrics.Histogram:
 			exp.publishHistogram(name, i)
 		case metrics.Meter:
diff --git a/metrics/gauge_info.go b/metrics/gauge_info.go
new file mode 100644
index 000000000..f1b207593
--- /dev/null
+++ b/metrics/gauge_info.go
@@ -0,0 +1,144 @@
+package metrics
+
+import (
+	"encoding/json"
+	"sync"
+)
+
+// GaugeInfo holds a GaugeInfoValue that can be set arbitrarily.
+type GaugeInfo interface {
+	Snapshot() GaugeInfo
+	Update(GaugeInfoValue)
+	Value() GaugeInfoValue
+}
+
+// GaugeInfoValue is a mapping of (string) keys to (string) values
+type GaugeInfoValue map[string]string
+
+func (val GaugeInfoValue) String() string {
+	data, _ := json.Marshal(val)
+	return string(data)
+}
+
+// GetOrRegisterGaugeInfo returns an existing GaugeInfo or constructs and registers a
+// new StandardGaugeInfo.
+func GetOrRegisterGaugeInfo(name string, r Registry) GaugeInfo {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGaugeInfo()).(GaugeInfo)
+}
+
+// NewGaugeInfo constructs a new StandardGaugeInfo.
+func NewGaugeInfo() GaugeInfo {
+	if !Enabled {
+		return NilGaugeInfo{}
+	}
+	return &StandardGaugeInfo{
+		value: GaugeInfoValue{},
+	}
+}
+
+// NewRegisteredGaugeInfo constructs and registers a new StandardGaugeInfo.
+func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo {
+	c := NewGaugeInfo()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeInfo constructs a new FunctionalGaugeInfo.
+func NewFunctionalGaugeInfo(f func() GaugeInfoValue) GaugeInfo {
+	if !Enabled {
+		return NilGaugeInfo{}
+	}
+	return &FunctionalGaugeInfo{value: f}
+}
+
+// NewRegisteredFunctionalGaugeInfo constructs and registers a new FunctionalGaugeInfo.
+// NewRegisteredFunctionalGaugeInfo constructs and registers a new FunctionalGaugeInfo.
+func NewRegisteredFunctionalGaugeInfo(name string, r Registry, f func() GaugeInfoValue) GaugeInfo {
+	c := NewFunctionalGaugeInfo(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeInfoSnapshot is a read-only copy of another GaugeInfo.
+type GaugeInfoSnapshot GaugeInfoValue
+
+// Snapshot returns the snapshot.
+func (g GaugeInfoSnapshot) Snapshot() GaugeInfo { return g }
+
+// Update panics.
+func (GaugeInfoSnapshot) Update(GaugeInfoValue) {
+	panic("Update called on a GaugeInfoSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) }
+
+// NilGaugeInfo is a no-op GaugeInfo.
+type NilGaugeInfo struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeInfo) Snapshot() GaugeInfo { return NilGaugeInfo{} }
+
+// Update is a no-op.
+func (NilGaugeInfo) Update(v GaugeInfoValue) {}
+
+// Value is a no-op.
+func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} }
+
+// StandardGaugeInfo is the standard implementation of a GaugeInfo and uses
+// sync.Mutex to manage a single string value.
+type StandardGaugeInfo struct {
+	mutex sync.Mutex
+	value GaugeInfoValue
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeInfo) Snapshot() GaugeInfo {
+	return GaugeInfoSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeInfo) Update(v GaugeInfoValue) {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+	g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeInfo) Value() GaugeInfoValue {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+	return g.value
+}
+
+// FunctionalGaugeInfo returns the value from a given function.
+type FunctionalGaugeInfo struct {
+	value func() GaugeInfoValue
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeInfo) Value() GaugeInfoValue {
+	return g.value()
+}
+
+// ValueJsonString returns the gauge's current value in JSON string format.
+func (g FunctionalGaugeInfo) ValueJsonString() string {
+	data, _ := json.Marshal(g.value())
+	return string(data)
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeInfo) Snapshot() GaugeInfo { return GaugeInfoSnapshot(g.Value()) }
+
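Alongside the mutex-backed `StandardGaugeInfo`, the functional variant defers to a closure on every read, which suits values derived from live state. A hedged sketch (the `peers` variable stands in for whatever the closure would really consult):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true

	peers := 3 // stand-in for live node state
	fg := metrics.NewFunctionalGaugeInfo(func() metrics.GaugeInfoValue {
		return metrics.GaugeInfoValue{"peers": fmt.Sprint(peers)}
	})
	fmt.Println(fg.Value()) // {"peers":"3"} (the closure runs on every read)
	peers = 5
	fmt.Println(fg.Value()) // {"peers":"5"}
}
```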
+// Update panics.
+func (FunctionalGaugeInfo) Update(GaugeInfoValue) {
+	panic("Update called on a FunctionalGaugeInfo")
+}
diff --git a/metrics/gauge_info_test.go b/metrics/gauge_info_test.go
new file mode 100644
index 000000000..4227a6a85
--- /dev/null
+++ b/metrics/gauge_info_test.go
@@ -0,0 +1,75 @@
+package metrics
+
+import (
+	"strconv"
+	"testing"
+)
+
+func TestGaugeInfoJsonString(t *testing.T) {
+	g := NewGaugeInfo()
+	g.Update(GaugeInfoValue{
+		"chain_id":   "5",
+		"anotherKey": "any_string_value",
+		"third_key":  "anything",
+	},
+	)
+	want := `{"anotherKey":"any_string_value","chain_id":"5","third_key":"anything"}`
+	if have := g.Value().String(); have != want {
+		t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+	}
+}
+
+func TestGaugeInfoSnapshot(t *testing.T) {
+	g := NewGaugeInfo()
+	g.Update(GaugeInfoValue{"value": "original"})
+	snapshot := g.Snapshot() // snapshot taken before the second update
+	g.Update(GaugeInfoValue{"value": "updated"})
+	// The 'g' should be updated
+	if have, want := g.Value().String(), `{"value":"updated"}`; have != want {
+		t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+	}
+	// The snapshot should keep the original value
+	if have, want := snapshot.Value().String(), `{"value":"original"}`; have != want {
+		t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+	}
+}
+
+func TestGetOrRegisterGaugeInfo(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredGaugeInfo("foo", r).Update(
+		GaugeInfoValue{"chain_id": "5"})
+	g := GetOrRegisterGaugeInfo("foo", r)
+	if have, want := g.Value().String(), `{"chain_id":"5"}`; have != want {
+		t.Errorf("have\n%v\nwant\n%v\n", have, want)
+	}
+}
+
+func TestFunctionalGaugeInfo(t *testing.T) {
+	info := GaugeInfoValue{"chain_id": "0"}
+	counter := 1
+	// A "functional" gauge invokes the method to obtain the value
+	fg := NewFunctionalGaugeInfo(func() GaugeInfoValue {
+		info["chain_id"] = strconv.Itoa(counter)
+		counter++
+		return info
+	})
+	fg.Value()
+	fg.Value()
+	if have, want := info["chain_id"], "2"; have != want {
+		t.Errorf("have %v want %v", have, want)
+	}
+}
+
+func TestGetOrRegisterFunctionalGaugeInfo(t *testing.T) {
+	r := NewRegistry()
+	NewRegisteredFunctionalGaugeInfo("foo", r, func() GaugeInfoValue {
+		return GaugeInfoValue{
+			"chain_id": "5",
+		}
+	})
+	want := `{"chain_id":"5"}`
+	have := GetOrRegisterGaugeInfo("foo", r).Value().String()
+	if have != want {
+		t.Errorf("have\n%v\nwant\n%v\n", have, want)
+	}
+}
diff --git a/metrics/graphite.go b/metrics/graphite.go
index 29f72b0c4..4e3dd3b3b 100644
--- a/metrics/graphite.go
+++ b/metrics/graphite.go
@@ -73,6 +73,8 @@ func graphite(c *GraphiteConfig) error {
 			fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
 		case GaugeFloat64:
 			fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+		case GaugeInfo:
+			fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Value().String(), now)
 		case Histogram:
 			h := metric.Snapshot()
 			ps := h.Percentiles(c.Percentiles)
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
index 5dfbbab3e..9354f1a63 100644
--- a/metrics/influxdb/influxdb.go
+++ b/metrics/influxdb/influxdb.go
@@ -32,6 +32,13 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
 			"value": metric.Snapshot().Value(),
 		}
 		return measurement, fields
+	case metrics.GaugeInfo:
+		ms := metric.Snapshot()
+		measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+		fields := map[string]interface{}{
+			"value": ms.Value().String(),
+		}
+		return measurement, fields
 	case metrics.Histogram:
 		ms := metric.Snapshot()
 		if ms.Count() <= 0 {
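Each text-based reporter renders a GaugeInfo by embedding its JSON `String()` form as the value, as the graphite and InfluxDB cases above show. A tiny sketch of the resulting line; the prefix, name and timestamp are invented:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true

	g := metrics.NewGaugeInfo()
	g.Update(metrics.GaugeInfoValue{"chain_id": "5"})

	// Shaped like the graphite case above: <prefix>.<name>.value <json> <ts>
	fmt.Printf("%s.%s.value %s %d\n", "pre", "foo", g.Value().String(), int64(978307200))
	// Output: pre.foo.value {"chain_id":"5"} 978307200
}
```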
diff --git a/metrics/influxdb/influxdb_test.go b/metrics/influxdb/influxdb_test.go new file mode 100644 index 000000000..beeb36a53 --- /dev/null +++ b/metrics/influxdb/influxdb_test.go @@ -0,0 +1,114 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package influxdb + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/metrics/internal" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" +) + +func TestMain(m *testing.M) { + metrics.Enabled = true + os.Exit(m.Run()) +} + +func TestExampleV1(t *testing.T) { + r := internal.ExampleMetrics() + var have, want string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + haveB, _ := io.ReadAll(r.Body) + have = string(haveB) + r.Body.Close() + })) + defer ts.Close() + u, _ := url.Parse(ts.URL) + rep := &reporter{ + reg: r, + url: *u, + namespace: "goth.", + } + if err := rep.makeClient(); err != nil { + t.Fatal(err) + } + if err := rep.send(978307200); err != nil { + t.Fatal(err) + } + if wantB, err := os.ReadFile("./testdata/influxdbv1.want"); err != nil { + t.Fatal(err) + } else { + want = string(wantB) + } + if have != want { + t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want) + t.Logf("have vs want:\n%v", findFirstDiffPos(have, want)) + } +} + +func TestExampleV2(t *testing.T) { + r := internal.ExampleMetrics() + var have, want string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + haveB, _ := io.ReadAll(r.Body) + have = string(haveB) + r.Body.Close() + })) + defer ts.Close() + + rep := &v2Reporter{ + reg: r, + endpoint: ts.URL, + namespace: "goth.", + } + rep.client = influxdb2.NewClient(rep.endpoint, rep.token) + defer rep.client.Close() + rep.write = rep.client.WriteAPI(rep.organization, rep.bucket) + + rep.send(978307200) + + if wantB, err := os.ReadFile("./testdata/influxdbv2.want"); err != nil { + t.Fatal(err) + } else { + want = string(wantB) + } + if have != want { + t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want) + t.Logf("have vs want:\n %v", findFirstDiffPos(have, want)) + } +} + +func findFirstDiffPos(a, b string) string { + yy := strings.Split(b, "\n") + for i, x := range strings.Split(a, "\n") { + if i >= len(yy) { + return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i) + } + if y := yy[i]; x != y { + return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y) + } + } + return "" +} diff --git a/metrics/influxdb/influxdbv1.go b/metrics/influxdb/influxdbv1.go index f65d30ef9..ac5828080 100644 --- a/metrics/influxdb/influxdbv1.go +++ b/metrics/influxdb/influxdbv1.go @@ -79,7 +79,7 @@ func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, return fmt.Errorf("unable to 
make InfluxDB client. err: %v", err) } - if err := rep.send(); err != nil { + if err := rep.send(0); err != nil { return fmt.Errorf("unable to send to InfluxDB. err: %v", err) } @@ -107,7 +107,7 @@ func (r *reporter) run() { for { select { case <-intervalTicker.C: - if err := r.send(); err != nil { + if err := r.send(0); err != nil { log.Warn("Unable to send to InfluxDB", "err", err) } case <-pingTicker.C: @@ -123,7 +123,9 @@ func (r *reporter) run() { } } -func (r *reporter) send() error { +// send sends the measurements. If provided tstamp is >0, it is used. Otherwise, +// a 'fresh' timestamp is used. +func (r *reporter) send(tstamp int64) error { bps, err := client.NewBatchPoints( client.BatchPointsConfig{ Database: r.database, @@ -132,7 +134,12 @@ func (r *reporter) send() error { return err } r.reg.Each(func(name string, i interface{}) { - now := time.Now() + var now time.Time + if tstamp <= 0 { + now = time.Now() + } else { + now = time.Unix(tstamp, 0) + } measurement, fields := readMeter(r.namespace, name, i) if fields == nil { return diff --git a/metrics/influxdb/influxdbv2.go b/metrics/influxdb/influxdbv2.go index 7984898f3..0be5137d5 100644 --- a/metrics/influxdb/influxdbv2.go +++ b/metrics/influxdb/influxdbv2.go @@ -64,7 +64,7 @@ func (r *v2Reporter) run() { for { select { case <-intervalTicker.C: - r.send() + r.send(0) case <-pingTicker.C: _, err := r.client.Health(context.Background()) if err != nil { @@ -74,9 +74,16 @@ func (r *v2Reporter) run() { } } -func (r *v2Reporter) send() { +// send sends the measurements. If provided tstamp is >0, it is used. Otherwise, +// a 'fresh' timestamp is used. +func (r *v2Reporter) send(tstamp int64) { r.reg.Each(func(name string, i interface{}) { - now := time.Now() + var now time.Time + if tstamp <= 0 { + now = time.Now() + } else { + now = time.Unix(tstamp, 0) + } measurement, fields := readMeter(r.namespace, name, i) if fields == nil { return diff --git a/metrics/influxdb/testdata/influxdbv1.want b/metrics/influxdb/testdata/influxdbv1.want new file mode 100644 index 000000000..5efffb959 --- /dev/null +++ b/metrics/influxdb/testdata/influxdbv1.want @@ -0,0 +1,9 @@ +goth.test/counter.count value=12345 978307200000000000 +goth.test/counter_float64.count value=54321.98 978307200000000000 +goth.test/gauge.gauge value=23456i 978307200000000000 +goth.test/gauge_float64.gauge value=34567.89 978307200000000000 +goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000 +goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000 +goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000 +goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12000000i,p95=120000000i,p99=120000000i 978307200000000000 +goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000 diff --git a/metrics/influxdb/testdata/influxdbv2.want b/metrics/influxdb/testdata/influxdbv2.want new file mode 100644 index 000000000..5efffb959 --- /dev/null +++ b/metrics/influxdb/testdata/influxdbv2.want @@ -0,0 +1,9 @@ +goth.test/counter.count value=12345 
978307200000000000 +goth.test/counter_float64.count value=54321.98 978307200000000000 +goth.test/gauge.gauge value=23456i 978307200000000000 +goth.test/gauge_float64.gauge value=34567.89 978307200000000000 +goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000 +goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000 +goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000 +goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12000000i,p95=120000000i,p99=120000000i 978307200000000000 +goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000 diff --git a/metrics/internal/sampledata.go b/metrics/internal/sampledata.go new file mode 100644 index 000000000..9ace06957 --- /dev/null +++ b/metrics/internal/sampledata.go @@ -0,0 +1,64 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package internal + +import ( + "time" + + "github.com/ethereum/go-ethereum/metrics" +) + +// ExampleMetrics returns an ordered registry populated with a sample of metrics. 
+func ExampleMetrics() metrics.Registry { + var registry = metrics.NewOrderedRegistry() + + metrics.NewRegisteredCounterFloat64("test/counter", registry).Inc(12345) + metrics.NewRegisteredCounterFloat64("test/counter_float64", registry).Inc(54321.98) + metrics.NewRegisteredGauge("test/gauge", registry).Update(23456) + metrics.NewRegisteredGaugeFloat64("test/gauge_float64", registry).Update(34567.89) + metrics.NewRegisteredGaugeInfo("test/gauge_info", registry).Update( + metrics.GaugeInfoValue{ + "version": "1.10.18-unstable", + "arch": "amd64", + "os": "linux", + "commit": "7caa2d8163ae3132c1c2d6978c76610caee2d949", + "protocol_versions": "64 65 66", + }) + metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3})) + registry.Register("test/meter", metrics.NewInactiveMeter()) + { + timer := metrics.NewRegisteredResettingTimer("test/resetting_timer", registry) + timer.Update(10 * time.Millisecond) + timer.Update(11 * time.Millisecond) + timer.Update(12 * time.Millisecond) + timer.Update(120 * time.Millisecond) + timer.Update(13 * time.Millisecond) + timer.Update(14 * time.Millisecond) + } + { + timer := metrics.NewRegisteredTimer("test/timer", registry) + timer.Update(20 * time.Millisecond) + timer.Update(21 * time.Millisecond) + timer.Update(22 * time.Millisecond) + timer.Update(120 * time.Millisecond) + timer.Update(23 * time.Millisecond) + timer.Update(24 * time.Millisecond) + timer.Stop() + } + registry.Register("test/empty_resetting_timer", metrics.NewResettingTimer().Snapshot()) + return registry +} diff --git a/metrics/librato/librato.go b/metrics/librato/librato.go index 3d45f4c7b..fa9859599 100644 --- a/metrics/librato/librato.go +++ b/metrics/librato/librato.go @@ -126,6 +126,10 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B measurement[Name] = name measurement[Value] = m.Value() snapshot.Gauges = append(snapshot.Gauges, measurement) + case metrics.GaugeInfo: + measurement[Name] = name + measurement[Value] = m.Value() + snapshot.Gauges = append(snapshot.Gauges, measurement) case metrics.Histogram: if m.Count() > 0 { gauges := make([]Measurement, histogramGaugeCount) diff --git a/metrics/log.go b/metrics/log.go index d1ce627a8..d71a1c3d9 100644 --- a/metrics/log.go +++ b/metrics/log.go @@ -33,6 +33,9 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { case GaugeFloat64: l.Printf("gauge %s\n", name) l.Printf(" value: %f\n", metric.Value()) + case GaugeInfo: + l.Printf("gauge %s\n", name) + l.Printf(" value: %s\n", metric.Value()) case Healthcheck: metric.Check() l.Printf("healthcheck %s\n", name) diff --git a/metrics/meter.go b/metrics/meter.go index e8564d6a5..8a89dc427 100644 --- a/metrics/meter.go +++ b/metrics/meter.go @@ -58,6 +58,16 @@ func NewMeter() Meter { return m } +// NewInactiveMeter returns a meter but does not start any goroutines. This +// method is mainly intended for testing. +func NewInactiveMeter() Meter { + if !Enabled { + return NilMeter{} + } + m := newStandardMeter() + return m +} + // NewMeterForced constructs a new StandardMeter and launches a goroutine no matter // the global switch is enabled or not. // Be sure to call Stop() once the meter is of no use to allow for garbage collection. 
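The reporter tests added in this change all follow one golden-file pattern: render the shared `internal.ExampleMetrics()` registry with the fixed timestamp 978307200 (2001-01-01T00:00:00Z, so runs are reproducible) and compare against a checked-in `.want` file. A sketch of that comparison helper; the function name and placement are hypothetical:

```go
package metrics_test // illustrative; the real tests live in their reporter packages

import (
	"os"
	"testing"
)

// compareGolden checks rendered reporter output ("have") against a
// checked-in want-file such as ./testdata/opentsb.want.
func compareGolden(t *testing.T, have, path string) {
	t.Helper()
	wantB, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	if want := string(wantB); have != want {
		t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
	}
}
```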
diff --git a/metrics/opentsdb.go b/metrics/opentsdb.go
index c9fd2e75d..4d2e20923 100644
--- a/metrics/opentsdb.go
+++ b/metrics/opentsdb.go
@@ -3,6 +3,7 @@ package metrics
 import (
 	"bufio"
 	"fmt"
+	"io"
 	"log"
 	"net"
 	"os"
@@ -57,16 +58,10 @@ func getShortHostname() string {
 	return shortHostName
 }
 
-func openTSDB(c *OpenTSDBConfig) error {
-	shortHostname := getShortHostname()
-	now := time.Now().Unix()
+// writeRegistry writes the registry's metrics in the OpenTSDB format.
+func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname string) {
 	du := float64(c.DurationUnit)
-	conn, err := net.DialTCP("tcp", nil, c.Addr)
-	if nil != err {
-		return err
-	}
-	defer conn.Close()
-	w := bufio.NewWriter(conn)
+
 	c.Registry.Each(func(name string, i interface{}) {
 		switch metric := i.(type) {
 		case Counter:
@@ -77,6 +72,8 @@ func openTSDB(c *OpenTSDBConfig) error {
 			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
 		case GaugeFloat64:
 			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeInfo:
+			fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Value().String(), shortHostname)
 		case Histogram:
 			h := metric.Snapshot()
 			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@@ -115,7 +112,17 @@ func openTSDB(c *OpenTSDBConfig) error {
 			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
 			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
 		}
-		w.Flush()
 	})
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.writeRegistry(w, time.Now().Unix(), getShortHostname())
+	w.Flush()
 	return nil
 }
diff --git a/metrics/opentsdb_test.go b/metrics/opentsdb_test.go
index c43728960..c02b98af0 100644
--- a/metrics/opentsdb_test.go
+++ b/metrics/opentsdb_test.go
@@ -2,6 +2,9 @@ package metrics
 
 import (
 	"net"
+	"os"
+	"strings"
+	"testing"
 	"time"
 )
 
@@ -19,3 +22,30 @@ func ExampleOpenTSDBWithConfig() {
 		DurationUnit: time.Millisecond,
 	})
 }
+
+func TestExampleOpenTSB(t *testing.T) {
+	r := NewOrderedRegistry()
+	NewRegisteredGaugeInfo("foo", r).Update(GaugeInfoValue{"chain_id": "5"})
+	NewRegisteredGaugeFloat64("pi", r).Update(3.14)
+	NewRegisteredCounter("months", r).Inc(12)
+	NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+	NewRegisteredMeter("elite", r).Mark(1337)
+	NewRegisteredTimer("second", r).Update(time.Second)
+	NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+	NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+
+	w := new(strings.Builder)
+	(&OpenTSDBConfig{
+		Registry:     r,
+		DurationUnit: time.Millisecond,
+		Prefix:       "pre",
+	}).writeRegistry(w, 978307200, "hal9000")
+
+	wantB, err := os.ReadFile("./testdata/opentsb.want")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if have, want := w.String(), string(wantB); have != want {
+		t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+	}
+}
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
index 2bd9bf22c..8624311c4 100644
--- a/metrics/prometheus/collector.go
+++ b/metrics/prometheus/collector.go
@@ -19,6 +19,7 @@ package prometheus
 import (
 	"bytes"
 	"fmt"
+	"sort"
 	"strconv"
 	"strings"
 
@@ -46,6 +47,34 @@ func newCollector() *collector {
 	}
 }
 
+// Add adds the metric i to the collector. This method returns an error if the
+// metric type is not supported/known.
+func (c *collector) Add(name string, i any) error { + switch m := i.(type) { + case metrics.Counter: + c.addCounter(name, m.Snapshot()) + case metrics.CounterFloat64: + c.addCounterFloat64(name, m.Snapshot()) + case metrics.Gauge: + c.addGauge(name, m.Snapshot()) + case metrics.GaugeFloat64: + c.addGaugeFloat64(name, m.Snapshot()) + case metrics.GaugeInfo: + c.addGaugeInfo(name, m.Snapshot()) + case metrics.Histogram: + c.addHistogram(name, m.Snapshot()) + case metrics.Meter: + c.addMeter(name, m.Snapshot()) + case metrics.Timer: + c.addTimer(name, m.Snapshot()) + case metrics.ResettingTimer: + c.addResettingTimer(name, m.Snapshot()) + default: + return fmt.Errorf("unknown prometheus metric type %T", i) + } + return nil +} + func (c *collector) addCounter(name string, m metrics.Counter) { c.writeGaugeCounter(name, m.Count()) } @@ -62,6 +91,10 @@ func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64) { c.writeGaugeCounter(name, m.Value()) } +func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfo) { + c.writeGaugeInfo(name, m.Value()) +} + func (c *collector) addHistogram(name string, m metrics.Histogram) { pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999} ps := m.Percentiles(pv) @@ -102,6 +135,19 @@ func (c *collector) addResettingTimer(name string, m metrics.ResettingTimer) { c.buff.WriteRune('\n') } +func (c *collector) writeGaugeInfo(name string, value metrics.GaugeInfoValue) { + name = mutateKey(name) + c.buff.WriteString(fmt.Sprintf(typeGaugeTpl, name)) + c.buff.WriteString(name) + c.buff.WriteString(" ") + var kvs []string + for k, v := range value { + kvs = append(kvs, fmt.Sprintf("%v=%q", k, v)) + } + sort.Strings(kvs) + c.buff.WriteString(fmt.Sprintf("{%v} 1\n\n", strings.Join(kvs, ", "))) +} + func (c *collector) writeGaugeCounter(name string, value interface{}) { name = mutateKey(name) c.buff.WriteString(fmt.Sprintf(typeGaugeTpl, name)) diff --git a/metrics/prometheus/collector_test.go b/metrics/prometheus/collector_test.go index ff87c8e76..3d7903d4a 100644 --- a/metrics/prometheus/collector_test.go +++ b/metrics/prometheus/collector_test.go @@ -1,11 +1,29 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ package prometheus import ( + "fmt" "os" + "strings" "testing" - "time" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/metrics/internal" ) func TestMain(m *testing.M) { @@ -14,104 +32,34 @@ func TestMain(m *testing.M) { } func TestCollector(t *testing.T) { - c := newCollector() - - counter := metrics.NewCounter() - counter.Inc(12345) - c.addCounter("test/counter", counter) - - counterfloat64 := metrics.NewCounterFloat64() - counterfloat64.Inc(54321.98) - c.addCounterFloat64("test/counter_float64", counterfloat64) - - gauge := metrics.NewGauge() - gauge.Update(23456) - c.addGauge("test/gauge", gauge) - - gaugeFloat64 := metrics.NewGaugeFloat64() - gaugeFloat64.Update(34567.89) - c.addGaugeFloat64("test/gauge_float64", gaugeFloat64) - - histogram := metrics.NewHistogram(&metrics.NilSample{}) - c.addHistogram("test/histogram", histogram) - - meter := metrics.NewMeter() - defer meter.Stop() - meter.Mark(9999999) - c.addMeter("test/meter", meter) - - timer := metrics.NewTimer() - defer timer.Stop() - timer.Update(20 * time.Millisecond) - timer.Update(21 * time.Millisecond) - timer.Update(22 * time.Millisecond) - timer.Update(120 * time.Millisecond) - timer.Update(23 * time.Millisecond) - timer.Update(24 * time.Millisecond) - c.addTimer("test/timer", timer) - - resettingTimer := metrics.NewResettingTimer() - resettingTimer.Update(10 * time.Millisecond) - resettingTimer.Update(11 * time.Millisecond) - resettingTimer.Update(12 * time.Millisecond) - resettingTimer.Update(120 * time.Millisecond) - resettingTimer.Update(13 * time.Millisecond) - resettingTimer.Update(14 * time.Millisecond) - c.addResettingTimer("test/resetting_timer", resettingTimer.Snapshot()) - - emptyResettingTimer := metrics.NewResettingTimer().Snapshot() - c.addResettingTimer("test/empty_resetting_timer", emptyResettingTimer) - - const expectedOutput = `# TYPE test_counter gauge -test_counter 12345 - -# TYPE test_counter_float64 gauge -test_counter_float64 54321.98 - -# TYPE test_gauge gauge -test_gauge 23456 - -# TYPE test_gauge_float64 gauge -test_gauge_float64 34567.89 - -# TYPE test_histogram_count counter -test_histogram_count 0 - -# TYPE test_histogram summary -test_histogram {quantile="0.5"} 0 -test_histogram {quantile="0.75"} 0 -test_histogram {quantile="0.95"} 0 -test_histogram {quantile="0.99"} 0 -test_histogram {quantile="0.999"} 0 -test_histogram {quantile="0.9999"} 0 - -# TYPE test_meter gauge -test_meter 9999999 - -# TYPE test_timer_count counter -test_timer_count 6 - -# TYPE test_timer summary -test_timer {quantile="0.5"} 2.25e+07 -test_timer {quantile="0.75"} 4.8e+07 -test_timer {quantile="0.95"} 1.2e+08 -test_timer {quantile="0.99"} 1.2e+08 -test_timer {quantile="0.999"} 1.2e+08 -test_timer {quantile="0.9999"} 1.2e+08 - -# TYPE test_resetting_timer_count counter -test_resetting_timer_count 6 - -# TYPE test_resetting_timer summary -test_resetting_timer {quantile="0.50"} 12000000 -test_resetting_timer {quantile="0.95"} 120000000 -test_resetting_timer {quantile="0.99"} 120000000 - -` - exp := c.buff.String() - if exp != expectedOutput { - t.Log("Expected Output:\n", expectedOutput) - t.Log("Actual Output:\n", exp) - t.Fatal("unexpected collector output") + var ( + c = newCollector() + want string + ) + internal.ExampleMetrics().Each(func(name string, i interface{}) { + c.Add(name, i) + }) + if wantB, err := os.ReadFile("./testdata/prometheus.want"); err != nil { + t.Fatal(err) + } else { + want = string(wantB) + } + if have := c.buff.String(); have != want { + t.Logf("have\n%v", have) 
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want)) + t.Fatalf("unexpected collector output") } } + +func findFirstDiffPos(a, b string) string { + yy := strings.Split(b, "\n") + for i, x := range strings.Split(a, "\n") { + if i >= len(yy) { + return fmt.Sprintf("a:%d: %s\nb:%d: ", i, x, i) + } + if y := yy[i]; x != y { + return fmt.Sprintf("a:%d: %s\nb:%d: %s", i, x, i, y) + } + } + return "" +} diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go index d966fa9a8..dbdeae6c7 100644 --- a/metrics/prometheus/prometheus.go +++ b/metrics/prometheus/prometheus.go @@ -41,25 +41,7 @@ func Handler(reg metrics.Registry) http.Handler { for _, name := range names { i := reg.Get(name) - - switch m := i.(type) { - case metrics.Counter: - c.addCounter(name, m.Snapshot()) - case metrics.CounterFloat64: - c.addCounterFloat64(name, m.Snapshot()) - case metrics.Gauge: - c.addGauge(name, m.Snapshot()) - case metrics.GaugeFloat64: - c.addGaugeFloat64(name, m.Snapshot()) - case metrics.Histogram: - c.addHistogram(name, m.Snapshot()) - case metrics.Meter: - c.addMeter(name, m.Snapshot()) - case metrics.Timer: - c.addTimer(name, m.Snapshot()) - case metrics.ResettingTimer: - c.addResettingTimer(name, m.Snapshot()) - default: + if err := c.Add(name, i); err != nil { log.Warn("Unknown Prometheus metric type", "type", fmt.Sprintf("%T", i)) } } diff --git a/metrics/prometheus/testdata/prometheus.want b/metrics/prometheus/testdata/prometheus.want new file mode 100644 index 000000000..f35496e61 --- /dev/null +++ b/metrics/prometheus/testdata/prometheus.want @@ -0,0 +1,48 @@ +# TYPE test_counter gauge +test_counter 12345 + +# TYPE test_counter_float64 gauge +test_counter_float64 54321.98 + +# TYPE test_gauge gauge +test_gauge 23456 + +# TYPE test_gauge_float64 gauge +test_gauge_float64 34567.89 + +# TYPE test_gauge_info gauge +test_gauge_info {arch="amd64", commit="7caa2d8163ae3132c1c2d6978c76610caee2d949", os="linux", protocol_versions="64 65 66", version="1.10.18-unstable"} 1 + +# TYPE test_histogram_count counter +test_histogram_count 3 + +# TYPE test_histogram summary +test_histogram {quantile="0.5"} 2 +test_histogram {quantile="0.75"} 3 +test_histogram {quantile="0.95"} 3 +test_histogram {quantile="0.99"} 3 +test_histogram {quantile="0.999"} 3 +test_histogram {quantile="0.9999"} 3 + +# TYPE test_meter gauge +test_meter 0 + +# TYPE test_resetting_timer_count counter +test_resetting_timer_count 6 + +# TYPE test_resetting_timer summary +test_resetting_timer {quantile="0.50"} 12000000 +test_resetting_timer {quantile="0.95"} 120000000 +test_resetting_timer {quantile="0.99"} 120000000 + +# TYPE test_timer_count counter +test_timer_count 6 + +# TYPE test_timer summary +test_timer {quantile="0.5"} 2.25e+07 +test_timer {quantile="0.75"} 4.8e+07 +test_timer {quantile="0.95"} 1.2e+08 +test_timer {quantile="0.99"} 1.2e+08 +test_timer {quantile="0.999"} 1.2e+08 +test_timer {quantile="0.9999"} 1.2e+08 + diff --git a/metrics/registry.go b/metrics/registry.go index ec6e37c54..66dbc890c 100644 --- a/metrics/registry.go +++ b/metrics/registry.go @@ -3,6 +3,7 @@ package metrics import ( "fmt" "reflect" + "sort" "strings" "sync" ) @@ -47,17 +48,39 @@ type Registry interface { Unregister(string) } +type orderedRegistry struct { + StandardRegistry +} + +// Call the given function for each registered metric. 
+func (r *orderedRegistry) Each(f func(string, interface{})) { + var names []string + reg := r.registered() + for name := range reg { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + f(name, reg[name]) + } +} + +// NewRegistry creates a new registry. +func NewRegistry() Registry { + return new(StandardRegistry) +} + +// NewOrderedRegistry creates a new ordered registry (for testing). +func NewOrderedRegistry() Registry { + return new(orderedRegistry) +} + // The standard implementation of a Registry uses sync.map // of names to metrics. type StandardRegistry struct { metrics sync.Map } -// Create a new registry. -func NewRegistry() Registry { - return &StandardRegistry{} -} - // Call the given function for each registered metric. func (r *StandardRegistry) Each(f func(string, interface{})) { for name, i := range r.registered() { @@ -191,7 +214,7 @@ func (r *StandardRegistry) Unregister(name string) { func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) { switch i.(type) { - case Counter, CounterFloat64, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer: + case Counter, CounterFloat64, Gauge, GaugeFloat64, GaugeInfo, Healthcheck, Histogram, Meter, Timer, ResettingTimer: default: return nil, false, false } diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go index e5327d3bd..8e23c8eee 100644 --- a/metrics/resetting_timer.go +++ b/metrics/resetting_timer.go @@ -2,9 +2,10 @@ package metrics import ( "math" - "sort" "sync" "time" + + "golang.org/x/exp/slices" ) // Initial slice capacity for the values stored in a ResettingTimer @@ -65,7 +66,7 @@ func (NilResettingTimer) Snapshot() ResettingTimer { } // Time is a no-op. -func (NilResettingTimer) Time(func()) {} +func (NilResettingTimer) Time(f func()) { f() } // Update is a no-op. func (NilResettingTimer) Update(time.Duration) {} @@ -186,7 +187,7 @@ func (t *ResettingTimerSnapshot) Mean() float64 { } func (t *ResettingTimerSnapshot) calc(percentiles []float64) { - sort.Sort(Int64Slice(t.values)) + slices.Sort(t.values) count := len(t.values) if count > 0 { @@ -232,10 +233,3 @@ func (t *ResettingTimerSnapshot) calc(percentiles []float64) { t.calculated = true } - -// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order. -type Int64Slice []int64 - -func (s Int64Slice) Len() int { return len(s) } -func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/metrics/sample.go b/metrics/sample.go index afcaa2118..252a878f5 100644 --- a/metrics/sample.go +++ b/metrics/sample.go @@ -3,9 +3,10 @@ package metrics import ( "math" "math/rand" - "sort" "sync" "time" + + "golang.org/x/exp/slices" ) const rescaleThreshold = time.Hour @@ -282,17 +283,17 @@ func SampleMin(values []int64) int64 { } // SamplePercentiles returns an arbitrary percentile of the slice of int64. -func SamplePercentile(values int64Slice, p float64) float64 { +func SamplePercentile(values []int64, p float64) float64 { return SamplePercentiles(values, []float64{p})[0] } // SamplePercentiles returns a slice of arbitrary percentiles of the slice of // int64. 
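The surrounding hunks retire the hand-written `sort.Interface` shims (`Int64Slice` in resetting_timer.go, `int64Slice` in sample.go) in favor of the generic `golang.org/x/exp/slices` package. A before/after sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	values := []int64{3, 1, 2}
	// Before: sort.Sort(int64Slice(values)), backed by Len/Less/Swap boilerplate.
	// After: the generic sort needs no adapter type at all.
	slices.Sort(values)
	fmt.Println(values) // [1 2 3]
}
```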
-func SamplePercentiles(values int64Slice, ps []float64) []float64 { +func SamplePercentiles(values []int64, ps []float64) []float64 { scores := make([]float64, len(ps)) size := len(values) if size > 0 { - sort.Sort(values) + slices.Sort(values) for i, p := range ps { pos := p * float64(size+1) if pos < 1.0 { @@ -633,9 +634,3 @@ func (h *expDecaySampleHeap) down(i, n int) { i = j } } - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/metrics/syslog.go b/metrics/syslog.go index f23b07e19..76c849056 100644 --- a/metrics/syslog.go +++ b/metrics/syslog.go @@ -23,6 +23,8 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) { w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) case GaugeFloat64: w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + case GaugeInfo: + w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Value())) case Healthcheck: metric.Check() w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) diff --git a/metrics/testdata/opentsb.want b/metrics/testdata/opentsb.want new file mode 100644 index 000000000..c8e40a525 --- /dev/null +++ b/metrics/testdata/opentsb.want @@ -0,0 +1,23 @@ +put pre.elite.count 978307200 0 host=hal9000 +put pre.elite.one-minute 978307200 0.00 host=hal9000 +put pre.elite.five-minute 978307200 0.00 host=hal9000 +put pre.elite.fifteen-minute 978307200 0.00 host=hal9000 +put pre.elite.mean 978307200 0.00 host=hal9000 +put pre.foo.value 978307200 {"chain_id":"5"} host=hal9000 +put pre.months.count 978307200 12 host=hal9000 +put pre.pi.value 978307200 3.140000 host=hal9000 +put pre.second.count 978307200 1 host=hal9000 +put pre.second.min 978307200 1000 host=hal9000 +put pre.second.max 978307200 1000 host=hal9000 +put pre.second.mean 978307200 1000.00 host=hal9000 +put pre.second.std-dev 978307200 0.00 host=hal9000 +put pre.second.50-percentile 978307200 1000.00 host=hal9000 +put pre.second.75-percentile 978307200 1000.00 host=hal9000 +put pre.second.95-percentile 978307200 1000.00 host=hal9000 +put pre.second.99-percentile 978307200 1000.00 host=hal9000 +put pre.second.999-percentile 978307200 1000.00 host=hal9000 +put pre.second.one-minute 978307200 0.00 host=hal9000 +put pre.second.five-minute 978307200 0.00 host=hal9000 +put pre.second.fifteen-minute 978307200 0.00 host=hal9000 +put pre.second.mean-rate 978307200 0.00 host=hal9000 +put pre.tau.count 978307200 1.570000 host=hal9000 diff --git a/metrics/timer.go b/metrics/timer.go index a63c9dfb6..2e1a9be47 100644 --- a/metrics/timer.go +++ b/metrics/timer.go @@ -123,7 +123,7 @@ func (NilTimer) Stop() {} func (NilTimer) Sum() int64 { return 0 } // Time is a no-op. -func (NilTimer) Time(func()) {} +func (NilTimer) Time(f func()) { f() } // Update is a no-op. func (NilTimer) Update(time.Duration) {} diff --git a/metrics/writer.go b/metrics/writer.go index 256fbd14c..ec2e4f8c6 100644 --- a/metrics/writer.go +++ b/metrics/writer.go @@ -3,8 +3,10 @@ package metrics import ( "fmt" "io" - "sort" + "strings" "time" + + "golang.org/x/exp/slices" ) // Write sorts writes each metric in the given registry periodically to the @@ -18,12 +20,11 @@ func Write(r Registry, d time.Duration, w io.Writer) { // WriteOnce sorts and writes metrics in the given registry to the given // io.Writer. 
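For sorting structs, the same migration swaps a `sort.Interface` slice type for a three-way comparison method passed as a method expression; the writer.go hunk just below replaces `namedMetricSlice` with `namedMetric.cmp` plus `slices.SortFunc`. A self-contained sketch of that shape, with illustrative names:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type named struct{ name string }

// cmp is a three-way comparator, mirroring namedMetric.cmp below.
func (n named) cmp(other named) int { return strings.Compare(n.name, other.name) }

func main() {
	xs := []named{{"zzz"}, {"bbb"}, {"fff"}}
	slices.SortFunc(xs, named.cmp) // the method expression has type func(named, named) int
	fmt.Println(xs)                // [{bbb} {fff} {zzz}]
}
```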
func WriteOnce(r Registry, w io.Writer) { - var namedMetrics namedMetricSlice + var namedMetrics []namedMetric r.Each(func(name string, i interface{}) { namedMetrics = append(namedMetrics, namedMetric{name, i}) }) - - sort.Sort(namedMetrics) + slices.SortFunc(namedMetrics, namedMetric.cmp) for _, namedMetric := range namedMetrics { switch metric := namedMetric.m.(type) { case Counter: @@ -38,6 +39,9 @@ func WriteOnce(r Registry, w io.Writer) { case GaugeFloat64: fmt.Fprintf(w, "gauge %s\n", namedMetric.name) fmt.Fprintf(w, " value: %f\n", metric.Value()) + case GaugeInfo: + fmt.Fprintf(w, "gauge %s\n", namedMetric.name) + fmt.Fprintf(w, " value: %s\n", metric.Value().String()) case Healthcheck: metric.Check() fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) @@ -91,13 +95,6 @@ type namedMetric struct { m interface{} } -// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. -type namedMetricSlice []namedMetric - -func (nms namedMetricSlice) Len() int { return len(nms) } - -func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } - -func (nms namedMetricSlice) Less(i, j int) bool { - return nms[i].name < nms[j].name +func (m namedMetric) cmp(other namedMetric) int { + return strings.Compare(m.name, other.name) } diff --git a/metrics/writer_test.go b/metrics/writer_test.go index 1aacc2871..8376bf897 100644 --- a/metrics/writer_test.go +++ b/metrics/writer_test.go @@ -1,19 +1,20 @@ package metrics import ( - "sort" "testing" + + "golang.org/x/exp/slices" ) func TestMetricsSorting(t *testing.T) { - var namedMetrics = namedMetricSlice{ + var namedMetrics = []namedMetric{ {name: "zzz"}, {name: "bbb"}, {name: "fff"}, {name: "ggg"}, } - sort.Sort(namedMetrics) + slices.SortFunc(namedMetrics, namedMetric.cmp) for i, name := range []string{"bbb", "fff", "ggg", "zzz"} { if namedMetrics[i].name != name { t.Fail() diff --git a/miner/miner.go b/miner/miner.go index b1d1f7c4c..b7273948f 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -131,16 +131,22 @@ func (miner *Miner) update() { shouldStart = true log.Info("Mining aborted due to sync") } + miner.worker.syncing.Store(true) + case downloader.FailedEvent: canStart = true if shouldStart { miner.worker.start() } + miner.worker.syncing.Store(false) + case downloader.DoneEvent: canStart = true if shouldStart { miner.worker.start() } + miner.worker.syncing.Store(false) + // Stop reacting to downloader events events.Unsubscribe() } @@ -196,12 +202,14 @@ func (miner *Miner) SetRecommitInterval(interval time.Duration) { miner.worker.setRecommitInterval(interval) } -// Pending returns the currently pending block and associated state. +// Pending returns the currently pending block and associated state. The returned +// values can be nil in case the pending block is not initialized func (miner *Miner) Pending() (*types.Block, *state.StateDB) { return miner.worker.pending() } -// PendingBlock returns the currently pending block. +// PendingBlock returns the currently pending block. The returned block can be +// nil in case the pending block is not initialized. // // Note, to access both the pending block and the pending state // simultaneously, please use Pending(), as the pending state can @@ -211,6 +219,7 @@ func (miner *Miner) PendingBlock() *types.Block { } // PendingBlockAndReceipts returns the currently pending block and corresponding receipts. +// The returned values can be nil in case the pending block is not initialized. 
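The miner's update loop above now records sync status in an atomic flag (`worker.syncing`) as downloader events arrive, so block building can bail out cheaply while the chain is still catching up. A hedged sketch of that gating pattern; the `worker` type here is a stand-in, not the real miner worker:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type worker struct{ syncing atomic.Bool }

func (w *worker) commitWork() {
	if w.syncing.Load() {
		fmt.Println("skip sealing: node is syncing")
		return
	}
	fmt.Println("sealing new block")
}

func main() {
	w := new(worker)
	w.syncing.Store(true) // on downloader.StartedEvent
	w.commitWork()
	w.syncing.Store(false) // on downloader.DoneEvent / FailedEvent
	w.commitWork()
}
```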
func (miner *Miner) PendingBlockAndReceipts() (*types.Block, types.Receipts) { return miner.worker.pendingBlockAndReceipts() } @@ -225,23 +234,6 @@ func (miner *Miner) SetGasCeil(ceil uint64) { miner.worker.setGasCeil(ceil) } -// EnablePreseal turns on the preseal mining feature. It's enabled by default. -// Note this function shouldn't be exposed to API, it's unnecessary for users -// (miners) to actually know the underlying detail. It's only for outside project -// which uses this library. -func (miner *Miner) EnablePreseal() { - miner.worker.enablePreseal() -} - -// DisablePreseal turns off the preseal mining feature. It's necessary for some -// fake consensus engine which can seal blocks instantaneously. -// Note this function shouldn't be exposed to API, it's unnecessary for users -// (miners) to actually know the underlying detail. It's only for outside project -// which uses this library. -func (miner *Miner) DisablePreseal() { - miner.worker.disablePreseal() -} - // SubscribePendingLogs starts delivering logs from pending transactions // to the given channel. func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { diff --git a/miner/miner_test.go b/miner/miner_test.go index 5ea6e1830..489bc46a9 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -29,10 +29,13 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) @@ -61,11 +64,16 @@ func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *stat } type testBlockChain struct { + config *params.ChainConfig statedb *state.StateDB gasLimit uint64 chainHeadFeed *event.Feed } +func (bc *testBlockChain) Config() *params.ChainConfig { + return bc.config +} + func (bc *testBlockChain) CurrentBlock() *types.Header { return &types.Header{ Number: new(big.Int), @@ -245,6 +253,34 @@ func waitForMiningState(t *testing.T, m *Miner, mining bool) { t.Fatalf("Mining() == %t, want %t", state, mining) } +func minerTestGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *core.Genesis { + config := *params.AllCliqueProtocolChanges + config.Clique = ¶ms.CliqueConfig{ + Period: period, + Epoch: config.Clique.Epoch, + } + + // Assemble and return the genesis with the precompiles and faucet pre-funded + return &core.Genesis{ + Config: &config, + ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...), + GasLimit: gasLimit, + BaseFee: big.NewInt(params.InitialBaseFee), + Difficulty: big.NewInt(1), + Alloc: map[common.Address]core.GenesisAccount{ + common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover + common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256 + common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD + common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity + common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp + common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd + common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul + common.BytesToAddress([]byte{8}): {Balance: 
big.NewInt(1)}, // ECPairing + common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b + faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}, + }, + } +} func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { // Create Ethash config config := Config{ @@ -252,8 +288,9 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { } // Create chainConfig chainDB := rawdb.NewMemoryDatabase() - genesis := core.DeveloperGenesisBlock(15, 11_500_000, common.HexToAddress("12345")) - chainConfig, _, err := core.SetupGenesisBlock(chainDB, trie.NewDatabase(chainDB), genesis) + triedb := trie.NewDatabase(chainDB, nil) + genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345")) + chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis) if err != nil { t.Fatalf("can't create new chain config: %v", err) } @@ -264,11 +301,13 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { if err != nil { t.Fatalf("can't create new chain %v", err) } - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(chainDB), nil) - blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)} + statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) + blockchain := &testBlockChain{chainConfig, statedb, 10000000, new(event.Feed)} - pool := txpool.NewTxPool(testTxPoolConfig, chainConfig, blockchain) - backend := NewMockBackend(bc, pool) + pool := legacypool.New(testTxPoolConfig, blockchain) + txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool}) + + backend := NewMockBackend(bc, txpool) // Create event Mux mux := new(event.TypeMux) // Create Miner @@ -276,7 +315,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { cleanup := func(skipMiner bool) { bc.Stop() engine.Close() - pool.Stop() + txpool.Close() if !skipMiner { miner.Close() } diff --git a/miner/ordering.go b/miner/ordering.go new file mode 100644 index 000000000..4c3055f0d --- /dev/null +++ b/miner/ordering.go @@ -0,0 +1,147 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package miner + +import ( + "container/heap" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" +) + +// txWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap +type txWithMinerFee struct { + tx *txpool.LazyTransaction + from common.Address + fees *big.Int +} + +// newTxWithMinerFee creates a wrapped transaction, calculating the effective +// miner gasTipCap if a base fee is provided. 
+// Returns error in case of a negative effective miner gasTipCap. +func newTxWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *big.Int) (*txWithMinerFee, error) { + tip := new(big.Int).Set(tx.GasTipCap) + if baseFee != nil { + if tx.GasFeeCap.Cmp(baseFee) < 0 { + return nil, types.ErrGasFeeCapTooLow + } + tip = math.BigMin(tx.GasTipCap, new(big.Int).Sub(tx.GasFeeCap, baseFee)) + } + return &txWithMinerFee{ + tx: tx, + from: from, + fees: tip, + }, nil +} + +// txByPriceAndTime implements both the sort and the heap interface, making it useful +// for all at once sorting as well as individually adding and removing elements. +type txByPriceAndTime []*txWithMinerFee + +func (s txByPriceAndTime) Len() int { return len(s) } +func (s txByPriceAndTime) Less(i, j int) bool { + // If the prices are equal, use the time the transaction was first seen for + // deterministic sorting + cmp := s[i].fees.Cmp(s[j].fees) + if cmp == 0 { + return s[i].tx.Time.Before(s[j].tx.Time) + } + return cmp > 0 +} +func (s txByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s *txByPriceAndTime) Push(x interface{}) { + *s = append(*s, x.(*txWithMinerFee)) +} + +func (s *txByPriceAndTime) Pop() interface{} { + old := *s + n := len(old) + x := old[n-1] + old[n-1] = nil + *s = old[0 : n-1] + return x +} + +// transactionsByPriceAndNonce represents a set of transactions that can return +// transactions in a profit-maximizing sorted order, while supporting removing +// entire batches of transactions for non-executable accounts. +type transactionsByPriceAndNonce struct { + txs map[common.Address][]*txpool.LazyTransaction // Per account nonce-sorted list of transactions + heads txByPriceAndTime // Next transaction for each unique account (price heap) + signer types.Signer // Signer for the set of transactions + baseFee *big.Int // Current base fee +} + +// newTransactionsByPriceAndNonce creates a transaction set that can retrieve +// price sorted transactions in a nonce-honouring way. +// +// Note, the input map is reowned so the caller should not interact any more with +// if after providing it to the constructor. +func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *transactionsByPriceAndNonce { + // Initialize a price and received time based heap with the head transactions + heads := make(txByPriceAndTime, 0, len(txs)) + for from, accTxs := range txs { + wrapped, err := newTxWithMinerFee(accTxs[0], from, baseFee) + if err != nil { + delete(txs, from) + continue + } + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + heap.Init(&heads) + + // Assemble and return the transaction set + return &transactionsByPriceAndNonce{ + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFee, + } +} + +// Peek returns the next transaction by price. +func (t *transactionsByPriceAndNonce) Peek() *txpool.LazyTransaction { + if len(t.heads) == 0 { + return nil + } + return t.heads[0].tx +} + +// Shift replaces the current best head with the next one from the same account. +func (t *transactionsByPriceAndNonce) Shift() { + acc := t.heads[0].from + if txs, ok := t.txs[acc]; ok && len(txs) > 0 { + if wrapped, err := newTxWithMinerFee(txs[0], acc, t.baseFee); err == nil { + t.heads[0], t.txs[acc] = wrapped, txs[1:] + heap.Fix(&t.heads, 0) + return + } + } + heap.Pop(&t.heads) +} + +// Pop removes the best transaction, *not* replacing it with the next one from +// the same account. 
This should be used when a transaction cannot be executed +// and hence all subsequent ones should be discarded from the same account. +func (t *transactionsByPriceAndNonce) Pop() { + heap.Pop(&t.heads) +} diff --git a/miner/ordering_test.go b/miner/ordering_test.go new file mode 100644 index 000000000..bdbdc3214 --- /dev/null +++ b/miner/ordering_test.go @@ -0,0 +1,188 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package miner + +import ( + "crypto/ecdsa" + "math/big" + "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" +) + +func TestTransactionPriceNonceSortLegacy(t *testing.T) { + testTransactionPriceNonceSort(t, nil) +} + +func TestTransactionPriceNonceSort1559(t *testing.T) { + testTransactionPriceNonceSort(t, big.NewInt(0)) + testTransactionPriceNonceSort(t, big.NewInt(5)) + testTransactionPriceNonceSort(t, big.NewInt(50)) +} + +// Tests that transactions can be correctly sorted according to their price in +// decreasing order, but at the same time with increasing nonces when issued by +// the same account. 
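The test below drives `transactionsByPriceAndNonce` from ordering.go above. For orientation, its iteration contract is: `Peek` inspects the current best transaction, `Shift` advances to the same sender's next nonce, and `Pop` discards the sender's remaining transactions. A hedged in-package sketch (the `drainTxs` helper and `tryExecute` callback are hypothetical):

```go
package miner // sketch only: the ordering types are unexported, so this lives in-package

import "github.com/ethereum/go-ethereum/core/txpool"

// drainTxs consumes the price-and-nonce heap the way a block builder would.
func drainTxs(txs *transactionsByPriceAndNonce, tryExecute func(*txpool.LazyTransaction) error) {
	for {
		ltx := txs.Peek()
		if ltx == nil {
			return // heap exhausted
		}
		if err := tryExecute(ltx); err != nil {
			txs.Pop() // sender unusable: drop all of its queued transactions
			continue
		}
		txs.Shift() // success: move on to this sender's next nonce
	}
}
```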
+func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { + // Generate a batch of accounts to start with + keys := make([]*ecdsa.PrivateKey, 25) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + } + signer := types.LatestSignerForChainID(common.Big1) + + // Generate a batch of transactions with overlapping values, but shifted nonces + groups := map[common.Address][]*txpool.LazyTransaction{} + expectedCount := 0 + for start, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + count := 25 + for i := 0; i < 25; i++ { + var tx *types.Transaction + gasFeeCap := rand.Intn(50) + if baseFee == nil { + tx = types.NewTx(&types.LegacyTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasPrice: big.NewInt(int64(gasFeeCap)), + Data: nil, + }) + } else { + tx = types.NewTx(&types.DynamicFeeTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasFeeCap: big.NewInt(int64(gasFeeCap)), + GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), + Data: nil, + }) + if count == 25 && int64(gasFeeCap) < baseFee.Int64() { + count = i + } + } + tx, err := types.SignTx(tx, signer, key) + if err != nil { + t.Fatalf("failed to sign tx: %s", err) + } + groups[addr] = append(groups[addr], &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx, + Time: tx.Time(), + GasFeeCap: tx.GasFeeCap(), + GasTipCap: tx.GasTipCap(), + }) + } + expectedCount += count + } + // Sort the transactions and cross check the nonce ordering + txset := newTransactionsByPriceAndNonce(signer, groups, baseFee) + + txs := types.Transactions{} + for tx := txset.Peek(); tx != nil; tx = txset.Peek() { + txs = append(txs, tx.Tx) + txset.Shift() + } + if len(txs) != expectedCount { + t.Errorf("expected %d transactions, found %d", expectedCount, len(txs)) + } + for i, txi := range txs { + fromi, _ := types.Sender(signer, txi) + + // Make sure the nonce order is valid + for j, txj := range txs[i+1:] { + fromj, _ := types.Sender(signer, txj) + if fromi == fromj && txi.Nonce() > txj.Nonce() { + t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) + } + } + // If the next tx has different from account, the price must be lower than the current one + if i+1 < len(txs) { + next := txs[i+1] + fromNext, _ := types.Sender(signer, next) + tip, err := txi.EffectiveGasTip(baseFee) + nextTip, nextErr := next.EffectiveGasTip(baseFee) + if err != nil || nextErr != nil { + t.Errorf("error calculating effective tip: %v, %v", err, nextErr) + } + if fromi != fromNext && tip.Cmp(nextTip) < 0 { + t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) + } + } + } +} + +// Tests that if multiple transactions have the same price, the ones seen earlier +// are prioritized to avoid network spam attacks aiming for a specific ordering. 
+func TestTransactionTimeSort(t *testing.T) { + // Generate a batch of accounts to start with + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + } + signer := types.HomesteadSigner{} + + // Generate a batch of transactions with overlapping prices, but different creation times + groups := map[common.Address][]*txpool.LazyTransaction{} + for start, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + + tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key) + tx.SetTime(time.Unix(0, int64(len(keys)-start))) + + groups[addr] = append(groups[addr], &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx, + Time: tx.Time(), + GasFeeCap: tx.GasFeeCap(), + GasTipCap: tx.GasTipCap(), + }) + } + // Sort the transactions and cross check the nonce ordering + txset := newTransactionsByPriceAndNonce(signer, groups, nil) + + txs := types.Transactions{} + for tx := txset.Peek(); tx != nil; tx = txset.Peek() { + txs = append(txs, tx.Tx) + txset.Shift() + } + if len(txs) != len(keys) { + t.Errorf("expected %d transactions, found %d", len(keys), len(txs)) + } + for i, txi := range txs { + fromi, _ := types.Sender(signer, txi) + if i+1 < len(txs) { + next := txs[i+1] + fromNext, _ := types.Sender(signer, next) + + if txi.GasPrice().Cmp(next.GasPrice()) < 0 { + t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) + } + // Make sure time order is ascending if the txs have the same gas price + if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.Time().After(next.Time()) { + t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.Time(), i+1, fromNext[:4], next.Time()) + } + } + } +} diff --git a/miner/payload_building.go b/miner/payload_building.go index f84d908e8..7d8c4368b 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -40,6 +40,7 @@ type BuildPayloadArgs struct { FeeRecipient common.Address // The provided recipient address for collecting transaction fee Random common.Hash // The provided randomness value Withdrawals types.Withdrawals // The provided withdrawals + BeaconRoot *common.Hash // The provided beaconRoot (Cancun) } // Id computes an 8-byte identifier by hashing the components of the payload arguments. @@ -51,6 +52,9 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID { hasher.Write(args.Random[:]) hasher.Write(args.FeeRecipient[:]) rlp.Encode(hasher, args.Withdrawals) + if args.BeaconRoot != nil { + hasher.Write(args.BeaconRoot[:]) + } var out engine.PayloadID copy(out[:], hasher.Sum(nil)[:8]) return out @@ -65,6 +69,7 @@ type Payload struct { id engine.PayloadID empty *types.Block full *types.Block + sidecars []*types.BlobTxSidecar fullFees *big.Int stop chan struct{} lock sync.Mutex @@ -84,7 +89,7 @@ func newPayload(empty *types.Block, id engine.PayloadID) *Payload { } // update updates the full-block with latest built version. -func (payload *Payload) update(block *types.Block, fees *big.Int, elapsed time.Duration) { +func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) { payload.lock.Lock() defer payload.lock.Unlock() @@ -96,14 +101,23 @@ func (payload *Payload) update(block *types.Block, fees *big.Int, elapsed time.D // Ensure the newly provided full block has a higher transaction fee. 
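A consequence of folding BeaconRoot into Id() in the payload_building.go hunk above: two otherwise identical payload requests now get distinct identifiers once one of them carries a Cancun beacon root. Illustrative values only:

root := common.Hash{0xaa}
a := &BuildPayloadArgs{Parent: common.Hash{1}, Timestamp: 1}
b := &BuildPayloadArgs{Parent: common.Hash{1}, Timestamp: 1, BeaconRoot: &root}
// Before this change a.Id() and b.Id() would collide; with the root
// hashed in, the two requests resolve to different payload IDs.
if a.Id() == b.Id() {
	panic("beacon root ignored in payload identity")
}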
// In post-merge stage, there is no uncle reward anymore and transaction // fee(apart from the mev revenue) is the only indicator for comparison. - if payload.full == nil || fees.Cmp(payload.fullFees) > 0 { - payload.full = block - payload.fullFees = fees + if payload.full == nil || r.fees.Cmp(payload.fullFees) > 0 { + payload.full = r.block + payload.fullFees = r.fees + payload.sidecars = r.sidecars - feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether)) - log.Info("Updated payload", "id", payload.id, "number", block.NumberU64(), "hash", block.Hash(), - "txs", len(block.Transactions()), "gas", block.GasUsed(), "fees", feesInEther, - "root", block.Root(), "elapsed", common.PrettyDuration(elapsed)) + feesInEther := new(big.Float).Quo(new(big.Float).SetInt(r.fees), big.NewFloat(params.Ether)) + log.Info("Updated payload", + "id", payload.id, + "number", r.block.NumberU64(), + "hash", r.block.Hash(), + "txs", len(r.block.Transactions()), + "withdrawals", len(r.block.Withdrawals()), + "gas", r.block.GasUsed(), + "fees", feesInEther, + "root", r.block.Root(), + "elapsed", common.PrettyDuration(elapsed), + ) } payload.cond.Broadcast() // fire signal for notifying full block } @@ -120,9 +134,9 @@ func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope { close(payload.stop) } if payload.full != nil { - return engine.BlockToExecutableData(payload.full, payload.fullFees) + return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) } - return engine.BlockToExecutableData(payload.empty, big.NewInt(0)) + return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil) } // ResolveEmpty is basically identical to Resolve, but it expects empty block only. @@ -131,11 +145,11 @@ func (payload *Payload) ResolveEmpty() *engine.ExecutionPayloadEnvelope { payload.lock.Lock() defer payload.lock.Unlock() - return engine.BlockToExecutableData(payload.empty, big.NewInt(0)) + return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil) } // ResolveFull is basically identical to Resolve, but it expects full block only. -// It's only used in tests. +// Don't call Resolve until ResolveFull returns, otherwise it might block forever. func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope { payload.lock.Lock() defer payload.lock.Unlock() @@ -146,9 +160,18 @@ func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope { return nil default: } + // Wait the full payload construction. Note it might block + // forever if Resolve is called in the meantime which + // terminates the background construction process. payload.cond.Wait() } - return engine.BlockToExecutableData(payload.full, payload.fullFees) + // Terminate the background payload construction + select { + case <-payload.stop: + default: + close(payload.stop) + } + return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars) } // buildPayload builds the payload according to the provided parameters. @@ -156,12 +179,23 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { // Build the initial version with no transaction included. It should be fast // enough to run. The empty payload can at least make sure there is something // to deliver for not missing slot. 
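A hedged usage sketch of the Resolve family as re-documented above: buildPayload returns at once with the empty block queued, a background loop keeps swapping in better full blocks, and whichever Resolve variant runs first also stops that loop. Use one variant per payload; per the warning above, calling Resolve while ResolveFull is still waiting can leave the latter blocked forever.

payload, err := w.buildPayload(args) // args is a *BuildPayloadArgs as above
if err != nil {
	return err
}
envelope := payload.ResolveFull() // waits for a non-empty block, then halts building
_ = envelope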
- empty, _, err := w.getSealingBlock(args.Parent, args.Timestamp, args.FeeRecipient, args.Random, args.Withdrawals, true) - if err != nil { - return nil, err + emptyParams := &generateParams{ + timestamp: args.Timestamp, + forceTime: true, + parentHash: args.Parent, + coinbase: args.FeeRecipient, + random: args.Random, + withdrawals: args.Withdrawals, + beaconRoot: args.BeaconRoot, + noTxs: true, } + empty := w.getSealingBlock(emptyParams) + if empty.err != nil { + return nil, empty.err + } + // Construct a payload object for return. - payload := newPayload(empty, args.Id()) + payload := newPayload(empty.block, args.Id()) // Spin up a routine for updating the payload in background. This strategy // can maximum the revenue for including transactions with highest fee. @@ -176,13 +210,24 @@ func (w *worker) buildPayload(args *BuildPayloadArgs) (*Payload, error) { // by the timestamp parameter. endTimer := time.NewTimer(time.Second * 12) + fullParams := &generateParams{ + timestamp: args.Timestamp, + forceTime: true, + parentHash: args.Parent, + coinbase: args.FeeRecipient, + random: args.Random, + withdrawals: args.Withdrawals, + beaconRoot: args.BeaconRoot, + noTxs: false, + } + for { select { case <-timer.C: start := time.Now() - block, fees, err := w.getSealingBlock(args.Parent, args.Timestamp, args.FeeRecipient, args.Random, args.Withdrawals, false) - if err == nil { - payload.update(block, fees, time.Since(start)) + r := w.getSealingBlock(fullParams) + if r.err == nil { + payload.update(r, time.Since(start)) } timer.Reset(w.recommit) case <-payload.stop: diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index b2f1d68f3..6f5736344 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -84,48 +84,48 @@ func TestBuildPayload(t *testing.T) { func TestPayloadId(t *testing.T) { ids := make(map[string]int) for i, tt := range []*BuildPayloadArgs{ - &BuildPayloadArgs{ + { Parent: common.Hash{1}, Timestamp: 1, Random: common.Hash{0x1}, FeeRecipient: common.Address{0x1}, }, // Different parent - &BuildPayloadArgs{ + { Parent: common.Hash{2}, Timestamp: 1, Random: common.Hash{0x1}, FeeRecipient: common.Address{0x1}, }, // Different timestamp - &BuildPayloadArgs{ + { Parent: common.Hash{2}, Timestamp: 2, Random: common.Hash{0x1}, FeeRecipient: common.Address{0x1}, }, // Different Random - &BuildPayloadArgs{ + { Parent: common.Hash{2}, Timestamp: 2, Random: common.Hash{0x2}, FeeRecipient: common.Address{0x1}, }, // Different fee-recipient - &BuildPayloadArgs{ + { Parent: common.Hash{2}, Timestamp: 2, Random: common.Hash{0x2}, FeeRecipient: common.Address{0x2}, }, // Different withdrawals (non-empty) - &BuildPayloadArgs{ + { Parent: common.Hash{2}, Timestamp: 2, Random: common.Hash{0x2}, FeeRecipient: common.Address{0x2}, Withdrawals: []*types.Withdrawal{ - &types.Withdrawal{ + { Index: 0, Validator: 0, Address: common.Address{}, @@ -134,13 +134,13 @@ func TestPayloadId(t *testing.T) { }, }, // Different withdrawals (non-empty) - &BuildPayloadArgs{ + { Parent: common.Hash{2}, Timestamp: 2, Random: common.Hash{0x2}, FeeRecipient: common.Address{0x2}, Withdrawals: []*types.Withdrawal{ - &types.Withdrawal{ + { Index: 2, Validator: 0, Address: common.Address{}, diff --git a/miner/stress/clique/main.go b/miner/stress/clique/main.go index 16a8687da..7b29e63df 100644 --- a/miner/stress/clique/main.go +++ b/miner/stress/clique/main.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/fdlimit" 
"github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" @@ -57,7 +57,7 @@ func main() { for i := 0; i < len(sealers); i++ { sealers[i], _ = crypto.GenerateKey() } - // Create a Clique network based off of the Rinkeby config + // Create a Clique network based off of the Sepolia config genesis := makeGenesis(faucets, sealers) // Handle interrupts. @@ -132,7 +132,7 @@ func main() { if err != nil { panic(err) } - if err := backend.TxPool().AddLocal(tx); err != nil { + if err := backend.TxPool().Add([]*types.Transaction{tx}, true, false); err != nil { panic(err) } nonces[index]++ @@ -147,8 +147,8 @@ func main() { // makeGenesis creates a custom Clique genesis block based on some pre-defined // signer and faucet accounts. func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core.Genesis { - // Create a Clique network based off of the Rinkeby config - genesis := core.DefaultRinkebyGenesisBlock() + // Create a Clique network based off of the Sepolia config + genesis := core.DefaultSepoliaGenesisBlock() genesis.GasLimit = 25000000 genesis.Config.ChainID = big.NewInt(18) @@ -206,7 +206,7 @@ func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { SyncMode: downloader.FullSync, DatabaseCache: 256, DatabaseHandles: 256, - TxPool: txpool.DefaultConfig, + TxPool: legacypool.DefaultConfig, GPO: ethconfig.Defaults.GPO, Miner: miner.Config{ GasCeil: genesis.GasLimit * 11 / 10, diff --git a/miner/unconfirmed.go b/miner/unconfirmed.go deleted file mode 100644 index 0489f1ea4..000000000 --- a/miner/unconfirmed.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "container/ring" - "sync" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" -) - -// chainRetriever is used by the unconfirmed block set to verify whether a previously -// mined block is part of the canonical chain or not. -type chainRetriever interface { - // GetHeaderByNumber retrieves the canonical header associated with a block number. - GetHeaderByNumber(number uint64) *types.Header - - // GetBlockByNumber retrieves the canonical block associated with a block number. - GetBlockByNumber(number uint64) *types.Block -} - -// unconfirmedBlock is a small collection of metadata about a locally mined block -// that is placed into a unconfirmed set for canonical chain inclusion tracking. 
-type unconfirmedBlock struct { - index uint64 - hash common.Hash -} - -// unconfirmedBlocks implements a data structure to maintain locally mined blocks -// have not yet reached enough maturity to guarantee chain inclusion. It is -// used by the miner to provide logs to the user when a previously mined block -// has a high enough guarantee to not be reorged out of the canonical chain. -type unconfirmedBlocks struct { - chain chainRetriever // Blockchain to verify canonical status through - depth uint // Depth after which to discard previous blocks - blocks *ring.Ring // Block infos to allow canonical chain cross checks - lock sync.Mutex // Protects the fields from concurrent access -} - -// newUnconfirmedBlocks returns new data structure to track currently unconfirmed blocks. -func newUnconfirmedBlocks(chain chainRetriever, depth uint) *unconfirmedBlocks { - return &unconfirmedBlocks{ - chain: chain, - depth: depth, - } -} - -// Insert adds a new block to the set of unconfirmed ones. -func (set *unconfirmedBlocks) Insert(index uint64, hash common.Hash) { - // If a new block was mined locally, shift out any old enough blocks - set.Shift(index) - - // Create the new item as its own ring - item := ring.New(1) - item.Value = &unconfirmedBlock{ - index: index, - hash: hash, - } - // Set as the initial ring or append to the end - set.lock.Lock() - defer set.lock.Unlock() - - if set.blocks == nil { - set.blocks = item - } else { - set.blocks.Move(-1).Link(item) - } - // Display a log for the user to notify of a new mined block unconfirmed - log.Info("🔨 mined potential block", "number", index, "hash", hash) -} - -// Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth -// allowance, checking them against the canonical chain for inclusion or staleness -// report. 
-func (set *unconfirmedBlocks) Shift(height uint64) {
-	set.lock.Lock()
-	defer set.lock.Unlock()
-
-	for set.blocks != nil {
-		// Retrieve the next unconfirmed block and abort if too fresh
-		next := set.blocks.Value.(*unconfirmedBlock)
-		if next.index+uint64(set.depth) > height {
-			break
-		}
-		// Block seems to exceed depth allowance, check for canonical status
-		header := set.chain.GetHeaderByNumber(next.index)
-		switch {
-		case header == nil:
-			log.Warn("Failed to retrieve header of mined block", "number", next.index, "hash", next.hash)
-		case header.Hash() == next.hash:
-			log.Info("🔗 block reached canonical chain", "number", next.index, "hash", next.hash)
-		default:
-			// Block is not canonical, check whether we have an uncle or a lost block
-			included := false
-			for number := next.index; !included && number < next.index+uint64(set.depth) && number <= height; number++ {
-				if block := set.chain.GetBlockByNumber(number); block != nil {
-					for _, uncle := range block.Uncles() {
-						if uncle.Hash() == next.hash {
-							included = true
-							break
-						}
-					}
-				}
-			}
-			if included {
-				log.Info("⑂ block became an uncle", "number", next.index, "hash", next.hash)
-			} else {
-				log.Info("😱 block lost", "number", next.index, "hash", next.hash)
-			}
-		}
-		// Drop the block out of the ring
-		if set.blocks.Value == set.blocks.Next().Value {
-			set.blocks = nil
-		} else {
-			set.blocks = set.blocks.Move(-1)
-			set.blocks.Unlink(1)
-			set.blocks = set.blocks.Move(1)
-		}
-	}
-}
diff --git a/miner/unconfirmed_test.go b/miner/unconfirmed_test.go
deleted file mode 100644
index 60958f658..000000000
--- a/miner/unconfirmed_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package miner
-
-import (
-	"testing"
-
-	"github.com/ethereum/go-ethereum/core/types"
-)
-
-// noopChainRetriever is an implementation of headerRetriever that always
-// returns nil for any requested headers.
-type noopChainRetriever struct{}
-
-func (r *noopChainRetriever) GetHeaderByNumber(number uint64) *types.Header {
-	return nil
-}
-func (r *noopChainRetriever) GetBlockByNumber(number uint64) *types.Block {
-	return nil
-}
-
-// Tests that inserting blocks into the unconfirmed set accumulates them until
-// the desired depth is reached, after which they begin to be dropped.
-func TestUnconfirmedInsertBounds(t *testing.T) { - limit := uint(10) - - pool := newUnconfirmedBlocks(new(noopChainRetriever), limit) - for depth := uint64(0); depth < 2*uint64(limit); depth++ { - // Insert multiple blocks for the same level just to stress it - for i := 0; i < int(depth); i++ { - pool.Insert(depth, [32]byte{byte(depth), byte(i)}) - } - // Validate that no blocks below the depth allowance are left in - pool.blocks.Do(func(block interface{}) { - if block := block.(*unconfirmedBlock); block.index+uint64(limit) <= depth { - t.Errorf("depth %d: block %x not dropped", depth, block.hash) - } - }) - } -} - -// Tests that shifting blocks out of the unconfirmed set works both for normal -// cases as well as for corner cases such as empty sets, empty shifts or full -// shifts. -func TestUnconfirmedShifts(t *testing.T) { - // Create a pool with a few blocks on various depths - limit, start := uint(10), uint64(25) - - pool := newUnconfirmedBlocks(new(noopChainRetriever), limit) - for depth := start; depth < start+uint64(limit); depth++ { - pool.Insert(depth, [32]byte{byte(depth)}) - } - // Try to shift below the limit and ensure no blocks are dropped - pool.Shift(start + uint64(limit) - 1) - if n := pool.blocks.Len(); n != int(limit) { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit) - } - // Try to shift half the blocks out and verify remainder - pool.Shift(start + uint64(limit) - 1 + uint64(limit/2)) - if n := pool.blocks.Len(); n != int(limit)/2 { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, limit/2) - } - // Try to shift all the remaining blocks out and verify emptiness - pool.Shift(start + 2*uint64(limit)) - if n := pool.blocks.Len(); n != 0 { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 0) - } - // Try to shift out from the empty set and make sure it doesn't break - pool.Shift(start + 3*uint64(limit)) - if n := pool.blocks.Len(); n != 0 { - t.Errorf("unconfirmed count mismatch: have %d, want %d", n, 0) - } -} diff --git a/miner/worker.go b/miner/worker.go index 936a9e74a..711149232 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -24,13 +24,15 @@ import ( "sync/atomic" "time" - mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -48,15 +50,9 @@ const ( // chainHeadChanSize is the size of channel listening to ChainHeadEvent. chainHeadChanSize = 10 - // chainSideChanSize is the size of channel listening to ChainSideEvent. - chainSideChanSize = 10 - // resubmitAdjustChanSize is the size of resubmitting interval adjustment channel. resubmitAdjustChanSize = 10 - // sealingLogAtDepth is the number of confirmations before logging successful sealing. - sealingLogAtDepth = 7 - // minRecommitInterval is the minimal time interval to recreate the sealing block with // any newly arrived transactions. 
minRecommitInterval = 1 * time.Second @@ -86,55 +82,40 @@ var ( // environment is the worker's current environment and holds all // information of the sealing block generation. type environment struct { - signer types.Signer - - state *state.StateDB // apply state changes here - ancestors mapset.Set[common.Hash] // ancestor set (used for checking uncle parent validity) - family mapset.Set[common.Hash] // family set (used for checking uncle invalidity) - tcount int // tx count in cycle - gasPool *core.GasPool // available gas used to pack transactions - coinbase common.Address + signer types.Signer + state *state.StateDB // apply state changes here + tcount int // tx count in cycle + gasPool *core.GasPool // available gas used to pack transactions + coinbase common.Address header *types.Header txs []*types.Transaction receipts []*types.Receipt - uncles map[common.Hash]*types.Header + sidecars []*types.BlobTxSidecar + blobs int } // copy creates a deep copy of environment. func (env *environment) copy() *environment { cpy := &environment{ - signer: env.signer, - state: env.state.Copy(), - ancestors: env.ancestors.Clone(), - family: env.family.Clone(), - tcount: env.tcount, - coinbase: env.coinbase, - header: types.CopyHeader(env.header), - receipts: copyReceipts(env.receipts), + signer: env.signer, + state: env.state.Copy(), + tcount: env.tcount, + coinbase: env.coinbase, + header: types.CopyHeader(env.header), + receipts: copyReceipts(env.receipts), } if env.gasPool != nil { gasPool := *env.gasPool cpy.gasPool = &gasPool } - // The content of txs and uncles are immutable, unnecessary - // to do the expensive deep copy for them. cpy.txs = make([]*types.Transaction, len(env.txs)) copy(cpy.txs, env.txs) - cpy.uncles = make(map[common.Hash]*types.Header) - for hash, uncle := range env.uncles { - cpy.uncles[hash] = uncle - } - return cpy -} -// unclelist returns the contained uncles as the list format. -func (env *environment) unclelist() []*types.Header { - var uncles []*types.Header - for _, uncle := range env.uncles { - uncles = append(uncles, uncle) - } - return uncles + cpy.sidecars = make([]*types.BlobTxSidecar, len(env.sidecars)) + copy(cpy.sidecars, env.sidecars) + + return cpy } // discard terminates the background prefetcher go-routine. It should @@ -165,15 +146,15 @@ const ( // newWorkReq represents a request for new sealing work submitting with relative interrupt notifier. type newWorkReq struct { interrupt *atomic.Int32 - noempty bool timestamp int64 } -// newPayloadResult represents a result struct corresponds to payload generation. +// newPayloadResult is the result of payload generation. type newPayloadResult struct { - err error - block *types.Block - fees *big.Int + err error + block *types.Block + fees *big.Int // total block fees + sidecars []*types.BlobTxSidecar // collected blobs of blob transactions } // getWorkReq represents a request for getting a new sealing work with provided parameters. @@ -206,8 +187,6 @@ type worker struct { txsSub event.Subscription chainHeadCh chan core.ChainHeadEvent chainHeadSub event.Subscription - chainSideCh chan core.ChainSideEvent - chainSideSub event.Subscription // Channels newWorkCh chan *newWorkReq @@ -221,10 +200,7 @@ type worker struct { wg sync.WaitGroup - current *environment // An environment for current running cycle. - localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks. - remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. 
- unconfirmed *unconfirmedBlocks // A set of locally mined blocks pending canonicalness confirmations. + current *environment // An environment for current running cycle. mu sync.RWMutex // The lock used to protect the coinbase and extra fields coinbase common.Address @@ -241,13 +217,7 @@ type worker struct { // atomic status counters running atomic.Bool // The indicator whether the consensus engine is running or not. newTxs atomic.Int32 // New arrival transaction count since last sealing work submitting. - - // noempty is the flag used to control whether the feature of pre-seal empty - // block is enabled. The default value is false(pre-seal is enabled by default). - // But in some special scenario the consensus engine will seal blocks instantaneously, - // in this case this feature will add all empty blocks into canonical chain - // non-stop and no real transaction will be included. - noempty atomic.Bool + syncing atomic.Bool // The indicator whether the node is still syncing. // newpayloadTimeout is the maximum timeout allowance for creating payload. // The default value is 2 seconds but node operator can set it to arbitrary @@ -279,15 +249,11 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus chain: eth.BlockChain(), mux: mux, isLocalBlock: isLocalBlock, - localUncles: make(map[common.Hash]*types.Block), - remoteUncles: make(map[common.Hash]*types.Block), - unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth), coinbase: config.Etherbase, extra: config.ExtraData, pendingTasks: make(map[common.Hash]*task), txsCh: make(chan core.NewTxsEvent, txChanSize), chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), - chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), newWorkCh: make(chan *newWorkReq), getWorkCh: make(chan *getWorkReq), taskCh: make(chan *task), @@ -301,7 +267,6 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) // Subscribe events for blockchain worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) - worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) // Sanitize recommit interval if the user-specified one is too short. recommit := worker.config.Recommit @@ -370,19 +335,9 @@ func (w *worker) setRecommitInterval(interval time.Duration) { } } -// disablePreseal disables pre-sealing feature -func (w *worker) disablePreseal() { - w.noempty.Store(true) -} - -// enablePreseal enables pre-sealing feature -func (w *worker) enablePreseal() { - w.noempty.Store(false) -} - -// pending returns the pending state and corresponding block. +// pending returns the pending state and corresponding block. The returned +// values can be nil in case the pending block is not initialized. func (w *worker) pending() (*types.Block, *state.StateDB) { - // return a snapshot to avoid contention on currentMu mutex w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() if w.snapshotState == nil { @@ -391,17 +346,17 @@ func (w *worker) pending() (*types.Block, *state.StateDB) { return w.snapshotBlock, w.snapshotState.Copy() } -// pendingBlock returns pending block. +// pendingBlock returns pending block. The returned block can be nil in case the +// pending block is not initialized. 
func (w *worker) pendingBlock() *types.Block { - // return a snapshot to avoid contention on currentMu mutex w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() return w.snapshotBlock } // pendingBlockAndReceipts returns pending block and corresponding receipts. +// The returned values can be nil in case the pending block is not initialized. func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { - // return a snapshot to avoid contention on currentMu mutex w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() return w.snapshotBlock, w.snapshotReceipts @@ -467,13 +422,13 @@ func (w *worker) newWorkLoop(recommit time.Duration) { <-timer.C // discard the initial tick // commit aborts in-flight transaction execution with given signal and resubmits a new one. - commit := func(noempty bool, s int32) { + commit := func(s int32) { if interrupt != nil { interrupt.Store(s) } interrupt = new(atomic.Int32) select { - case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}: + case w.newWorkCh <- &newWorkReq{interrupt: interrupt, timestamp: timestamp}: case <-w.exitCh: return } @@ -496,12 +451,12 @@ func (w *worker) newWorkLoop(recommit time.Duration) { case <-w.startCh: clearPending(w.chain.CurrentBlock().Number.Uint64()) timestamp = time.Now().Unix() - commit(false, commitInterruptNewHead) + commit(commitInterruptNewHead) case head := <-w.chainHeadCh: clearPending(head.Block.NumberU64()) timestamp = time.Now().Unix() - commit(false, commitInterruptNewHead) + commit(commitInterruptNewHead) case <-timer.C: // If sealing is running resubmit a new work cycle periodically to pull in @@ -512,7 +467,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) { timer.Reset(recommit) continue } - commit(true, commitInterruptResubmit) + commit(commitInterruptResubmit) } case interval := <-w.resubmitIntervalCh: @@ -558,64 +513,19 @@ func (w *worker) mainLoop() { defer w.wg.Done() defer w.txsSub.Unsubscribe() defer w.chainHeadSub.Unsubscribe() - defer w.chainSideSub.Unsubscribe() defer func() { if w.current != nil { w.current.discard() } }() - cleanTicker := time.NewTicker(time.Second * 10) - defer cleanTicker.Stop() - for { select { case req := <-w.newWorkCh: - w.commitWork(req.interrupt, req.noempty, req.timestamp) + w.commitWork(req.interrupt, req.timestamp) case req := <-w.getWorkCh: - block, fees, err := w.generateWork(req.params) - req.result <- &newPayloadResult{ - err: err, - block: block, - fees: fees, - } - case ev := <-w.chainSideCh: - // Short circuit for duplicate side blocks - if _, exist := w.localUncles[ev.Block.Hash()]; exist { - continue - } - if _, exist := w.remoteUncles[ev.Block.Hash()]; exist { - continue - } - // Add side block to possible uncle block set depending on the author. - if w.isLocalBlock != nil && w.isLocalBlock(ev.Block.Header()) { - w.localUncles[ev.Block.Hash()] = ev.Block - } else { - w.remoteUncles[ev.Block.Hash()] = ev.Block - } - // If our sealing block contains less than 2 uncle blocks, - // add the new uncle block if valid and regenerate a new - // sealing block for higher profit. 
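The commit closure in newWorkLoop above is one half of an interrupt protocol: the superseded round's shared atomic.Int32 is stamped with a reason code, and a fresh counter travels with the new request. The consumer half, sketched here under the assumption of a signalToErr helper mapping reason codes to errors (not shown in this diff), is polled between transactions while packing:

if interrupt != nil {
	if signal := interrupt.Load(); signal != commitInterruptNone {
		return signalToErr(signal) // assumed helper; reasons are the commitInterrupt* constants
	}
}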
- if w.isRunning() && w.current != nil && len(w.current.uncles) < 2 { - start := time.Now() - if err := w.commitUncle(w.current, ev.Block.Header()); err == nil { - w.commit(w.current.copy(), nil, true, start) - } - } - - case <-cleanTicker.C: - chainHead := w.chain.CurrentBlock() - for hash, uncle := range w.localUncles { - if uncle.NumberU64()+staleThreshold <= chainHead.Number.Uint64() { - delete(w.localUncles, hash) - } - } - for hash, uncle := range w.remoteUncles { - if uncle.NumberU64()+staleThreshold <= chainHead.Number.Uint64() { - delete(w.remoteUncles, hash) - } - } + req.result <- w.generateWork(req.params) case ev := <-w.txsCh: // Apply transactions to the pending state if we're not sealing @@ -628,12 +538,18 @@ func (w *worker) mainLoop() { if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas { continue } - txs := make(map[common.Address]types.Transactions, len(ev.Txs)) + txs := make(map[common.Address][]*txpool.LazyTransaction, len(ev.Txs)) for _, tx := range ev.Txs { acc, _ := types.Sender(w.current.signer, tx) - txs[acc] = append(txs[acc], tx) + txs[acc] = append(txs[acc], &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx.WithoutBlobTxSidecar(), + Time: tx.Time(), + GasFeeCap: tx.GasFeeCap(), + GasTipCap: tx.GasTipCap(), + }) } - txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee) + txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee) tcount := w.current.tcount w.commitTransactions(w.current, txset, nil) @@ -647,7 +563,7 @@ func (w *worker) mainLoop() { // submit sealing work here since all empty submission will be rejected // by clique. Of course the advance sealing(empty submission) is disabled. if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 { - w.commitWork(nil, true, time.Now().Unix()) + w.commitWork(nil, time.Now().Unix()) } } w.newTxs.Add(int32(len(ev.Txs))) @@ -659,8 +575,6 @@ func (w *worker) mainLoop() { return case <-w.chainHeadSub.Err(): return - case <-w.chainSideSub.Err(): - return } } } @@ -780,9 +694,6 @@ func (w *worker) resultLoop() { // Broadcast the block and announce chain insertion event w.mux.Post(core.NewMinedBlockEvent{Block: block}) - // Insert the block into the set of pending ones to resultLoop for confirmations - w.unconfirmed.Insert(block.NumberU64(), block.Hash()) - case <-w.exitCh: return } @@ -801,49 +712,16 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co // Note the passed coinbase may be different with header.Coinbase. env := &environment{ - signer: types.MakeSigner(w.chainConfig, header.Number, header.Time), - state: state, - coinbase: coinbase, - ancestors: mapset.NewSet[common.Hash](), - family: mapset.NewSet[common.Hash](), - header: header, - uncles: make(map[common.Hash]*types.Header), - } - // when 08 is processed ancestors contain 07 (quick block) - for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { - for _, uncle := range ancestor.Uncles() { - env.family.Add(uncle.Hash()) - } - env.family.Add(ancestor.Hash()) - env.ancestors.Add(ancestor.Hash()) + signer: types.MakeSigner(w.chainConfig, header.Number, header.Time), + state: state, + coinbase: coinbase, + header: header, } // Keep track of transactions which return errors so they can be removed env.tcount = 0 return env, nil } -// commitUncle adds the given block to uncle block set, returns error if failed to add. 
-func (w *worker) commitUncle(env *environment, uncle *types.Header) error { - if w.isTTDReached(env.header) { - return errors.New("ignore uncle for beacon block") - } - hash := uncle.Hash() - if _, exist := env.uncles[hash]; exist { - return errors.New("uncle not unique") - } - if env.header.ParentHash == uncle.ParentHash { - return errors.New("uncle is sibling") - } - if !env.ancestors.Contains(uncle.ParentHash) { - return errors.New("uncle's parent unknown") - } - if env.family.Contains(hash) { - return errors.New("uncle already included") - } - env.uncles[hash] = uncle - return nil -} - // updateSnapshot updates pending snapshot block, receipts and state. func (w *worker) updateSnapshot(env *environment) { w.snapshotMu.Lock() @@ -852,7 +730,7 @@ func (w *worker) updateSnapshot(env *environment) { w.snapshotBlock = types.NewBlock( env.header, env.txs, - env.unclelist(), + nil, env.receipts, trie.NewStackTrie(nil), ) @@ -861,6 +739,46 @@ func (w *worker) updateSnapshot(env *environment) { } func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) { + if tx.Type() == types.BlobTxType { + return w.commitBlobTransaction(env, tx) + } + + receipt, err := w.applyTransaction(env, tx) + if err != nil { + return nil, err + } + env.txs = append(env.txs, tx) + env.receipts = append(env.receipts, receipt) + return receipt.Logs, nil +} + +func (w *worker) commitBlobTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) { + sc := tx.BlobTxSidecar() + if sc == nil { + panic("blob transaction without blobs in miner") + } + // Checking against blob gas limit: It's kind of ugly to perform this check here, but there + // isn't really a better place right now. The blob gas limit is checked at block validation time + // and not during execution. This means core.ApplyTransaction will not return an error if the + // tx has too many blobs. So we have to explicitly check it here. + if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { + return nil, errors.New("max data blobs reached") + } + + receipt, err := w.applyTransaction(env, tx) + if err != nil { + return nil, err + } + env.txs = append(env.txs, tx.WithoutBlobTxSidecar()) + env.receipts = append(env.receipts, receipt) + env.sidecars = append(env.sidecars, sc) + env.blobs += len(sc.Blobs) + *env.header.BlobGasUsed += receipt.BlobGasUsed + return receipt.Logs, nil +} + +// applyTransaction runs the transaction. If execution fails, state and gas pool are reverted. +func (w *worker) applyTransaction(env *environment, tx *types.Transaction) (*types.Receipt, error) { var ( snap = env.state.Snapshot() gp = env.gasPool.Gas() @@ -869,15 +787,11 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* if err != nil { env.state.RevertToSnapshot(snap) env.gasPool.SetGas(gp) - return nil, err } - env.txs = append(env.txs, tx) - env.receipts = append(env.receipts, receipt) - - return receipt.Logs, nil + return receipt, err } -func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *atomic.Int32) error { +func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error { gasLimit := env.header.GasLimit if env.gasPool == nil { env.gasPool = new(core.GasPool).AddGas(gasLimit) @@ -897,10 +811,17 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP break } // Retrieve the next transaction and abort if all done. 
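The explicit bound in commitBlobTransaction above exists because blob gas is only checked at block validation time, so core.ApplyTransaction would happily overfill a block. With the Cancun constants I believe apply here (params.BlobTxBlobGasPerBlob = 131072, params.MaxBlobGasPerBlock = 786432, i.e. six blobs), the arithmetic reduces to:

// blobsFit restates the check: would the tx's blobs still fit the block?
func blobsFit(blobsInBlock, blobsInTx int) bool {
	return (blobsInBlock+blobsInTx)*131072 <= 786432
}
// blobsFit(4, 2) == true, blobsFit(5, 2) == false: the overflowing
// transaction is rejected by the miner itself.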
- tx := txs.Peek() - if tx == nil { + ltx := txs.Peek() + if ltx == nil { break } + tx := ltx.Resolve() + if tx == nil { + log.Warn("Ignoring evicted transaction") + txs.Pop() + continue + } + // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. from, _ := types.Sender(env.signer, tx) @@ -908,11 +829,11 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) - + log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) txs.Pop() continue } + // Start executing the transaction env.state.SetTxContext(tx.Hash(), env.tcount) @@ -962,7 +883,7 @@ type generateParams struct { coinbase common.Address // The fee recipient address for including transaction random common.Hash // The randomness generated by beacon chain, empty before the merge withdrawals types.Withdrawals // List of withdrawals to include in block. - noUncle bool // Flag whether the uncle block inclusion is allowed + beaconRoot *common.Hash // The beacon root (cancun field). noTxs bool // Flag whether an empty block without any transaction is expected } @@ -1009,12 +930,25 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { } // Set baseFee and GasLimit if we are on an EIP-1559 chain if w.chainConfig.IsLondon(header.Number) { - header.BaseFee = misc.CalcBaseFee(w.chainConfig, parent) + header.BaseFee = eip1559.CalcBaseFee(w.chainConfig, parent) if !w.chainConfig.IsLondon(parent.Number) { parentGasLimit := parent.GasLimit * w.chainConfig.ElasticityMultiplier() header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil) } } + // Apply EIP-4844, EIP-4788. + if w.chainConfig.IsCancun(header.Number, header.Time) { + var excessBlobGas uint64 + if w.chainConfig.IsCancun(parent.Number, parent.Time) { + excessBlobGas = eip4844.CalcExcessBlobGas(*parent.ExcessBlobGas, *parent.BlobGasUsed) + } else { + // For the first post-fork block, both parent.data_gas_used and parent.excess_data_gas are evaluated as 0 + excessBlobGas = eip4844.CalcExcessBlobGas(0, 0) + } + header.BlobGasUsed = new(uint64) + header.ExcessBlobGas = &excessBlobGas + header.ParentBeaconRoot = genParams.beaconRoot + } // Run the consensus preparation with the default or customized consensus engine. if err := w.engine.Prepare(w.chain, header); err != nil { log.Error("Failed to prepare header for sealing", "err", err) @@ -1028,23 +962,10 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { log.Error("Failed to create sealing context", "err", err) return nil, err } - // Accumulate the uncles for the sealing work only if it's allowed. 
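For reference, the excess blob gas seeded into the header in prepareWork above follows the EIP-4844 update rule, restated here as a hedged sketch assuming the Cancun target of three blobs (393216 blob gas) per block: excess accumulates whenever a block overshoots the target and is floored at zero, which is also why the first post-fork block computes it from (0, 0).

func calcExcessBlobGas(parentExcess, parentUsed uint64) uint64 {
	const target = 393216 // 3 blobs * 131072 blob gas, hedged constant
	if parentExcess+parentUsed < target {
		return 0
	}
	return parentExcess + parentUsed - target
}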
- if !genParams.noUncle { - commitUncles := func(blocks map[common.Hash]*types.Block) { - for hash, uncle := range blocks { - if len(env.uncles) == 2 { - break - } - if err := w.commitUncle(env, uncle.Header()); err != nil { - log.Trace("Possible uncle rejected", "hash", hash, "reason", err) - } else { - log.Debug("Committing new uncle to block", "hash", hash) - } - } - } - // Prefer to locally generated uncle - commitUncles(w.localUncles) - commitUncles(w.remoteUncles) + if header.ParentBeaconRoot != nil { + context := core.NewEVMBlockContext(header, w.chain, nil) + vmenv := vm.NewEVM(context, vm.TxContext{}, env.state, w.chainConfig, vm.Config{}) + core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, env.state) } return env, nil } @@ -1053,24 +974,26 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { // into the given sealing block. The transaction selection and ordering strategy can // be customized with the plugin in the future. func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) error { - // Split the pending transactions into locals and remotes - // Fill the block with all available pending transactions. pending := w.eth.TxPool().Pending(true) - localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending + + // Split the pending transactions into locals and remotes. + localTxs, remoteTxs := make(map[common.Address][]*txpool.LazyTransaction), pending for _, account := range w.eth.TxPool().Locals() { if txs := remoteTxs[account]; len(txs) > 0 { delete(remoteTxs, account) localTxs[account] = txs } } + + // Fill the block with all available pending transactions. if len(localTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) + txs := newTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee) if err := w.commitTransactions(env, txs, interrupt); err != nil { return err } } if len(remoteTxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee) + txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee) if err := w.commitTransactions(env, txs, interrupt); err != nil { return err } @@ -1079,10 +1002,10 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err } // generateWork generates a sealing block based on the given parameters. -func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, error) { +func (w *worker) generateWork(params *generateParams) *newPayloadResult { work, err := w.prepareWork(params) if err != nil { - return nil, nil, err + return &newPayloadResult{err: err} } defer work.discard() @@ -1098,16 +1021,24 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(w.newpayloadTimeout)) } } - block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts, params.withdrawals) + block, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, nil, work.receipts, params.withdrawals) if err != nil { - return nil, nil, err + return &newPayloadResult{err: err} + } + return &newPayloadResult{ + block: block, + fees: totalFees(block, work.receipts), + sidecars: work.sidecars, } - return block, totalFees(block, work.receipts), nil } // commitWork generates several new sealing tasks based on the parent block // and submit them to the sealer. 
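Some context for the ProcessBeaconBlockRoot call above, taken from EIP-4788 rather than this diff: before any user transaction executes, a system call writes the parent beacon root into a timestamp-keyed ring buffer held by the beacon roots contract. A sketch of the slot layout as I understand the EIP:

const historyBufferLength = 8191 // ring size fixed by EIP-4788

func beaconRootSlots(timestamp uint64) (timestampSlot, rootSlot uint64) {
	timestampSlot = timestamp % historyBufferLength
	rootSlot = timestampSlot + historyBufferLength
	return
}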
-func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int64) { +func (w *worker) commitWork(interrupt *atomic.Int32, timestamp int64) { + // Abort committing if node is still syncing + if w.syncing.Load() { + return + } start := time.Now() // Set the coinbase if the worker is running or it's required @@ -1126,11 +1057,6 @@ func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int if err != nil { return } - // Create an empty block based on temporary copied state for - // sealing in advance without waiting block execution finished. - if !noempty && !w.noempty.Load() { - w.commit(work.copy(), nil, false, start) - } // Fill pending transactions from the txpool into the block. err = w.fillTransactions(interrupt, work) switch { @@ -1184,7 +1110,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti // https://github.com/ethereum/go-ethereum/issues/24299 env := env.copy() // Withdrawals are set to nil here, because this is only called in PoW. - block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, env.unclelist(), env.receipts, nil) + block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, nil, env.receipts, nil) if err != nil { return err } @@ -1192,13 +1118,10 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti if !w.isTTDReached(block.Header()) { select { case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}: - w.unconfirmed.Shift(block.NumberU64() - 1) - fees := totalFees(block, env.receipts) feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether)) log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), - "uncles", len(env.uncles), "txs", env.tcount, - "gas", block.GasUsed(), "fees", feesInEther, + "txs", env.tcount, "gas", block.GasUsed(), "fees", feesInEther, "elapsed", common.PrettyDuration(time.Since(start))) case <-w.exitCh: @@ -1215,29 +1138,16 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti // getSealingBlock generates the sealing block based on the given parameters. // The generation result will be passed back via the given channel no matter // the generation itself succeeds or not. -func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash, withdrawals types.Withdrawals, noTxs bool) (*types.Block, *big.Int, error) { +func (w *worker) getSealingBlock(params *generateParams) *newPayloadResult { req := &getWorkReq{ - params: &generateParams{ - timestamp: timestamp, - forceTime: true, - parentHash: parent, - coinbase: coinbase, - random: random, - withdrawals: withdrawals, - noUncle: true, - noTxs: noTxs, - }, + params: params, result: make(chan *newPayloadResult, 1), } select { case w.getWorkCh <- req: - result := <-req.result - if result.err != nil { - return nil, nil, result.err - } - return result.block, result.fees, nil + return <-req.result case <-w.exitCh: - return nil, nil, errors.New("miner closed") + return &newPayloadResult{err: errors.New("miner closed")} } } @@ -1258,14 +1168,6 @@ func copyReceipts(receipts []*types.Receipt) []*types.Receipt { return result } -// postSideBlock fires a side chain event, only use it for testing. 
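totalFees, invoked by commit above and defined a little further down in this diff, credits the miner only with each transaction's effective tip; the base-fee portion is burned. A hedged sketch consistent with the receipts-aligned iteration its comment demands:

func totalFeesSketch(txs []*types.Transaction, receipts []*types.Receipt, baseFee *big.Int) *big.Int {
	fees := new(big.Int)
	for i, tx := range txs {
		tip, _ := tx.EffectiveGasTip(baseFee) // min(tipCap, feeCap - baseFee)
		gas := new(big.Int).SetUint64(receipts[i].GasUsed)
		fees.Add(fees, gas.Mul(gas, tip))
	}
	return fees
}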
-func (w *worker) postSideBlock(event core.ChainSideEvent) { - select { - case w.chainSideCh <- event: - case <-w.exitCh: - } -} - // totalFees computes total consumed miner fees in Wei. Block transactions and receipts have to have the same order. func totalFees(block *types.Block, receipts []*types.Receipt) *big.Int { feesWei := new(big.Int) diff --git a/miner/worker_test.go b/miner/worker_test.go index 683d019d2..9c4694c0e 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -17,8 +17,6 @@ package miner import ( - "crypto/rand" - "errors" "math/big" "sync/atomic" "testing" @@ -31,8 +29,8 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -52,7 +50,7 @@ const ( var ( // Test chain configurations - testTxPoolConfig txpool.Config + testTxPoolConfig legacypool.Config ethashChainConfig *params.ChainConfig cliqueChainConfig *params.ChainConfig @@ -75,7 +73,7 @@ var ( ) func init() { - testTxPoolConfig = txpool.DefaultConfig + testTxPoolConfig = legacypool.DefaultConfig testTxPoolConfig.Journal = "" ethashChainConfig = new(params.ChainConfig) *ethashChainConfig = *params.TestChainConfig @@ -109,11 +107,10 @@ func init() { // testWorkerBackend implements worker.Backend interfaces and wraps all information needed during the testing. type testWorkerBackend struct { - db ethdb.Database - txPool *txpool.TxPool - chain *core.BlockChain - genesis *core.Genesis - uncleBlock *types.Block + db ethdb.Database + txPool *txpool.TxPool + chain *core.BlockChain + genesis *core.Genesis } func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { @@ -136,58 +133,19 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) } - txpool := txpool.NewTxPool(testTxPoolConfig, chainConfig, chain) + pool := legacypool.New(testTxPoolConfig, chain) + txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), chain, []txpool.SubPool{pool}) - // Generate a small n-block chain and an uncle block for it - var uncle *types.Block - if n > 0 { - genDb, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(testBankAddress) - }) - if _, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("failed to insert origin chain: %v", err) - } - parent := chain.GetBlockByHash(chain.CurrentBlock().ParentHash) - blocks, _ = core.GenerateChain(chainConfig, parent, engine, genDb, 1, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(testUserAddress) - }) - uncle = blocks[0] - } else { - _, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, 1, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(testUserAddress) - }) - uncle = blocks[0] - } return &testWorkerBackend{ - db: db, - chain: chain, - txPool: txpool, - genesis: gspec, - uncleBlock: uncle, + db: db, + chain: chain, + txPool: txpool, + genesis: gspec, } } func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain } func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } -func (b *testWorkerBackend) 
StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
-	return nil, errors.New("not supported")
-}
-
-func (b *testWorkerBackend) newRandomUncle() *types.Block {
-	var parent *types.Block
-	cur := b.chain.CurrentBlock()
-	if cur.Number.Uint64() == 0 {
-		parent = b.chain.Genesis()
-	} else {
-		parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash)
-	}
-	blocks, _ := core.GenerateChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) {
-		var addr = make([]byte, common.AddressLength)
-		rand.Read(addr)
-		gen.SetCoinbase(common.BytesToAddress(addr))
-	})
-	return blocks[0]
-}

func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
	var tx *types.Transaction
@@ -202,31 +160,21 @@ func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) {
	backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
-	backend.txPool.AddLocals(pendingTxs)
+	backend.txPool.Add(pendingTxs, true, false)
	w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
	w.setEtherbase(testBankAddress)
	return w, backend
}

-func TestGenerateBlockAndImportClique(t *testing.T) {
-	testGenerateBlockAndImport(t, true)
-}
-
-func testGenerateBlockAndImport(t *testing.T, isClique bool) {
+func TestGenerateAndImportBlock(t *testing.T) {
	var (
-		engine      consensus.Engine
-		chainConfig params.ChainConfig
-		db          = rawdb.NewMemoryDatabase()
+		db     = rawdb.NewMemoryDatabase()
+		config = *params.AllCliqueProtocolChanges
	)
-	if isClique {
-		chainConfig = *params.AllCliqueProtocolChanges
-		chainConfig.Clique = &params.CliqueConfig{Period: 1, Epoch: 30000}
-		engine = clique.New(chainConfig.Clique, db)
-	} else {
-		chainConfig = *params.AllEthashProtocolChanges
-		engine = ethash.NewFaker()
-	}
-	w, b := newTestWorker(t, &chainConfig, engine, db, 0)
+	config.Clique = &params.CliqueConfig{Period: 1, Epoch: 30000}
+	engine := clique.New(config.Clique, db)
+
+	w, b := newTestWorker(t, &config, engine, db, 0)
	defer w.close()

	// This test chain imports the mined blocks.
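Stepping back, the test backend above wires the refactored pool stack together: the legacy (non-blob) pool becomes one SubPool behind the unified txpool, and all submissions flow through the combined Add, which returns one error slot per transaction. A hedged sketch of that composition outside the test harness:

legacy := legacypool.New(legacypool.DefaultConfig, chain) // chain implements the pool's BlockChain interface
pool, err := txpool.New(new(big.Int).SetUint64(legacypool.DefaultConfig.PriceLimit), chain, []txpool.SubPool{legacy})
if err != nil {
	return err
}
for _, err := range pool.Add(txs, true, false) { // local = true, sync = false
	if err != nil {
		return err // one entry per submitted transaction
	}
}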
@@ -246,10 +194,8 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) { w.start() for i := 0; i < 5; i++ { - b.txPool.AddLocal(b.newRandomTx(true)) - b.txPool.AddLocal(b.newRandomTx(false)) - w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()}) - w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()}) + b.txPool.Add([]*types.Transaction{b.newRandomTx(true)}, true, false) + b.txPool.Add([]*types.Transaction{b.newRandomTx(false)}, true, false) select { case ev := <-sub.Chan(): @@ -276,17 +222,10 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) defer w.close() - var ( - taskIndex int - taskCh = make(chan struct{}, 2) - ) - checkEqual := func(t *testing.T, task *task, index int) { - // The first empty work without any txs included - receiptLen, balance := 0, big.NewInt(0) - if index == 1 { - // The second full work with 1 tx included - receiptLen, balance = 1, big.NewInt(1000) - } + taskCh := make(chan struct{}, 2) + checkEqual := func(t *testing.T, task *task) { + // The work should contain 1 tx + receiptLen, balance := 1, big.NewInt(1000) if len(task.receipts) != receiptLen { t.Fatalf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen) } @@ -296,8 +235,7 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens } w.newTaskHook = func(task *task) { if task.block.NumberU64() == 1 { - checkEqual(t, task, taskIndex) - taskIndex += 1 + checkEqual(t, task) taskCh <- struct{}{} } } @@ -306,122 +244,9 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens time.Sleep(100 * time.Millisecond) } w.start() // Start mining! - for i := 0; i < 2; i += 1 { - select { - case <-taskCh: - case <-time.NewTimer(3 * time.Second).C: - t.Error("new task timeout") - } - } -} - -func TestStreamUncleBlock(t *testing.T) { - ethash := ethash.NewFaker() - defer ethash.Close() - - w, b := newTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1) - defer w.close() - - var taskCh = make(chan struct{}, 3) - - taskIndex := 0 - w.newTaskHook = func(task *task) { - if task.block.NumberU64() == 2 { - // The first task is an empty task, the second - // one has 1 pending tx, the third one has 1 tx - // and 1 uncle. 
- if taskIndex == 2 { - have := task.block.Header().UncleHash - want := types.CalcUncleHash([]*types.Header{b.uncleBlock.Header()}) - if have != want { - t.Errorf("uncle hash mismatch: have %s, want %s", have.Hex(), want.Hex()) - } - } - taskCh <- struct{}{} - taskIndex += 1 - } - } - w.skipSealHook = func(task *task) bool { - return true - } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - w.start() - - for i := 0; i < 2; i += 1 { - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } - } - - w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock}) - select { case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } -} - -func TestRegenerateMiningBlockEthash(t *testing.T) { - testRegenerateMiningBlock(t, ethashChainConfig, ethash.NewFaker()) -} - -func TestRegenerateMiningBlockClique(t *testing.T) { - testRegenerateMiningBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) -} - -func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { - defer engine.Close() - - w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) - defer w.close() - - var taskCh = make(chan struct{}, 3) - - taskIndex := 0 - w.newTaskHook = func(task *task) { - if task.block.NumberU64() == 1 { - // The first task is an empty task, the second - // one has 1 pending tx, the third one has 2 txs - if taskIndex == 2 { - receiptLen, balance := 2, big.NewInt(2000) - if len(task.receipts) != receiptLen { - t.Errorf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen) - } - if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 { - t.Errorf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance) - } - } - taskCh <- struct{}{} - taskIndex += 1 - } - } - w.skipSealHook = func(task *task) bool { - return true - } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - - w.start() - // Ignore the first two works - for i := 0; i < 2; i += 1 { - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: - t.Error("new task timeout") - } - } - b.txPool.AddLocals(newTxs) - time.Sleep(time.Second) - - select { - case <-taskCh: - case <-time.NewTimer(time.Second).C: + case <-time.NewTimer(3 * time.Second).C: t.Error("new task timeout") } } @@ -542,7 +367,6 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co defer w.close() w.setExtra([]byte{0x01, 0x02}) - w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock}) w.skipSealHook = func(task *task) bool { return true @@ -557,9 +381,6 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co // is even smaller than parent block's. It's OK. 
t.Logf("Invalid timestamp, want %d, get %d", timestamp, block.Time()) } - if len(block.Uncles()) != 0 { - t.Error("Unexpected uncle block") - } _, isClique := engine.(*clique.Clique) if !isClique { if len(block.Extra()) != 2 { @@ -631,32 +452,50 @@ func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine co // This API should work even when the automatic sealing is not enabled for _, c := range cases { - block, _, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random, nil, false) + r := w.getSealingBlock(&generateParams{ + parentHash: c.parent, + timestamp: timestamp, + coinbase: c.coinbase, + random: c.random, + withdrawals: nil, + beaconRoot: nil, + noTxs: false, + forceTime: true, + }) if c.expectErr { - if err == nil { + if r.err == nil { t.Error("Expect error but get nil") } } else { - if err != nil { - t.Errorf("Unexpected error %v", err) + if r.err != nil { + t.Errorf("Unexpected error %v", r.err) } - assertBlock(block, c.expectNumber, c.coinbase, c.random) + assertBlock(r.block, c.expectNumber, c.coinbase, c.random) } } // This API should work even when the automatic sealing is enabled w.start() for _, c := range cases { - block, _, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random, nil, false) + r := w.getSealingBlock(&generateParams{ + parentHash: c.parent, + timestamp: timestamp, + coinbase: c.coinbase, + random: c.random, + withdrawals: nil, + beaconRoot: nil, + noTxs: false, + forceTime: true, + }) if c.expectErr { - if err == nil { + if r.err == nil { t.Error("Expect error but get nil") } } else { - if err != nil { - t.Errorf("Unexpected error %v", err) + if r.err != nil { + t.Errorf("Unexpected error %v", r.err) } - assertBlock(block, c.expectNumber, c.coinbase, c.random) + assertBlock(r.block, c.expectNumber, c.coinbase, c.random) } } } diff --git a/node/api.go b/node/api.go index 15892a270..f81f394be 100644 --- a/node/api.go +++ b/node/api.go @@ -176,6 +176,10 @@ func (api *adminAPI) StartHTTP(host *string, port *int, cors *string, apis *stri CorsAllowedOrigins: api.node.config.HTTPCors, Vhosts: api.node.config.HTTPVirtualHosts, Modules: api.node.config.HTTPModules, + rpcEndpointConfig: rpcEndpointConfig{ + batchItemLimit: api.node.config.BatchRequestLimit, + batchResponseSizeLimit: api.node.config.BatchResponseMaxSize, + }, } if cors != nil { config.CorsAllowedOrigins = nil @@ -250,6 +254,10 @@ func (api *adminAPI) StartWS(host *string, port *int, allowedOrigins *string, ap Modules: api.node.config.WSModules, Origins: api.node.config.WSOrigins, // ExposeAll: api.node.config.WSExposeAll, + rpcEndpointConfig: rpcEndpointConfig{ + batchItemLimit: api.node.config.BatchRequestLimit, + batchResponseSizeLimit: api.node.config.BatchResponseMaxSize, + }, } if apis != nil { config.Modules = nil diff --git a/node/config.go b/node/config.go index 1765811e8..949db887e 100644 --- a/node/config.go +++ b/node/config.go @@ -19,6 +19,7 @@ package node import ( "crypto/ecdsa" "fmt" + "net" "os" "path/filepath" "runtime" @@ -197,6 +198,12 @@ type Config struct { // AllowUnprotectedTxs allows non EIP-155 protected transactions to be send over RPC. AllowUnprotectedTxs bool `toml:",omitempty"` + // BatchRequestLimit is the maximum number of requests in a batch. + BatchRequestLimit int `toml:",omitempty"` + + // BatchResponseMaxSize is the maximum number of bytes returned from a batched rpc call. + BatchResponseMaxSize int `toml:",omitempty"` + // JWTSecret is the path to the hex-encoded jwt secret. 
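The two new Config fields above are wired into the in-process, HTTP and WebSocket servers created by the node (the authenticated engine API endpoint instead gets fixed limits, defined in node/defaults.go below). A short sketch of the same wiring on a standalone server, using the new DefaultConfig values from this change; judging by the new code, a zero value leaves the corresponding limit disabled:

package main

import "github.com/ethereum/go-ethereum/rpc"

func main() {
	srv := rpc.NewServer()
	// 1000 requests per batch, 25,000,000 bytes per batch response,
	// matching the BatchRequestLimit/BatchResponseMaxSize defaults
	// introduced in this diff.
	srv.SetBatchLimits(1000, 25*1000*1000)
	defer srv.Stop()
}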
JWTSecret string `toml:",omitempty"` @@ -257,7 +264,7 @@ func (c *Config) HTTPEndpoint() string { if c.HTTPHost == "" { return "" } - return fmt.Sprintf("%s:%d", c.HTTPHost, c.HTTPPort) + return net.JoinHostPort(c.HTTPHost, fmt.Sprintf("%d", c.HTTPPort)) } // DefaultHTTPEndpoint returns the HTTP endpoint used by default. @@ -272,7 +279,7 @@ func (c *Config) WSEndpoint() string { if c.WSHost == "" { return "" } - return fmt.Sprintf("%s:%d", c.WSHost, c.WSPort) + return net.JoinHostPort(c.WSHost, fmt.Sprintf("%d", c.WSPort)) } // DefaultWSEndpoint returns the websocket endpoint used by default. diff --git a/node/defaults.go b/node/defaults.go index fcfbc934b..42d9d4cde 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -36,6 +36,13 @@ const ( DefaultAuthPort = 8551 // Default port for the authenticated apis ) +const ( + // Engine API batch limits: these are not configurable by users, and should cover the + // needs of all CLs. + engineAPIBatchItemLimit = 2000 + engineAPIBatchResponseSizeLimit = 250 * 1000 * 1000 +) + var ( DefaultAuthCors = []string{"localhost"} // Default cors domain for the authenticated apis DefaultAuthVhosts = []string{"localhost"} // Default virtual hosts for the authenticated apis @@ -46,17 +53,19 @@ var ( // DefaultConfig contains reasonable default settings. var DefaultConfig = Config{ - DataDir: DefaultDataDir(), - HTTPPort: DefaultHTTPPort, - AuthAddr: DefaultAuthHost, - AuthPort: DefaultAuthPort, - AuthVirtualHosts: DefaultAuthVhosts, - HTTPModules: []string{"net", "web3"}, - HTTPVirtualHosts: []string{"localhost"}, - HTTPTimeouts: rpc.DefaultHTTPTimeouts, - WSPort: DefaultWSPort, - WSModules: []string{"net", "web3"}, - GraphQLVirtualHosts: []string{"localhost"}, + DataDir: DefaultDataDir(), + HTTPPort: DefaultHTTPPort, + AuthAddr: DefaultAuthHost, + AuthPort: DefaultAuthPort, + AuthVirtualHosts: DefaultAuthVhosts, + HTTPModules: []string{"net", "web3"}, + HTTPVirtualHosts: []string{"localhost"}, + HTTPTimeouts: rpc.DefaultHTTPTimeouts, + WSPort: DefaultWSPort, + WSModules: []string{"net", "web3"}, + BatchRequestLimit: 1000, + BatchResponseMaxSize: 25 * 1000 * 1000, + GraphQLVirtualHosts: []string{"localhost"}, P2P: p2p.Config{ ListenAddr: ":30303", MaxPeers: 50, diff --git a/node/node.go b/node/node.go index e8494ac3b..41c9971fe 100644 --- a/node/node.go +++ b/node/node.go @@ -101,10 +101,11 @@ func New(conf *Config) (*Node, error) { if strings.HasSuffix(conf.Name, ".ipc") { return nil, errors.New(`Config.Name cannot end in ".ipc"`) } - + server := rpc.NewServer() + server.SetBatchLimits(conf.BatchRequestLimit, conf.BatchResponseMaxSize) node := &Node{ config: conf, - inprocHandler: rpc.NewServer(), + inprocHandler: server, eventmux: new(event.TypeMux), log: conf.Logger, stop: make(chan struct{}), @@ -403,6 +404,11 @@ func (n *Node) startRPC() error { openAPIs, allAPIs = n.getAPIs() ) + rpcConfig := rpcEndpointConfig{ + batchItemLimit: n.config.BatchRequestLimit, + batchResponseSizeLimit: n.config.BatchResponseMaxSize, + } + initHttp := func(server *httpServer, port int) error { if err := server.setListenAddr(n.config.HTTPHost, port); err != nil { return err @@ -412,6 +418,7 @@ func (n *Node) startRPC() error { Vhosts: n.config.HTTPVirtualHosts, Modules: n.config.HTTPModules, prefix: n.config.HTTPPathPrefix, + rpcEndpointConfig: rpcConfig, }); err != nil { return err } @@ -425,9 +432,10 @@ func (n *Node) startRPC() error { return err } if err := server.enableWS(openAPIs, wsConfig{ - Modules: n.config.WSModules, - Origins: n.config.WSOrigins, - prefix: 
n.config.WSPathPrefix, + Modules: n.config.WSModules, + Origins: n.config.WSOrigins, + prefix: n.config.WSPathPrefix, + rpcEndpointConfig: rpcConfig, }); err != nil { return err } @@ -441,26 +449,32 @@ func (n *Node) startRPC() error { if err := server.setListenAddr(n.config.AuthAddr, port); err != nil { return err } + sharedConfig := rpcEndpointConfig{ + jwtSecret: secret, + batchItemLimit: engineAPIBatchItemLimit, + batchResponseSizeLimit: engineAPIBatchResponseSizeLimit, + } if err := server.enableRPC(allAPIs, httpConfig{ CorsAllowedOrigins: DefaultAuthCors, Vhosts: n.config.AuthVirtualHosts, Modules: DefaultAuthModules, prefix: DefaultAuthPrefix, - jwtSecret: secret, + rpcEndpointConfig: sharedConfig, }); err != nil { return err } servers = append(servers, server) + // Enable auth via WS server = n.wsServerForPort(port, true) if err := server.setListenAddr(n.config.AuthAddr, port); err != nil { return err } if err := server.enableWS(allAPIs, wsConfig{ - Modules: DefaultAuthModules, - Origins: DefaultAuthOrigins, - prefix: DefaultAuthPrefix, - jwtSecret: secret, + Modules: DefaultAuthModules, + Origins: DefaultAuthOrigins, + prefix: DefaultAuthPrefix, + rpcEndpointConfig: sharedConfig, }); err != nil { return err } @@ -605,8 +619,8 @@ func (n *Node) RegisterHandler(name, path string, handler http.Handler) { } // Attach creates an RPC client attached to an in-process API handler. -func (n *Node) Attach() (*rpc.Client, error) { - return rpc.DialInProc(n.inprocHandler), nil +func (n *Node) Attach() *rpc.Client { + return rpc.DialInProc(n.inprocHandler) } // RPCHandler returns the in-process RPC request handler. diff --git a/node/rpcstack.go b/node/rpcstack.go index 97d591642..b33c23805 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -41,15 +41,21 @@ type httpConfig struct { CorsAllowedOrigins []string Vhosts []string prefix string // path prefix on which to mount http handler - jwtSecret []byte // optional JWT secret + rpcEndpointConfig } // wsConfig is the JSON-RPC/Websocket configuration type wsConfig struct { - Origins []string - Modules []string - prefix string // path prefix on which to mount ws handler - jwtSecret []byte // optional JWT secret + Origins []string + Modules []string + prefix string // path prefix on which to mount ws handler + rpcEndpointConfig +} + +type rpcEndpointConfig struct { + jwtSecret []byte // optional JWT secret + batchItemLimit int + batchResponseSizeLimit int } type rpcHandler struct { @@ -106,7 +112,7 @@ func (h *httpServer) setListenAddr(host string, port int) error { } h.host, h.port = host, port - h.endpoint = fmt.Sprintf("%s:%d", host, port) + h.endpoint = net.JoinHostPort(host, fmt.Sprintf("%d", port)) return nil } @@ -297,6 +303,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error { // Create RPC server and handler. srv := rpc.NewServer() + srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit) if err := RegisterApis(apis, config.Modules, srv); err != nil { return err } @@ -328,6 +335,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error { } // Create RPC server and handler. 
srv := rpc.NewServer() + srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit) if err := RegisterApis(apis, config.Modules, srv); err != nil { return err } diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index 0790dddec..e41cc51ad 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -339,8 +339,10 @@ func TestJWT(t *testing.T) { ss, _ := jwt.NewWithClaims(method, testClaim(input)).SignedString(secret) return ss } - srv := createAndStartServer(t, &httpConfig{jwtSecret: []byte("secret")}, - true, &wsConfig{Origins: []string{"*"}, jwtSecret: []byte("secret")}, nil) + cfg := rpcEndpointConfig{jwtSecret: []byte("secret")} + httpcfg := &httpConfig{rpcEndpointConfig: cfg} + wscfg := &wsConfig{Origins: []string{"*"}, rpcEndpointConfig: cfg} + srv := createAndStartServer(t, httpcfg, true, wscfg, nil) wsUrl := fmt.Sprintf("ws://%v", srv.listenAddr()) htUrl := fmt.Sprintf("http://%v", srv.listenAddr()) diff --git a/p2p/dial.go b/p2p/dial.go index 134e6e2ea..5e4ab1d50 100644 --- a/p2p/dial.go +++ b/p2p/dial.go @@ -521,13 +521,14 @@ func (t *dialTask) resolve(d *dialScheduler) bool { // dial performs the actual connection attempt. func (t *dialTask) dial(d *dialScheduler, dest *enode.Node) error { + dialMeter.Mark(1) fd, err := d.dialer.Dial(d.ctx, t.dest) if err != nil { d.log.Trace("Dial error", "id", t.dest.ID(), "addr", nodeAddr(t.dest), "conn", t.flags, "err", cleanupDialErr(err)) + dialConnectionError.Mark(1) return &dialError{err} } - mfd := newMeteredConn(fd, false, &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()}) - return d.setupFunc(mfd, t.flags, dest) + return d.setupFunc(newMeteredConn(fd), t.flags, dest) } func (t *dialTask) String() string { diff --git a/p2p/discover/common.go b/p2p/discover/common.go index c36e8dcc3..c9f0477de 100644 --- a/p2p/discover/common.go +++ b/p2p/discover/common.go @@ -19,6 +19,7 @@ package discover import ( "crypto/ecdsa" "net" + "time" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/log" @@ -35,29 +36,39 @@ type UDPConn interface { LocalAddr() net.Addr } -type V5Config struct { - ProtocolID *[6]byte -} - // Config holds settings for the discovery listener. type Config struct { // These settings are required and configure the UDP listener: PrivateKey *ecdsa.PrivateKey - // These settings are optional: + // All remaining settings are optional. + + // Packet handling configuration: NetRestrict *netutil.Netlist // list of allowed IP networks - Bootnodes []*enode.Node // list of bootstrap nodes Unhandled chan<- ReadPacket // unhandled packets are sent on this channel - Log log.Logger // if set, log messages go here - // V5ProtocolID configures the discv5 protocol identifier. + // Node table configuration: + Bootnodes []*enode.Node // list of bootstrap nodes + PingInterval time.Duration // speed of node liveness check + RefreshInterval time.Duration // used in bucket refresh + + // The options below are useful in very specific cases, like in unit tests. 
V5ProtocolID *[6]byte - + Log log.Logger // if set, log messages go here ValidSchemes enr.IdentityScheme // allowed identity schemes Clock mclock.Clock } func (cfg Config) withDefaults() Config { + // Node table configuration: + if cfg.PingInterval == 0 { + cfg.PingInterval = 10 * time.Second + } + if cfg.RefreshInterval == 0 { + cfg.RefreshInterval = 30 * time.Minute + } + + // Debug/test settings: if cfg.Log == nil { cfg.Log = log.Root() } diff --git a/p2p/discover/metrics.go b/p2p/discover/metrics.go index bf1a2fa2b..da8e9cb81 100644 --- a/p2p/discover/metrics.go +++ b/p2p/discover/metrics.go @@ -17,6 +17,7 @@ package discover import ( + "fmt" "net" "github.com/ethereum/go-ethereum/metrics" @@ -32,10 +33,17 @@ const ( ) var ( + bucketsCounter []metrics.Counter ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil) egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil) ) +func init() { + for i := 0; i < nBuckets; i++ { + bucketsCounter = append(bucketsCounter, metrics.NewRegisteredCounter(fmt.Sprintf("%s/bucket/%d/count", moduleName, i), nil)) + } +} + // meteredConn is a wrapper around a net.UDPConn that meters both the // inbound and outbound network traffic. type meteredUdpConn struct { diff --git a/p2p/discover/ntp.go b/p2p/discover/ntp.go index 48ceffe95..3f9157808 100644 --- a/p2p/discover/ntp.go +++ b/p2p/discover/ntp.go @@ -22,10 +22,10 @@ package discover import ( "fmt" "net" - "sort" "time" "github.com/ethereum/go-ethereum/log" + "golang.org/x/exp/slices" ) const ( @@ -33,14 +33,6 @@ const ( ntpChecks = 3 // Number of measurements to do against the NTP server ) -// durationSlice attaches the methods of sort.Interface to []time.Duration, -// sorting in increasing order. -type durationSlice []time.Duration - -func (s durationSlice) Len() int { return len(s) } -func (s durationSlice) Less(i, j int) bool { return s[i] < s[j] } -func (s durationSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // checkClockDrift queries an NTP server for clock drifts and warns the user if // one large enough is detected. func checkClockDrift() { @@ -109,7 +101,7 @@ func sntpDrift(measurements int) (time.Duration, error) { drifts = append(drifts, sent.Sub(t)+elapsed/2) } // Calculate average drift (drop two extremities to avoid outliers) - sort.Sort(durationSlice(drifts)) + slices.Sort(drifts) drift := time.Duration(0) for i := 1; i < len(drifts)-1; i++ { diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 41d5ac6e3..f476d2079 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/netutil" ) @@ -53,12 +54,10 @@ const ( bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24 tableIPLimit, tableSubnet = 10, 24 - refreshInterval = 30 * time.Minute - revalidateInterval = 10 * time.Second - copyNodesInterval = 30 * time.Second - seedMinTableTime = 5 * time.Minute - seedCount = 30 - seedMaxAge = 5 * 24 * time.Hour + copyNodesInterval = 30 * time.Second + seedMinTableTime = 5 * time.Minute + seedCount = 30 + seedMaxAge = 5 * 24 * time.Hour ) // Table is the 'node table', a Kademlia-like index of neighbor nodes. 
The table keeps @@ -71,15 +70,19 @@ type Table struct { rand *mrand.Rand // source of randomness, periodically reseeded ips netutil.DistinctNetSet - log log.Logger - db *enode.DB // database of known nodes - net transport + db *enode.DB // database of known nodes + net transport + cfg Config + log log.Logger + + // loop channels refreshReq chan chan struct{} initDone chan struct{} closeReq chan struct{} closed chan struct{} - nodeAddedHook func(*node) // for testing + nodeAddedHook func(*bucket, *node) + nodeRemovedHook func(*bucket, *node) } // transport is implemented by the UDP transports. @@ -97,26 +100,30 @@ type bucket struct { entries []*node // live entries, sorted by time of last contact replacements []*node // recently seen nodes to be used if revalidation fails ips netutil.DistinctNetSet + index int } -func newTable(t transport, db *enode.DB, bootnodes []*enode.Node, log log.Logger) (*Table, error) { +func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) { + cfg = cfg.withDefaults() tab := &Table{ net: t, db: db, + cfg: cfg, + log: cfg.Log, refreshReq: make(chan chan struct{}), initDone: make(chan struct{}), closeReq: make(chan struct{}), closed: make(chan struct{}), rand: mrand.New(mrand.NewSource(0)), ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, - log: log, } - if err := tab.setFallbackNodes(bootnodes); err != nil { + if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil { return nil, err } for i := range tab.buckets { tab.buckets[i] = &bucket{ - ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}, + index: i, + ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}, } } tab.seedRand() @@ -125,6 +132,40 @@ func newTable(t transport, db *enode.DB, bootnodes []*enode.Node, log log.Logger return tab, nil } +func newMeteredTable(t transport, db *enode.DB, cfg Config) (*Table, error) { + tab, err := newTable(t, db, cfg) + if err != nil { + return nil, err + } + if metrics.Enabled { + tab.nodeAddedHook = func(b *bucket, n *node) { + bucketsCounter[b.index].Inc(1) + } + tab.nodeRemovedHook = func(b *bucket, n *node) { + bucketsCounter[b.index].Dec(1) + } + } + return tab, nil +} + +// Nodes returns all nodes contained in the table. +func (tab *Table) Nodes() []*enode.Node { + if !tab.isInitDone() { + return nil + } + + tab.mutex.Lock() + defer tab.mutex.Unlock() + + var nodes []*enode.Node + for _, b := range &tab.buckets { + for _, n := range b.entries { + nodes = append(nodes, unwrapNode(n)) + } + } + return nodes +} + func (tab *Table) self() *enode.Node { return tab.net.Self() } @@ -138,29 +179,6 @@ func (tab *Table) seedRand() { tab.mutex.Unlock() } -// ReadRandomNodes fills the given slice with random nodes from the table. The results -// are guaranteed to be unique for a single invocation, no node will appear twice. -func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) { - if !tab.isInitDone() { - return 0 - } - tab.mutex.Lock() - defer tab.mutex.Unlock() - - var nodes []*enode.Node - for _, b := range &tab.buckets { - for _, n := range b.entries { - nodes = append(nodes, unwrapNode(n)) - } - } - // Shuffle. - for i := 0; i < len(nodes); i++ { - j := tab.rand.Intn(len(nodes)) - nodes[i], nodes[j] = nodes[j], nodes[i] - } - return copy(buf, nodes) -} - // getNode returns the node with the given ID or nil if it isn't in the table. 
func (tab *Table) getNode(id enode.ID) *enode.Node { tab.mutex.Lock() @@ -185,12 +203,18 @@ func (tab *Table) close() { // are used to connect to the network if the table is empty and there // are no known nodes in the database. func (tab *Table) setFallbackNodes(nodes []*enode.Node) error { + nursery := make([]*node, 0, len(nodes)) for _, n := range nodes { if err := n.ValidateComplete(); err != nil { return fmt.Errorf("bad bootstrap node %q: %v", n, err) } + if tab.cfg.NetRestrict != nil && !tab.cfg.NetRestrict.Contains(n.IP()) { + tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IP()) + continue + } + nursery = append(nursery, wrapNode(n)) } - tab.nursery = wrapNodes(nodes) + tab.nursery = nursery return nil } @@ -218,7 +242,7 @@ func (tab *Table) refresh() <-chan struct{} { func (tab *Table) loop() { var ( revalidate = time.NewTimer(tab.nextRevalidateTime()) - refresh = time.NewTicker(refreshInterval) + refresh = time.NewTimer(tab.nextRefreshTime()) copyNodes = time.NewTicker(copyNodesInterval) refreshDone = make(chan struct{}) // where doRefresh reports completion revalidateDone chan struct{} // where doRevalidate reports completion @@ -251,6 +275,7 @@ loop: close(ch) } waiting, refreshDone = nil, nil + refresh.Reset(tab.nextRefreshTime()) case <-revalidate.C: revalidateDone = make(chan struct{}) go tab.doRevalidate(revalidateDone) @@ -373,7 +398,15 @@ func (tab *Table) nextRevalidateTime() time.Duration { tab.mutex.Lock() defer tab.mutex.Unlock() - return time.Duration(tab.rand.Int63n(int64(revalidateInterval))) + return time.Duration(tab.rand.Int63n(int64(tab.cfg.PingInterval))) +} + +func (tab *Table) nextRefreshTime() time.Duration { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + half := tab.cfg.RefreshInterval / 2 + return half + time.Duration(tab.rand.Int63n(int64(half))) } // copyLiveNodes adds nodes from the table to the database if they have been in the table @@ -481,12 +514,14 @@ func (tab *Table) addSeenNode(n *node) { // Can't add: IP limit reached. return } + // Add to end of bucket: b.entries = append(b.entries, n) b.replacements = deleteNode(b.replacements, n) n.addedAt = time.Now() + if tab.nodeAddedHook != nil { - tab.nodeAddedHook(n) + tab.nodeAddedHook(b, n) } } @@ -523,12 +558,14 @@ func (tab *Table) addVerifiedNode(n *node) { // Can't add: IP limit reached. return } + // Add to front of bucket. b.entries, _ = pushNode(b.entries, n, bucketSize) b.replacements = deleteNode(b.replacements, n) n.addedAt = time.Now() + if tab.nodeAddedHook != nil { - tab.nodeAddedHook(n) + tab.nodeAddedHook(b, n) } } @@ -627,8 +664,16 @@ func (tab *Table) bumpInBucket(b *bucket, n *node) bool { } func (tab *Table) deleteInBucket(b *bucket, n *node) { + // Check if the node is actually in the bucket so the removed hook + // isn't called multiple times for the same node. 
+ if !contains(b.entries, n.ID()) { + return + } b.entries = deleteNode(b.entries, n) tab.removeIP(b, n.IP()) + if tab.nodeRemovedHook != nil { + tab.nodeRemovedHook(b, n) + } } func contains(ns []*node, id enode.ID) bool { diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index 1ef63fe01..2781dd422 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -247,41 +247,6 @@ func TestTable_findnodeByID(t *testing.T) { } } -func TestTable_ReadRandomNodesGetAll(t *testing.T) { - cfg := &quick.Config{ - MaxCount: 200, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - Values: func(args []reflect.Value, rand *rand.Rand) { - args[0] = reflect.ValueOf(make([]*enode.Node, rand.Intn(1000))) - }, - } - test := func(buf []*enode.Node) bool { - transport := newPingRecorder() - tab, db := newTestTable(transport) - defer db.Close() - defer tab.close() - <-tab.initDone - - for i := 0; i < len(buf); i++ { - ld := cfg.Rand.Intn(len(tab.buckets)) - fillTable(tab, []*node{nodeAtDistance(tab.self().ID(), ld, intIP(ld))}) - } - gotN := tab.ReadRandomNodes(buf) - if gotN != tab.len() { - t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.len()) - return false - } - if hasDuplicates(wrapNodes(buf[:gotN])) { - t.Errorf("result contains duplicates") - return false - } - return true - } - if err := quick.Check(test, cfg); err != nil { - t.Error(err) - } -} - type closeTest struct { Self enode.ID Target enode.ID diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go index 527afbb77..8f3813bdc 100644 --- a/p2p/discover/table_util_test.go +++ b/p2p/discover/table_util_test.go @@ -24,13 +24,12 @@ import ( "fmt" "math/rand" "net" - "sort" "sync" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" + "golang.org/x/exp/slices" ) var nullNode *enode.Node @@ -42,8 +41,9 @@ func init() { } func newTestTable(t transport) (*Table, *enode.DB) { + cfg := Config{} db, _ := enode.OpenDB("") - tab, _ := newTable(t, db, nil, log.Root()) + tab, _ := newTable(t, db, cfg) go tab.loop() return tab, db } @@ -217,14 +217,14 @@ func nodeEqual(n1 *enode.Node, n2 *enode.Node) bool { } func sortByID(nodes []*enode.Node) { - sort.Slice(nodes, func(i, j int) bool { - return string(nodes[i].ID().Bytes()) < string(nodes[j].ID().Bytes()) + slices.SortFunc(nodes, func(a, b *enode.Node) int { + return bytes.Compare(a.ID().Bytes(), b.ID().Bytes()) }) } func sortedByDistanceTo(distbase enode.ID, slice []*node) bool { - return sort.SliceIsSorted(slice, func(i, j int) bool { - return enode.DistCmp(distbase, slice[i].ID(), slice[j].ID()) < 0 + return slices.IsSortedFunc(slice, func(a, b *node) int { + return enode.DistCmp(distbase, a.ID(), b.ID()) }) } diff --git a/p2p/discover/v4_lookup_test.go b/p2p/discover/v4_lookup_test.go index a00de9ca1..1f9ad69d0 100644 --- a/p2p/discover/v4_lookup_test.go +++ b/p2p/discover/v4_lookup_test.go @@ -20,13 +20,13 @@ import ( "crypto/ecdsa" "fmt" "net" - "sort" "testing" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/discover/v4wire" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" + "golang.org/x/exp/slices" ) func TestUDPv4_Lookup(t *testing.T) { @@ -302,8 +302,8 @@ func (tn *preminedTestnet) closest(n int) (nodes []*enode.Node) { nodes = append(nodes, tn.node(d, i)) } } - sort.Slice(nodes, func(i, j int) bool { - return enode.DistCmp(tn.target.id(), 
nodes[i].ID(), nodes[j].ID()) < 0 + slices.SortFunc(nodes, func(a, b *enode.Node) int { + return enode.DistCmp(tn.target.id(), a.ID(), b.ID()) }) return nodes[:n] } diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 21fee9c79..988f16b01 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -142,7 +142,7 @@ func ListenV4(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { log: cfg.Log, } - tab, err := newTable(t, ln.Database(), cfg.Bootnodes, t.log) + tab, err := newMeteredTable(t, ln.Database(), cfg) if err != nil { return nil, err } @@ -643,13 +643,13 @@ type packetHandlerV4 struct { func (t *UDPv4) verifyPing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Ping) + if v4wire.Expired(req.Expiration) { + return errExpired + } senderKey, err := v4wire.DecodePubkey(crypto.S256(), fromKey) if err != nil { return err } - if v4wire.Expired(req.Expiration) { - return errExpired - } h.senderKey = senderKey return nil } diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 21f0d7517..5add9cefa 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -394,7 +394,7 @@ func TestUDPv4_pingMatchIP(t *testing.T) { func TestUDPv4_successfulPing(t *testing.T) { test := newUDPTest(t) added := make(chan *node, 1) - test.table.nodeAddedHook = func(n *node) { added <- n } + test.table.nodeAddedHook = func(b *bucket, n *node) { added <- n } defer test.close() // The remote side sends a ping packet to initiate the exchange. diff --git a/p2p/discover/v4wire/v4wire.go b/p2p/discover/v4wire/v4wire.go index 3935068cd..9c59359fb 100644 --- a/p2p/discover/v4wire/v4wire.go +++ b/p2p/discover/v4wire/v4wire.go @@ -238,6 +238,8 @@ func Decode(input []byte) (Packet, Pubkey, []byte, error) { default: return nil, fromKey, hash, fmt.Errorf("unknown type: %d", ptype) } + // Here we use NewStream to allow for additional data after the first + // RLP object (forward-compatibility). s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0) err = s.Decode(req) return req, fromKey, hash, err diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index 6a86e72a5..6ba7a9061 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -174,7 +174,7 @@ func newUDPv5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { cancelCloseCtx: cancelCloseCtx, } t.talk = newTalkSystem(t) - tab, err := newTable(t, t.db, cfg.Bootnodes, cfg.Log) + tab, err := newMeteredTable(t, t.db, cfg) if err != nil { return nil, err } diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index 392a95b61..880b71a99 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -24,7 +24,6 @@ import ( "math/rand" "net" "reflect" - "sort" "testing" "time" @@ -35,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" ) // Real sockets, real crypto: this test checks end-to-end connectivity for UDPv5. @@ -61,8 +61,8 @@ func TestUDPv5_lookupE2E(t *testing.T) { for i := range nodes { expectedResult[i] = nodes[i].Self() } - sort.Slice(expectedResult, func(i, j int) bool { - return enode.DistCmp(target.ID(), expectedResult[i].ID(), expectedResult[j].ID()) < 0 + slices.SortFunc(expectedResult, func(a, b *enode.Node) int { + return enode.DistCmp(target.ID(), a.ID(), b.ID()) }) // Do the lookup. 
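The sorting changes in table_util_test.go, v4_lookup_test.go and v5_udp_test.go above, and in dnsdisc/tree.go just below, all follow the same recipe: sort.Slice with an index-based less function becomes slices.SortFunc with an element-based three-way comparator, matching the golang.org/x/exp/slices API used after the Go toolchain bump. A self-contained example of the pattern:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	ids := [][]byte{{3}, {1}, {2}}

	// Before: sort.Slice(ids, func(i, j int) bool {
	//     return bytes.Compare(ids[i], ids[j]) < 0
	// })

	// After: the comparator receives elements, not indices, and returns a
	// three-way int; bytes.Compare already fits that contract directly.
	slices.SortFunc(ids, func(a, b []byte) int {
		return bytes.Compare(a, b)
	})
	fmt.Println(ids) // [[1] [2] [3]]
}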
diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go index a3f426e42..06b7681f1 100644 --- a/p2p/dnsdisc/tree.go +++ b/p2p/dnsdisc/tree.go @@ -23,7 +23,6 @@ import ( "encoding/base64" "fmt" "io" - "sort" "strings" "github.com/ethereum/go-ethereum/crypto" @@ -31,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rlp" "golang.org/x/crypto/sha3" + "golang.org/x/exp/slices" ) // Tree is a merkle tree of node records. @@ -214,8 +214,8 @@ func (t *Tree) build(entries []entry) entry { } func sortByID(nodes []*enode.Node) []*enode.Node { - sort.Slice(nodes, func(i, j int) bool { - return bytes.Compare(nodes[i].ID().Bytes(), nodes[j].ID().Bytes()) < 0 + slices.SortFunc(nodes, func(a, b *enode.Node) int { + return bytes.Compare(a.ID().Bytes(), b.ID().Bytes()) }) return nodes } diff --git a/p2p/message.go b/p2p/message.go index 24f21456d..3ab56ee35 100644 --- a/p2p/message.go +++ b/p2p/message.go @@ -156,7 +156,7 @@ func MsgPipe() (*MsgPipeRW, *MsgPipeRW) { var ( c1, c2 = make(chan Msg), make(chan Msg) closing = make(chan struct{}) - closed = new(int32) + closed = new(atomic.Bool) rw1 = &MsgPipeRW{c1, c2, closing, closed} rw2 = &MsgPipeRW{c2, c1, closing, closed} ) @@ -172,13 +172,13 @@ type MsgPipeRW struct { w chan<- Msg r <-chan Msg closing chan struct{} - closed *int32 + closed *atomic.Bool } // WriteMsg sends a message on the pipe. // It blocks until the receiver has consumed the message payload. func (p *MsgPipeRW) WriteMsg(msg Msg) error { - if atomic.LoadInt32(p.closed) == 0 { + if !p.closed.Load() { consumed := make(chan struct{}, 1) msg.Payload = &eofSignal{msg.Payload, msg.Size, consumed} select { @@ -199,7 +199,7 @@ func (p *MsgPipeRW) WriteMsg(msg Msg) error { // ReadMsg returns a message sent on the other end of the pipe. func (p *MsgPipeRW) ReadMsg() (Msg, error) { - if atomic.LoadInt32(p.closed) == 0 { + if !p.closed.Load() { select { case msg := <-p.r: return msg, nil @@ -213,9 +213,8 @@ func (p *MsgPipeRW) ReadMsg() (Msg, error) { // of the pipe. They will return ErrPipeClosed. Close also // interrupts any reads from a message payload. func (p *MsgPipeRW) Close() error { - if atomic.AddInt32(p.closed, 1) != 1 { + if p.closed.Swap(true) { // someone else is already closing - atomic.StoreInt32(p.closed, 1) // avoid overflow return nil } close(p.closing) diff --git a/p2p/metrics.go b/p2p/metrics.go index 1bb505cdf..a6e36b91a 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -19,30 +19,86 @@ package p2p import ( + "errors" "net" "github.com/ethereum/go-ethereum/metrics" ) const ( + // HandleHistName is the prefix of the per-packet serving time histograms. + HandleHistName = "p2p/handle" + // ingressMeterName is the prefix of the per-packet inbound metrics. ingressMeterName = "p2p/ingress" // egressMeterName is the prefix of the per-packet outbound metrics. egressMeterName = "p2p/egress" - - // HandleHistName is the prefix of the per-packet serving time histograms. 
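Back in the p2p/message.go hunk above, the hand-rolled int32 close flag of MsgPipeRW becomes an atomic.Bool: Swap returns the previous value, so exactly one caller wins and the old overflow workaround disappears. The close-once idiom in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

type pipe struct {
	closed  atomic.Bool
	closing chan struct{}
}

// Close may be called concurrently; only the first caller observes Swap
// returning false and gets to close the channel.
func (p *pipe) Close() {
	if p.closed.Swap(true) {
		return // someone else is already closing
	}
	close(p.closing)
}

func main() {
	p := &pipe{closing: make(chan struct{})}
	p.Close()
	p.Close() // safe: no double-close panic
	fmt.Println("closed once")
}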
- HandleHistName = "p2p/handle" ) var ( - ingressConnectMeter = metrics.NewRegisteredMeter("p2p/serves", nil) - ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil) - egressConnectMeter = metrics.NewRegisteredMeter("p2p/dials", nil) - egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil) - activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil) + activePeerGauge metrics.Gauge = metrics.NilGauge{} + + ingressTrafficMeter = metrics.NewRegisteredMeter("p2p/ingress", nil) + egressTrafficMeter = metrics.NewRegisteredMeter("p2p/egress", nil) + + // general ingress/egress connection meters + serveMeter metrics.Meter = metrics.NilMeter{} + serveSuccessMeter metrics.Meter = metrics.NilMeter{} + dialMeter metrics.Meter = metrics.NilMeter{} + dialSuccessMeter metrics.Meter = metrics.NilMeter{} + dialConnectionError metrics.Meter = metrics.NilMeter{} + + // handshake error meters + dialTooManyPeers = metrics.NewRegisteredMeter("p2p/dials/error/saturated", nil) + dialAlreadyConnected = metrics.NewRegisteredMeter("p2p/dials/error/known", nil) + dialSelf = metrics.NewRegisteredMeter("p2p/dials/error/self", nil) + dialUselessPeer = metrics.NewRegisteredMeter("p2p/dials/error/useless", nil) + dialUnexpectedIdentity = metrics.NewRegisteredMeter("p2p/dials/error/id/unexpected", nil) + dialEncHandshakeError = metrics.NewRegisteredMeter("p2p/dials/error/rlpx/enc", nil) + dialProtoHandshakeError = metrics.NewRegisteredMeter("p2p/dials/error/rlpx/proto", nil) ) +func init() { + if !metrics.Enabled { + return + } + + activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil) + serveMeter = metrics.NewRegisteredMeter("p2p/serves", nil) + serveSuccessMeter = metrics.NewRegisteredMeter("p2p/serves/success", nil) + dialMeter = metrics.NewRegisteredMeter("p2p/dials", nil) + dialSuccessMeter = metrics.NewRegisteredMeter("p2p/dials/success", nil) + dialConnectionError = metrics.NewRegisteredMeter("p2p/dials/error/connection", nil) +} + +// markDialError matches errors that occur while setting up a dial connection +// to the corresponding meter. +func markDialError(err error) { + if !metrics.Enabled { + return + } + if err2 := errors.Unwrap(err); err2 != nil { + err = err2 + } + switch err { + case DiscTooManyPeers: + dialTooManyPeers.Mark(1) + case DiscAlreadyConnected: + dialAlreadyConnected.Mark(1) + case DiscSelf: + dialSelf.Mark(1) + case DiscUselessPeer: + dialUselessPeer.Mark(1) + case DiscUnexpectedIdentity: + dialUnexpectedIdentity.Mark(1) + case errEncHandshakeError: + dialEncHandshakeError.Mark(1) + case errProtoHandshakeError: + dialProtoHandshakeError.Mark(1) + } +} + // meteredConn is a wrapper around a net.Conn that meters both the // inbound and outbound network traffic. type meteredConn struct { @@ -52,18 +108,10 @@ type meteredConn struct { // newMeteredConn creates a new metered connection, bumps the ingress or egress // connection meter and also increases the metered peer count. If the metrics // system is disabled, function returns the original connection. 
-func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn { - // Short circuit if metrics are disabled +func newMeteredConn(conn net.Conn) net.Conn { if !metrics.Enabled { return conn } - // Bump the connection counters and wrap the connection - if ingress { - ingressConnectMeter.Mark(1) - } else { - egressConnectMeter.Mark(1) - } - activePeerGauge.Inc(1) return &meteredConn{Conn: conn} } @@ -82,13 +130,3 @@ func (c *meteredConn) Write(b []byte) (n int, err error) { egressTrafficMeter.Mark(int64(n)) return n, err } - -// Close delegates a close operation to the underlying connection, unregisters -// the peer from the traffic registries and emits close event. -func (c *meteredConn) Close() error { - err := c.Conn.Close() - if err == nil { - activePeerGauge.Dec(1) - } - return err -} diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go index ad4c36582..61b692298 100644 --- a/p2p/nat/nat.go +++ b/p2p/nat/nat.go @@ -38,7 +38,7 @@ type Interface interface { // protocol is "UDP" or "TCP". Some implementations allow setting // a display name for the mapping. The mapping may be removed by // the gateway when its lifetime ends. - AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error + AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) DeleteMapping(protocol string, extport, intport int) error // ExternalIP should return the external (Internet-facing) @@ -91,20 +91,23 @@ func Parse(spec string) (Interface, error) { } const ( - mapTimeout = 10 * time.Minute + DefaultMapTimeout = 10 * time.Minute ) // Map adds a port mapping on m and keeps it alive until c is closed. // This function is typically invoked in its own goroutine. +// +// Note that Map does not handle the situation where the NAT interface assigns a different +// external port than the requested one. func Map(m Interface, c <-chan struct{}, protocol string, extport, intport int, name string) { log := log.New("proto", protocol, "extport", extport, "intport", intport, "interface", m) - refresh := time.NewTimer(mapTimeout) + refresh := time.NewTimer(DefaultMapTimeout) defer func() { refresh.Stop() log.Debug("Deleting port mapping") m.DeleteMapping(protocol, extport, intport) }() - if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil { + if _, err := m.AddMapping(protocol, extport, intport, name, DefaultMapTimeout); err != nil { log.Debug("Couldn't add port mapping", "err", err) } else { log.Info("Mapped network port") @@ -117,10 +120,10 @@ func Map(m Interface, c <-chan struct{}, protocol string, extport, intport int, } case <-refresh.C: log.Trace("Refreshing port mapping") - if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil { + if _, err := m.AddMapping(protocol, extport, intport, name, DefaultMapTimeout); err != nil { log.Debug("Couldn't add port mapping", "err", err) } - refresh.Reset(mapTimeout) + refresh.Reset(DefaultMapTimeout) } } } @@ -135,8 +138,8 @@ func (n ExtIP) String() string { return fmt.Sprintf("ExtIP(%v)", ne // These do nothing. -func (ExtIP) AddMapping(string, int, int, string, time.Duration) error { return nil } -func (ExtIP) DeleteMapping(string, int, int) error { return nil } +func (ExtIP) AddMapping(string, int, int, string, time.Duration) (uint16, error) { return 0, nil } +func (ExtIP) DeleteMapping(string, int, int) error { return nil } // Any returns a port mapper that tries to discover any supported // mechanism on the local network. 
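The nat.Interface change above exists because NAT-PMP and UPnP gateways may grant a different external port than the one requested: AddMapping now returns the port that was actually mapped, and the natpmp.go/natupnp.go hunks below plus the new server_nat.go loop propagate that port into the node's ENR. A sketch of a trivial implementation of the updated contract, usable as a test double; the mapper interface and stub type here are illustrative, not part of the diff:

package main

import (
	"fmt"
	"net"
	"time"
)

// mapper mirrors the updated nat.Interface methods touched by this diff.
type mapper interface {
	AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error)
	DeleteMapping(protocol string, extport, intport int) error
	ExternalIP() (net.IP, error)
}

// stubborn always grants a fixed external port, exercising the case the old
// API could not express: the gateway assigning a port other than the one asked for.
type stubborn struct{ port uint16 }

func (s stubborn) AddMapping(string, int, int, string, time.Duration) (uint16, error) {
	return s.port, nil
}
func (s stubborn) DeleteMapping(string, int, int) error { return nil }
func (s stubborn) ExternalIP() (net.IP, error)          { return net.IPv4(203, 0, 113, 7), nil }

func main() {
	var m mapper = stubborn{port: 31000}
	got, _ := m.AddMapping("TCP", 30303, 30303, "ethereum p2p", 10*time.Minute)
	if int(got) != 30303 {
		// Callers must advertise the granted port, not the requested one.
		fmt.Println("gateway assigned alternative external port:", got)
	}
}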
@@ -193,9 +196,9 @@ func startautodisc(what string, doit func() Interface) Interface { return &autodisc{what: what, doit: doit} } -func (n *autodisc) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error { +func (n *autodisc) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) { if err := n.wait(); err != nil { - return err + return 0, err } return n.found.AddMapping(protocol, extport, intport, name, lifetime) } diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go index 40f2aff44..97601c99d 100644 --- a/p2p/nat/natpmp.go +++ b/p2p/nat/natpmp.go @@ -44,28 +44,21 @@ func (n *pmp) ExternalIP() (net.IP, error) { return response.ExternalIPAddress[:], nil } -func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error { +func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) { if lifetime <= 0 { - return fmt.Errorf("lifetime must not be <= 0") + return 0, fmt.Errorf("lifetime must not be <= 0") } // Note order of port arguments is switched between our // AddMapping and the client's AddPortMapping. res, err := n.c.AddPortMapping(strings.ToLower(protocol), intport, extport, int(lifetime/time.Second)) if err != nil { - return err + return 0, err } - // NAT-PMP maps an alternative available port number if the requested - // port is already mapped to another address and returns success. In this - // case, we return an error because there is no way to return the new port - // to the caller. - if uint16(extport) != res.MappedExternalPort { - // Destroy the mapping in NAT device. - n.c.AddPortMapping(strings.ToLower(protocol), intport, 0, 0) - return fmt.Errorf("port %d already mapped to another address (%s)", extport, protocol) - } - - return nil + // NAT-PMP maps an alternative available port number if the requested port + // is already mapped to another address and returns success. Handling of + // alternate port numbers is done by the caller. + return res.MappedExternalPort, nil } func (n *pmp) DeleteMapping(protocol string, extport, intport int) (err error) { diff --git a/p2p/nat/natupnp.go b/p2p/nat/natupnp.go index a8de00e97..c90c4f3de 100644 --- a/p2p/nat/natupnp.go +++ b/p2p/nat/natupnp.go @@ -19,6 +19,8 @@ package nat import ( "errors" "fmt" + "math" + "math/rand" "net" "strings" "sync" @@ -40,6 +42,7 @@ type upnp struct { client upnpClient mu sync.Mutex lastReqTime time.Time + rand *rand.Rand } type upnpClient interface { @@ -76,18 +79,50 @@ func (n *upnp) ExternalIP() (addr net.IP, err error) { return ip, nil } -func (n *upnp) AddMapping(protocol string, extport, intport int, desc string, lifetime time.Duration) error { +func (n *upnp) AddMapping(protocol string, extport, intport int, desc string, lifetime time.Duration) (uint16, error) { ip, err := n.internalAddress() if err != nil { - return nil // TODO: Shouldn't we return the error? + return 0, nil // TODO: Shouldn't we return the error? 
} protocol = strings.ToUpper(protocol) lifetimeS := uint32(lifetime / time.Second) n.DeleteMapping(protocol, extport, intport) - return n.withRateLimit(func() error { + err = n.withRateLimit(func() error { return n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS) }) + if err == nil { + return uint16(extport), nil + } + + return uint16(extport), n.withRateLimit(func() error { + p, err := n.addAnyPortMapping(protocol, extport, intport, ip, desc, lifetimeS) + if err == nil { + extport = int(p) + } + return err + }) +} + +func (n *upnp) addAnyPortMapping(protocol string, extport, intport int, ip net.IP, desc string, lifetimeS uint32) (uint16, error) { + if client, ok := n.client.(*internetgateway2.WANIPConnection2); ok { + return client.AddAnyPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS) + } + // It will retry with a random port number if the client does + // not support AddAnyPortMapping. + extport = n.randomPort() + err := n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS) + if err != nil { + return 0, err + } + return uint16(extport), nil +} + +func (n *upnp) randomPort() int { + if n.rand == nil { + n.rand = rand.New(rand.NewSource(time.Now().UnixNano())) + } + return n.rand.Intn(math.MaxUint16-10000) + 10000 } func (n *upnp) internalAddress() (net.IP, error) { diff --git a/p2p/peer.go b/p2p/peer.go index a4b22fb6d..65a7903f5 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -21,7 +21,6 @@ import ( "fmt" "io" "net" - "sort" "sync" "time" @@ -32,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/exp/slices" ) var ( @@ -112,6 +112,7 @@ type Peer struct { wg sync.WaitGroup protoErr chan error closed chan struct{} + pingRecv chan struct{} disc chan DiscReason // events receives message send / receive events if set @@ -233,6 +234,7 @@ func newPeer(log log.Logger, conn *conn, protocols []Protocol) *Peer { disc: make(chan DiscReason), protoErr: make(chan error, len(protomap)+1), // protocols + pingLoop closed: make(chan struct{}), + pingRecv: make(chan struct{}, 16), log: log.New("id", conn.node.ID(), "conn", conn.flags), } return p @@ -293,9 +295,11 @@ loop: } func (p *Peer) pingLoop() { - ping := time.NewTimer(pingInterval) defer p.wg.Done() + + ping := time.NewTimer(pingInterval) defer ping.Stop() + for { select { case <-ping.C: @@ -304,6 +308,10 @@ func (p *Peer) pingLoop() { return } ping.Reset(pingInterval) + + case <-p.pingRecv: + SendItems(p.rw, pongMsg) + case <-p.closed: return } @@ -330,7 +338,10 @@ func (p *Peer) handle(msg Msg) error { switch { case msg.Code == pingMsg: msg.Discard() - go SendItems(p.rw, pongMsg) + select { + case p.pingRecv <- struct{}{}: + case <-p.closed: + } case msg.Code == discMsg: // This is the last message. We don't need to discard or // check errors because, the connection will be closed after it. @@ -375,7 +386,7 @@ func countMatchingProtocols(protocols []Protocol, caps []Cap) int { // matchProtocols creates structures for matching named subprotocols. 
func matchProtocols(protocols []Protocol, caps []Cap, rw MsgReadWriter) map[string]*protoRW { - sort.Sort(capsByNameAndVersion(caps)) + slices.SortFunc(caps, Cap.Cmp) offset := baseProtocolLength result := make(map[string]*protoRW) diff --git a/p2p/protocol.go b/p2p/protocol.go index fa23a087c..9bb6785a2 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -18,6 +18,7 @@ package p2p import ( "fmt" + "strings" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" @@ -77,10 +78,16 @@ func (cap Cap) String() string { return fmt.Sprintf("%s/%d", cap.Name, cap.Version) } -type capsByNameAndVersion []Cap - -func (cs capsByNameAndVersion) Len() int { return len(cs) } -func (cs capsByNameAndVersion) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] } -func (cs capsByNameAndVersion) Less(i, j int) bool { - return cs[i].Name < cs[j].Name || (cs[i].Name == cs[j].Name && cs[i].Version < cs[j].Version) +// Cmp defines the canonical sorting order of capabilities. +func (cap Cap) Cmp(other Cap) int { + if cap.Name == other.Name { + if cap.Version < other.Version { + return -1 + } + if cap.Version > other.Version { + return 1 + } + return 0 + } + return strings.Compare(cap.Name, other.Name) } diff --git a/p2p/server.go b/p2p/server.go index f7bf948b6..8f42765a8 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -24,7 +24,6 @@ import ( "errors" "fmt" "net" - "sort" "sync" "sync/atomic" "time" @@ -39,6 +38,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" + "golang.org/x/exp/slices" ) const ( @@ -64,7 +64,11 @@ const ( frameWriteTimeout = 20 * time.Second ) -var errServerStopped = errors.New("server stopped") +var ( + errServerStopped = errors.New("server stopped") + errEncHandshakeError = errors.New("rlpx enc error") + errProtoHandshakeError = errors.New("rlpx proto error") +) // Config holds Server options. type Config struct { @@ -89,6 +93,9 @@ type Config struct { // Disabling is useful for protocol debugging (manual topology). NoDiscovery bool + // DiscoveryV4 specifies whether V4 discovery should be started. + DiscoveryV4 bool `toml:",omitempty"` + // DiscoveryV5 specifies whether the new topic-discovery based V5 discovery // protocol should be started or not. DiscoveryV5 bool `toml:",omitempty"` @@ -188,6 +195,9 @@ type Server struct { discmix *enode.FairMix dialsched *dialScheduler + // This is read by the NAT port mapping loop. + portMappingRegister chan *portMapping + // Channels into the run loop. quit chan struct{} addtrusted chan *enode.Node @@ -476,6 +486,8 @@ func (srv *Server) Start() (err error) { if err := srv.setupLocalNode(); err != nil { return err } + srv.setupPortMapping() + if srv.ListenAddr != "" { if err := srv.setupListening(); err != nil { return err @@ -498,7 +510,7 @@ func (srv *Server) setupLocalNode() error { for _, p := range srv.Protocols { srv.ourHandshake.Caps = append(srv.ourHandshake.Caps, p.cap()) } - sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps)) + slices.SortFunc(srv.ourHandshake.Caps, Cap.Cmp) // Create the local node. db, err := enode.OpenDB(srv.NodeDatabase) @@ -514,81 +526,34 @@ func (srv *Server) setupLocalNode() error { srv.localnode.Set(e) } } - switch srv.NAT.(type) { - case nil: - // No NAT interface, do nothing. - case nat.ExtIP: - // ExtIP doesn't block, set the IP right away. - ip, _ := srv.NAT.ExternalIP() - srv.localnode.SetStaticIP(ip) - default: - // Ask the router about the IP. 
This takes a while and blocks startup, - // do it in the background. - srv.loopWG.Add(1) - go func() { - defer srv.loopWG.Done() - if ip, err := srv.NAT.ExternalIP(); err == nil { - srv.localnode.SetStaticIP(ip) - } - }() - } return nil } func (srv *Server) setupDiscovery() error { srv.discmix = enode.NewFairMix(discmixTimeout) - // Add protocol-specific discovery sources. - added := make(map[string]bool) - for _, proto := range srv.Protocols { - if proto.DialCandidates != nil && !added[proto.Name] { - srv.discmix.AddSource(proto.DialCandidates) - added[proto.Name] = true - } - } - // Don't listen on UDP endpoint if DHT is disabled. - if srv.NoDiscovery && !srv.DiscoveryV5 { + if srv.NoDiscovery { return nil } - - listenAddr := srv.ListenAddr - - // Use an alternate listening address for UDP if - // a custom discovery address is configured. - if srv.DiscAddr != "" { - listenAddr = srv.DiscAddr - } - - addr, err := net.ResolveUDPAddr("udp", listenAddr) + conn, err := srv.setupUDPListening() if err != nil { return err } - conn, err := net.ListenUDP("udp", addr) - if err != nil { - return err - } - realaddr := conn.LocalAddr().(*net.UDPAddr) - srv.log.Debug("UDP listener up", "addr", realaddr) - if srv.NAT != nil { - if !realaddr.IP.IsLoopback() { - srv.loopWG.Add(1) - go func() { - nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery") - srv.loopWG.Done() - }() - } - } - srv.localnode.SetFallbackUDP(realaddr.Port) - // Discovery V4 - var unhandled chan discover.ReadPacket - var sconn *sharedUDPConn - if !srv.NoDiscovery { - if srv.DiscoveryV5 { - unhandled = make(chan discover.ReadPacket, 100) - sconn = &sharedUDPConn{conn, unhandled} - } + var ( + sconn discover.UDPConn = conn + unhandled chan discover.ReadPacket + ) + // If both versions of discovery are running, setup a shared + // connection, so v5 can read unhandled messages from v4. + if srv.DiscoveryV4 && srv.DiscoveryV5 { + unhandled = make(chan discover.ReadPacket, 100) + sconn = &sharedUDPConn{conn, unhandled} + } + + // Start discovery services. + if srv.DiscoveryV4 { cfg := discover.Config{ PrivateKey: srv.PrivateKey, NetRestrict: srv.NetRestrict, @@ -603,8 +568,6 @@ func (srv *Server) setupDiscovery() error { srv.ntab = ntab srv.discmix.AddSource(ntab.RandomNodes()) } - - // Discovery V5 if srv.DiscoveryV5 { cfg := discover.Config{ PrivateKey: srv.PrivateKey, @@ -612,16 +575,20 @@ func (srv *Server) setupDiscovery() error { Bootnodes: srv.BootstrapNodesV5, Log: srv.log, } - var err error - if sconn != nil { - srv.DiscV5, err = discover.ListenV5(sconn, srv.localnode, cfg) - } else { - srv.DiscV5, err = discover.ListenV5(conn, srv.localnode, cfg) - } + srv.DiscV5, err = discover.ListenV5(sconn, srv.localnode, cfg) if err != nil { return err } } + + // Add protocol-specific discovery sources. + added := make(map[string]bool) + for _, proto := range srv.Protocols { + if proto.DialCandidates != nil && !added[proto.Name] { + srv.discmix.AddSource(proto.DialCandidates) + added[proto.Name] = true + } + } return nil } @@ -676,14 +643,15 @@ func (srv *Server) setupListening() error { srv.ListenAddr = listener.Addr().String() // Update the local node record and map the TCP listening port if NAT is configured. 
- if tcp, ok := listener.Addr().(*net.TCPAddr); ok { + tcp, isTCP := listener.Addr().(*net.TCPAddr) + if isTCP { srv.localnode.Set(enr.TCP(tcp.Port)) - if !tcp.IP.IsLoopback() && srv.NAT != nil { - srv.loopWG.Add(1) - go func() { - nat.Map(srv.NAT, srv.quit, "tcp", tcp.Port, tcp.Port, "ethereum p2p") - srv.loopWG.Done() - }() + if !tcp.IP.IsLoopback() && !tcp.IP.IsPrivate() { + srv.portMappingRegister <- &portMapping{ + protocol: "TCP", + name: "ethereum p2p", + port: tcp.Port, + } } } @@ -692,6 +660,36 @@ func (srv *Server) setupListening() error { return nil } +func (srv *Server) setupUDPListening() (*net.UDPConn, error) { + listenAddr := srv.ListenAddr + + // Use an alternate listening address for UDP if + // a custom discovery address is configured. + if srv.DiscAddr != "" { + listenAddr = srv.DiscAddr + } + addr, err := net.ResolveUDPAddr("udp", listenAddr) + if err != nil { + return nil, err + } + conn, err := net.ListenUDP("udp", addr) + if err != nil { + return nil, err + } + laddr := conn.LocalAddr().(*net.UDPAddr) + srv.localnode.SetFallbackUDP(laddr.Port) + srv.log.Debug("UDP listener up", "addr", laddr) + if !laddr.IP.IsLoopback() && !laddr.IP.IsPrivate() { + srv.portMappingRegister <- &portMapping{ + protocol: "UDP", + name: "ethereum peer discovery", + port: laddr.Port, + } + } + + return conn, nil +} + // doPeerOp runs fn on the main loop. func (srv *Server) doPeerOp(fn peerOpFunc) { select { @@ -772,7 +770,11 @@ running: srv.dialsched.peerAdded(c) if p.Inbound() { inboundCount++ + serveSuccessMeter.Mark(1) + } else { + dialSuccessMeter.Mark(1) } + activePeerGauge.Inc(1) } c.cont <- err @@ -785,6 +787,7 @@ running: if pd.Inbound() { inboundCount-- } + activePeerGauge.Dec(1) } } @@ -894,11 +897,8 @@ func (srv *Server) listenLoop() { continue } if remoteIP != nil { - var addr *net.TCPAddr - if tcp, ok := fd.RemoteAddr().(*net.TCPAddr); ok { - addr = tcp - } - fd = newMeteredConn(fd, true, addr) + fd = newMeteredConn(fd) + serveMeter.Mark(1) srv.log.Trace("Accepted connection", "addr", fd.RemoteAddr()) } go func() { @@ -939,6 +939,9 @@ func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *enode.Node) err := srv.setupConn(c, flags, dialDest) if err != nil { + if !c.is(inboundConn) { + markDialError(err) + } c.close(err) } return err @@ -957,7 +960,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro if dialDest != nil { dialPubkey := new(ecdsa.PublicKey) if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil { - err = errors.New("dial destination doesn't have a secp256k1 public key") + err = fmt.Errorf("%w: dial destination doesn't have a secp256k1 public key", errEncHandshakeError) srv.log.Trace("Setting up connection failed", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err) return err } @@ -967,7 +970,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro remotePubkey, err := c.doEncHandshake(srv.PrivateKey) if err != nil { srv.log.Trace("Failed RLPx handshake", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err) - return err + return fmt.Errorf("%w: %v", errEncHandshakeError, err) } if dialDest != nil { c.node = dialDest @@ -985,7 +988,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro phs, err := c.doProtoHandshake(srv.ourHandshake) if err != nil { clog.Trace("Failed p2p handshake", "err", err) - return err + return fmt.Errorf("%w: %v", errProtoHandshakeError, err) } if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) { 
clog.Trace("Wrong devp2p handshake identity", "phsid", hex.EncodeToString(phs.ID)) diff --git a/p2p/server_nat.go b/p2p/server_nat.go new file mode 100644 index 000000000..354597cc7 --- /dev/null +++ b/p2p/server_nat.go @@ -0,0 +1,187 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package p2p + +import ( + "net" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/p2p/nat" +) + +const ( + portMapDuration = 10 * time.Minute + portMapRefreshInterval = 8 * time.Minute + portMapRetryInterval = 5 * time.Minute + extipRetryInterval = 2 * time.Minute +) + +type portMapping struct { + protocol string + name string + port int + + // for use by the portMappingLoop goroutine: + extPort int // the mapped port returned by the NAT interface + nextTime mclock.AbsTime +} + +// setupPortMapping starts the port mapping loop if necessary. +// Note: this needs to be called after the LocalNode instance has been set on the server. +func (srv *Server) setupPortMapping() { + // portMappingRegister will receive up to two values: one for the TCP port if + // listening is enabled, and one more for enabling UDP port mapping if discovery is + // enabled. We make it buffered to avoid blocking setup while a mapping request is in + // progress. + srv.portMappingRegister = make(chan *portMapping, 2) + + switch srv.NAT.(type) { + case nil: + // No NAT interface configured. + srv.loopWG.Add(1) + go srv.consumePortMappingRequests() + + case nat.ExtIP: + // ExtIP doesn't block, set the IP right away. + ip, _ := srv.NAT.ExternalIP() + srv.localnode.SetStaticIP(ip) + srv.loopWG.Add(1) + go srv.consumePortMappingRequests() + + default: + srv.loopWG.Add(1) + go srv.portMappingLoop() + } +} + +func (srv *Server) consumePortMappingRequests() { + defer srv.loopWG.Done() + for { + select { + case <-srv.quit: + return + case <-srv.portMappingRegister: + } + } +} + +// portMappingLoop manages port mappings for UDP and TCP. +func (srv *Server) portMappingLoop() { + defer srv.loopWG.Done() + + newLogger := func(p string, e int, i int) log.Logger { + return log.New("proto", p, "extport", e, "intport", i, "interface", srv.NAT) + } + + var ( + mappings = make(map[string]*portMapping, 2) + refresh = mclock.NewAlarm(srv.clock) + extip = mclock.NewAlarm(srv.clock) + lastExtIP net.IP + ) + extip.Schedule(srv.clock.Now()) + defer func() { + refresh.Stop() + extip.Stop() + for _, m := range mappings { + if m.extPort != 0 { + log := newLogger(m.protocol, m.extPort, m.port) + log.Debug("Deleting port mapping") + srv.NAT.DeleteMapping(m.protocol, m.extPort, m.port) + } + } + }() + + for { + // Schedule refresh of existing mappings. 
+ for _, m := range mappings { + refresh.Schedule(m.nextTime) + } + + select { + case <-srv.quit: + return + + case <-extip.C(): + extip.Schedule(srv.clock.Now().Add(extipRetryInterval)) + ip, err := srv.NAT.ExternalIP() + if err != nil { + log.Debug("Couldn't get external IP", "err", err, "interface", srv.NAT) + } else if !ip.Equal(lastExtIP) { + log.Debug("External IP changed", "ip", ip, "interface", srv.NAT) + } else { + continue + } + // Here, we either failed to get the external IP, or it has changed. + lastExtIP = ip + srv.localnode.SetStaticIP(ip) + // Ensure port mappings are refreshed in case we have moved to a new network. + for _, m := range mappings { + m.nextTime = srv.clock.Now() + } + + case m := <-srv.portMappingRegister: + if m.protocol != "TCP" && m.protocol != "UDP" { + panic("unknown NAT protocol name: " + m.protocol) + } + mappings[m.protocol] = m + m.nextTime = srv.clock.Now() + + case <-refresh.C(): + for _, m := range mappings { + if srv.clock.Now() < m.nextTime { + continue + } + + external := m.port + if m.extPort != 0 { + external = m.extPort + } + log := newLogger(m.protocol, external, m.port) + + log.Trace("Attempting port mapping") + p, err := srv.NAT.AddMapping(m.protocol, external, m.port, m.name, portMapDuration) + if err != nil { + log.Debug("Couldn't add port mapping", "err", err) + m.extPort = 0 + m.nextTime = srv.clock.Now().Add(portMapRetryInterval) + continue + } + // It was mapped! + m.extPort = int(p) + m.nextTime = srv.clock.Now().Add(portMapRefreshInterval) + if external != m.extPort { + log = newLogger(m.protocol, m.extPort, m.port) + log.Info("NAT mapped alternative port") + } else { + log.Info("NAT mapped port") + } + + // Update port in local ENR. + switch m.protocol { + case "TCP": + srv.localnode.Set(enr.TCP(m.extPort)) + case "UDP": + srv.localnode.SetFallbackUDP(m.extPort) + } + } + } + } +} diff --git a/p2p/server_nat_test.go b/p2p/server_nat_test.go new file mode 100644 index 000000000..de935fcfc --- /dev/null +++ b/p2p/server_nat_test.go @@ -0,0 +1,102 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package p2p + +import ( + "net" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/internal/testlog" + "github.com/ethereum/go-ethereum/log" +) + +func TestServerPortMapping(t *testing.T) { + clock := new(mclock.Simulated) + mockNAT := &mockNAT{mappedPort: 30000} + srv := Server{ + Config: Config{ + PrivateKey: newkey(), + NoDial: true, + ListenAddr: ":0", + NAT: mockNAT, + Logger: testlog.Logger(t, log.LvlTrace), + clock: clock, + }, + } + err := srv.Start() + if err != nil { + t.Fatal(err) + } + defer srv.Stop() + + // Wait for the port mapping to be registered.
Synchronization with the port mapping + // goroutine works like this: For each iteration, we allow other goroutines to run and + // also advance the virtual clock by 1 second. Waiting stops when the NAT interface + // has received some requests, or when the clock reaches a timeout. + deadline := clock.Now().Add(portMapRefreshInterval) + for clock.Now() < deadline && mockNAT.mapRequests.Load() < 2 { + time.Sleep(10 * time.Millisecond) + clock.Run(1 * time.Second) + } + + if mockNAT.ipRequests.Load() == 0 { + t.Fatal("external IP was never requested") + } + reqCount := mockNAT.mapRequests.Load() + if reqCount != 2 { + t.Error("wrong request count:", reqCount) + } + enr := srv.LocalNode().Node() + if enr.IP().String() != "192.0.2.0" { + t.Error("wrong IP in ENR:", enr.IP()) + } + if enr.TCP() != 30000 { + t.Error("wrong TCP port in ENR:", enr.TCP()) + } + if enr.UDP() != 30000 { + t.Error("wrong UDP port in ENR:", enr.UDP()) + } +} + +type mockNAT struct { + mappedPort uint16 + mapRequests atomic.Int32 + unmapRequests atomic.Int32 + ipRequests atomic.Int32 +} + +func (m *mockNAT) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) { + m.mapRequests.Add(1) + return m.mappedPort, nil +} + +func (m *mockNAT) DeleteMapping(protocol string, extport, intport int) error { + m.unmapRequests.Add(1) + return nil +} + +func (m *mockNAT) ExternalIP() (net.IP, error) { + m.ipRequests.Add(1) + return net.ParseIP("192.0.2.0"), nil +} + +func (m *mockNAT) String() string { + return "mockNAT" +} diff --git a/p2p/server_test.go b/p2p/server_test.go index f6f5700c5..a0491e984 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -24,6 +24,8 @@ import ( "math/rand" "net" "reflect" + "strconv" + "strings" "testing" "time" @@ -224,6 +226,14 @@ func TestServerRemovePeerDisconnect(t *testing.T) { srv2.Start() defer srv2.Stop() + s := strings.Split(srv2.ListenAddr, ":") + if len(s) != 2 { + t.Fatal("invalid ListenAddr") + } + if port, err := strconv.Atoi(s[1]); err == nil { + srv2.localnode.Set(enr.TCP(uint16(port))) + } + if !syncAddPeer(srv1, srv2.Self()) { t.Fatal("peer not connected") } @@ -370,8 +380,6 @@ func TestServerSetupConn(t *testing.T) { clientkey, srvkey = newkey(), newkey() clientpub = &clientkey.PublicKey srvpub = &srvkey.PublicKey - fooErr = errors.New("foo") - readErr = errors.New("read error") ) tests := []struct { dontstart bool @@ -389,10 +397,10 @@ func TestServerSetupConn(t *testing.T) { wantCloseErr: errServerStopped, }, { - tt: &setupTransport{pubkey: clientpub, encHandshakeErr: readErr}, + tt: &setupTransport{pubkey: clientpub, encHandshakeErr: errEncHandshakeError}, flags: inboundConn, wantCalls: "doEncHandshake,close,", - wantCloseErr: readErr, + wantCloseErr: errEncHandshakeError, }, { tt: &setupTransport{pubkey: clientpub, phs: protoHandshake{ID: randomID().Bytes()}}, @@ -402,11 +410,11 @@ func TestServerSetupConn(t *testing.T) { wantCloseErr: DiscUnexpectedIdentity, }, { - tt: &setupTransport{pubkey: clientpub, protoHandshakeErr: fooErr}, + tt: &setupTransport{pubkey: clientpub, protoHandshakeErr: errProtoHandshakeError}, dialDest: enode.NewV4(clientpub, nil, 0, 0), flags: dynDialedConn, wantCalls: "doEncHandshake,doProtoHandshake,close,", - wantCloseErr: fooErr, + wantCloseErr: errProtoHandshakeError, }, { tt: &setupTransport{pubkey: srvpub, phs: protoHandshake{ID: crypto.FromECDSAPub(srvpub)[1:]}}, diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 36b528651..c52917fd0 100644 --- 
a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -147,7 +147,7 @@ func (s *SimAdapter) DialRPC(id enode.ID) (*rpc.Client, error) { if !ok { return nil, fmt.Errorf("unknown node: %s", id) } - return node.node.Attach() + return node.node.Attach(), nil } // GetNode returns the node with the given ID if it exists @@ -274,10 +274,7 @@ func (sn *SimNode) Start(snapshots map[string][]byte) error { } // create an in-process RPC client - client, err := sn.node.Attach() - if err != nil { - return err - } + client := sn.node.Attach() sn.lock.Lock() sn.client = client sn.lock.Unlock() diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go index d9b51dc09..f6cf5113a 100644 --- a/p2p/simulations/examples/ping-pong.go +++ b/p2p/simulations/examples/ping-pong.go @@ -91,7 +91,7 @@ func main() { type pingPongService struct { id enode.ID log log.Logger - received int64 + received atomic.Int64 } func newPingPongService(id enode.ID) *pingPongService { @@ -125,7 +125,7 @@ func (p *pingPongService) Info() interface{} { return struct { Received int64 `json:"received"` }{ - atomic.LoadInt64(&p.received), + p.received.Load(), } } @@ -162,7 +162,7 @@ func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error { return } log.Info("received message", "msg.code", msg.Code, "msg.payload", string(payload)) - atomic.AddInt64(&p.received, 1) + p.received.Add(1) if msg.Code == pingMsgCode { log.Info("sending pong") go p2p.Send(rw, pongMsgCode, "PONG") diff --git a/params/bootnodes.go b/params/bootnodes.go index 4ae94cfbd..a84389691 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -28,6 +28,14 @@ var MainnetBootnodes = []string{ "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn } +// HoleskyBootnodes are the enode URLs of the P2P bootstrap nodes running on the +// Holesky test network. +var HoleskyBootnodes = []string{ + // EF DevOps + "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", + "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", +} + // SepoliaBootnodes are the enode URLs of the P2P bootstrap nodes running on the // Sepolia test network. var SepoliaBootnodes = []string{ @@ -39,13 +47,6 @@ var SepoliaBootnodes = []string{ "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 } -// RinkebyBootnodes are the enode URLs of the P2P bootstrap nodes running on the -// Rinkeby test network. -var RinkebyBootnodes = []string{ - "enode://a24ac7c5484ef4ed0c5eb2d36620ba4e4aa13b8c84684e1b4aab0cebea2ae45cb4d375b77eab56516d34bfbd3c1a833fc51296ff084b770b94fb9028c4d25ccf@52.169.42.101:30303", // IE - "enode://b6b28890b006743680c52e64e0d16db57f28124885595fa03a562be1d2bf0f3a1da297d56b13da25fb992888fd556d4c1a27b1f39d531bde7de1921c90061cc6@159.89.28.211:30303", // AKASHA -} - // GoerliBootnodes are the enode URLs of the P2P bootstrap nodes running on the // Görli test network. 
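A side note on the ping-pong example change above: it swaps a bare int64 manipulated through atomic.AddInt64/LoadInt64 for the typed atomic.Int64 introduced in Go 1.19, which rules out unsynchronized access by construction. The migration pattern in miniature (the counter type here is illustrative):

```go
import "sync/atomic"

type counter struct {
	received atomic.Int64 // previously: received int64
}

// bump replaces atomic.AddInt64(&c.received, 1).
func (c *counter) bump() { c.received.Add(1) }

// get replaces atomic.LoadInt64(&c.received).
func (c *counter) get() int64 { return c.received.Load() }
```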
var GoerliBootnodes = []string{ @@ -91,12 +92,12 @@ func KnownDNSNetwork(genesis common.Hash, protocol string) string { switch genesis { case MainnetGenesisHash: net = "mainnet" - case RinkebyGenesisHash: - net = "rinkeby" case GoerliGenesisHash: net = "goerli" case SepoliaGenesisHash: net = "sepolia" + case HoleskyGenesisHash: + net = "holesky" default: return "" } diff --git a/params/config.go b/params/config.go index 08e5ea0be..5ce091fc4 100644 --- a/params/config.go +++ b/params/config.go @@ -26,8 +26,8 @@ import ( // Genesis hashes to enforce below configs on. var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") + HoleskyGenesisHash = common.HexToHash("0xff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d") SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") - RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") ) @@ -59,6 +59,31 @@ var ( ShanghaiTime: newUint64(1681338455), Ethash: new(EthashConfig), } + // HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network. + HoleskyChainConfig = &ChainConfig{ + ChainID: big.NewInt(17000), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: nil, + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: nil, + GrayGlacierBlock: nil, + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, + MergeNetsplitBlock: nil, + ShanghaiTime: newUint64(1694790240), + CancunTime: newUint64(2000000000), + Ethash: new(EthashConfig), + } // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. SepoliaChainConfig = &ChainConfig{ ChainID: big.NewInt(11155111), @@ -75,34 +100,14 @@ var ( MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(0), + ArrowGlacierBlock: nil, + GrayGlacierBlock: nil, TerminalTotalDifficulty: big.NewInt(17_000_000_000_000_000), TerminalTotalDifficultyPassed: true, MergeNetsplitBlock: big.NewInt(1735371), ShanghaiTime: newUint64(1677557088), Ethash: new(EthashConfig), } - // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. - RinkebyChainConfig = &ChainConfig{ - ChainID: big.NewInt(4), - HomesteadBlock: big.NewInt(1), - DAOForkBlock: nil, - DAOForkSupport: true, - EIP150Block: big.NewInt(2), - EIP155Block: big.NewInt(3), - EIP158Block: big.NewInt(3), - ByzantiumBlock: big.NewInt(1_035_301), - ConstantinopleBlock: big.NewInt(3_660_663), - PetersburgBlock: big.NewInt(4_321_234), - IstanbulBlock: big.NewInt(5_435_345), - MuirGlacierBlock: nil, - BerlinBlock: big.NewInt(8_290_928), - LondonBlock: big.NewInt(8_897_988), - ArrowGlacierBlock: nil, - Clique: &CliqueConfig{ - Period: 15, - Epoch: 30000, - }, - } // GoerliChainConfig contains the chain parameters to run a node on the Görli test network. 
GoerliChainConfig = &ChainConfig{ ChainID: big.NewInt(5), @@ -151,12 +156,34 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: true, Ethash: new(EthashConfig), Clique: nil, } + AllDevChainProtocolChanges = &ChainConfig{ + ChainID: big.NewInt(1337), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + GrayGlacierBlock: big.NewInt(0), + ShanghaiTime: newUint64(0), + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, + IsDevMode: true, + } + // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. AllCliqueProtocolChanges = &ChainConfig{ @@ -180,6 +207,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: nil, @@ -209,6 +237,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: new(EthashConfig), @@ -238,6 +267,7 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, + VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: new(EthashConfig), @@ -249,9 +279,9 @@ var ( // NetworkNames are user friendly names to use in the chain spec banner. var NetworkNames = map[string]string{ MainnetChainConfig.ChainID.String(): "mainnet", - RinkebyChainConfig.ChainID.String(): "rinkeby", GoerliChainConfig.ChainID.String(): "goerli", SepoliaChainConfig.ChainID.String(): "sepolia", + HoleskyChainConfig.ChainID.String(): "holesky", } // ChainConfig is the core config which determines the blockchain settings. @@ -288,6 +318,7 @@ type ChainConfig struct { ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) + VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. @@ -299,8 +330,9 @@ type ChainConfig struct { TerminalTotalDifficultyPassed bool `json:"terminalTotalDifficultyPassed,omitempty"` // Various consensus engines - Ethash *EthashConfig `json:"ethash,omitempty"` - Clique *CliqueConfig `json:"clique,omitempty"` + Ethash *EthashConfig `json:"ethash,omitempty"` + Clique *CliqueConfig `json:"clique,omitempty"` + IsDevMode bool `json:"isDev,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. 
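The VerkleTime plumbing added throughout these configs follows the same timestamp-fork pattern as Shanghai, Cancun and Prague: a `*uint64` that stays nil while the fork is unscheduled, gated on London and checked via the existing `isTimestampForked` helper, as the `IsVerkle` accessor below shows. The helper's semantics amount to a nil-guarded comparison (reconstructed here for illustration; the actual helper already lives in params):

```go
// isTimestampForked returns whether a fork scheduled at timestamp s is
// active at the given head timestamp. An unscheduled fork (s == nil) is
// never active.
func isTimestampForked(s *uint64, head uint64) bool {
	if s == nil {
		return false
	}
	return *s <= head
}
```

Marking the later forks (`cancunTime`, `pragueTime`, `verkleTime`) as `optional: true` in CheckConfigForkOrder lets a config schedule a later fork without being forced to set every earlier one.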
@@ -408,6 +440,9 @@ func (c *ChainConfig) Description() string { if c.PragueTime != nil { banner += fmt.Sprintf(" - Prague: @%-10v\n", *c.PragueTime) } + if c.VerkleTime != nil { + banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) + } return banner } @@ -506,6 +541,11 @@ func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } +// IsVerkle returns whether num is either equal to the Verkle fork time or greater. +func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { + return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { @@ -560,6 +600,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "shanghaiTime", timestamp: c.ShanghaiTime}, {name: "cancunTime", timestamp: c.CancunTime, optional: true}, {name: "pragueTime", timestamp: c.PragueTime, optional: true}, + {name: "verkleTime", timestamp: c.VerkleTime, optional: true}, } { if lastFork.name != "" { switch { @@ -663,6 +704,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int, if isForkTimestampIncompatible(c.PragueTime, newcfg.PragueTime, headTimestamp) { return newTimestampCompatError("Prague fork timestamp", c.PragueTime, newcfg.PragueTime) } + if isForkTimestampIncompatible(c.VerkleTime, newcfg.VerkleTime, headTimestamp) { + return newTimestampCompatError("Verkle fork timestamp", c.VerkleTime, newcfg.VerkleTime) + } return nil } @@ -808,6 +852,7 @@ type Rules struct { IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, IsPrague bool + IsVerkle bool } // Rules ensures c's ChainID is not nil. @@ -832,5 +877,6 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsShanghai: c.IsShanghai(num, timestamp), IsCancun: c.IsCancun(num, timestamp), IsPrague: c.IsPrague(num, timestamp), + IsVerkle: c.IsVerkle(num, timestamp), } } diff --git a/params/protocol_params.go b/params/protocol_params.go index 1fb258c1f..353ad1e03 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -16,7 +16,11 @@ package params -import "math/big" +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) const ( GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. @@ -160,9 +164,16 @@ const ( RefundQuotient uint64 = 2 RefundQuotientEIP3529 uint64 = 5 - BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size) - BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs - BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price + BlobTxBytesPerFieldElement = 32 // Size in bytes of a field element + BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob + BlobTxHashVersion = 0x01 // Version byte of the commitment hash + BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size) + BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs + BlobTxBlobGaspriceUpdateFraction = 3338477 // Controls the maximum rate of change for blob gas price + BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile. 
+ + BlobTxTargetBlobGasPerBlock = 3 * BlobTxBlobGasPerBlob // Target consumable blob gas for data blobs per block (for 1559-like pricing) + MaxBlobGasPerBlock = 6 * BlobTxBlobGasPerBlob // Maximum consumable blob gas for data blobs per block ) // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations @@ -173,4 +184,9 @@ var ( GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block. MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be. DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not. + + // BeaconRootsStorageAddress is the address where historical beacon roots are stored as per EIP-4788 + BeaconRootsStorageAddress = common.HexToAddress("0xbEac00dDB15f3B6d645C48263dC93862413A222D") + // SystemAddress is where the system-transaction is sent from as per EIP-4788 + SystemAddress common.Address = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe") ) diff --git a/params/version.go b/params/version.go index 33c5399d1..385ec2208 100644 --- a/params/version.go +++ b/params/version.go @@ -22,7 +22,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release - VersionMinor = 12 // Minor version component of the current release + VersionMinor = 13 // Minor version component of the current release VersionPatch = 0 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/rlp/decode.go b/rlp/decode.go index c9b50e8c1..9b17d2d81 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -90,7 +90,7 @@ func Decode(r io.Reader, val interface{}) error { // DecodeBytes parses RLP data from b into val. Please see package-level documentation for // the decoding rules. The input must contain exactly one value and no trailing data. func DecodeBytes(b []byte, val interface{}) error { - r := bytes.NewReader(b) + r := (*sliceReader)(&b) stream := streamPool.Get().(*Stream) defer streamPool.Put(stream) @@ -99,7 +99,7 @@ func DecodeBytes(b []byte, val interface{}) error { if err := stream.Decode(val); err != nil { return err } - if r.Len() > 0 { + if len(b) > 0 { return ErrMoreThanOneValue } return nil @@ -1182,3 +1182,23 @@ func (s *Stream) listLimit() (inList bool, limit uint64) { } return true, s.stack[len(s.stack)-1] } + +type sliceReader []byte + +func (sr *sliceReader) Read(b []byte) (int, error) { + if len(*sr) == 0 { + return 0, io.EOF + } + n := copy(b, *sr) + *sr = (*sr)[n:] + return n, nil +} + +func (sr *sliceReader) ReadByte() (byte, error) { + if len(*sr) == 0 { + return 0, io.EOF + } + b := (*sr)[0] + *sr = (*sr)[1:] + return b, nil +} diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go index 9ac4fcb2c..8d3a3b229 100644 --- a/rlp/encbuffer.go +++ b/rlp/encbuffer.go @@ -56,18 +56,18 @@ func (buf *encBuffer) size() int { } // makeBytes creates the encoder output. 
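On the blob-gas constants introduced in params/protocol_params.go above: `BlobTxBlobGaspriceUpdateFraction` is the denominator of the EIP-4844 pricing rule, under which the blob gas price is `BlobTxMinBlobGasprice * e^(excessBlobGas / BlobTxBlobGaspriceUpdateFraction)`, evaluated with the spec's integer "fake exponential". A sketch ported from the EIP-4844 pseudocode (geth's own implementation lives elsewhere in the tree):

```go
import "math/big"

// fakeExponential approximates factor * e^(numerator/denominator) using
// only integer arithmetic, by summing the Taylor series of e^x until the
// terms reach zero.
func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
	var (
		output = new(big.Int)
		accum  = new(big.Int).Mul(factor, denominator)
	)
	for i := 1; accum.Sign() > 0; i++ {
		output.Add(output, accum)
		// accum = accum * numerator / (denominator * i)
		accum.Mul(accum, numerator)
		accum.Div(accum, denominator)
		accum.Div(accum, big.NewInt(int64(i)))
	}
	return output.Div(output, denominator)
}
```

Because the target (3 blobs) is half the maximum (6 blobs), excess blob gas grows whenever blocks carry more than three blobs and shrinks otherwise, steering the price the way EIP-1559 does for execution gas.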
-func (w *encBuffer) makeBytes() []byte { - out := make([]byte, w.size()) - w.copyTo(out) +func (buf *encBuffer) makeBytes() []byte { + out := make([]byte, buf.size()) + buf.copyTo(out) return out } -func (w *encBuffer) copyTo(dst []byte) { +func (buf *encBuffer) copyTo(dst []byte) { strpos := 0 pos := 0 - for _, head := range w.lheads { + for _, head := range buf.lheads { // write string data before header - n := copy(dst[pos:], w.str[strpos:head.offset]) + n := copy(dst[pos:], buf.str[strpos:head.offset]) pos += n strpos += n // write the header @@ -75,7 +75,7 @@ func (w *encBuffer) copyTo(dst []byte) { pos += len(enc) } // copy string data after the last list header - copy(dst[pos:], w.str[strpos:]) + copy(dst[pos:], buf.str[strpos:]) } // writeTo writes the encoder output to w. @@ -149,34 +149,34 @@ func (buf *encBuffer) writeString(s string) { const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8 // writeBigInt writes i as an integer. -func (w *encBuffer) writeBigInt(i *big.Int) { +func (buf *encBuffer) writeBigInt(i *big.Int) { bitlen := i.BitLen() if bitlen <= 64 { - w.writeUint64(i.Uint64()) + buf.writeUint64(i.Uint64()) return } // Integer is larger than 64 bits, encode from i.Bits(). // The minimal byte length is bitlen rounded up to the next // multiple of 8, divided by 8. length := ((bitlen + 7) & -8) >> 3 - w.encodeStringHeader(length) - w.str = append(w.str, make([]byte, length)...) + buf.encodeStringHeader(length) + buf.str = append(buf.str, make([]byte, length)...) index := length - buf := w.str[len(w.str)-length:] + bytesBuf := buf.str[len(buf.str)-length:] for _, d := range i.Bits() { for j := 0; j < wordBytes && index > 0; j++ { index-- - buf[index] = byte(d) + bytesBuf[index] = byte(d) d >>= 8 } } } // writeUint256 writes z as an integer. -func (w *encBuffer) writeUint256(z *uint256.Int) { +func (buf *encBuffer) writeUint256(z *uint256.Int) { bitlen := z.BitLen() if bitlen <= 64 { - w.writeUint64(z.Uint64()) + buf.writeUint64(z.Uint64()) return } nBytes := byte((bitlen + 7) / 8) @@ -186,7 +186,7 @@ func (w *encBuffer) writeUint256(z *uint256.Int) { binary.BigEndian.PutUint64(b[17:25], z[1]) binary.BigEndian.PutUint64(b[25:33], z[0]) b[32-nBytes] = 0x80 + nBytes - w.str = append(w.str, b[32-nBytes:]...) + buf.str = append(buf.str, b[32-nBytes:]...) } // list adds a new list header to the header stack. It returns the index of the header. diff --git a/rlp/encode.go b/rlp/encode.go index 3fac0bd2d..ffb42b299 100644 --- a/rlp/encode.go +++ b/rlp/encode.go @@ -419,7 +419,7 @@ func makeEncoderWriter(typ reflect.Type) writer { // package json simply doesn't call MarshalJSON for this case, but encodes the // value as if it didn't implement the interface. We don't want to handle it that // way. - return fmt.Errorf("rlp: unadressable value of type %v, EncodeRLP is pointer method", val.Type()) + return fmt.Errorf("rlp: unaddressable value of type %v, EncodeRLP is pointer method", val.Type()) } return val.Addr().Interface().(Encoder).EncodeRLP(w) } diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 02be47d0e..314958eb5 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -396,7 +396,7 @@ var encTests = []encTest{ {val: &struct{ TE testEncoder }{testEncoder{errors.New("test error")}}, error: "test error"}, // Verify the error for non-addressable non-pointer Encoder. 
- {val: testEncoder{}, error: "rlp: unadressable value of type rlp.testEncoder, EncodeRLP is pointer method"}, + {val: testEncoder{}, error: "rlp: unaddressable value of type rlp.testEncoder, EncodeRLP is pointer method"}, // Verify Encoder takes precedence over []byte. {val: []byteEncoder{0, 1, 2, 3, 4}, output: "C5C0C0C0C0C0"}, diff --git a/rpc/client.go b/rpc/client.go index fae8536b2..2b0016db8 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -34,14 +34,15 @@ import ( var ( ErrBadResult = errors.New("bad result in JSON-RPC response") ErrClientQuit = errors.New("client is closed") - ErrNoResult = errors.New("no result in JSON-RPC response") + ErrNoResult = errors.New("JSON-RPC response has no result") + ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call") ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow") errClientReconnected = errors.New("client reconnected") errDead = errors.New("connection lost") ) +// Timeouts const ( - // Timeouts defaultDialTimeout = 10 * time.Second // used if context has no deadline subscribeTimeout = 10 * time.Second // overall timeout eth_subscribe, rpc_modules calls ) @@ -84,6 +85,10 @@ type Client struct { // This function, if non-nil, is called when the connection is lost. reconnectFunc reconnectFunc + // config fields + batchItemLimit int + batchResponseMaxSize int + // writeConn is used for writing to the connection on the caller's goroutine. It should // only be accessed outside of dispatch, with the write lock held. The write lock is // taken by sending on reqInit and released by sending on reqSent. @@ -114,7 +119,7 @@ func (c *Client) newClientConn(conn ServerCodec) *clientConn { ctx := context.Background() ctx = context.WithValue(ctx, clientContextKey{}, c) ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo()) - handler := newHandler(ctx, conn, c.idgen, c.services) + handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize) return &clientConn{conn, handler} } @@ -128,14 +133,17 @@ type readOp struct { batch bool } +// requestOp represents a pending request. This is used for both batch and non-batch +// requests. type requestOp struct { - ids []json.RawMessage - err error - resp chan *jsonrpcMessage // receives up to len(ids) responses - sub *ClientSubscription // only set for EthSubscribe requests + ids []json.RawMessage + err error + resp chan []*jsonrpcMessage // the response goes here + sub *ClientSubscription // set for Subscribe requests. + hadResponse bool // true when the request was responded to } -func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, error) { +func (op *requestOp) wait(ctx context.Context, c *Client) ([]*jsonrpcMessage, error) { select { case <-ctx.Done(): // Send the timeout to dispatch so it can remove the request IDs. @@ -211,7 +219,7 @@ func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (* return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme) } - return newClient(ctx, reconnect) + return newClient(ctx, cfg, reconnect) } // ClientFromContext retrieves the client from the context, if any. 
This can be used to perform @@ -221,33 +229,42 @@ func ClientFromContext(ctx context.Context) (*Client, bool) { return client, ok } -func newClient(initctx context.Context, connect reconnectFunc) (*Client, error) { +func newClient(initctx context.Context, cfg *clientConfig, connect reconnectFunc) (*Client, error) { conn, err := connect(initctx) if err != nil { return nil, err } - c := initClient(conn, randomIDGenerator(), new(serviceRegistry)) + c := initClient(conn, new(serviceRegistry), cfg) c.reconnectFunc = connect return c, nil } -func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *Client { +func initClient(conn ServerCodec, services *serviceRegistry, cfg *clientConfig) *Client { _, isHTTP := conn.(*httpConn) c := &Client{ - isHTTP: isHTTP, - idgen: idgen, - services: services, - writeConn: conn, - close: make(chan struct{}), - closing: make(chan struct{}), - didClose: make(chan struct{}), - reconnected: make(chan ServerCodec), - readOp: make(chan readOp), - readErr: make(chan error), - reqInit: make(chan *requestOp), - reqSent: make(chan error, 1), - reqTimeout: make(chan *requestOp), + isHTTP: isHTTP, + services: services, + idgen: cfg.idgen, + batchItemLimit: cfg.batchItemLimit, + batchResponseMaxSize: cfg.batchResponseLimit, + writeConn: conn, + close: make(chan struct{}), + closing: make(chan struct{}), + didClose: make(chan struct{}), + reconnected: make(chan ServerCodec), + readOp: make(chan readOp), + readErr: make(chan error), + reqInit: make(chan *requestOp), + reqSent: make(chan error, 1), + reqTimeout: make(chan *requestOp), } + + // Set defaults. + if c.idgen == nil { + c.idgen = randomIDGenerator() + } + + // Launch the main loop. if !isHTTP { go c.dispatch(conn) } @@ -325,7 +342,10 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str if err != nil { return err } - op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)} + op := &requestOp{ + ids: []json.RawMessage{msg.ID}, + resp: make(chan []*jsonrpcMessage, 1), + } if c.isHTTP { err = c.sendHTTP(ctx, op, msg) @@ -337,9 +357,12 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str } // dispatch has accepted the request and will close the channel when it quits. - switch resp, err := op.wait(ctx, c); { - case err != nil: + batchresp, err := op.wait(ctx, c) + if err != nil { return err + } + resp := batchresp[0] + switch { case resp.Error != nil: return resp.Error case len(resp.Result) == 0: @@ -380,7 +403,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { ) op := &requestOp{ ids: make([]json.RawMessage, len(b)), - resp: make(chan *jsonrpcMessage, len(b)), + resp: make(chan []*jsonrpcMessage, 1), } for i, elem := range b { msg, err := c.newMessage(elem.Method, elem.Args...) @@ -398,28 +421,48 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { } else { err = c.send(ctx, op, msgs) } + if err != nil { + return err + } + + batchresp, err := op.wait(ctx, c) + if err != nil { + return err + } // Wait for all responses to come back. - for n := 0; n < len(b) && err == nil; n++ { - var resp *jsonrpcMessage - resp, err = op.wait(ctx, c) - if err != nil { - break + for n := 0; n < len(batchresp) && err == nil; n++ { + resp := batchresp[n] + if resp == nil { + // Ignore null responses. These can happen for batches sent via HTTP. + continue } + // Find the element corresponding to this response. 
- // The element is guaranteed to be present because dispatch - // only sends valid IDs to our channel. - elem := &b[byID[string(resp.ID)]] - if resp.Error != nil { + index, ok := byID[string(resp.ID)] + if !ok { + continue + } + delete(byID, string(resp.ID)) + + // Assign result and error. + elem := &b[index] + switch { + case resp.Error != nil: elem.Error = resp.Error - continue - } - if len(resp.Result) == 0 { + case resp.Result == nil: elem.Error = ErrNoResult - continue + default: + elem.Error = json.Unmarshal(resp.Result, elem.Result) } - elem.Error = json.Unmarshal(resp.Result, elem.Result) } + + // Check that all expected responses have been received. + for _, index := range byID { + elem := &b[index] + elem.Error = ErrMissingBatchResponse + } + return err } @@ -480,7 +523,7 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf } op := &requestOp{ ids: []json.RawMessage{msg.ID}, - resp: make(chan *jsonrpcMessage), + resp: make(chan []*jsonrpcMessage, 1), sub: newClientSubscription(c, namespace, chanVal), } @@ -495,6 +538,13 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf return op.sub, nil } +// SupportsSubscriptions reports whether subscriptions are supported by the client +// transport. When this returns false, Subscribe and related methods will return +// ErrNotificationsUnsupported. +func (c *Client) SupportsSubscriptions() bool { + return !c.isHTTP +} + func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMessage, error) { msg := &jsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method} if paramsIn != nil { // prevent sending "params":null diff --git a/rpc/client_opt.go b/rpc/client_opt.go index 5ad7c22b3..5bef08cca 100644 --- a/rpc/client_opt.go +++ b/rpc/client_opt.go @@ -28,11 +28,18 @@ type ClientOption interface { } type clientConfig struct { + // HTTP settings httpClient *http.Client httpHeaders http.Header httpAuth HTTPAuth + // WebSocket options wsDialer *websocket.Dialer + + // RPC handler options + idgen func() ID + batchItemLimit int + batchResponseLimit int } func (cfg *clientConfig) initHeaders() { @@ -104,3 +111,25 @@ func WithHTTPAuth(a HTTPAuth) ClientOption { // Usually, HTTPAuth functions will call h.Set("authorization", "...") to add // auth information to the request. type HTTPAuth func(h http.Header) error + +// WithBatchItemLimit changes the maximum number of items allowed in batch requests. +// +// Note: this option applies when processing incoming batch requests. It does not affect +// batch requests sent by the client. +func WithBatchItemLimit(limit int) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.batchItemLimit = limit + }) +} + +// WithBatchResponseSizeLimit changes the maximum number of response bytes that can be +// generated for batch requests. When this limit is reached, further calls in the batch +// will not be processed. +// +// Note: this option applies when processing incoming batch requests. It does not affect +// batch requests sent by the client. +func WithBatchResponseSizeLimit(sizeLimit int) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.batchResponseLimit = sizeLimit + }) +} diff --git a/rpc/client_test.go b/rpc/client_test.go index a94a54929..7c96b2d66 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -169,10 +169,12 @@ func TestClientBatchRequest(t *testing.T) { } } +// This checks that, for HTTP connections, the length of batch responses is validated to +// match the request exactly. 
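Taken together, the BatchCallContext changes above mean a batch no longer fails wholesale when the server answers out of order, skips requests, or pads the response: results are matched by ID, and anything unanswered surfaces as ErrMissingBatchResponse on its element. A hedged usage sketch against a local node (endpoint and methods are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var blockNum, chainID string
	batch := []rpc.BatchElem{
		{Method: "eth_blockNumber", Result: &blockNum},
		{Method: "eth_chainId", Result: &chainID},
	}
	// Only transport-level failures are returned here; per-request
	// problems land in the individual BatchElem.Error fields.
	if err := client.BatchCallContext(context.Background(), batch); err != nil {
		log.Fatal(err)
	}
	for i, elem := range batch {
		switch {
		case errors.Is(elem.Error, rpc.ErrMissingBatchResponse):
			fmt.Printf("call %d was dropped by the server\n", i)
		case elem.Error != nil:
			fmt.Printf("call %d failed: %v\n", i, elem.Error)
		}
	}
	fmt.Println("head:", blockNum, "chain:", chainID)
}
```

The test that follows exercises the HTTP side of this behavior.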
func TestClientBatchRequest_len(t *testing.T) { b, err := json.Marshal([]jsonrpcMessage{ - {Version: "2.0", ID: json.RawMessage("1"), Method: "foo", Result: json.RawMessage(`"0x1"`)}, - {Version: "2.0", ID: json.RawMessage("2"), Method: "bar", Result: json.RawMessage(`"0x2"`)}, + {Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)}, + {Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)}, }) if err != nil { t.Fatal("failed to encode jsonrpc message:", err) } @@ -185,37 +187,102 @@ func TestClientBatchRequest_len(t *testing.T) { })) t.Cleanup(s.Close) - client, err := Dial(s.URL) - if err != nil { - t.Fatal("failed to dial test server:", err) - } - defer client.Close() - t.Run("too-few", func(t *testing.T) { + client, err := Dial(s.URL) + if err != nil { + t.Fatal("failed to dial test server:", err) + } + defer client.Close() + batch := []BatchElem{ - {Method: "foo"}, - {Method: "bar"}, - {Method: "baz"}, + {Method: "foo", Result: new(string)}, + {Method: "bar", Result: new(string)}, + {Method: "baz", Result: new(string)}, } ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) defer cancelFn() - if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { - t.Errorf("expected %q but got: %v", ErrBadResult, err) + + if err := client.BatchCallContext(ctx, batch); err != nil { + t.Fatal("error:", err) + } + for i, elem := range batch[:2] { + if elem.Error != nil { + t.Errorf("expected no error for batch element %d, got %q", i, elem.Error) + } + } + for i, elem := range batch[2:] { + if elem.Error != ErrMissingBatchResponse { + t.Errorf("wrong error %q for batch element %d", elem.Error, i+2) + } + } }) t.Run("too-many", func(t *testing.T) { + client, err := Dial(s.URL) + if err != nil { + t.Fatal("failed to dial test server:", err) + } + defer client.Close() + batch := []BatchElem{ - {Method: "foo"}, + {Method: "foo", Result: new(string)}, } ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) defer cancelFn() - if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { - t.Errorf("expected %q but got: %v", ErrBadResult, err) + + if err := client.BatchCallContext(ctx, batch); err != nil { + t.Fatal("error:", err) + } + for i, elem := range batch[:1] { + if elem.Error != nil { + t.Errorf("expected no error for batch element %d, got %q", i, elem.Error) + } + } + for i, elem := range batch[1:] { + if elem.Error != ErrMissingBatchResponse { + t.Errorf("wrong error %q for batch element %d", elem.Error, i+1) + } + } }) } +// This checks that the client can handle the case where the server doesn't +// respond to all requests in a batch. +func TestClientBatchRequestLimit(t *testing.T) { + server := newTestServer() + defer server.Stop() + server.SetBatchLimits(2, 100000) + client := DialInProc(server) + + batch := []BatchElem{ + {Method: "foo"}, + {Method: "bar"}, + {Method: "baz"}, + } + err := client.BatchCall(batch) + if err != nil { + t.Fatal("unexpected error:", err) + } + + // Check that the first response indicates an error with batch size. + var err0 Error + if !errors.As(batch[0].Error, &err0) { + t.Log("error zero:", batch[0].Error) + t.Fatalf("batch elem 0 has wrong error type: %T", batch[0].Error) + } else { + if err0.ErrorCode() != -32600 || err0.Error() != errMsgBatchTooLarge { + t.Fatalf("wrong error on batch elem zero: %v", err0) + } + } + + // Check that remaining response batch elements are reported as absent.
+ for i, elem := range batch[1:] { + if elem.Error != ErrMissingBatchResponse { + t.Fatalf("batch elem %d has unexpected error: %v", i+1, elem.Error) + } + } +} + func TestClientNotify(t *testing.T) { server := newTestServer() defer server.Stop() @@ -310,7 +377,7 @@ func testClientCancel(transport string, t *testing.T) { _, hasDeadline := ctx.Deadline() t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline) // default: - // t.Logf("got expected error with %v wait time: %v", timeout, err) + // t.Logf("got expected error with %v wait time: %v", timeout, err) } cancel() } @@ -487,7 +554,8 @@ func TestClientSubscriptionUnsubscribeServer(t *testing.T) { defer srv.Stop() // Create the client on the other end of the pipe. - client, _ := newClient(context.Background(), func(context.Context) (ServerCodec, error) { + cfg := new(clientConfig) + client, _ := newClient(context.Background(), cfg, func(context.Context) (ServerCodec, error) { return NewCodec(p2), nil }) defer client.Close() diff --git a/rpc/constants_unix_nocgo.go b/rpc/constants_unix_nocgo.go deleted file mode 100644 index a62e4ee52..000000000 --- a/rpc/constants_unix_nocgo.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !cgo && !windows -// +build !cgo,!windows - -package rpc - -var ( - // On Linux, sun_path is 108 bytes in size - // see http://man7.org/linux/man-pages/man7/unix.7.html - max_path_size = 108 -) diff --git a/rpc/errors.go b/rpc/errors.go index 7188332d5..438aff218 100644 --- a/rpc/errors.go +++ b/rpc/errors.go @@ -58,15 +58,19 @@ var ( ) const ( - errcodeDefault = -32000 - errcodeNotificationsUnsupported = -32001 - errcodeTimeout = -32002 - errcodePanic = -32603 - errcodeMarshalError = -32603 + errcodeDefault = -32000 + errcodeTimeout = -32002 + errcodeResponseTooLarge = -32003 + errcodePanic = -32603 + errcodeMarshalError = -32603 + + legacyErrcodeNotificationsUnsupported = -32001 ) const ( - errMsgTimeout = "request timed out" + errMsgTimeout = "request timed out" + errMsgResponseTooLarge = "response too large" + errMsgBatchTooLarge = "batch too large" ) type methodNotFoundError struct{ method string } @@ -77,6 +81,34 @@ func (e *methodNotFoundError) Error() string { return fmt.Sprintf("the method %s does not exist/is not available", e.method) } +type notificationsUnsupportedError struct{} + +func (e notificationsUnsupportedError) Error() string { + return "notifications not supported" +} + +func (e notificationsUnsupportedError) ErrorCode() int { return -32601 } + +// Is checks for equivalence to another error. Here we define that all errors with code +// -32601 (method not found) are equivalent to notificationsUnsupportedError. This is +// done to enable the following pattern: +// +// sub, err := client.Subscribe(...) 
+// if errors.Is(err, rpc.ErrNotificationsUnsupported) { +// // server doesn't support subscriptions +// } +func (e notificationsUnsupportedError) Is(other error) bool { + if other == (notificationsUnsupportedError{}) { + return true + } + rpcErr, ok := other.(Error) + if ok { + code := rpcErr.ErrorCode() + return code == -32601 || code == legacyErrcodeNotificationsUnsupported + } + return false +} + type subscriptionNotFoundError struct{ namespace, subscription string } func (e *subscriptionNotFoundError) ErrorCode() int { return -32601 } diff --git a/rpc/handler.go b/rpc/handler.go index 7285ff040..04a9c9b05 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -49,17 +49,19 @@ import ( // h.removeRequestOp(op) // timeout, etc. // } type handler struct { - reg *serviceRegistry - unsubscribeCb *callback - idgen func() ID // subscription ID generator - respWait map[string]*requestOp // active client requests - clientSubs map[string]*ClientSubscription // active client subscriptions - callWG sync.WaitGroup // pending call goroutines - rootCtx context.Context // canceled by close() - cancelRoot func() // cancel function for rootCtx - conn jsonWriter // where responses will be sent - log log.Logger - allowSubscribe bool + reg *serviceRegistry + unsubscribeCb *callback + idgen func() ID // subscription ID generator + respWait map[string]*requestOp // active client requests + clientSubs map[string]*ClientSubscription // active client subscriptions + callWG sync.WaitGroup // pending call goroutines + rootCtx context.Context // canceled by close() + cancelRoot func() // cancel function for rootCtx + conn jsonWriter // where responses will be sent + log log.Logger + allowSubscribe bool + batchRequestLimit int + batchResponseMaxSize int subLock sync.Mutex serverSubs map[ID]*Subscription @@ -70,19 +72,21 @@ type callProc struct { notifiers []*Notifier } -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler { +func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) h := &handler{ - reg: reg, - idgen: idgen, - conn: conn, - respWait: make(map[string]*requestOp), - clientSubs: make(map[string]*ClientSubscription), - rootCtx: rootCtx, - cancelRoot: cancelRoot, - allowSubscribe: true, - serverSubs: make(map[ID]*Subscription), - log: log.Root(), + reg: reg, + idgen: idgen, + conn: conn, + respWait: make(map[string]*requestOp), + clientSubs: make(map[string]*ClientSubscription), + rootCtx: rootCtx, + cancelRoot: cancelRoot, + allowSubscribe: true, + serverSubs: make(map[ID]*Subscription), + log: log.Root(), + batchRequestLimit: batchRequestLimit, + batchResponseMaxSize: batchResponseMaxSize, } if conn.remoteAddr() != "" { h.log = h.log.New("conn", conn.remoteAddr()) @@ -134,16 +138,15 @@ func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) { b.doWrite(ctx, conn, false) } -// timeout sends the responses added so far. For the remaining unanswered call -// messages, it sends a timeout error response. -func (b *batchCallBuffer) timeout(ctx context.Context, conn jsonWriter) { +// respondWithError sends the responses added so far. For the remaining unanswered call +// messages, it responds with the given error. 
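The batchRequestLimit and batchResponseMaxSize fields threaded through the handler here are set from both directions: servers via the new Server.SetBatchLimits (exercised by TestClientBatchRequestLimit above) and clients via the dial options added in rpc/client_opt.go. A brief configuration sketch (the endpoint and limit values are illustrative):

```go
import (
	"context"

	"github.com/ethereum/go-ethereum/rpc"
)

func setupWithBatchLimits(ctx context.Context) (*rpc.Server, *rpc.Client, error) {
	// Server side: cap incoming batches at 100 calls and 10 MB of
	// accumulated response data.
	srv := rpc.NewServer()
	srv.SetBatchLimits(100, 10*1024*1024)

	// Client side: these options govern batches arriving at the client
	// over bidirectional transports; they do not limit batches it sends.
	client, err := rpc.DialOptions(ctx, "ws://localhost:8546", // illustrative endpoint
		rpc.WithBatchItemLimit(100),
		rpc.WithBatchResponseSizeLimit(10*1024*1024),
	)
	return srv, client, err
}
```

When the item limit is exceeded, only the first call in the batch receives an error response (code -32600, "batch too large"), since JSON-RPC has no way to address the batch as a whole; the remaining elements surface as ErrMissingBatchResponse on the client.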
+func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) { b.mutex.Lock() defer b.mutex.Unlock() for _, msg := range b.calls { if !msg.isNotification() { - resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) - b.resp = append(b.resp, resp) + b.resp = append(b.resp, msg.errorResponse(err)) } } b.doWrite(ctx, conn, true) @@ -171,17 +174,24 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { }) return } - - // Handle non-call messages first: - calls := make([]*jsonrpcMessage, 0, len(msgs)) - for _, msg := range msgs { - if handled := h.handleImmediate(msg); !handled { - calls = append(calls, msg) - } + // Apply limit on total number of requests. + if h.batchRequestLimit != 0 && len(msgs) > h.batchRequestLimit { + h.startCallProc(func(cp *callProc) { + h.respondWithBatchTooLarge(cp, msgs) + }) + return } + + // Handle non-call messages first. + // Here we need to find the requestOp that sent the request batch. + calls := make([]*jsonrpcMessage, 0, len(msgs)) + h.handleResponses(msgs, func(msg *jsonrpcMessage) { + calls = append(calls, msg) + }) if len(calls) == 0 { return } + // Process calls on a goroutine because they may block indefinitely: h.startCallProc(func(cp *callProc) { var ( @@ -199,10 +209,12 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { if timeout, ok := ContextRequestTimeout(cp.ctx); ok { timer = time.AfterFunc(timeout, func() { cancel() - callBuffer.timeout(cp.ctx, h.conn) + err := &internalServerError{errcodeTimeout, errMsgTimeout} + callBuffer.respondWithError(cp.ctx, h.conn, err) }) } + responseBytes := 0 for { // No need to handle rest of calls if timed out. if cp.ctx.Err() != nil { @@ -214,61 +226,88 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { } resp := h.handleCallMsg(cp, msg) callBuffer.pushResponse(resp) + if resp != nil && h.batchResponseMaxSize != 0 { + responseBytes += len(resp.Result) + if responseBytes > h.batchResponseMaxSize { + err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge} + callBuffer.respondWithError(cp.ctx, h.conn, err) + break + } + } } if timer != nil { timer.Stop() } - callBuffer.write(cp.ctx, h.conn) + h.addSubscriptions(cp.notifiers) + callBuffer.write(cp.ctx, h.conn) for _, n := range cp.notifiers { n.activate() } }) } -// handleMsg handles a single message. -func (h *handler) handleMsg(msg *jsonrpcMessage) { - if ok := h.handleImmediate(msg); ok { - return +func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) { + resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge}) + // Find the first call and add its "id" field to the error. + // This is the best we can do, given that the protocol doesn't have a way + // of reporting an error for the entire batch. + for _, msg := range batch { + if msg.isCall() { + resp.ID = msg.ID + break + } } - h.startCallProc(func(cp *callProc) { - var ( - responded sync.Once - timer *time.Timer - cancel context.CancelFunc - ) - cp.ctx, cancel = context.WithCancel(cp.ctx) - defer cancel() + h.conn.writeJSON(cp.ctx, []*jsonrpcMessage{resp}, true) +} - // Cancel the request context after timeout and send an error response. Since the - // running method might not return immediately on timeout, we must wait for the - // timeout concurrently with processing the request. 
- if timeout, ok := ContextRequestTimeout(cp.ctx); ok { - timer = time.AfterFunc(timeout, func() { - cancel() - responded.Do(func() { - resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) - h.conn.writeJSON(cp.ctx, resp, true) - }) - }) - } - - answer := h.handleCallMsg(cp, msg) - if timer != nil { - timer.Stop() - } - h.addSubscriptions(cp.notifiers) - if answer != nil { - responded.Do(func() { - h.conn.writeJSON(cp.ctx, answer, false) - }) - } - for _, n := range cp.notifiers { - n.activate() - } }) } +func (h *handler) handleNonBatchCall(cp *callProc, msg *jsonrpcMessage) { + var ( + responded sync.Once + timer *time.Timer + cancel context.CancelFunc + ) + cp.ctx, cancel = context.WithCancel(cp.ctx) + defer cancel() + + // Cancel the request context after timeout and send an error response. Since the + // running method might not return immediately on timeout, we must wait for the + // timeout concurrently with processing the request. + if timeout, ok := ContextRequestTimeout(cp.ctx); ok { + timer = time.AfterFunc(timeout, func() { + cancel() + responded.Do(func() { + resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) + h.conn.writeJSON(cp.ctx, resp, true) + }) + }) + } + + answer := h.handleCallMsg(cp, msg) + if timer != nil { + timer.Stop() + } + h.addSubscriptions(cp.notifiers) + if answer != nil { + responded.Do(func() { + h.conn.writeJSON(cp.ctx, answer, false) + }) + } + for _, n := range cp.notifiers { + n.activate() + } +} + // close cancels all requests except for inflightReq and waits for // call goroutines to shut down. func (h *handler) close(err error, inflightReq *requestOp) { @@ -349,23 +388,60 @@ func (h *handler) startCallProc(fn func(*callProc)) { }() } -// handleImmediate executes non-call messages. It returns false if the message is a -// call or requires a reply. -func (h *handler) handleImmediate(msg *jsonrpcMessage) bool { - start := time.Now() - switch { - case msg.isNotification(): - if strings.HasSuffix(msg.Method, notificationMethodSuffix) { - h.handleSubscriptionResult(msg) - return true +// handleResponses processes method call responses. +func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) { + var resolvedops []*requestOp + handleResp := func(msg *jsonrpcMessage) { + op := h.respWait[string(msg.ID)] + if op == nil { + h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) + return } - return false - case msg.isResponse(): - h.handleResponse(msg) - h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start)) - return true - default: - return false + resolvedops = append(resolvedops, op) + delete(h.respWait, string(msg.ID)) + + // For subscription responses, start the subscription if the server + // indicates success. EthSubscribe gets unblocked in either case through + // the op.resp channel.
+ if op.sub != nil { + if msg.Error != nil { + op.err = msg.Error + } else { + op.err = json.Unmarshal(msg.Result, &op.sub.subid) + if op.err == nil { + go op.sub.run() + h.clientSubs[op.sub.subid] = op.sub + } + } + } + + if !op.hadResponse { + op.hadResponse = true + op.resp <- batch + } + } + + for _, msg := range batch { + start := time.Now() + switch { + case msg.isResponse(): + handleResp(msg) + h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start)) + + case msg.isNotification(): + if strings.HasSuffix(msg.Method, notificationMethodSuffix) { + h.handleSubscriptionResult(msg) + continue + } + handleCall(msg) + + default: + handleCall(msg) + } + } + + for _, op := range resolvedops { + h.removeRequestOp(op) } } @@ -381,33 +457,6 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) { } } -// handleResponse processes method call responses. -func (h *handler) handleResponse(msg *jsonrpcMessage) { - op := h.respWait[string(msg.ID)] - if op == nil { - h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) - return - } - delete(h.respWait, string(msg.ID)) - // For normal responses, just forward the reply to Call/BatchCall. - if op.sub == nil { - op.resp <- msg - return - } - // For subscription responses, start the subscription if the server - // indicates success. EthSubscribe gets unblocked in either case through - // the op.resp channel. - defer close(op.resp) - if msg.Error != nil { - op.err = msg.Error - return - } - if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { - go op.sub.run() - h.clientSubs[op.sub.subid] = op.sub - } -} - // handleCallMsg executes a call message and returns the answer. func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMessage { start := time.Now() @@ -416,6 +465,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess h.handleCall(ctx, msg) h.log.Debug("Served "+msg.Method, "duration", time.Since(start)) return nil + case msg.isCall(): resp := h.handleCall(ctx, msg) var ctx []interface{} @@ -430,8 +480,10 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess h.log.Debug("Served "+msg.Method, ctx...) } return resp + case msg.hasValidID(): return msg.errorResponse(&invalidRequestError{"invalid request"}) + default: return errorMessage(&invalidRequestError{"invalid request"}) } @@ -451,6 +503,7 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage if callb == nil { return msg.errorResponse(&methodNotFoundError{method: msg.Method}) } + args, err := parsePositionalArguments(msg.Params, callb.argTypes) if err != nil { return msg.errorResponse(&invalidParamsError{err.Error()}) @@ -461,6 +514,7 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage //begin PluGeth code injection start := time.Now() answer := h.runMethod(cp.ctx, msg, callb, args) + // Collect the statistics for RPC calls if metrics is enabled. // We only care about pure rpc call. Filter out subscription. if callb != h.unsubscribeCb { @@ -473,16 +527,14 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage rpcServingTimer.UpdateSince(start) updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start)) } + return answer } // handleSubscribe processes *_subscribe method calls. 
func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage { if !h.allowSubscribe { - return msg.errorResponse(&internalServerError{ - code: errcodeNotificationsUnsupported, - message: ErrNotificationsUnsupported.Error(), - }) + return msg.errorResponse(ErrNotificationsUnsupported) } // Subscription method name is first argument. diff --git a/rpc/http.go b/rpc/http.go index 8712f9961..741fa1c0e 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -139,7 +139,7 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { var cfg clientConfig cfg.httpClient = client fn := newClientTransportHTTP(endpoint, &cfg) - return newClient(context.Background(), fn) + return newClient(context.Background(), &cfg, fn) } func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc { @@ -176,11 +176,12 @@ func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) e } defer respBody.Close() - var respmsg jsonrpcMessage - if err := json.NewDecoder(respBody).Decode(&respmsg); err != nil { + var resp jsonrpcMessage + batch := [1]*jsonrpcMessage{&resp} + if err := json.NewDecoder(respBody).Decode(&resp); err != nil { return err } - op.resp <- &respmsg + op.resp <- batch[:] return nil } @@ -191,16 +192,12 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr return err } defer respBody.Close() - var respmsgs []jsonrpcMessage + + var respmsgs []*jsonrpcMessage if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil { return err } - if len(respmsgs) != len(msgs) { - return fmt.Errorf("batch has %d requests but response has %d: %w", len(msgs), len(respmsgs), ErrBadResult) - } - for i := 0; i < len(respmsgs); i++ { - op.resp <- &respmsgs[i] - } + op.resp <- respmsgs return nil } diff --git a/rpc/inproc.go b/rpc/inproc.go index fbe9a40ce..306974e04 100644 --- a/rpc/inproc.go +++ b/rpc/inproc.go @@ -24,7 +24,8 @@ import ( // DialInProc attaches an in-process connection to the given RPC server. func DialInProc(handler *Server) *Client { initctx := context.Background() - c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) { + cfg := new(clientConfig) + c, _ := newClient(initctx, cfg, func(context.Context) (ServerCodec, error) { p1, p2 := net.Pipe() go handler.ServeCodec(NewCodec(p1), 0) return NewCodec(p2), nil diff --git a/rpc/ipc.go b/rpc/ipc.go index d9e0de62e..a08245b27 100644 --- a/rpc/ipc.go +++ b/rpc/ipc.go @@ -46,7 +46,8 @@ func (s *Server) ServeListener(l net.Listener) error { // The context is used for the initial connection establishment. It does not // affect subsequent interactions with the client. func DialIPC(ctx context.Context, endpoint string) (*Client, error) { - return newClient(ctx, newClientTransportIPC(endpoint)) + cfg := new(clientConfig) + return newClient(ctx, cfg, newClientTransportIPC(endpoint)) } func newClientTransportIPC(endpoint string) reconnectFunc { diff --git a/rpc/ipc_unix.go b/rpc/ipc_unix.go index 987634770..33c1cad54 100644 --- a/rpc/ipc_unix.go +++ b/rpc/ipc_unix.go @@ -29,11 +29,17 @@ import ( "github.com/ethereum/go-ethereum/log" ) +const ( + // On Linux, sun_path is 108 bytes in size + // see http://man7.org/linux/man-pages/man7/unix.7.html + maxPathSize = int(108) +) + // ipcListen will create a Unix socket on the given endpoint. func ipcListen(endpoint string) (net.Listener, error) { // account for null-terminator too - if len(endpoint)+1 > int(max_path_size) { - log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. 
", max_path_size-1), + if len(endpoint)+1 > maxPathSize { + log.Warn(fmt.Sprintf("The ipc endpoint is longer than %d characters. ", maxPathSize-1), "endpoint", endpoint) } diff --git a/rpc/ipc_windows.go b/rpc/ipc_windows.go index adb1826f0..efec38cf3 100644 --- a/rpc/ipc_windows.go +++ b/rpc/ipc_windows.go @@ -24,7 +24,7 @@ import ( "net" "time" - "gopkg.in/natefinch/npipe.v2" + "github.com/Microsoft/go-winio" ) // This is used if the dialing context has no deadline. It is much smaller than the @@ -33,17 +33,12 @@ const defaultPipeDialTimeout = 2 * time.Second // ipcListen will create a named pipe on the given endpoint. func ipcListen(endpoint string) (net.Listener, error) { - return npipe.Listen(endpoint) + return winio.ListenPipe(endpoint, nil) } // newIPCConnection will connect to a named pipe with the given endpoint as name. func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) { - timeout := defaultPipeDialTimeout - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - if timeout < 0 { - timeout = 0 - } - } - return npipe.DialTimeout(endpoint, timeout) + ctx, cancel := context.WithTimeout(ctx, defaultPipeDialTimeout) + defer cancel() + return winio.DialPipeContext(ctx, endpoint) } diff --git a/rpc/server.go b/rpc/server.go index 089bbb1fd..2742adf07 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -46,9 +46,11 @@ type Server struct { services serviceRegistry idgen func() ID - mutex sync.Mutex - codecs map[ServerCodec]struct{} - run atomic.Bool + mutex sync.Mutex + codecs map[ServerCodec]struct{} + run atomic.Bool + batchItemLimit int + batchResponseLimit int } // NewServer creates a new server instance with no registered handlers. @@ -65,6 +67,17 @@ func NewServer() *Server { return server } +// SetBatchLimits sets limits applied to batch requests. There are two limits: 'itemLimit' +// is the maximum number of items in a batch. 'maxResponseSize' is the maximum number of +// response bytes across all requests in a batch. +// +// This method should be called before processing any requests via ServeCodec, ServeHTTP, +// ServeListener etc. +func (s *Server) SetBatchLimits(itemLimit, maxResponseSize int) { + s.batchItemLimit = itemLimit + s.batchResponseLimit = maxResponseSize +} + // RegisterName creates a service for the given receiver type under the given name. When no // methods on the given receiver match the criteria to be either a RPC method or a // subscription an error is returned. 
Otherwise a new service is created and added to the @@ -86,7 +99,12 @@ func (s *Server) ServeCodec(codec ServerCodec, options CodecOption) { } defer s.untrackCodec(codec) - c := initClient(codec, s.idgen, &s.services) + cfg := &clientConfig{ + idgen: s.idgen, + batchItemLimit: s.batchItemLimit, + batchResponseLimit: s.batchResponseLimit, + } + c := initClient(codec, &s.services, cfg) <-codec.closed() c.Close() } @@ -118,7 +136,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { return } - h := newHandler(ctx, codec, s.idgen, &s.services) + h := newHandler(ctx, codec, s.idgen, &s.services, s.batchItemLimit, s.batchResponseLimit) h.allowSubscribe = false defer h.close(io.EOF, nil) diff --git a/rpc/server_test.go b/rpc/server_test.go index f1a9b3d5c..5d3929dfd 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -70,6 +70,7 @@ func TestServer(t *testing.T) { func runTestScript(t *testing.T, file string) { server := newTestServer() + server.SetBatchLimits(4, 100000) content, err := os.ReadFile(file) if err != nil { t.Fatal(err) @@ -152,3 +153,41 @@ func TestServerShortLivedConn(t *testing.T) { } } } + +func TestServerBatchResponseSizeLimit(t *testing.T) { + server := newTestServer() + defer server.Stop() + server.SetBatchLimits(100, 60) + var ( + batch []BatchElem + client = DialInProc(server) + ) + for i := 0; i < 5; i++ { + batch = append(batch, BatchElem{ + Method: "test_echo", + Args: []any{"x", 1}, + Result: new(echoResult), + }) + } + if err := client.BatchCall(batch); err != nil { + t.Fatal("error sending batch:", err) + } + for i := range batch { + // We expect the first two queries to be ok, but after that the size limit takes effect. + if i < 2 { + if batch[i].Error != nil { + t.Fatalf("batch elem %d has unexpected error: %v", i, batch[i].Error) + } + continue + } + // After two, we expect an error. + re, ok := batch[i].Error.(Error) + if !ok { + t.Fatalf("batch elem %d has wrong error: %v", i, batch[i].Error) + } + wantedCode := errcodeResponseTooLarge + if re.ErrorCode() != wantedCode { + t.Errorf("batch elem %d wrong error code, have %d want %d", i, re.ErrorCode(), wantedCode) + } + } +} diff --git a/rpc/stdio.go b/rpc/stdio.go index ae32db26e..084e5f070 100644 --- a/rpc/stdio.go +++ b/rpc/stdio.go @@ -32,7 +32,8 @@ func DialStdIO(ctx context.Context) (*Client, error) { // DialIO creates a client which uses the given IO channels func DialIO(ctx context.Context, in io.Reader, out io.Writer) (*Client, error) { - return newClient(ctx, newClientTransportIO(in, out)) + cfg := new(clientConfig) + return newClient(ctx, cfg, newClientTransportIO(in, out)) } func newClientTransportIO(in io.Reader, out io.Writer) reconnectFunc { diff --git a/rpc/subscription.go b/rpc/subscription.go index 334ead3ac..3231c2cee 100644 --- a/rpc/subscription.go +++ b/rpc/subscription.go @@ -32,8 +32,17 @@ import ( ) var ( - // ErrNotificationsUnsupported is returned when the connection doesn't support notifications - ErrNotificationsUnsupported = errors.New("notifications not supported") + // ErrNotificationsUnsupported is returned by the client when the connection doesn't + // support notifications. You can use this error value to check for subscription + // support like this: + // + // sub, err := client.EthSubscribe(ctx, channel, "newHeads", true) + // if errors.Is(err, rpc.ErrNotificationsUnsupported) { + // // Server does not support subscriptions, fall back to polling. 
+ // } + // + ErrNotificationsUnsupported = notificationsUnsupportedError{} + // ErrSubscriptionNotFound is returned when the notification for the given id is not found ErrSubscriptionNotFound = errors.New("subscription not found") ) diff --git a/rpc/testdata/invalid-batch-toolarge.js b/rpc/testdata/invalid-batch-toolarge.js new file mode 100644 index 000000000..218fea58a --- /dev/null +++ b/rpc/testdata/invalid-batch-toolarge.js @@ -0,0 +1,13 @@ +// This file checks the behavior of the batch item limit code. +// In tests, the batch item limit is set to 4. So to trigger the error, +// all batches in this file have 5 elements. + +// For batches that do not contain any calls, a response message with "id" == null +// is returned. + +--> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}] +<-- [{"jsonrpc":"2.0","id":null,"error":{"code":-32600,"message":"batch too large"}}] + +// For batches with at least one call, the call's "id" is used. +--> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","id":3,"method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}] +<-- [{"jsonrpc":"2.0","id":3,"error":{"code":-32600,"message":"batch too large"}}] diff --git a/rpc/websocket.go b/rpc/websocket.go index 889562d1a..86cf50594 100644 --- a/rpc/websocket.go +++ b/rpc/websocket.go @@ -197,7 +197,7 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale if err != nil { return nil, err } - return newClient(ctx, connect) + return newClient(ctx, cfg, connect) } // DialWebsocket creates a new RPC client that communicates with a JSON-RPC server @@ -214,7 +214,7 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error if err != nil { return nil, err } - return newClient(ctx, connect) + return newClient(ctx, cfg, connect) } func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, error) { @@ -278,24 +278,21 @@ type websocketCodec struct { conn *websocket.Conn info PeerInfo - wg sync.WaitGroup - pingReset chan struct{} + wg sync.WaitGroup + pingReset chan struct{} + pongReceived chan struct{} } func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec { conn.SetReadLimit(wsMessageSizeLimit) - conn.SetPongHandler(func(appData string) error { - conn.SetReadDeadline(time.Time{}) - return nil - }) - encode := func(v interface{}, isErrorResponse bool) error { return conn.WriteJSON(v) } wc := &websocketCodec{ - jsonCodec: NewFuncCodec(conn, encode, conn.ReadJSON).(*jsonCodec), - conn: conn, - pingReset: make(chan struct{}, 1), + jsonCodec: NewFuncCodec(conn, encode, conn.ReadJSON).(*jsonCodec), + conn: conn, + pingReset: make(chan struct{}, 1), + pongReceived: make(chan struct{}), info: PeerInfo{ Transport: "ws", RemoteAddr: conn.RemoteAddr().String(), @@ -306,6 +303,13 @@ func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) Serve wc.info.HTTP.Origin = req.Get("Origin") wc.info.HTTP.UserAgent = req.Get("User-Agent") // Start pinger. 
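// Illustration (not part of the patch): the pong handler registered below runs
// on the connection's read goroutine, so it only signals pingLoop instead of
// touching deadlines itself; pingLoop stays the single owner of the read
// deadline. Distilled, the keepalive cycle is:
//
//	case <-pingTimer.C: // connection idle: probe the peer
//		conn.WriteMessage(websocket.PingMessage, nil)
//		conn.SetReadDeadline(time.Now().Add(wsPongTimeout)) // arm
//	case <-pongReceived: // peer answered in time
//		conn.SetReadDeadline(time.Time{}) // disarm
//
// If no pong arrives before the deadline, the blocked read fails and the
// connection is torn down, giving dead-peer detection without racing
// SetReadDeadline between the reader and the pinger.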
+ conn.SetPongHandler(func(appData string) error { + select { + case wc.pongReceived <- struct{}{}: + case <-wc.closed(): + } + return nil + }) wc.wg.Add(1) go wc.pingLoop() return wc @@ -334,26 +338,31 @@ func (wc *websocketCodec) writeJSON(ctx context.Context, v interface{}, isError // pingLoop sends periodic ping frames when the connection is idle. func (wc *websocketCodec) pingLoop() { - var timer = time.NewTimer(wsPingInterval) + var pingTimer = time.NewTimer(wsPingInterval) defer wc.wg.Done() - defer timer.Stop() + defer pingTimer.Stop() for { select { case <-wc.closed(): return + case <-wc.pingReset: - if !timer.Stop() { - <-timer.C + if !pingTimer.Stop() { + <-pingTimer.C } - timer.Reset(wsPingInterval) - case <-timer.C: + pingTimer.Reset(wsPingInterval) + + case <-pingTimer.C: wc.jsonCodec.encMu.Lock() wc.conn.SetWriteDeadline(time.Now().Add(wsPingWriteTimeout)) wc.conn.WriteMessage(websocket.PingMessage, nil) wc.conn.SetReadDeadline(time.Now().Add(wsPongTimeout)) wc.jsonCodec.encMu.Unlock() - timer.Reset(wsPingInterval) + pingTimer.Reset(wsPingInterval) + + case <-wc.pongReceived: + wc.conn.SetReadDeadline(time.Time{}) } } } diff --git a/signer/core/api.go b/signer/core/api.go index 3c1c94801..43eb89ee0 100644 --- a/signer/core/api.go +++ b/signer/core/api.go @@ -553,6 +553,7 @@ func (api *SignerAPI) SignTransaction(ctx context.Context, args apitypes.SendTxA // If we are in 'rejectMode', then reject rather than show the user warnings if api.rejectMode { if err := msgs.GetWarnings(); err != nil { + log.Info("Signing aborted due to warnings. In order to continue despite warnings, please use the flag '--advanced'.") return nil, err } } @@ -625,6 +626,7 @@ func (api *SignerAPI) SignGnosisSafeTx(ctx context.Context, signerAddress common // If we are in 'rejectMode', then reject rather than show the user warnings if api.rejectMode { if err := msgs.GetWarnings(); err != nil { + log.Info("Signing aborted due to warnings. In order to continue despite warnings, please use the flag '--advanced'.") return nil, err } } diff --git a/signer/core/api_test.go b/signer/core/api_test.go index 9bb55bddc..5a9de161b 100644 --- a/signer/core/api_test.go +++ b/signer/core/api_test.go @@ -282,7 +282,7 @@ func TestSignTx(t *testing.T) { t.Fatal(err) } parsedTx := &types.Transaction{} - rlp.Decode(bytes.NewReader(res.Raw), parsedTx) + rlp.DecodeBytes(res.Raw, parsedTx) //The tx should NOT be modified by the UI if parsedTx.Value().Cmp(tx.Value.ToInt()) != 0 { @@ -308,7 +308,7 @@ func TestSignTx(t *testing.T) { t.Fatal(err) } parsedTx2 := &types.Transaction{} - rlp.Decode(bytes.NewReader(res.Raw), parsedTx2) + rlp.DecodeBytes(res.Raw, parsedTx2) //The tx should be modified by the UI if parsedTx2.Value().Cmp(tx.Value.ToInt()) != 0 { diff --git a/tests/block_test.go b/tests/block_test.go index 9e72f7f97..645b4b702 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -18,11 +18,12 @@ package tests import ( "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" ) func TestBlockchain(t *testing.T) { - t.Parallel() - bt := new(testMatcher) // General state tests are 'exported' as blockchain tests, but we can run them natively. // For speedier CI-runs, the line below can be uncommented, so those are skipped. 
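// Illustration (not part of the patch): rlp.DecodeBytes, as adopted by the
// signer tests above, is the byte-slice shorthand for
// rlp.Decode(bytes.NewReader(raw), val). A hedged round-trip sketch with a
// legacy transaction:
//
//	tx := types.NewTransaction(0, common.Address{}, big.NewInt(1), 21000, big.NewInt(1), nil)
//	raw, _ := rlp.EncodeToBytes(tx)
//
//	parsed := new(types.Transaction)
//	if err := rlp.DecodeBytes(raw, parsed); err != nil {
//		t.Fatal(err)
//	}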
@@ -48,14 +49,40 @@ func TestBlockchain(t *testing.T) { bt.skipLoad(`.*randomStatetest94.json.*`) bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { - if err := bt.checkFailure(t, test.Run(false)); err != nil { - t.Errorf("test without snapshotter failed: %v", err) - } - if err := bt.checkFailure(t, test.Run(true)); err != nil { - t.Errorf("test with snapshotter failed: %v", err) - } + execBlockTest(t, bt, test) }) // There is also a LegacyTests folder, containing blockchain tests generated // prior to Istanbul. However, they are all derived from GeneralStateTests, // which run natively, so there's no reason to run them here. } + +// TestExecutionSpec runs the test fixtures from execution-spec-tests. +func TestExecutionSpec(t *testing.T) { + if !common.FileExist(executionSpecDir) { + t.Skipf("directory %s does not exist", executionSpecDir) + } + bt := new(testMatcher) + + // cancun tests are not complete yet + bt.skipLoad(`^cancun/`) + bt.skipLoad(`-fork=Cancun`) + + bt.walk(t, executionSpecDir, func(t *testing.T, name string, test *BlockTest) { + execBlockTest(t, bt, test) + }) +} + +func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) { + if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil)); err != nil { + t.Errorf("test in hash mode without snapshotter failed: %v", err) + } + if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil)); err != nil { + t.Errorf("test in hash mode with snapshotter failed: %v", err) + } + if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil)); err != nil { + t.Errorf("test in path mode without snapshotter failed: %v", err) + } + if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil)); err != nil { + t.Errorf("test in path mode with snapshotter failed: %v", err) + } +} diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 45b7e49ae..ad1d34fb2 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -38,6 +38,9 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // A BlockTest checks handling of entire blocks. 
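// Illustration (not part of the patch): execBlockTest above fans a single
// fixture out over a 2x2 matrix, state scheme {hash, path} times snapshotter
// {off, on}. The same matrix written table-driven, using the new
// BlockTest.Run(snapshotter, scheme, tracer) signature:
//
//	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
//		for _, snapshotter := range []bool{false, true} {
//			if err := bt.checkFailure(t, test.Run(snapshotter, scheme, nil)); err != nil {
//				t.Errorf("scheme=%s snapshotter=%v: %v", scheme, snapshotter, err)
//			}
//		}
//	}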
@@ -70,24 +73,27 @@ type btBlock struct { //go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go type btHeader struct { - Bloom types.Bloom - Coinbase common.Address - MixHash common.Hash - Nonce types.BlockNonce - Number *big.Int - Hash common.Hash - ParentHash common.Hash - ReceiptTrie common.Hash - StateRoot common.Hash - TransactionsTrie common.Hash - UncleHash common.Hash - ExtraData []byte - Difficulty *big.Int - GasLimit uint64 - GasUsed uint64 - Timestamp uint64 - BaseFeePerGas *big.Int - WithdrawalsRoot *common.Hash + Bloom types.Bloom + Coinbase common.Address + MixHash common.Hash + Nonce types.BlockNonce + Number *big.Int + Hash common.Hash + ParentHash common.Hash + ReceiptTrie common.Hash + StateRoot common.Hash + TransactionsTrie common.Hash + UncleHash common.Hash + ExtraData []byte + Difficulty *big.Int + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + BaseFeePerGas *big.Int + WithdrawalsRoot *common.Hash + BlobGasUsed *uint64 + ExcessBlobGas *uint64 + ParentBeaconBlockRoot *common.Hash } type btHeaderMarshaling struct { @@ -98,18 +104,34 @@ type btHeaderMarshaling struct { GasUsed math.HexOrDecimal64 Timestamp math.HexOrDecimal64 BaseFeePerGas *math.HexOrDecimal256 + BlobGasUsed *math.HexOrDecimal64 + ExcessBlobGas *math.HexOrDecimal64 } -func (t *BlockTest) Run(snapshotter bool) error { +func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger) error { config, ok := Forks[t.json.Network] if !ok { return UnsupportedForkError{t.json.Network} } - // import pre accounts & construct test genesis block & state root - db := rawdb.NewMemoryDatabase() + var ( + db = rawdb.NewMemoryDatabase() + tconf = &trie.Config{} + ) + if scheme == rawdb.PathScheme { + tconf.PathDB = pathdb.Defaults + } else { + tconf.HashDB = hashdb.Defaults + } + // Commit genesis state gspec := t.genesis(config) - gblock := gspec.MustCommit(db) + triedb := trie.NewDatabase(db, tconf) + gblock, err := gspec.Commit(db, triedb) + if err != nil { + return err + } + triedb.Close() // close the db to prevent memory leak + if gblock.Hash() != t.json.Genesis.Hash { return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6]) } @@ -119,12 +141,14 @@ func (t *BlockTest) Run(snapshotter bool) error { // Wrap the original engine within the beacon-engine engine := beacon.New(ethash.NewFaker()) - cache := &core.CacheConfig{TrieCleanLimit: 0} + cache := &core.CacheConfig{TrieCleanLimit: 0, StateScheme: scheme} if snapshotter { cache.SnapshotLimit = 1 cache.SnapshotWait = true } - chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{ + Tracer: tracer, + }, nil, nil) if err != nil { return err } @@ -156,18 +180,20 @@ func (t *BlockTest) Run(snapshotter bool) error { func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis { return &core.Genesis{ - Config: config, - Nonce: t.json.Genesis.Nonce.Uint64(), - Timestamp: t.json.Genesis.Timestamp, - ParentHash: t.json.Genesis.ParentHash, - ExtraData: t.json.Genesis.ExtraData, - GasLimit: t.json.Genesis.GasLimit, - GasUsed: t.json.Genesis.GasUsed, - Difficulty: t.json.Genesis.Difficulty, - Mixhash: t.json.Genesis.MixHash, - Coinbase: t.json.Genesis.Coinbase, - Alloc: t.json.Pre, - BaseFee: t.json.Genesis.BaseFeePerGas, + Config: config, + Nonce: t.json.Genesis.Nonce.Uint64(), + Timestamp: 
t.json.Genesis.Timestamp, + ParentHash: t.json.Genesis.ParentHash, + ExtraData: t.json.Genesis.ExtraData, + GasLimit: t.json.Genesis.GasLimit, + GasUsed: t.json.Genesis.GasUsed, + Difficulty: t.json.Genesis.Difficulty, + Mixhash: t.json.Genesis.MixHash, + Coinbase: t.json.Genesis.Coinbase, + Alloc: t.json.Pre, + BaseFee: t.json.Genesis.BaseFeePerGas, + BlobGasUsed: t.json.Genesis.BlobGasUsed, + ExcessBlobGas: t.json.Genesis.ExcessBlobGas, } } @@ -276,6 +302,15 @@ func validateHeader(h *btHeader, h2 *types.Header) error { if !reflect.DeepEqual(h.WithdrawalsRoot, h2.WithdrawalsHash) { return fmt.Errorf("withdrawalsRoot: want: %v have: %v", h.WithdrawalsRoot, h2.WithdrawalsHash) } + if !reflect.DeepEqual(h.BlobGasUsed, h2.BlobGasUsed) { + return fmt.Errorf("blobGasUsed: want: %v have: %v", h.BlobGasUsed, h2.BlobGasUsed) + } + if !reflect.DeepEqual(h.ExcessBlobGas, h2.ExcessBlobGas) { + return fmt.Errorf("excessBlobGas: want: %v have: %v", h.ExcessBlobGas, h2.ExcessBlobGas) + } + if !reflect.DeepEqual(h.ParentBeaconBlockRoot, h2.ParentBeaconRoot) { + return fmt.Errorf("parentBeaconBlockRoot: want: %v have: %v", h.ParentBeaconBlockRoot, h2.ParentBeaconRoot) + } return nil } diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go index 926de0458..c29bb2ef1 100644 --- a/tests/fuzzers/les/les-fuzzer.go +++ b/tests/fuzzers/les/les-fuzzer.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -44,9 +45,9 @@ var ( testChainLen = 256 testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056") - chain *core.BlockChain - addrHashes []common.Hash - txHashes []common.Hash + chain *core.BlockChain + addresses []common.Address + txHashes []common.Hash chtTrie *trie.Trie bloomTrie *trie.Trie @@ -54,7 +55,7 @@ var ( bloomKeys [][]byte ) -func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) { +func makechain() (bc *core.BlockChain, addresses []common.Address, txHashes []common.Hash) { gspec := &core.Genesis{ Config: params.TestChainConfig, Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}}, @@ -76,7 +77,7 @@ func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) { tx, _ = types.SignTx(types.NewTransaction(nonce, addr, big.NewInt(10000), params.TxGas, big.NewInt(params.GWei), nil), signer, bankKey) } gen.AddTx(tx) - addrHashes = append(addrHashes, crypto.Keccak256Hash(addr[:])) + addresses = append(addresses, addr) txHashes = append(txHashes, tx.Hash()) }) bc, _ = core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) @@ -87,8 +88,8 @@ func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) { } func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) { - chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) - bloomTrie = 
trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) + chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults)) + bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults)) for i := 0; i < testChainLen; i++ { // The element in CHT is -> key := make([]byte, 8) @@ -106,7 +107,7 @@ func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [ } func init() { - chain, addrHashes, txHashes = makechain() + chain, addresses, txHashes = makechain() chtTrie, bloomTrie, chtKeys, bloomKeys = makeTries() } @@ -115,7 +116,8 @@ type fuzzer struct { pool *txpool.TxPool chainLen int - addr, txs []common.Hash + addresses []common.Address + txs []common.Hash nonce uint64 chtKeys [][]byte @@ -128,17 +130,20 @@ type fuzzer struct { } func newFuzzer(input []byte) *fuzzer { + pool := legacypool.New(legacypool.DefaultConfig, chain) + txpool, _ := txpool.New(new(big.Int).SetUint64(legacypool.DefaultConfig.PriceLimit), chain, []txpool.SubPool{pool}) + return &fuzzer{ chain: chain, chainLen: testChainLen, - addr: addrHashes, + addresses: addresses, txs: txHashes, chtTrie: chtTrie, bloomTrie: bloomTrie, chtKeys: chtKeys, bloomKeys: bloomKeys, nonce: uint64(len(txHashes)), - pool: txpool.NewTxPool(txpool.DefaultConfig, params.TestChainConfig, chain), + pool: txpool, input: bytes.NewReader(input), } } @@ -194,12 +199,12 @@ func (f *fuzzer) randomBlockHash() common.Hash { return common.BytesToHash(f.read(common.HashLength)) } -func (f *fuzzer) randomAddrHash() []byte { - i := f.randomInt(3 * len(f.addr)) - if i < len(f.addr) { - return f.addr[i].Bytes() +func (f *fuzzer) randomAddress() []byte { + i := f.randomInt(3 * len(f.addresses)) + if i < len(f.addresses) { + return f.addresses[i].Bytes() } - return f.read(common.HashLength) + return f.read(common.AddressLength) } func (f *fuzzer) randomCHTTrieKey() []byte { @@ -311,8 +316,8 @@ func Fuzz(input []byte) int { req := &l.GetCodePacket{Reqs: make([]l.CodeReq, f.randomInt(l.MaxCodeFetch+1))} for i := range req.Reqs { req.Reqs[i] = l.CodeReq{ - BHash: f.randomBlockHash(), - AccKey: f.randomAddrHash(), + BHash: f.randomBlockHash(), + AccountAddress: f.randomAddress(), } } f.doFuzz(l.GetCodeMsg, req) @@ -329,15 +334,15 @@ func Fuzz(input []byte) int { for i := range req.Reqs { if f.randomBool() { req.Reqs[i] = l.ProofReq{ - BHash: f.randomBlockHash(), - AccKey: f.randomAddrHash(), - Key: f.randomAddrHash(), - FromLevel: uint(f.randomX(3)), + BHash: f.randomBlockHash(), + AccountAddress: f.randomAddress(), + Key: f.randomAddress(), + FromLevel: uint(f.randomX(3)), } } else { req.Reqs[i] = l.ProofReq{ BHash: f.randomBlockHash(), - Key: f.randomAddrHash(), + Key: f.randomAddress(), FromLevel: uint(f.randomX(3)), } } diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go index 2881c7a7c..ba490b761 100644 --- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go +++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go @@ -21,12 +21,12 @@ import ( "encoding/binary" "fmt" "io" - "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/trie" + "golang.org/x/exp/slices" ) type kv struct { @@ -34,12 +34,6 @@ type kv struct { t bool } -type entrySlice []*kv - -func (p entrySlice) Len() int { return len(p) } -func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } -func (p entrySlice) Swap(i, j int) { p[i], p[j] = 
p[j], p[i] } - type fuzzer struct { input io.Reader exhausted bool @@ -62,7 +56,7 @@ func (f *fuzzer) readInt() uint64 { } func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) { - trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) + trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) size := f.readInt() // Fill it with some fluff @@ -97,14 +91,16 @@ func (f *fuzzer) fuzz() int { if f.exhausted { return 0 // input too short } - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } if len(entries) <= 1 { return 0 } - sort.Sort(entries) + slices.SortFunc(entries, func(a, b *kv) int { + return bytes.Compare(a.k, b.k) + }) var ok = 0 for { @@ -117,10 +113,10 @@ func (f *fuzzer) fuzz() int { break } proof := memorydb.New() - if err := tr.Prove(entries[start].k, 0, proof); err != nil { + if err := tr.Prove(entries[start].k, proof); err != nil { panic(fmt.Sprintf("Failed to prove the first node %v", err)) } - if err := tr.Prove(entries[end-1].k, 0, proof); err != nil { + if err := tr.Prove(entries[end-1].k, proof); err != nil { panic(fmt.Sprintf("Failed to prove the last node %v", err)) } var keys [][]byte diff --git a/tests/fuzzers/snap/fuzz_handler.go b/tests/fuzzers/snap/fuzz_handler.go index 2e5dcd6e2..784b526dc 100644 --- a/tests/fuzzers/snap/fuzz_handler.go +++ b/tests/fuzzers/snap/fuzz_handler.go @@ -71,7 +71,6 @@ func getChain() *core.BlockChain { TrieDirtyLimit: 0, TrieTimeLimit: 5 * time.Minute, TrieCleanNoPrefetch: true, - TrieCleanRejournal: 0, SnapshotLimit: 100, SnapshotWait: true, } diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go index 0099e9e16..3d6552409 100644 --- a/tests/fuzzers/stacktrie/trie_fuzzer.go +++ b/tests/fuzzers/stacktrie/trie_fuzzer.go @@ -23,7 +23,6 @@ import ( "fmt" "hash" "io" - "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -33,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" + "golang.org/x/exp/slices" ) type fuzzer struct { @@ -104,19 +104,6 @@ func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil } type kv struct { k, v []byte } -type kvs []kv - -func (k kvs) Len() int { - return len(k) -} - -func (k kvs) Less(i, j int) bool { - return bytes.Compare(k[i].k, k[j].k) < 0 -} - -func (k kvs) Swap(i, j int) { - k[j], k[i] = k[i], k[j] -} // Fuzz is the fuzzing entry-point. 
// The function must return @@ -149,14 +136,14 @@ func (f *fuzzer) fuzz() int { // This spongeDb is used to check the sequence of disk-db-writes var ( spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()} - dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA)) + dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA), nil) trieA = trie.NewEmpty(dbA) spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} - dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB)) + dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil) trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme()) }) - vals kvs + vals []kv useful bool maxElements = 10000 // operate on unique keys only @@ -184,15 +171,20 @@ func (f *fuzzer) fuzz() int { return 0 } // Flush trie -> database - rootA, nodes := trieA.Commit(false) + rootA, nodes, err := trieA.Commit(false) + if err != nil { + panic(err) + } if nodes != nil { - dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + dbA.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) } // Flush memdb -> disk (sponge) dbA.Commit(rootA, false) // Stacktrie requires sorted insertion - sort.Sort(vals) + slices.SortFunc(vals, func(a, b kv) int { + return bytes.Compare(a.k, b.k) + }) for _, kv := range vals { if f.debugging { fmt.Printf("{\"%#x\" , \"%#x\"} // stacktrie.Update\n", kv.k, kv.v) @@ -232,7 +224,7 @@ func (f *fuzzer) fuzz() int { panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC)) } trieA, _ = trie.New(trie.TrieID(rootA), dbA) - iterA := trieA.NodeIterator(nil) + iterA := trieA.MustNodeIterator(nil) for iterA.Next(true) { if iterA.Hash() == (common.Hash{}) { if _, present := nodeset[string(iterA.Path())]; present { diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go index 7be41b57c..687f5efb1 100644 --- a/tests/fuzzers/trie/trie-fuzzer.go +++ b/tests/fuzzers/trie/trie-fuzzer.go @@ -143,7 +143,7 @@ func Fuzz(input []byte) int { func runRandTest(rt randTest) error { var ( - triedb = trie.NewDatabase(rawdb.NewMemoryDatabase()) + triedb = trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) tr = trie.NewEmpty(triedb) origin = types.EmptyRootHash values = make(map[string]string) // tracks content of the trie @@ -165,9 +165,12 @@ func runRandTest(rt randTest) error { case opHash: tr.Hash() case opCommit: - hash, nodes := tr.Commit(false) + hash, nodes, err := tr.Commit(false) + if err != nil { + return err + } if nodes != nil { - if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes)); err != nil { + if err := triedb.Update(hash, origin, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { return err } } @@ -179,7 +182,7 @@ func runRandTest(rt randTest) error { origin = hash case opItercheckhash: checktr := trie.NewEmpty(triedb) - it := trie.NewIterator(tr.NodeIterator(nil)) + it := trie.NewIterator(tr.MustNodeIterator(nil)) for it.Next() { checktr.MustUpdate(it.Key, it.Value) } @@ -187,7 +190,7 @@ func runRandTest(rt randTest) error { return errors.New("hash mismatch in opItercheckhash") } case opProve: - rt[i].err = tr.Prove(step.key, 0, proofDb{}) + rt[i].err = tr.Prove(step.key, proofDb{}) } // Abort the test on error. if rt[i].err != nil { diff --git a/tests/gen_btheader.go b/tests/gen_btheader.go index 985ea692d..80ad89e03 100644 --- a/tests/gen_btheader.go +++ b/tests/gen_btheader.go @@ -17,24 +17,27 @@ var _ = (*btHeaderMarshaling)(nil) // MarshalJSON marshals as JSON. 
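// Illustration (not part of the patch): the sort.Interface boilerplate deleted
// from the fuzzers above (Len/Less/Swap on a named slice type) reduces to a
// single comparison function with golang.org/x/exp/slices:
//
//	entries := []*kv{{k: []byte{2}}, {k: []byte{1}}}
//	slices.SortFunc(entries, func(a, b *kv) int {
//		return bytes.Compare(a.k, b.k) // negative/zero/positive ordering
//	})
//
// Note that SortFunc at this x/exp version takes an int-returning comparison,
// not a bool "less" function.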
func (b btHeader) MarshalJSON() ([]byte, error) { type btHeader struct { - Bloom types.Bloom - Coinbase common.Address - MixHash common.Hash - Nonce types.BlockNonce - Number *math.HexOrDecimal256 - Hash common.Hash - ParentHash common.Hash - ReceiptTrie common.Hash - StateRoot common.Hash - TransactionsTrie common.Hash - UncleHash common.Hash - ExtraData hexutil.Bytes - Difficulty *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - GasUsed math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - BaseFeePerGas *math.HexOrDecimal256 - WithdrawalsRoot *common.Hash + Bloom types.Bloom + Coinbase common.Address + MixHash common.Hash + Nonce types.BlockNonce + Number *math.HexOrDecimal256 + Hash common.Hash + ParentHash common.Hash + ReceiptTrie common.Hash + StateRoot common.Hash + TransactionsTrie common.Hash + UncleHash common.Hash + ExtraData hexutil.Bytes + Difficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + GasUsed math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + BaseFeePerGas *math.HexOrDecimal256 + WithdrawalsRoot *common.Hash + BlobGasUsed *math.HexOrDecimal64 + ExcessBlobGas *math.HexOrDecimal64 + ParentBeaconBlockRoot *common.Hash } var enc btHeader enc.Bloom = b.Bloom @@ -55,30 +58,36 @@ func (b btHeader) MarshalJSON() ([]byte, error) { enc.Timestamp = math.HexOrDecimal64(b.Timestamp) enc.BaseFeePerGas = (*math.HexOrDecimal256)(b.BaseFeePerGas) enc.WithdrawalsRoot = b.WithdrawalsRoot + enc.BlobGasUsed = (*math.HexOrDecimal64)(b.BlobGasUsed) + enc.ExcessBlobGas = (*math.HexOrDecimal64)(b.ExcessBlobGas) + enc.ParentBeaconBlockRoot = b.ParentBeaconBlockRoot return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (b *btHeader) UnmarshalJSON(input []byte) error { type btHeader struct { - Bloom *types.Bloom - Coinbase *common.Address - MixHash *common.Hash - Nonce *types.BlockNonce - Number *math.HexOrDecimal256 - Hash *common.Hash - ParentHash *common.Hash - ReceiptTrie *common.Hash - StateRoot *common.Hash - TransactionsTrie *common.Hash - UncleHash *common.Hash - ExtraData *hexutil.Bytes - Difficulty *math.HexOrDecimal256 - GasLimit *math.HexOrDecimal64 - GasUsed *math.HexOrDecimal64 - Timestamp *math.HexOrDecimal64 - BaseFeePerGas *math.HexOrDecimal256 - WithdrawalsRoot *common.Hash + Bloom *types.Bloom + Coinbase *common.Address + MixHash *common.Hash + Nonce *types.BlockNonce + Number *math.HexOrDecimal256 + Hash *common.Hash + ParentHash *common.Hash + ReceiptTrie *common.Hash + StateRoot *common.Hash + TransactionsTrie *common.Hash + UncleHash *common.Hash + ExtraData *hexutil.Bytes + Difficulty *math.HexOrDecimal256 + GasLimit *math.HexOrDecimal64 + GasUsed *math.HexOrDecimal64 + Timestamp *math.HexOrDecimal64 + BaseFeePerGas *math.HexOrDecimal256 + WithdrawalsRoot *common.Hash + BlobGasUsed *math.HexOrDecimal64 + ExcessBlobGas *math.HexOrDecimal64 + ParentBeaconBlockRoot *common.Hash } var dec btHeader if err := json.Unmarshal(input, &dec); err != nil { @@ -138,5 +147,14 @@ func (b *btHeader) UnmarshalJSON(input []byte) error { if dec.WithdrawalsRoot != nil { b.WithdrawalsRoot = dec.WithdrawalsRoot } + if dec.BlobGasUsed != nil { + b.BlobGasUsed = (*uint64)(dec.BlobGasUsed) + } + if dec.ExcessBlobGas != nil { + b.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) + } + if dec.ParentBeaconBlockRoot != nil { + b.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot + } return nil } diff --git a/tests/gen_sttransaction.go b/tests/gen_sttransaction.go index 7693a207a..9b5aecbfe 100644 --- a/tests/gen_sttransaction.go +++ b/tests/gen_sttransaction.go @@ -6,6 
+6,7 @@ import ( "encoding/json" "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" @@ -26,6 +27,9 @@ func (s stTransaction) MarshalJSON() ([]byte, error) { GasLimit []math.HexOrDecimal64 `json:"gasLimit"` Value []string `json:"value"` PrivateKey hexutil.Bytes `json:"secretKey"` + Sender *common.Address `json:"sender"` + BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + BlobGasFeeCap *math.HexOrDecimal256 `json:"maxFeePerBlobGas,omitempty"` } var enc stTransaction enc.GasPrice = (*math.HexOrDecimal256)(s.GasPrice) @@ -43,6 +47,9 @@ func (s stTransaction) MarshalJSON() ([]byte, error) { } enc.Value = s.Value enc.PrivateKey = s.PrivateKey + enc.Sender = s.Sender + enc.BlobVersionedHashes = s.BlobVersionedHashes + enc.BlobGasFeeCap = (*math.HexOrDecimal256)(s.BlobGasFeeCap) return json.Marshal(&enc) } @@ -59,6 +66,9 @@ func (s *stTransaction) UnmarshalJSON(input []byte) error { GasLimit []math.HexOrDecimal64 `json:"gasLimit"` Value []string `json:"value"` PrivateKey *hexutil.Bytes `json:"secretKey"` + Sender *common.Address `json:"sender"` + BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + BlobGasFeeCap *math.HexOrDecimal256 `json:"maxFeePerBlobGas,omitempty"` } var dec stTransaction if err := json.Unmarshal(input, &dec); err != nil { @@ -97,5 +107,14 @@ func (s *stTransaction) UnmarshalJSON(input []byte) error { if dec.PrivateKey != nil { s.PrivateKey = *dec.PrivateKey } + if dec.Sender != nil { + s.Sender = dec.Sender + } + if dec.BlobVersionedHashes != nil { + s.BlobVersionedHashes = dec.BlobVersionedHashes + } + if dec.BlobGasFeeCap != nil { + s.BlobGasFeeCap = (*big.Int)(dec.BlobGasFeeCap) + } return nil } diff --git a/tests/init.go b/tests/init.go index 869f1bfcd..99b7e4d33 100644 --- a/tests/init.go +++ b/tests/init.go @@ -299,6 +299,44 @@ var Forks = map[string]*params.ChainConfig{ TerminalTotalDifficulty: big.NewInt(0), ShanghaiTime: u64(15_000), }, + "Cancun": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(0), + }, + "ShanghaiToCancunAtTime15k": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + ShanghaiTime: u64(0), + CancunTime: u64(15_000), + }, } // AvailableForks returns the set of defined fork names diff --git a/tests/init_test.go b/tests/init_test.go index 7d8743efc..3ab15e765 100644 --- a/tests/init_test.go +++ b/tests/init_test.go @@ -41,6 +41,7 @@ var ( transactionTestDir = filepath.Join(baseDir, "TransactionTests") rlpTestDir = filepath.Join(baseDir, 
"RLPTests") difficultyTestDir = filepath.Join(baseDir, "BasicTests") + executionSpecDir = filepath.Join(".", "spec-tests", "fixtures") benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks") ) diff --git a/tests/state_test.go b/tests/state_test.go index 9d3862e1d..094dafcaf 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" @@ -48,23 +50,27 @@ func TestState(t *testing.T) { st.slow(`^stStaticCall/static_Return50000`) st.slow(`^stSystemOperationsTest/CallRecursiveBomb`) st.slow(`^stTransactionTest/Opcodes_TransactionInit`) - // Very time consuming st.skipLoad(`^stTimeConsuming/`) st.skipLoad(`.*vmPerformance/loop.*`) - // Uses 1GB RAM per tested fork st.skipLoad(`^stStaticCall/static_Call1MB`) // Broken tests: - // - // The stEOF tests are generated with EOF as part of Shanghai, which - // is erroneous. Therefore, these tests are skipped. - st.skipLoad(`^EIPTests/stEOF/`) + // EOF is not part of cancun + st.skipLoad(`^stEOF/`) + + // EIP-4844 tests need to be regenerated due to the data-to-blob rename + st.skipLoad(`^stEIP4844-blobtransactions/`) + // Expected failures: + // These EIP-4844 tests need to be regenerated. + st.fails(`stEIP4844-blobtransactions/opcodeBlobhashOutOfRange.json`, "test has incorrect state root") + st.fails(`stEIP4844-blobtransactions/opcodeBlobhBounds.json`, "test has incorrect state root") // For Istanbul, older tests were moved into LegacyTests for _, dir := range []string{ + filepath.Join(baseDir, "EIPTests", "StateTests"), stateTestDir, legacyStateTestDir, benchmarksDir, @@ -74,21 +80,52 @@ func TestState(t *testing.T) { subtest := subtest key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) - t.Run(key+"/trie", func(t *testing.T) { + t.Run(key+"/hash/trie", func(t *testing.T) { withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { - _, _, err := test.Run(subtest, vmconfig, false) - return st.checkFailure(t, err) + var result error + test.Run(subtest, vmconfig, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + result = st.checkFailure(t, err) + }) + return result }) }) - t.Run(key+"/snap", func(t *testing.T) { + t.Run(key+"/hash/snap", func(t *testing.T) { withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { - snaps, statedb, err := test.Run(subtest, vmconfig, true) - if snaps != nil && statedb != nil { - if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil { - return err + var result error + test.Run(subtest, vmconfig, true, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + if snaps != nil && state != nil { + if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil { + result = err + return + } } - } - return st.checkFailure(t, err) + result = st.checkFailure(t, err) + }) + return result + }) + }) + t.Run(key+"/path/trie", func(t *testing.T) { + withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { + var result error + test.Run(subtest, vmconfig, false, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + result = st.checkFailure(t, err) + }) + return result + }) + }) + t.Run(key+"/path/snap", func(t *testing.T) 
{ + withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { + var result error + test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + if snaps != nil && state != nil { + if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil { + result = err + return + } + } + result = st.checkFailure(t, err) + }) + return result }) }) } @@ -186,7 +223,8 @@ func runBenchmark(b *testing.B, t *StateTest) { vmconfig.ExtraEips = eips block := t.genesis(config).ToBlock() - _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false) + triedb, _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false, rawdb.HashScheme) + defer triedb.Close() var baseFee *big.Int if rules.IsLondon { diff --git a/tests/state_test_util.go b/tests/state_test_util.go index eec91b67a..8c255c1b5 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -39,6 +39,8 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "golang.org/x/crypto/sha3" ) @@ -113,6 +115,9 @@ type stTransaction struct { GasLimit []uint64 `json:"gasLimit"` Value []string `json:"value"` PrivateKey []byte `json:"secretKey"` + Sender *common.Address `json:"sender"` + BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + BlobGasFeeCap *big.Int `json:"maxFeePerBlobGas,omitempty"` } type stTransactionMarshaling struct { @@ -122,6 +127,7 @@ type stTransactionMarshaling struct { Nonce math.HexOrDecimal64 GasLimit []math.HexOrDecimal64 PrivateKey hexutil.Bytes + BlobGasFeeCap *math.HexOrDecimal256 } // GetChainConfig takes a fork definition and returns a chain config. @@ -184,38 +190,50 @@ func (t *StateTest) checkError(subtest StateSubtest, err error) error { } // Run executes a specific subtest and verifies the post-state and logs -func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) { - snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter) - if checkedErr := t.checkError(subtest, err); checkedErr != nil { - return snaps, statedb, checkedErr +func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string, postCheck func(err error, snaps *snapshot.Tree, state *state.StateDB)) (result error) { + triedb, snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter, scheme) + + // Invoke the callback at the end of function for further analysis. + defer func() { + postCheck(result, snaps, statedb) + + if triedb != nil { + triedb.Close() + } + }() + checkedErr := t.checkError(subtest, err) + if checkedErr != nil { + return checkedErr } // The error has been checked; if it was unexpected, it's already returned. if err != nil { // Here, an error exists but it was expected. // We do not check the post state or logs. - return snaps, statedb, nil + return nil } post := t.json.Post[subtest.Fork][subtest.Index] // N.B: We need to do this in a two-step process, because the first Commit takes care - // of suicides, and we need to touch the coinbase _after_ it has potentially suicided. + // of self-destructs, and we need to touch the coinbase _after_ it has potentially self-destructed. 
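// Illustration (not part of the patch): Run now reports through a postCheck
// callback instead of returning the live state objects. The underlying shape,
// with openResource/res as stand-ins for the trie database: a named return plus
// defer lets the callback observe the final error while the resource is still
// open, and cleanup always runs after the callback:
//
//	func run(postCheck func(err error)) (result error) {
//		res := openResource()
//		defer func() {
//			postCheck(result) // sees the final value of result
//			res.Close()       // closed only after the callback ran
//		}()
//		// ... execute and assign result ...
//		return result
//	}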
if root != common.Hash(post.Root) { - return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root) + return fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root) } if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) { - return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs) + return fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs) } - return snaps, statedb, nil + statedb, _ = state.New(root, statedb.Database(), snaps) + return nil } // RunNoVerify runs a specific subtest and returns the statedb and post-state root -func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) { +func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) { config, eips, err := GetChainConfig(subtest.Fork) if err != nil { - return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork} + return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork} } vmconfig.ExtraEips = eips + block := t.genesis(config).ToBlock() - snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter) + triedb, snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter, scheme) var baseFee *big.Int if config.IsLondon(new(big.Int)) { @@ -229,7 +247,8 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh post := t.json.Post[subtest.Fork][subtest.Index] msg, err := t.json.Tx.toMessage(post, baseFee) if err != nil { - return nil, nil, common.Hash{}, err + triedb.Close() + return nil, nil, nil, common.Hash{}, err } // Try to recover tx with current signer @@ -237,11 +256,13 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh var ttx types.Transaction err := ttx.UnmarshalBinary(post.TxBytes) if err != nil { - return nil, nil, common.Hash{}, err + triedb.Close() + return nil, nil, nil, common.Hash{}, err } if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil { - return nil, nil, common.Hash{}, err + triedb.Close() + return nil, nil, nil, common.Hash{}, err } } @@ -260,6 +281,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh context.Difficulty = big.NewInt(0) } evm := vm.NewEVM(context, txContext, statedb, config, vmconfig) + // Execute the message. snapshot := statedb.Snapshot() gaspool := new(core.GasPool) @@ -270,23 +292,29 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh } // Add 0-value mining reward. This only makes a difference in the cases // where - // - the coinbase suicided, or + // - the coinbase self-destructed, or // - there are only 'bad' transactions, which aren't executed. In those cases, // the coinbase gets no txfee, so isn't created, and thus needs to be touched statedb.AddBalance(block.Coinbase(), new(big.Int)) - // Commit block - statedb.Commit(config.IsEIP158(block.Number())) - // And _now_ get the state root - root := statedb.IntermediateRoot(config.IsEIP158(block.Number())) - return snaps, statedb, root, err + + // Commit state mutations into database. 
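// Illustration (not part of the patch): statedb.Commit now takes the block
// number and itself returns the final root, replacing the old two-step
// Commit-then-IntermediateRoot sequence. A hedged sketch against an in-memory
// state, assuming the post-change signatures:
//
//	sdb := state.NewDatabase(rawdb.NewMemoryDatabase())
//	statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
//	statedb.AddBalance(common.Address{1}, big.NewInt(1))
//	root, err := statedb.Commit(0, true) // block number, EIP-158 empty-object deletion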
+ root, _ := statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number())) + return triedb, snaps, statedb, root, err } func (t *StateTest) gasLimit(subtest StateSubtest) uint64 { return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] } -func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) { - sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) +func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) { + tconf := &trie.Config{Preimages: true} + if scheme == rawdb.HashScheme { + tconf.HashDB = hashdb.Defaults + } else { + tconf.PathDB = pathdb.Defaults + } + triedb := trie.NewDatabase(db, tconf) + sdb := state.NewDatabaseWithNodeDB(db, triedb) statedb, _ := state.New(types.EmptyRootHash, sdb, nil) for addr, a := range accounts { statedb.SetCode(addr, a.Code) @@ -297,7 +325,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo } } // Commit and re-open to start with a clean state. - root, _ := statedb.Commit(false) + root, _ := statedb.Commit(0, false) var snaps *snapshot.Tree if snapshotter { @@ -307,10 +335,10 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo NoBuild: false, AsyncBuild: false, } - snaps, _ = snapshot.New(snapconfig, db, sdb.TrieDB(), root) + snaps, _ = snapshot.New(snapconfig, db, triedb, root) } statedb, _ = state.New(root, sdb, snaps) - return snaps, statedb + return triedb, snaps, statedb } func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis { @@ -332,9 +360,12 @@ func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis { } func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Message, error) { - // Derive sender from private key if present. var from common.Address - if len(tx.PrivateKey) > 0 { + // If 'sender' field is present, use that + if tx.Sender != nil { + from = *tx.Sender + } else if len(tx.PrivateKey) > 0 { + // Derive sender from private key if needed. key, err := crypto.ToECDSA(tx.PrivateKey) if err != nil { return nil, fmt.Errorf("invalid private key: %v", err) @@ -400,16 +431,18 @@ func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Mess } msg := &core.Message{ - From: from, - To: to, - Nonce: tx.Nonce, - Value: value, - GasLimit: gasLimit, - GasPrice: gasPrice, - GasFeeCap: tx.MaxFeePerGas, - GasTipCap: tx.MaxPriorityFeePerGas, - Data: data, - AccessList: accessList, + From: from, + To: to, + Nonce: tx.Nonce, + Value: value, + GasLimit: gasLimit, + GasPrice: gasPrice, + GasFeeCap: tx.MaxFeePerGas, + GasTipCap: tx.MaxPriorityFeePerGas, + Data: data, + AccessList: accessList, + BlobHashes: tx.BlobVersionedHashes, + BlobGasFeeCap: tx.BlobGasFeeCap, } return msg, nil } diff --git a/tests/testdata b/tests/testdata index bac70c50a..ee3fa4c86 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit bac70c50a579197af68af5fc6d8c7b6163b92c52 +Subproject commit ee3fa4c86d05f99f2717f83a6ad08008490ddf07 diff --git a/trie/committer.go b/trie/committer.go index e825287fd..92163cdb3 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -131,22 +131,15 @@ func (c *committer) store(path []byte, n node) node { // The node is embedded in its parent, in other words, this node // will not be stored in the database independently, mark it as // deleted only if the node was existent in database before. 
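// Illustration (not part of the patch): the committer changes around this hunk
// produce the trienode sets consumed by the new Update signature used
// throughout this patch. The end-to-end commit flow with the post-change APIs,
// as the fuzzers above exercise it:
//
//	tr := trie.NewEmpty(triedb)
//	tr.MustUpdate([]byte("key"), []byte("value"))
//	root, nodes, err := tr.Commit(false)
//	if err != nil {
//		return err
//	}
//	if nodes != nil {
//		triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
//	}
//	triedb.Commit(root, false) // flush the new state to disk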
- prev, ok := c.tracer.accessList[string(path)] + _, ok := c.tracer.accessList[string(path)] if ok { - c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev)) + c.nodes.AddNode(path, trienode.NewDeleted()) } return n } // Collect the dirty node to nodeset for return. - var ( - nhash = common.BytesToHash(hash) - node = trienode.NewWithPrev( - nhash, - nodeToBytes(n), - c.tracer.accessList[string(path)], - ) - ) - c.nodes.AddNode(path, node) + nhash := common.BytesToHash(hash) + c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n))) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. The key diff --git a/trie/database_wrap.go b/trie/database.go similarity index 54% rename from trie/database_wrap.go rename to trie/database.go index b05f56847..2915ff948 100644 --- a/trie/database_wrap.go +++ b/trie/database.go @@ -18,22 +18,28 @@ package trie import ( "errors" - "runtime" - "time" - "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" ) // Config defines all necessary options for database. type Config struct { - Cache int // Memory allowance (MB) to use for caching trie nodes in memory - Journal string // Journal of clean cache to survive node restarts - Preimages bool // Flag whether the preimage of trie key is recorded + Preimages bool // Flag whether the preimage of node key is recorded + HashDB *hashdb.Config // Configs for hash-based scheme + PathDB *pathdb.Config // Configs for experimental path-based scheme +} + +// HashDefaults represents a config for using hash-based scheme with +// default settings. +var HashDefaults = &Config{ + Preimages: false, + HashDB: hashdb.Defaults, } // backend defines the methods needed to access/update trie nodes in different @@ -46,14 +52,20 @@ type backend interface { // according to the state scheme. Initialized(genesisRoot common.Hash) bool - // Size returns the current storage size of the memory cache in front of the - // persistent database layer. - Size() common.StorageSize + // Size returns the current storage size of the diff layers on top of the + // disk layer and the storage size of the nodes cached in the disk layer. + // + // For hash scheme, there is no differentiation between diff layer nodes + // and dirty disk layer nodes, so both are merged into the second return. + Size() (common.StorageSize, common.StorageSize) // Update performs a state transition by committing dirty nodes contained // in the given set in order to update state from the specified parent to // the specified root. - Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error + // + // The passed in maps(nodes, states) will be retained to avoid copying + // everything. Therefore, these maps must not be changed afterwards. + Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error // Commit writes all relevant trie nodes belonging to the specified state // to disk. Report specifies whether logs will be displayed in info level. @@ -67,66 +79,63 @@ type backend interface { // types of node backend as an entrypoint. 
It's responsible for all interactions // relevant with trie nodes and node preimages. type Database struct { - config *Config // Configuration for trie database - diskdb ethdb.Database // Persistent database to store the snapshot - cleans *fastcache.Cache // Megabytes permitted using for read caches - preimages *preimageStore // The store for caching preimages - backend backend // The backend for managing trie nodes + config *Config // Configuration for trie database + diskdb ethdb.Database // Persistent database to store the snapshot + preimages *preimageStore // The store for caching preimages + backend backend // The backend for managing trie nodes } -// prepare initializes the database with provided configs, but the -// database backend is still left as nil. -func prepare(diskdb ethdb.Database, config *Config) *Database { - var cleans *fastcache.Cache - if config != nil && config.Cache > 0 { - if config.Journal == "" { - cleans = fastcache.New(config.Cache * 1024 * 1024) - } else { - cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) - } +// NewDatabase initializes the trie database with default settings, note +// the legacy hash-based scheme is used by default. +func NewDatabase(diskdb ethdb.Database, config *Config) *Database { + // Sanitize the config and use the default one if it's not specified. + if config == nil { + config = HashDefaults } var preimages *preimageStore - if config != nil && config.Preimages { + if config.Preimages { preimages = newPreimageStore(diskdb) } - return &Database{ + db := &Database{ config: config, diskdb: diskdb, - cleans: cleans, preimages: preimages, } -} - -// NewDatabase initializes the trie database with default settings, namely -// the legacy hash-based scheme is used by default. -func NewDatabase(diskdb ethdb.Database) *Database { - return NewDatabaseWithConfig(diskdb, nil) -} - -// NewDatabaseWithConfig initializes the trie database with provided configs. -// The path-based scheme is not activated yet, always initialized with legacy -// hash-based scheme by default. -func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { - db := prepare(diskdb, config) - db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) + if config.HashDB != nil && config.PathDB != nil { + log.Crit("Both 'hash' and 'path' mode are configured") + } + if config.PathDB != nil { + db.backend = pathdb.New(diskdb, config.PathDB) + } else { + db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) + } return db } // Reader returns a reader for accessing all trie nodes with provided state root. -// Nil is returned in case the state is not available. -func (db *Database) Reader(blockRoot common.Hash) Reader { - return db.backend.(*hashdb.Database).Reader(blockRoot) +// An error will be returned if the requested state is not available. +func (db *Database) Reader(blockRoot common.Hash) (Reader, error) { + switch b := db.backend.(type) { + case *hashdb.Database: + return b.Reader(blockRoot) + case *pathdb.Database: + return b.Reader(blockRoot) + } + return nil, errors.New("unknown backend") } // Update performs a state transition by committing dirty nodes contained in the // given set in order to update state from the specified parent to the specified // root. The held pre-images accumulated up to this point will be flushed in case // the size exceeds the threshold. 
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+//
+// The passed in maps (nodes, states) will be retained to avoid copying
+// everything. Therefore, these maps must not be changed afterwards.
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
 	if db.preimages != nil {
 		db.preimages.commit(false)
 	}
-	return db.backend.Update(root, parent, nodes)
+	return db.backend.Update(root, parent, block, nodes, states)
 }
 
 // Commit iterates over all the children of a particular node, writes them out
@@ -139,18 +148,19 @@ func (db *Database) Commit(root common.Hash, report bool) error {
 	return db.backend.Commit(root, report)
 }
 
-// Size returns the storage size of dirty trie nodes in front of the persistent
-// database and the size of cached preimages.
-func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+// Size returns the storage size of diff layer nodes above the persistent disk
+// layer, the dirty nodes buffered within the disk layer, and the size of cached
+// preimages.
+func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
 	var (
-		storages  common.StorageSize
-		preimages common.StorageSize
+		diffs, nodes common.StorageSize
+		preimages    common.StorageSize
 	)
-	storages = db.backend.Size()
+	diffs, nodes = db.backend.Size()
 	if db.preimages != nil {
 		preimages = db.preimages.size()
 	}
-	return storages, preimages
+	return diffs, nodes, preimages
 }
 
 // Initialized returns an indicator if the state data is already initialized
@@ -168,49 +178,14 @@ func (db *Database) Scheme() string {
 // It is meant to be called when closing the blockchain object, so that all
 // resources held can be released correctly.
 func (db *Database) Close() error {
-	if db.preimages != nil {
-		db.preimages.commit(true)
-	}
+	db.WritePreimages()
 	return db.backend.Close()
 }
 
-// saveCache saves clean state cache to given directory path
-// using specified CPU cores.
-func (db *Database) saveCache(dir string, threads int) error {
-	if db.cleans == nil {
-		return nil
-	}
-	log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
-
-	start := time.Now()
-	err := db.cleans.SaveToFileConcurrent(dir, threads)
-	if err != nil {
-		log.Error("Failed to persist clean trie cache", "error", err)
-		return err
-	}
-	log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
-	return nil
-}
-
-// SaveCache atomically saves fast cache data to the given dir using all
-// available CPU cores.
-func (db *Database) SaveCache(dir string) error {
-	return db.saveCache(dir, runtime.GOMAXPROCS(0))
-}
-
-// SaveCachePeriodically atomically saves fast cache data to the given dir with
-// the specified interval. All dump operation will only use a single CPU core.
-func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
-	ticker := time.NewTicker(interval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			db.saveCache(dir, 1)
-		case <-stopCh:
-			return
-		}
+// WritePreimages flushes all accumulated preimages to disk forcibly.
+func (db *Database) WritePreimages() {
+	if db.preimages != nil {
+		db.preimages.commit(true)
 	}
 }
 
@@ -265,3 +240,60 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 	}
 	return hdb.Node(hash)
 }
+
+// Recover rolls back the database to a specified historical point. The state is
+// supported as the rollback destination only if it is a canonical state and the
+// corresponding trie histories exist. It's only supported by path-based
+// database and will return an error for others.
+func (db *Database) Recover(target common.Hash) error {
+	pdb, ok := db.backend.(*pathdb.Database)
+	if !ok {
+		return errors.New("not supported")
+	}
+	return pdb.Recover(target, &trieLoader{db: db})
+}
+
+// Recoverable reports whether the specified state can be recovered. It's only
+// supported by path-based database and will return an error for others.
+func (db *Database) Recoverable(root common.Hash) (bool, error) {
+	pdb, ok := db.backend.(*pathdb.Database)
+	if !ok {
+		return false, errors.New("not supported")
+	}
+	return pdb.Recoverable(root), nil
+}
+
+// Reset wipes all available journal from the persistent database, discards all
+// caches and diff layers, and creates a new disk layer with the given root.
+// It's only supported by path-based database and will return an error for others.
+func (db *Database) Reset(root common.Hash) error {
+	pdb, ok := db.backend.(*pathdb.Database)
+	if !ok {
+		return errors.New("not supported")
+	}
+	return pdb.Reset(root)
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the snapshot without
+// flattening everything down (bad for reorgs). It's only supported by path-based
+// database and will return an error for others.
+func (db *Database) Journal(root common.Hash) error {
+	pdb, ok := db.backend.(*pathdb.Database)
+	if !ok {
+		return errors.New("not supported")
+	}
+	return pdb.Journal(root)
+}
+
+// SetBufferSize sets the node buffer size to the provided value (in bytes).
+// It's only supported by path-based database and will return an error for
+// others.
+func (db *Database) SetBufferSize(size int) error {
+	pdb, ok := db.backend.(*pathdb.Database)
+	if !ok {
+		return errors.New("not supported")
+	}
+	return pdb.SetBufferSize(size)
+}
diff --git a/trie/database_test.go b/trie/database_test.go
index cad462f73..d508c6553 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -20,16 +20,21 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
 )
 
 // newTestDatabase initializes the trie database with specified scheme.
 func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
-	db := prepare(diskdb, nil)
+	config := &Config{Preimages: false}
 	if scheme == rawdb.HashScheme {
-		db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+		config.HashDB = &hashdb.Config{
+			CleanCacheSize: 0,
+		} // disable clean cache
+	} else {
+		config.PathDB = &pathdb.Config{
+			CleanCacheSize: 0,
+			DirtyCacheSize: 0,
+		} // disable clean/dirty cache
 	}
-	//} else {
-	//	db.backend = snap.New(diskdb, db.cleans, nil)
-	//}
-	return db
+	return NewDatabase(diskdb, config)
 }
diff --git a/trie/encoding.go b/trie/encoding.go
index 8ee0022ef..3284d3f8f 100644
--- a/trie/encoding.go
+++ b/trie/encoding.go
@@ -51,9 +51,8 @@ func hexToCompact(hex []byte) []byte {
 	return buf
 }
 
-// hexToCompactInPlace places the compact key in input buffer, returning the length
-// needed for the representation
-func hexToCompactInPlace(hex []byte) int {
+// hexToCompactInPlace places the compact key in input buffer, returning the compacted key.
+func hexToCompactInPlace(hex []byte) []byte {
 	var (
 		hexLen    = len(hex) // length of the hex input
 		firstByte = byte(0)
@@ -77,7 +76,7 @@ func hexToCompactInPlace(hex []byte) int {
 		hex[bi] = hex[ni]<<4 | hex[ni+1]
 	}
 	hex[0] = firstByte
-	return binLen
+	return hex[:binLen]
 }
 
 func compactToHex(compact []byte) []byte {
diff --git a/trie/encoding_test.go b/trie/encoding_test.go
index d16d25c35..ac50b5d02 100644
--- a/trie/encoding_test.go
+++ b/trie/encoding_test.go
@@ -86,8 +86,7 @@ func TestHexToCompactInPlace(t *testing.T) {
 	} {
 		hexBytes, _ := hex.DecodeString(key)
 		exp := hexToCompact(hexBytes)
-		sz := hexToCompactInPlace(hexBytes)
-		got := hexBytes[:sz]
+		got := hexToCompactInPlace(hexBytes)
 		if !bytes.Equal(exp, got) {
 			t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
 		}
@@ -102,8 +101,7 @@ func TestHexToCompactInPlaceRandom(t *testing.T) {
 		hexBytes := keybytesToHex(key)
 		hexOrig := []byte(string(hexBytes))
 		exp := hexToCompact(hexBytes)
-		sz := hexToCompactInPlace(hexBytes)
-		got := hexBytes[:sz]
+		got := hexToCompactInPlace(hexBytes)
 
 		if !bytes.Equal(exp, got) {
 			t.Fatalf("encoding err \ncpt %x\nhex %x\ngot %x\nexp %x\n",
@@ -119,6 +117,13 @@ func BenchmarkHexToCompact(b *testing.B) {
 	}
 }
 
+func BenchmarkHexToCompactInPlace(b *testing.B) {
+	testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
+	for i := 0; i < b.N; i++ {
+		hexToCompactInPlace(testBytes)
+	}
+}
+
 func BenchmarkCompactToHex(b *testing.B) {
 	testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
 	for i := 0; i < b.N; i++ {
diff --git a/trie/errors.go b/trie/errors.go
index bd82b950a..7be7041c7 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -17,11 +17,17 @@
 package trie
 
 import (
+	"errors"
 	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
 )
 
+// ErrCommitted is returned when an already committed trie is requested for usage.
+// The potential usages can be `Get`, `Update`, `Delete`, `NodeIterator`, `Prove`
+// and so on.
+var ErrCommitted = errors.New("trie is already committed")
+
 // MissingNodeError is returned by the trie functions (Get, Update, Delete)
 // in the case where a trie node is not present in the local database. It contains
 // information necessary for retrieving the missing node.
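The errors.go hunk above introduces the `ErrCommitted` sentinel, and the proof changes later in this diff drop the `fromLevel` argument from `Prove`. Below is a minimal, self-contained sketch of how both surface to a caller. It is illustrative only, not part of the patch, and assumes the post-change APIs exactly as shown in the surrounding hunks (two-argument `NewDatabase`, three-value `Commit`, two-argument `Prove`):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// Post-change API: NewDatabase takes an explicit (nillable) config.
	tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
	tr.MustUpdate([]byte("dog"), []byte("puppy"))

	// Prove now takes only the key and a proof sink; fromLevel is gone.
	proof := memorydb.New()
	if err := tr.Prove([]byte("dog"), proof); err != nil {
		panic(err)
	}
	val, _ := trie.VerifyProof(tr.Hash(), []byte("dog"), proof)
	fmt.Printf("proved %q\n", val)

	// After Commit the trie object is spent: Get, Update, NodeIterator and
	// Prove all fail with the new ErrCommitted sentinel.
	tr.Commit(false)
	if _, err := tr.Get([]byte("dog")); errors.Is(err, trie.ErrCommitted) {
		fmt.Println("trie already committed; reopen it from the committed root")
	}
}
```

This matches the pattern the updated tests follow: once `Commit` has been called, the old `*Trie` value is discarded and a fresh one is opened via `trie.New` with the committed root.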
diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 8003858e5..57d1f06a1 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -18,7 +18,6 @@ package trie import ( "bytes" - "encoding/binary" "fmt" "math/rand" "testing" @@ -27,14 +26,12 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/trie/trienode" ) func TestEmptyIterator(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - iter := trie.NodeIterator(nil) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + iter := trie.MustNodeIterator(nil) seen := make(map[string]struct{}) for iter.Next(true) { @@ -46,7 +43,7 @@ func TestEmptyIterator(t *testing.T) { } func TestIterator(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) vals := []struct{ k, v string }{ {"do", "verb"}, @@ -62,12 +59,12 @@ func TestIterator(t *testing.T) { all[val.k] = val.v trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) found := make(map[string]string) - it := NewIterator(trie.NodeIterator(nil)) + it := NewIterator(trie.MustNodeIterator(nil)) for it.Next() { found[string(it.Key)] = string(it.Value) } @@ -84,8 +81,12 @@ type kv struct { t bool } +func (k *kv) cmp(other *kv) int { + return bytes.Compare(k.k, other.k) +} + func TestIteratorLargeData(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) for i := byte(0); i < 255; i++ { @@ -97,7 +98,7 @@ func TestIteratorLargeData(t *testing.T) { vals[string(value2.k)] = value2 } - it := NewIterator(trie.NodeIterator(nil)) + it := NewIterator(trie.MustNodeIterator(nil)) for it.Next() { vals[string(it.Key)].t = true } @@ -126,7 +127,7 @@ type iterationElement struct { // Tests that the node iterator indeed walks over the entire database contents. 
func TestNodeIteratorCoverage(t *testing.T) { testNodeIteratorCoverage(t, rawdb.HashScheme) - //testNodeIteratorCoverage(t, rawdb.PathScheme) + testNodeIteratorCoverage(t, rawdb.PathScheme) } func testNodeIteratorCoverage(t *testing.T, scheme string) { @@ -135,7 +136,7 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) { // Gather all the node hashes found by the iterator var elements = make(map[common.Hash]iterationElement) - for it := trie.NodeIterator(nil); it.Next(true); { + for it := trie.MustNodeIterator(nil); it.Next(true); { if it.Hash() != (common.Hash{}) { elements[it.Hash()] = iterationElement{ hash: it.Hash(), @@ -145,8 +146,12 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) { } } // Cross check the hashes and the database itself + reader, err := nodeDb.Reader(trie.Hash()) + if err != nil { + t.Fatalf("state is not available %x", trie.Hash()) + } for _, element := range elements { - if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil { + if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil { t.Errorf("failed to retrieve reported node %x: %v", element.hash, err) } else if !bytes.Equal(blob, element.blob) { t.Errorf("node blob is different, want %v got %v", element.blob, blob) @@ -200,25 +205,25 @@ var testdata2 = []kvs{ } func TestIteratorSeek(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for _, val := range testdata1 { trie.MustUpdate([]byte(val.k), []byte(val.v)) } // Seek to the middle. - it := NewIterator(trie.NodeIterator([]byte("fab"))) + it := NewIterator(trie.MustNodeIterator([]byte("fab"))) if err := checkIteratorOrder(testdata1[4:], it); err != nil { t.Fatal(err) } // Seek to a non-existent key. - it = NewIterator(trie.NodeIterator([]byte("barc"))) + it = NewIterator(trie.MustNodeIterator([]byte("barc"))) if err := checkIteratorOrder(testdata1[1:], it); err != nil { t.Fatal(err) } // Seek beyond the end. 
- it = NewIterator(trie.NodeIterator([]byte("z"))) + it = NewIterator(trie.MustNodeIterator([]byte("z"))) if err := checkIteratorOrder(nil, it); err != nil { t.Fatal(err) } @@ -241,26 +246,26 @@ func checkIteratorOrder(want []kvs, it *Iterator) error { } func TestDifferenceIterator(t *testing.T) { - dba := NewDatabase(rawdb.NewMemoryDatabase()) + dba := NewDatabase(rawdb.NewMemoryDatabase(), nil) triea := NewEmpty(dba) for _, val := range testdata1 { triea.MustUpdate([]byte(val.k), []byte(val.v)) } - rootA, nodesA := triea.Commit(false) - dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)) + rootA, nodesA, _ := triea.Commit(false) + dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) triea, _ = New(TrieID(rootA), dba) - dbb := NewDatabase(rawdb.NewMemoryDatabase()) + dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trieb := NewEmpty(dbb) for _, val := range testdata2 { trieb.MustUpdate([]byte(val.k), []byte(val.v)) } - rootB, nodesB := trieb.Commit(false) - dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB)) + rootB, nodesB, _ := trieb.Commit(false) + dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) trieb, _ = New(TrieID(rootB), dbb) found := make(map[string]string) - di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil)) + di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)) it := NewIterator(di) for it.Next() { found[string(it.Key)] = string(it.Value) @@ -283,25 +288,25 @@ func TestDifferenceIterator(t *testing.T) { } func TestUnionIterator(t *testing.T) { - dba := NewDatabase(rawdb.NewMemoryDatabase()) + dba := NewDatabase(rawdb.NewMemoryDatabase(), nil) triea := NewEmpty(dba) for _, val := range testdata1 { triea.MustUpdate([]byte(val.k), []byte(val.v)) } - rootA, nodesA := triea.Commit(false) - dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)) + rootA, nodesA, _ := triea.Commit(false) + dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) triea, _ = New(TrieID(rootA), dba) - dbb := NewDatabase(rawdb.NewMemoryDatabase()) + dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trieb := NewEmpty(dbb) for _, val := range testdata2 { trieb.MustUpdate([]byte(val.k), []byte(val.v)) } - rootB, nodesB := trieb.Commit(false) - dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB)) + rootB, nodesB, _ := trieb.Commit(false) + dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) trieb, _ = New(TrieID(rootB), dbb) - di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)}) + di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)}) it := NewIterator(di) all := []struct{ k, v string }{ @@ -336,19 +341,19 @@ func TestUnionIterator(t *testing.T) { } func TestIteratorNoDups(t *testing.T) { - tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for _, val := range testdata1 { tr.MustUpdate([]byte(val.k), []byte(val.v)) } - checkIteratorNoDups(t, tr.NodeIterator(nil), nil) + checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil) } // This test checks that nodeIterator.Next can be retried after inserting missing trie nodes. 
func TestIteratorContinueAfterError(t *testing.T) { testIteratorContinueAfterError(t, false, rawdb.HashScheme) testIteratorContinueAfterError(t, true, rawdb.HashScheme) - // testIteratorContinueAfterError(t, false, rawdb.PathScheme) - // testIteratorContinueAfterError(t, true, rawdb.PathScheme) + testIteratorContinueAfterError(t, false, rawdb.PathScheme) + testIteratorContinueAfterError(t, true, rawdb.PathScheme) } func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { @@ -359,13 +364,13 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { for _, val := range testdata1 { tr.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes := tr.Commit(false) - tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := tr.Commit(false) + tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) if !memonly { tdb.Commit(root, false) } tr, _ = New(TrieID(root), tdb) - wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil) + wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil) var ( paths [][]byte @@ -424,7 +429,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { } // Iterate until the error is hit. seen := make(map[string]bool) - it := tr.NodeIterator(nil) + it := tr.MustNodeIterator(nil) checkIteratorNoDups(t, it, seen) missing, ok := it.Error().(*MissingNodeError) if !ok || missing.NodeHash != rhash { @@ -453,8 +458,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { func TestIteratorContinueAfterSeekError(t *testing.T) { testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme) testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme) - // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) - // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) + testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) + testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) } func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) { @@ -469,14 +474,14 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin for _, val := range testdata1 { ctr.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes := ctr.Commit(false) + root, nodes, _ := ctr.Commit(false) for path, n := range nodes.Nodes { if n.Hash == barNodeHash { barNodePath = []byte(path) break } } - triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) if !memonly { triedb.Commit(root, false) } @@ -492,7 +497,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin } // Create a new iterator that seeks to "bars". Seeking can't proceed because // the node is missing. 
- it := tr.NodeIterator([]byte("bars")) + it := tr.MustNodeIterator([]byte("bars")) missing, ok := it.Error().(*MissingNodeError) if !ok { t.Fatal("want MissingNodeError, got", it.Error()) @@ -526,94 +531,7 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in func TestIteratorNodeBlob(t *testing.T) { testIteratorNodeBlob(t, rawdb.HashScheme) - //testIteratorNodeBlob(t, rawdb.PathScheme) -} - -type loggingDb struct { - getCount uint64 - backend ethdb.KeyValueStore -} - -func (l *loggingDb) Has(key []byte) (bool, error) { - return l.backend.Has(key) -} - -func (l *loggingDb) Get(key []byte) ([]byte, error) { - l.getCount++ - return l.backend.Get(key) -} - -func (l *loggingDb) Put(key []byte, value []byte) error { - return l.backend.Put(key, value) -} - -func (l *loggingDb) Delete(key []byte) error { - return l.backend.Delete(key) -} - -func (l *loggingDb) NewBatch() ethdb.Batch { - return l.backend.NewBatch() -} - -func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch { - return l.backend.NewBatchWithSize(size) -} - -func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return l.backend.NewIterator(prefix, start) -} - -func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) { - return l.backend.NewSnapshot() -} - -func (l *loggingDb) Stat(property string) (string, error) { - return l.backend.Stat(property) -} - -func (l *loggingDb) Compact(start []byte, limit []byte) error { - return l.backend.Compact(start, limit) -} - -func (l *loggingDb) Close() error { - return l.backend.Close() -} - -// makeLargeTestTrie create a sample test trie -func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) { - // Create an empty trie - logDb := &loggingDb{0, memorydb.New()} - triedb := NewDatabase(rawdb.NewDatabase(logDb)) - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) - - // Fill it with some arbitrary data - for i := 0; i < 10000; i++ { - key := make([]byte, 32) - val := make([]byte, 32) - binary.BigEndian.PutUint64(key, uint64(i)) - binary.BigEndian.PutUint64(val, uint64(i)) - key = crypto.Keccak256(key) - val = crypto.Keccak256(val) - trie.MustUpdate(key, val) - } - root, nodes := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) - // Return the generated trie - return triedb, trie, logDb -} - -// Tests that the node iterator indeed walks over the entire database contents. 
-func TestNodeIteratorLargeTrie(t *testing.T) { - // Create some arbitrary test trie to iterate - db, trie, logDb := makeLargeTestTrie() - db.Cap(0) // flush everything - // Do a seek operation - trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885")) - // master: 24 get operations - // this pr: 5 get operations - if have, want := logDb.getCount, uint64(5); have != want { - t.Fatalf("Too many lookups during seek, have %d want %d", have, want) - } + testIteratorNodeBlob(t, rawdb.PathScheme) } func testIteratorNodeBlob(t *testing.T, scheme string) { @@ -636,13 +554,13 @@ func testIteratorNodeBlob(t *testing.T, scheme string) { all[val.k] = val.v trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) triedb.Commit(root, false) var found = make(map[common.Hash][]byte) trie, _ = New(TrieID(root), triedb) - it := trie.NodeIterator(nil) + it := trie.MustNodeIterator(nil) for it.Next(true) { if it.Hash() == (common.Hash{}) { continue @@ -689,7 +607,7 @@ func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) { } hash = common.BytesToHash(key) } else { - ok, remain := rawdb.IsAccountTrieNode(key) + ok, remain := rawdb.ResolveAccountTrieNodeKey(key) if !ok { return false, nil, common.Hash{} } diff --git a/trie/proof.go b/trie/proof.go index a25263d82..a463c80b4 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -33,7 +33,11 @@ import ( // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. -func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { +func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return ErrCommitted + } // Collect all nodes on the path to key. var ( prefix []byte @@ -81,10 +85,6 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e defer returnHasherToPool(hasher) for i, n := range nodes { - if fromLevel > 0 { - fromLevel-- - continue - } var hn node n, hn = hasher.proofHash(n) if hash, ok := hn.(hashNode); ok || i == 0 { @@ -107,8 +107,8 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. -func (t *StateTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { - return t.trie.Prove(key, fromLevel, proofDb) +func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { + return t.trie.Prove(key, proofDb) } // VerifyProof checks merkle proofs. 
The given proof must contain the value for diff --git a/trie/proof_test.go b/trie/proof_test.go index 69e3f8e9c..fc2de6264 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -22,13 +22,13 @@ import ( "encoding/binary" "fmt" mrand "math/rand" - "sort" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb/memorydb" + "golang.org/x/exp/slices" ) // Prng is a pseudo random number generator seeded by strong randomness. @@ -57,13 +57,13 @@ func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database { // Create a direct trie based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { proof := memorydb.New() - trie.Prove(key, 0, proof) + trie.Prove(key, proof) return proof }) // Create a leaf iterator based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { proof := memorydb.New() - if it := NewIterator(trie.NodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) { + if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) { for _, p := range it.Prove() { proof.Put(crypto.Keccak256(p), p) } @@ -94,7 +94,7 @@ func TestProof(t *testing.T) { } func TestOneElementProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "k", "v") for i, prover := range makeProvers(trie) { proof := prover([]byte("k")) @@ -145,12 +145,12 @@ func TestBadProof(t *testing.T) { // Tests that missing keys can also be proven. The test explicitly uses a single // entry trie and checks for missing keys both before and after the single entry. func TestMissingKeyProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "k", "v") for i, key := range []string{"a", "j", "l", "z"} { proof := memorydb.New() - trie.Prove([]byte(key), 0, proof) + trie.Prove([]byte(key), proof) if proof.Len() != 1 { t.Errorf("test %d: proof should have one element", i) @@ -165,30 +165,24 @@ func TestMissingKeyProof(t *testing.T) { } } -type entrySlice []*kv - -func (p entrySlice) Len() int { return len(p) } -func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } -func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - // TestRangeProof tests normal range proof with both edge proofs // as the existent proof. The test cases are generated randomly. func TestRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[end-1].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -208,11 +202,11 @@ func TestRangeProof(t *testing.T) { // The test cases are generated randomly. 
func TestRangeProofWithNonExistentProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 @@ -236,10 +230,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { if bytes.Compare(last, entries[end-1].k) < 0 { continue } - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -257,10 +251,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { proof := memorydb.New() first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var k [][]byte @@ -280,21 +274,21 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { // - There exists a gap between the last element and the right edge proof func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Case 1 start, end := 100, 200 first := decreaseKey(common.CopyBytes(entries[start].k)) proof := memorydb.New() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[end-1].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } start = 105 // Gap created @@ -313,10 +307,10 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { start, end = 100, 200 last := increaseKey(common.CopyBytes(entries[end-1].k)) proof = memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } end = 195 // Capped slice @@ -337,17 +331,17 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { // non-existent one. func TestOneElementRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // One element with existent edge proof, both edge proofs // point to the SAME key. 
start := 1000 proof := memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } _, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -359,10 +353,10 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 first := decreaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -374,10 +368,10 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 last := increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -389,10 +383,10 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -401,17 +395,17 @@ func TestOneElementRangeProof(t *testing.T) { } // Test the mini trie with only a single element. - tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) entry := &kv{randBytes(32), randBytes(20), false} tinyTrie.MustUpdate(entry.k, entry.v) first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last = entry.k proof = memorydb.New() - if err := tinyTrie.Prove(first, 0, proof); err != nil { + if err := tinyTrie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := tinyTrie.Prove(last, 0, proof); err != nil { + if err := tinyTrie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof) @@ -424,11 +418,11 @@ func TestOneElementRangeProof(t *testing.T) { // The edge proofs can be nil. 
func TestAllElementsProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var k [][]byte var v [][]byte @@ -443,10 +437,10 @@ func TestAllElementsProof(t *testing.T) { // With edge proofs, it should still work. proof := memorydb.New() - if err := trie.Prove(entries[0].k, 0, proof); err != nil { + if err := trie.Prove(entries[0].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[len(entries)-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof) @@ -458,10 +452,10 @@ func TestAllElementsProof(t *testing.T) { proof = memorydb.New() first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof) @@ -473,22 +467,22 @@ func TestAllElementsProof(t *testing.T) { // TestSingleSideRangeProof tests the range starts from zero. func TestSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - var entries entrySlice + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + var entries []*kv for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} trie.MustUpdate(value.k, value.v) entries = append(entries, value) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1} for _, pos := range cases { proof := memorydb.New() - if err := trie.Prove(common.Hash{}.Bytes(), 0, proof); err != nil { + if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[pos].k, 0, proof); err != nil { + if err := trie.Prove(entries[pos].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } k := make([][]byte, 0) @@ -508,23 +502,23 @@ func TestSingleSideRangeProof(t *testing.T) { // TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff. 
func TestReverseSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - var entries entrySlice + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + var entries []*kv for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} trie.MustUpdate(value.k, value.v) entries = append(entries, value) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1} for _, pos := range cases { proof := memorydb.New() - if err := trie.Prove(entries[pos].k, 0, proof); err != nil { + if err := trie.Prove(entries[pos].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - if err := trie.Prove(last.Bytes(), 0, proof); err != nil { + if err := trie.Prove(last.Bytes(), proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } k := make([][]byte, 0) @@ -545,20 +539,20 @@ func TestReverseSingleSideRangeProof(t *testing.T) { // The prover is expected to detect the error. func TestBadRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[end-1].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -615,7 +609,7 @@ func TestBadRangeProof(t *testing.T) { // TestGappedRangeProof focuses on the small trie with embedded nodes. // If the gapped node is embedded in the trie, it should be detected too. 
func TestGappedRangeProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) var entries []*kv // Sorted entries for i := byte(0); i < 10; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} @@ -624,10 +618,10 @@ func TestGappedRangeProof(t *testing.T) { } first, last := 2, 8 proof := memorydb.New() - if err := trie.Prove(entries[first].k, 0, proof); err != nil { + if err := trie.Prove(entries[first].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[last-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[last-1].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -648,11 +642,11 @@ func TestGappedRangeProof(t *testing.T) { // TestSameSideProofs tests the element is not in the range covered by proofs func TestSameSideProofs(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) pos := 1000 first := decreaseKey(common.CopyBytes(entries[pos].k)) @@ -660,10 +654,10 @@ func TestSameSideProofs(t *testing.T) { last := decreaseKey(common.CopyBytes(entries[pos].k)) proof := memorydb.New() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) @@ -676,10 +670,10 @@ func TestSameSideProofs(t *testing.T) { last = increaseKey(last) proof = memorydb.New() - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, 0, proof); err != nil { + if err := trie.Prove(last, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) @@ -689,14 +683,14 @@ func TestSameSideProofs(t *testing.T) { } func TestHasRightElement(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - var entries entrySlice + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + var entries []*kv for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} trie.MustUpdate(value.k, value.v) entries = append(entries, value) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var cases = []struct { start int @@ -724,23 +718,23 @@ func TestHasRightElement(t *testing.T) { ) if c.start == -1 { firstKey, start = common.Hash{}.Bytes(), 0 - if err := trie.Prove(firstKey, 0, proof); err != nil { + if err := trie.Prove(firstKey, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } else { firstKey = entries[c.start].k - if err := trie.Prove(entries[c.start].k, 0, proof); err != nil { + if err := trie.Prove(entries[c.start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } if c.end == -1 { lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries) - if err := trie.Prove(lastKey, 0, proof); err != nil { 
+ if err := trie.Prove(lastKey, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } else { lastKey = entries[c.end-1].k - if err := trie.Prove(entries[c.end-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[c.end-1].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } @@ -764,11 +758,11 @@ func TestHasRightElement(t *testing.T) { // The first edge proof must be a non-existent proof. func TestEmptyRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var cases = []struct { pos int @@ -780,7 +774,7 @@ func TestEmptyRangeProof(t *testing.T) { for _, c := range cases { proof := memorydb.New() first := increaseKey(common.CopyBytes(entries[c.pos].k)) - if err := trie.Prove(first, 0, proof); err != nil { + if err := trie.Prove(first, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof) @@ -799,11 +793,11 @@ func TestEmptyRangeProof(t *testing.T) { func TestBloatedProof(t *testing.T) { // Use a small trie trie, kvs := nonRandomTrie(100) - var entries entrySlice + var entries []*kv for _, kv := range kvs { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var keys [][]byte var vals [][]byte @@ -811,7 +805,7 @@ func TestBloatedProof(t *testing.T) { // In the 'malicious' case, we add proofs for every single item // (but only one key/value pair used as leaf) for i, entry := range entries { - trie.Prove(entry.k, 0, proof) + trie.Prove(entry.k, proof) if i == 50 { keys = append(keys, entry.k) vals = append(vals, entry.v) @@ -820,8 +814,8 @@ func TestBloatedProof(t *testing.T) { // For reference, we use the same function, but _only_ prove the first // and last element want := memorydb.New() - trie.Prove(keys[0], 0, want) - trie.Prove(keys[len(keys)-1], 0, want) + trie.Prove(keys[0], want) + trie.Prove(keys[len(keys)-1], want) if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil { t.Fatalf("expected bloated proof to succeed, got %v", err) @@ -833,11 +827,11 @@ func TestBloatedProof(t *testing.T) { // noop technically, but practically should be rejected. func TestEmptyValueRangeProof(t *testing.T) { trie, values := randomTrie(512) - var entries entrySlice + var entries []*kv for _, kv := range values { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Create a new entry with a slightly modified key mid := len(entries) / 2 @@ -854,10 +848,10 @@ func TestEmptyValueRangeProof(t *testing.T) { start, end := 1, len(entries)-1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[end-1].k, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -877,11 +871,11 @@ func TestEmptyValueRangeProof(t *testing.T) { // practically should be rejected. 
func TestAllElementsEmptyValueRangeProof(t *testing.T) { trie, values := randomTrie(512) - var entries entrySlice + var entries []*kv for _, kv := range values { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) // Create a new entry with a slightly modified key mid := len(entries) / 2 @@ -949,7 +943,7 @@ func BenchmarkProve(b *testing.B) { for i := 0; i < b.N; i++ { kv := vals[keys[i%len(keys)]] proofs := memorydb.New() - if trie.Prove(kv.k, 0, proofs); proofs.Len() == 0 { + if trie.Prove(kv.k, proofs); proofs.Len() == 0 { b.Fatalf("zero length proof for %x", kv.k) } } @@ -963,7 +957,7 @@ func BenchmarkVerifyProof(b *testing.B) { for k := range vals { keys = append(keys, k) proof := memorydb.New() - trie.Prove([]byte(k), 0, proof) + trie.Prove([]byte(k), proof) proofs = append(proofs, proof) } @@ -983,19 +977,19 @@ func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b, func benchmarkVerifyRangeProof(b *testing.B, size int) { trie, vals := randomTrie(8192) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) start := 2 end := start + size proof := memorydb.New() - if err := trie.Prove(entries[start].k, 0, proof); err != nil { + if err := trie.Prove(entries[start].k, proof); err != nil { b.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { + if err := trie.Prove(entries[end-1].k, proof); err != nil { b.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -1020,11 +1014,11 @@ func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof func benchmarkVerifyRangeNoProof(b *testing.B, size int) { trie, vals := randomTrie(size) - var entries entrySlice + var entries []*kv for _, kv := range vals { entries = append(entries, kv) } - sort.Sort(entries) + slices.SortFunc(entries, (*kv).cmp) var keys [][]byte var values [][]byte @@ -1042,7 +1036,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) { } func randomTrie(n int) (*Trie, map[string]*kv) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) for i := byte(0); i < 100; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} @@ -1061,7 +1055,7 @@ func randomTrie(n int) (*Trie, map[string]*kv) { } func nonRandomTrie(n int) (*Trie, map[string]*kv) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) max := uint64(0xffffffffffffffff) for i := uint64(0); i < uint64(n); i++ { @@ -1086,7 +1080,7 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) { common.Hex2Bytes("02"), common.Hex2Bytes("03"), } - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i, key := range keys { trie.MustUpdate(key, vals[i]) } @@ -1094,10 +1088,10 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) { proof := memorydb.New() start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000") end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - if err := trie.Prove(start, 0, proof); err != nil { + if err := trie.Prove(start, proof); err != nil { t.Fatalf("failed to prove start: %v", err) } - if err := trie.Prove(end, 0, proof); err 
!= nil { + if err := trie.Prove(end, proof); err != nil { t.Fatalf("failed to prove end: %v", err) } diff --git a/trie/secure_trie.go b/trie/secure_trie.go index b3cdeadb6..7f0685e30 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -86,7 +86,12 @@ func (t *StateTrie) MustGet(key []byte) []byte { // If the specified storage slot is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { - return t.trie.Get(t.hashKey(key)) + enc, err := t.trie.Get(t.hashKey(key)) + if err != nil || len(enc) == 0 { + return nil, err + } + _, content, _, err := rlp.Split(enc) + return content, err } // GetAccount attempts to retrieve an account with provided account address. @@ -148,7 +153,8 @@ func (t *StateTrie) MustUpdate(key, value []byte) { // If a node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { hk := t.hashKey(key) - err := t.trie.Update(hk, value) + v, _ := rlp.EncodeToBytes(value) + err := t.trie.Update(hk, v) if err != nil { return err } @@ -170,6 +176,10 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun return nil } +func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { + return nil +} + // MustDelete removes any existing value for key from the trie. This function // will omit any encountered error but just print out an error message. func (t *StateTrie) MustDelete(key []byte) { @@ -213,7 +223,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage -func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { +func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { // Write all the pre-images to the actual disk database if len(t.getSecKeyCache()) > 0 { if t.preimages != nil { @@ -244,12 +254,18 @@ func (t *StateTrie) Copy() *StateTrie { } } -// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration -// starts at the key after the given start key. -func (t *StateTrie) NodeIterator(start []byte) NodeIterator { +// NodeIterator returns an iterator that returns nodes of the underlying trie. +// Iteration starts at the key after the given start key. +func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) { return t.trie.NodeIterator(start) } +// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered +// error but just print out an error message. +func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator { + return t.trie.MustNodeIterator(start) +} + // hashKey returns the hash of key as an ephemeral buffer. // The caller must not hold onto the return value because it will become // invalid on the next call to hashKey or secKey. 
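The StateTrie hunks above change the storage-slot encoding: UpdateStorage now RLP-encodes the raw value before inserting it, and GetStorage splits the RLP header back off on reads, so callers can work with raw slot values. A minimal sketch of that round-trip, using only the rlp helpers referenced in the diff (the surrounding program is illustrative, not part of the patch):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	value := []byte{0xde, 0xad, 0xbe, 0xef} // a raw storage slot value

	// Write path (as in UpdateStorage): wrap the raw value in an RLP string.
	enc, err := rlp.EncodeToBytes(value)
	if err != nil {
		panic(err)
	}
	// Read path (as in GetStorage): strip the RLP header to recover the content.
	_, content, _, err := rlp.Split(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(value, content)) // true
}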
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index e4f6bca97..2087866d3 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -31,14 +31,14 @@ import ( ) func newEmptySecure() *StateTrie { - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase())) + trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil)) return trie } // makeTestStateTrie creates a large enough secure trie for testing. func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(rawdb.NewMemoryDatabase()) + triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) // Fill it with some arbitrary data @@ -60,8 +60,8 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { trie.MustUpdate(key, val) } } - root, nodes := trie.Commit(false) - if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { + root, nodes, _ := trie.Commit(false) + if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } // Re-create the trie based on the new state diff --git a/trie/stacktrie.go b/trie/stacktrie.go index ee1ce2829..0d65ee75e 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -444,7 +444,7 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) { case extNode: st.children[0].hashRec(hasher, append(path, st.key...)) - n := shortNode{Key: hexToCompact(st.key)} + n := shortNode{Key: hexToCompactInPlace(st.key)} if len(st.children[0].val) < 32 { n.Val = rawNode(st.children[0].val) } else { @@ -460,7 +460,7 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) { case leafNode: st.key = append(st.key, byte(16)) - n := shortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)} + n := shortNode{Key: hexToCompactInPlace(st.key), Val: valueNode(st.val)} n.encode(hasher.encbuf) encodedNode = hasher.encodedBytes() diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index ea3eef788..6bd0b83e3 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -165,6 +165,38 @@ func TestStackTrieInsertAndHash(t *testing.T) { {"123e", "x___________________________2", "0d230561e398c579e09a9f7b69ceaf7d3970f5a436fdb28b68b7a37c5bdd6b80"}, {"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"}, }, + { // branch node with short values + {"01", "a", "b48605025f5f4b129d40a420e721aa7d504487f015fce85b96e52126365ef7dc"}, + {"80", "b", "2dc6b680daf74db067cb7aeaad73265ded93d96fce190fcbf64f498d475672ab"}, + {"ee", "c", "017dc705a54ac5328dd263fa1bae68d655310fb3e3f7b7bc57e9a43ddf99c4bf"}, + {"ff", "d", "bd5a3584d271d459bd4eb95247b2fc88656b3671b60c1125ffe7bc0b689470d0"}, + }, + { // ext node with short branch node, then becoming long + {"a0", "a", "a83e028cb1e4365935661a9fd36a5c65c30b9ab416eaa877424146ca2a69d088"}, + {"a1", "b", "f586a4639b07b01798ca65e05c253b75d51135ebfbf6f8d6e87c0435089e65f0"}, + {"a2", "c", "63e297c295c008e09a8d531e18d57f270b6bc403e23179b915429db948cd62e3"}, + {"a3", "d", "94a7b721535578e9381f1f4e4b6ec29f8bdc5f0458a30320684c562f5d47b4b5"}, + {"a4", "e", "4b7e66d1c81965cdbe8fab8295ef56bc57fefdc5733d4782d2f8baf630f083c6"}, + {"a5", "f", "2997e7b502198ce1783b5277faacf52b25844fb55a99b63e88bdbbafac573106"}, + {"a6", "g", "bee629dd27a40772b2e1a67ec6db270d26acdf8d3b674dfae27866ad6ae1f48b"}, + }, + 
{ // branch node with short values, then long ones + {"a001", "v1", "b9cc982d995392b51e6787f1915f0b88efd4ad8b30f138da0a3e2242f2323e35"}, + {"b002", "v2", "a7b474bc77ef5097096fa0ee6298fdae8928c0bc3724e7311cd0fa9ed1942fc7"}, + {"c003", "v___________________________3", "dceb5bb7c92b0e348df988a8d9fc36b101397e38ebd405df55ba6ee5f14a264a"}, + {"d004", "v___________________________4", "36e60ecb86b9626165e1c6543c42ecbe4d83bca58e8e1124746961511fce362a"}, + }, + { // ext node to branch node with short values, then long ones + {"8002", "v1", "3258fcb3e9e7d7234ecd3b8d4743999e4ab3a21592565e0a5ca64c141e8620d9"}, + {"8004", "v2", "b6cb95b7024a83c17624a3c9bed09b4b5e8ed426f49f54b8ad13c39028b1e75a"}, + {"8008", "v___________________________3", "c769d82963abe6f0900bf69754738eeb2f84559777cfa87a44f54e1aab417871"}, + {"800d", "v___________________________4", "1cad1fdaab1a6fa95d7b780fd680030e423eb76669971368ba04797a8d9cdfc9"}, + }, + { // ext node with a child of size 31 (Y) and branch node with a child of size 31 (X) + {"000001", "ZZZZZZZZZ", "cef154b87c03c563408520ff9b26923c360cbc3ddb590c079bedeeb25a8c9c77"}, + {"000002", "Y", "2130735e600f612f6e657a32bd7be64ddcaec6512c5694844b19de713922895d"}, + {"000003", "XXXXXXXXXXXXXXXXXXXXXXXXXXXX", "962c0fffdeef7612a4f7bff1950d67e3e81c878e48b9ae45b3b374253b050bd8"}, + }, } st := NewStackTrie(nil) for i, test := range tests { @@ -188,7 +220,7 @@ func TestStackTrieInsertAndHash(t *testing.T) { func TestSizeBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -203,7 +235,7 @@ func TestSizeBug(t *testing.T) { func TestEmptyBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -229,7 +261,7 @@ func TestEmptyBug(t *testing.T) { func TestValLength56(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -254,7 +286,7 @@ func TestValLength56(t *testing.T) { // which causes a lot of node-within-node. This case was found via fuzzing. 
func TestUpdateSmallNodes(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) kvs := []struct { K string V string @@ -282,7 +314,7 @@ func TestUpdateSmallNodes(t *testing.T) { func TestUpdateVariableKeys(t *testing.T) { t.SkipNow() st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) kvs := []struct { K string V string @@ -351,7 +383,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { func TestStacktrieSerialization(t *testing.T) { var ( st = NewStackTrie(nil) - nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) keyB = big.NewInt(1) keyDelta = big.NewInt(1) vals [][]byte diff --git a/trie/sync_test.go b/trie/sync_test.go index 7cd405817..dd3506559 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -56,8 +56,8 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str trie.MustUpdate(key, val) } } - root, nodes := trie.Commit(false) - if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { + root, nodes, _ := trie.Commit(false) + if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } if err := triedb.Commit(root, false); err != nil { @@ -94,7 +94,7 @@ func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) er if err != nil { return nil // Consider a non existent state consistent } - it := trie.NodeIterator(nil) + it := trie.MustNodeIterator(nil) for it.Next(true) { } return it.Error() @@ -109,18 +109,18 @@ type trieElement struct { // Tests that an empty trie is not scheduled for syncing. 
func TestEmptySync(t *testing.T) { - dbA := NewDatabase(rawdb.NewMemoryDatabase()) - dbB := NewDatabase(rawdb.NewMemoryDatabase()) - //dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) - //dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) + dbA := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) + dbB := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) + dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) + dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) emptyA := NewEmpty(dbA) emptyB, _ := New(TrieID(types.EmptyRootHash), dbB) - //emptyC := NewEmpty(dbC) - //emptyD, _ := New(TrieID(types.EmptyRootHash), dbD) + emptyC := NewEmpty(dbC) + emptyD, _ := New(TrieID(types.EmptyRootHash), dbD) - for i, trie := range []*Trie{emptyA, emptyB /*emptyC, emptyD*/} { - sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB /*dbC, dbD*/}[i].Scheme()) + for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} { + sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB, dbC, dbD}[i].Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes) } @@ -134,10 +134,10 @@ func TestIterativeSync(t *testing.T) { testIterativeSync(t, 100, false, rawdb.HashScheme) testIterativeSync(t, 1, true, rawdb.HashScheme) testIterativeSync(t, 100, true, rawdb.HashScheme) - // testIterativeSync(t, 1, false, rawdb.PathScheme) - // testIterativeSync(t, 100, false, rawdb.PathScheme) - // testIterativeSync(t, 1, true, rawdb.PathScheme) - // testIterativeSync(t, 100, true, rawdb.PathScheme) + testIterativeSync(t, 1, false, rawdb.PathScheme) + testIterativeSync(t, 100, false, rawdb.PathScheme) + testIterativeSync(t, 1, true, rawdb.PathScheme) + testIterativeSync(t, 100, true, rawdb.PathScheme) } func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { @@ -159,12 +159,16 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { syncPath: NewSyncPath([]byte(paths[i])), }) } + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } for len(elements) > 0 { results := make([]NodeSyncResult, len(elements)) if !bypath { for i, element := range elements { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err) } @@ -208,7 +212,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { // partial results are returned, and the others sent only later. 
func TestIterativeDelayedSync(t *testing.T) { testIterativeDelayedSync(t, rawdb.HashScheme) - //testIterativeDelayedSync(t, rawdb.PathScheme) + testIterativeDelayedSync(t, rawdb.PathScheme) } func testIterativeDelayedSync(t *testing.T, scheme string) { @@ -230,12 +234,16 @@ func testIterativeDelayedSync(t *testing.T, scheme string) { syncPath: NewSyncPath([]byte(paths[i])), }) } + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } for len(elements) > 0 { // Sync only half of the scheduled nodes results := make([]NodeSyncResult, len(elements)/2+1) for i, element := range elements[:len(results)] { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err) } @@ -272,8 +280,8 @@ func testIterativeDelayedSync(t *testing.T, scheme string) { func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1, rawdb.HashScheme) testIterativeRandomSync(t, 100, rawdb.HashScheme) - // testIterativeRandomSync(t, 1, rawdb.PathScheme) - // testIterativeRandomSync(t, 100, rawdb.PathScheme) + testIterativeRandomSync(t, 1, rawdb.PathScheme) + testIterativeRandomSync(t, 100, rawdb.PathScheme) } func testIterativeRandomSync(t *testing.T, count int, scheme string) { @@ -295,12 +303,16 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) { syncPath: NewSyncPath([]byte(paths[i])), } } + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } for len(queue) > 0 { // Fetch all the queued nodes in a random order results := make([]NodeSyncResult, 0, len(queue)) for path, element := range queue { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err) } @@ -336,7 +348,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) { // partial results are returned (Even those randomly), others sent only later. func TestIterativeRandomDelayedSync(t *testing.T) { testIterativeRandomDelayedSync(t, rawdb.HashScheme) - // testIterativeRandomDelayedSync(t, rawdb.PathScheme) + testIterativeRandomDelayedSync(t, rawdb.PathScheme) } func testIterativeRandomDelayedSync(t *testing.T, scheme string) { @@ -358,12 +370,16 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) { syncPath: NewSyncPath([]byte(path)), } } + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } for len(queue) > 0 { // Sync only half of the scheduled nodes, even those in random order results := make([]NodeSyncResult, 0, len(queue)/2+1) for path, element := range queue { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err) } @@ -404,7 +420,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) { // have such references. 
func TestDuplicateAvoidanceSync(t *testing.T) { testDuplicateAvoidanceSync(t, rawdb.HashScheme) - // testDuplicateAvoidanceSync(t, rawdb.PathScheme) + testDuplicateAvoidanceSync(t, rawdb.PathScheme) } func testDuplicateAvoidanceSync(t *testing.T, scheme string) { @@ -426,13 +442,16 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) { syncPath: NewSyncPath([]byte(paths[i])), }) } + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } requested := make(map[common.Hash]struct{}) - for len(elements) > 0 { results := make([]NodeSyncResult, len(elements)) for i, element := range elements { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err) } @@ -472,12 +491,10 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) { // the database. func TestIncompleteSyncHash(t *testing.T) { testIncompleteSync(t, rawdb.HashScheme) - // testIncompleteSync(t, rawdb.PathScheme) + testIncompleteSync(t, rawdb.PathScheme) } func testIncompleteSync(t *testing.T, scheme string) { - t.Parallel() - // Create a random trie to copy _, srcDb, srcTrie, _ := makeTestTrie(scheme) @@ -501,12 +518,16 @@ func testIncompleteSync(t *testing.T, scheme string) { syncPath: NewSyncPath([]byte(paths[i])), }) } + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } for len(elements) > 0 { // Fetch a batch of trie nodes results := make([]NodeSyncResult, len(elements)) for i, element := range elements { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err) } @@ -559,7 +580,7 @@ func testIncompleteSync(t *testing.T, scheme string) { // depth. 
func TestSyncOrdering(t *testing.T) { testSyncOrdering(t, rawdb.HashScheme) - // testSyncOrdering(t, rawdb.PathScheme) + testSyncOrdering(t, rawdb.PathScheme) } func testSyncOrdering(t *testing.T, scheme string) { @@ -585,12 +606,15 @@ func testSyncOrdering(t *testing.T, scheme string) { }) reqs = append(reqs, NewSyncPath([]byte(paths[i]))) } - + reader, err := srcDb.Reader(srcTrie.Hash()) + if err != nil { + t.Fatalf("State is not available %x", srcTrie.Hash()) + } for len(elements) > 0 { results := make([]NodeSyncResult, len(elements)) for i, element := range elements { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err) } @@ -649,11 +673,15 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database syncPath: NewSyncPath([]byte(paths[i])), }) } + reader, err := srcDb.Reader(root) + if err != nil { + t.Fatalf("State is not available %x", root) + } for len(elements) > 0 { results := make([]NodeSyncResult, len(elements)) for i, element := range elements { owner, inner := ResolvePath([]byte(element.path)) - data, err := srcDb.Reader(root).Node(owner, inner, element.hash) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err) } @@ -686,7 +714,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database // states synced in the last cycle. func TestSyncMovingTarget(t *testing.T) { testSyncMovingTarget(t, rawdb.HashScheme) - // testSyncMovingTarget(t, rawdb.PathScheme) + testSyncMovingTarget(t, rawdb.PathScheme) } func testSyncMovingTarget(t *testing.T, scheme string) { @@ -709,8 +737,8 @@ func testSyncMovingTarget(t *testing.T, scheme string) { srcTrie.MustUpdate(key, val) diff[string(key)] = val } - root, nodes := srcTrie.Commit(false) - if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil { + root, nodes, _ := srcTrie.Commit(false) + if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { panic(err) } if err := srcDb.Commit(root, false); err != nil { @@ -734,8 +762,8 @@ func testSyncMovingTarget(t *testing.T, scheme string) { srcTrie.MustUpdate([]byte(k), val) reverted[k] = val } - root, nodes = srcTrie.Commit(false) - if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil { + root, nodes, _ = srcTrie.Commit(false) + if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { panic(err) } if err := srcDb.Commit(root, false); err != nil { diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go new file mode 100644 index 000000000..a75d0431b --- /dev/null +++ b/trie/testutil/utils.go @@ -0,0 +1,61 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package testutil + +import ( + crand "crypto/rand" + "encoding/binary" + mrand "math/rand" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +// prng is a pseudo-random number generator seeded by strong randomness. +// The randomness is printed on startup in order to make failures reproducible. +var prng = initRand() + +func initRand() *mrand.Rand { + var seed [8]byte + crand.Read(seed[:]) + rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:])))) + return rnd +} + +// RandBytes generates a random byte slice with the specified length. +func RandBytes(n int) []byte { + r := make([]byte, n) + prng.Read(r) + return r +} + +// RandomHash generates a random blob of data and returns it as a hash. +func RandomHash() common.Hash { + return common.BytesToHash(RandBytes(common.HashLength)) +} + +// RandomAddress generates a random blob of data and returns it as an address. +func RandomAddress() common.Address { + return common.BytesToAddress(RandBytes(common.AddressLength)) +} + +// RandomNode generates a random node. +func RandomNode() *trienode.Node { + val := RandBytes(100) + return trienode.New(crypto.Keccak256Hash(val), val) +} diff --git a/trie/tracer.go b/trie/tracer.go index 2b5de8ec4..5786af4d3 100644 --- a/trie/tracer.go +++ b/trie/tracer.go @@ -18,7 +18,6 @@ package trie import ( "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie/trienode" ) // tracer tracks the changes of trie nodes. During the trie operations, @@ -114,16 +113,18 @@ func (t *tracer) copy() *tracer { } } -// markDeletions puts all tracked deletions into the provided nodeset. -func (t *tracer) markDeletions(set *trienode.NodeSet) { +// deletedNodes returns a list of node paths which are deleted from the trie. +func (t *tracer) deletedNodes() []string { + var paths []string for path := range t.deletes { // It's possible a few deleted nodes were embedded // in their parent before; deleting them has no // effect since nothing was stored standalone, so filter them out. - prev, ok := t.accessList[path] + _, ok := t.accessList[path] if !ok { continue } - set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev)) + paths = append(paths, path) } + return paths } diff --git a/trie/tracer_test.go b/trie/tracer_test.go index 5f61a1d6b..acb8c2f6b 100644 --- a/trie/tracer_test.go +++ b/trie/tracer_test.go @@ -61,7 +61,7 @@ func TestTrieTracer(t *testing.T) { // Tests if the trie diffs are tracked correctly. The tracer should capture // all non-leaf dirty nodes, no matter whether the node is embedded or not.
func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) // Determine all new nodes are tracked @@ -70,8 +70,8 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { } insertSet := copySet(trie.tracer.inserts) // copy before commit deleteSet := copySet(trie.tracer.deletes) // copy before commit - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) seen := setKeys(iterNodes(db, root)) if !compareSet(insertSet, seen) { @@ -104,7 +104,7 @@ func TestTrieTracerNoop(t *testing.T) { } func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for _, val := range vals { trie.MustUpdate([]byte(val.k), []byte(val.v)) } @@ -128,7 +128,7 @@ func TestAccessList(t *testing.T) { func testAccessList(t *testing.T, vals []struct{ k, v string }) { var ( - db = NewDatabase(rawdb.NewMemoryDatabase()) + db = NewDatabase(rawdb.NewMemoryDatabase(), nil) trie = NewEmpty(db) orig = trie.Copy() ) @@ -136,8 +136,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, val := range vals { trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -151,8 +151,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, val := range vals { trie.MustUpdate([]byte(val.k), randBytes(32)) } - root, nodes = trie.Commit(false) - db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + root, nodes, _ = trie.Commit(false) + db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -169,8 +169,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { keys = append(keys, string(key)) trie.MustUpdate(key, randBytes(32)) } - root, nodes = trie.Commit(false) - db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + root, nodes, _ = trie.Commit(false) + db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -184,8 +184,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, key := range keys { trie.MustUpdate([]byte(key), nil) } - root, nodes = trie.Commit(false) - db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + root, nodes, _ = trie.Commit(false) + db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -199,8 +199,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, val := range vals { trie.MustUpdate([]byte(val.k), nil) } - root, nodes = trie.Commit(false) - db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + root, nodes, _ = trie.Commit(false) + db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) if err := 
verifyAccessList(orig, trie, nodes); err != nil { @@ -211,29 +211,29 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { // Tests that origin values won't be tracked in Iterator or Prover. func TestAccessListLeak(t *testing.T) { var ( - db = NewDatabase(rawdb.NewMemoryDatabase()) + db = NewDatabase(rawdb.NewMemoryDatabase(), nil) trie = NewEmpty(db) ) // Create trie from scratch for _, val := range standard { trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) var cases = []struct { op func(tr *Trie) }{ { func(tr *Trie) { - it := tr.NodeIterator(nil) + it := tr.MustNodeIterator(nil) for it.Next(true) { } }, }, { func(tr *Trie) { - it := NewIterator(tr.NodeIterator(nil)) + it := NewIterator(tr.MustNodeIterator(nil)) for it.Next() { } }, @@ -241,7 +241,7 @@ func TestAccessListLeak(t *testing.T) { { func(tr *Trie) { for _, val := range standard { - tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) + tr.Prove([]byte(val.k), rawdb.NewMemoryDatabase()) } }, }, @@ -262,14 +262,14 @@ // in its parent due to the smaller size of the original tree node. func TestTinyTree(t *testing.T) { var ( - db = NewDatabase(rawdb.NewMemoryDatabase()) + db = NewDatabase(rawdb.NewMemoryDatabase(), nil) trie = NewEmpty(db) ) for _, val := range tiny { trie.MustUpdate([]byte(val.k), randBytes(32)) } - root, set := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set)) + root, set, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil) parent := root trie, _ = New(TrieID(root), db) @@ -277,8 +277,8 @@ for _, val := range tiny { trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, set = trie.Commit(false) - db.Update(root, parent, trienode.NewWithNodeSet(set)) + root, set, _ = trie.Commit(false) + db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, set); err != nil { @@ -300,7 +300,7 @@ func compareSet(setA, setB map[string]struct{}) bool { func forNodes(tr *Trie) map[string][]byte { var ( - it = tr.NodeIterator(nil) + it = tr.MustNodeIterator(nil) nodes = make(map[string][]byte) ) for it.Next(true) { @@ -319,7 +319,7 @@ func iterNodes(db *Database, root common.Hash) map[string][]byte { func forHashedNodes(tr *Trie) map[string][]byte { var ( - it = tr.NodeIterator(nil) + it = tr.MustNodeIterator(nil) nodes = make(map[string][]byte) ) for it.Next(true) { diff --git a/trie/trie.go b/trie/trie.go index e63b2d283..07467ac69 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -39,6 +39,10 @@ type Trie struct { root node owner common.Hash + // Flag whether the commit operation is already performed. If so, the + // trie is not usable (the latest state is invisible). + committed bool + // Keep track of the number of leaves which have been inserted since the last // hashing operation. This number will not directly map to the number of // actually unhashed nodes. @@ -60,11 +64,12 @@ func (t *Trie) newFlag() nodeFlag { // Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie { return &Trie{ - root: t.root, - owner: t.owner, - unhashed: t.unhashed, - reader: t.reader, - tracer: t.tracer.copy(), + root: t.root, + owner: t.owner, + committed: t.committed, + unhashed: t.unhashed, + reader: t.reader, + tracer: t.tracer.copy(), } } @@ -74,7 +79,7 @@ func (t *Trie) Copy() *Trie { // zero hash or the sha3 hash of an empty string, then trie is initially // empty, otherwise, the root node must be present in database or returns // a MissingNodeError if not. -func New(id *ID, db NodeReader) (*Trie, error) { +func New(id *ID, db *Database) (*Trie, error) { reader, err := newTrieReader(id.StateRoot, id.Owner, db) if err != nil { return nil, err @@ -100,10 +105,24 @@ func NewEmpty(db *Database) *Trie { return tr } +// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered +// error but just print out an error message. +func (t *Trie) MustNodeIterator(start []byte) NodeIterator { + it, err := t.NodeIterator(start) + if err != nil { + log.Error("Unhandled trie error in Trie.NodeIterator", "err", err) + } + return it +} + // NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at // the key after the given start key. -func (t *Trie) NodeIterator(start []byte) NodeIterator { - return newNodeIterator(t, start) +func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return nil, ErrCommitted + } + return newNodeIterator(t, start), nil } // MustGet is a wrapper of Get and will omit any encountered error but just @@ -122,6 +141,10 @@ func (t *Trie) MustGet(key []byte) []byte { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) Get(key []byte) ([]byte, error) { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return nil, ErrCommitted + } value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0) if err == nil && didResolve { t.root = newroot @@ -181,6 +204,10 @@ func (t *Trie) MustGetNode(path []byte) ([]byte, int) { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) GetNode(path []byte) ([]byte, int, error) { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return nil, 0, ErrCommitted + } item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0) if err != nil { return nil, resolved, err @@ -273,6 +300,10 @@ func (t *Trie) MustUpdate(key, value []byte) { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) Update(key, value []byte) error { + // Short circuit if the trie is already committed and not usable. + if t.committed { + return ErrCommitted + } return t.update(key, value) } @@ -387,6 +418,10 @@ func (t *Trie) MustDelete(key []byte) { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) Delete(key []byte) error { + // Short circuit if the trie is already committed and not usable. 
+ if t.committed { + return ErrCommitted + } t.unhashed++ k := keybytesToHex(key) _, n, err := t.delete(t.root, nil, k) @@ -572,17 +607,25 @@ func (t *Trie) Hash() common.Hash { // The returned nodeset can be nil if the trie is clean (nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with the new root and updated trie database for subsequent usage. -func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { +func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { defer t.tracer.reset() - - nodes := trienode.NewNodeSet(t.owner) - t.tracer.markDeletions(nodes) - + defer func() { + t.committed = true + }() // Trie is empty and can be classified into two types of situations: - // - The trie was empty and no update happens - // - The trie was non-empty and all nodes are dropped + // (a) The trie was empty and no update happens => return nil + // (b) The trie was non-empty and all nodes are dropped => return + // the node set that includes all deleted nodes if t.root == nil { - return types.EmptyRootHash, nodes + paths := t.tracer.deletedNodes() + if len(paths) == 0 { + return types.EmptyRootHash, nil, nil // case (a) + } + nodes := trienode.NewNodeSet(t.owner) + for _, path := range paths { + nodes.AddNode([]byte(path), trienode.NewDeleted()) + } + return types.EmptyRootHash, nodes, nil // case (b) } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -594,10 +637,14 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { // Replace the root node with the origin hash in order to // ensure all resolved nodes are dropped after the commit. t.root = hashedNode - return rootHash, nil + return rootHash, nil, nil + } + nodes := trienode.NewNodeSet(t.owner) + for _, path := range t.tracer.deletedNodes() { + nodes.AddNode([]byte(path), trienode.NewDeleted()) } t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root) - return rootHash, nodes + return rootHash, nodes, nil } // hashRoot calculates the root hash of the given trie @@ -621,4 +668,5 @@ func (t *Trie) Reset() { t.owner = common.Hash{} t.unhashed = 0 t.tracer.reset() + t.committed = false } diff --git a/trie/trie_reader.go b/trie/trie_reader.go index 51855320a..421596455 100644 --- a/trie/trie_reader.go +++ b/trie/trie_reader.go @@ -17,26 +17,25 @@ package trie import ( - "fmt" - "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/triestate" ) // Reader wraps the Node method of a backing trie store. type Reader interface { - // Node retrieves the RLP-encoded trie node blob with the provided trie - // identifier, node path and the corresponding node hash. No error will - // be returned if the node is not found. + // Node retrieves the trie node blob with the provided trie identifier, node path and + // the corresponding node hash. No error will be returned if the node is not found. + // + // When looking up nodes in the account trie, 'owner' is the zero hash. For contract + // storage trie nodes, 'owner' is the hash of the account address containing the + // storage. + // + // TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS. Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) } -// NodeReader wraps all the necessary functions for accessing trie node.
-type NodeReader interface { - // Reader returns a reader for accessing all trie nodes with provided - // state root. Nil is returned in case the state is not available. - Reader(root common.Hash) Reader -} - // trieReader is a wrapper of the underlying node reader. It's not safe // for concurrent usage. type trieReader struct { @@ -46,10 +45,16 @@ type trieReader struct { } // newTrieReader initializes the trie reader with the given node reader. -func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) { - reader := db.Reader(stateRoot) - if reader == nil { - return nil, fmt.Errorf("state not found #%x", stateRoot) +func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) { + if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash { + if stateRoot == (common.Hash{}) { + log.Error("Zero state root hash!") + } + return &trieReader{owner: owner}, nil + } + reader, err := db.Reader(stateRoot) + if err != nil { + return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err} } return &trieReader{owner: owner, reader: reader}, nil } @@ -79,3 +84,18 @@ func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) { } return blob, nil } + +// trieLoader implements triestate.TrieLoader for constructing tries. +type trieLoader struct { + db *Database +} + +// OpenTrie opens the main account trie. +func (l *trieLoader) OpenTrie(root common.Hash) (triestate.Trie, error) { + return New(TrieID(root), l.db) +} + +// OpenStorageTrie opens the storage trie of an account. +func (l *trieLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) { + return New(StorageTrieID(stateRoot, addrHash, root), l.db) +} diff --git a/trie/trie_test.go b/trie/trie_test.go index 70de46022..35ccc7720 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -45,7 +45,7 @@ func init() { } func TestEmptyTrie(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) res := trie.Hash() exp := types.EmptyRootHash if res != exp { @@ -54,7 +54,7 @@ func TestEmptyTrie(t *testing.T) { } func TestNull(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) key := make([]byte, 32) value := []byte("test") trie.MustUpdate(key, value) @@ -64,8 +64,13 @@ func TestNull(t *testing.T) { } func TestMissingRoot(t *testing.T) { + testMissingRoot(t, rawdb.HashScheme) + testMissingRoot(t, rawdb.PathScheme) +} + +func testMissingRoot(t *testing.T, scheme string) { root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") - trie, err := New(TrieID(root), NewDatabase(rawdb.NewMemoryDatabase())) + trie, err := New(TrieID(root), newTestDatabase(rawdb.NewMemoryDatabase(), scheme)) if trie != nil { t.Error("New returned non-nil trie for invalid root") } @@ -76,9 +81,9 @@ func TestMissingRoot(t *testing.T) { func TestMissingNode(t *testing.T) { testMissingNode(t, false, rawdb.HashScheme) - //testMissingNode(t, false, rawdb.PathScheme) + testMissingNode(t, false, rawdb.PathScheme) testMissingNode(t, true, rawdb.HashScheme) - //testMissingNode(t, true, rawdb.PathScheme) + testMissingNode(t, true, rawdb.PathScheme) } func testMissingNode(t *testing.T, memonly bool, scheme string) { @@ -88,8 +93,8 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) { trie := NewEmpty(triedb) updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer") 
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") - root, nodes := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) if !memonly { triedb.Commit(root, false) @@ -161,7 +166,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) { } func TestInsert(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "doe", "reindeer") updateString(trie, "dog", "puppy") @@ -173,18 +178,18 @@ func TestInsert(t *testing.T) { t.Errorf("case 1: exp %x got %x", exp, root) } - trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab") - root, _ = trie.Commit(false) + root, _, _ = trie.Commit(false) if root != exp { t.Errorf("case 2: exp %x got %x", exp, root) } } func TestGet(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) updateString(trie, "doe", "reindeer") updateString(trie, "dog", "puppy") @@ -202,14 +207,14 @@ func TestGet(t *testing.T) { if i == 1 { return } - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) trie, _ = New(TrieID(root), db) } } func TestDelete(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := []struct{ k, v string }{ {"do", "verb"}, {"ether", "wookiedoo"}, @@ -236,7 +241,7 @@ func TestDelete(t *testing.T) { } func TestEmptyValues(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := []struct{ k, v string }{ {"do", "verb"}, @@ -260,7 +265,7 @@ func TestEmptyValues(t *testing.T) { } func TestReplication(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) vals := []struct{ k, v string }{ {"do", "verb"}, @@ -274,8 +279,8 @@ func TestReplication(t *testing.T) { for _, val := range vals { updateString(trie, val.k, val.v) } - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) // create a new trie on top of the database and check that lookups work. trie2, err := New(TrieID(root), db) @@ -287,14 +292,14 @@ func TestReplication(t *testing.T) { t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v) } } - hash, nodes := trie2.Commit(false) + hash, nodes, _ := trie2.Commit(false) if hash != root { t.Errorf("root failure. 
expected %x got %x", root, hash) } // recreate the trie after commit if nodes != nil { - db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) } trie2, err = New(TrieID(hash), db) if err != nil { @@ -321,7 +326,7 @@ func TestReplication(t *testing.T) { } func TestLargeValue(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99}) trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32)) trie.Hash() @@ -422,44 +427,44 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error { if !ok || n.IsDeleted() { return errors.New("expect new node") } - if len(n.Prev) > 0 { - return errors.New("unexpected origin value") - } + //if len(n.Prev) > 0 { + // return errors.New("unexpected origin value") + //} } // Check deletion set - for path, blob := range deletes { + for path := range deletes { n, ok := set.Nodes[path] if !ok || !n.IsDeleted() { return errors.New("expect deleted node") } - if len(n.Prev) == 0 { - return errors.New("expect origin value") - } - if !bytes.Equal(n.Prev, blob) { - return errors.New("invalid origin value") - } + //if len(n.Prev) == 0 { + // return errors.New("expect origin value") + //} + //if !bytes.Equal(n.Prev, blob) { + // return errors.New("invalid origin value") + //} } // Check update set - for path, blob := range updates { + for path := range updates { n, ok := set.Nodes[path] if !ok || n.IsDeleted() { return errors.New("expect updated node") } - if len(n.Prev) == 0 { - return errors.New("expect origin value") - } - if !bytes.Equal(n.Prev, blob) { - return errors.New("invalid origin value") - } + //if len(n.Prev) == 0 { + // return errors.New("expect origin value") + //} + //if !bytes.Equal(n.Prev, blob) { + // return errors.New("invalid origin value") + //} } return nil } func runRandTest(rt randTest) bool { var scheme = rawdb.HashScheme - //if rand.Intn(2) == 0 { - // scheme = rawdb.PathScheme - //} + if rand.Intn(2) == 0 { + scheme = rawdb.PathScheme + } var ( origin = types.EmptyRootHash triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme) @@ -490,7 +495,7 @@ func runRandTest(rt randTest) bool { continue } proofDb := rawdb.NewMemoryDatabase() - err := tr.Prove(step.key, 0, proofDb) + err := tr.Prove(step.key, proofDb) if err != nil { rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err) } @@ -501,9 +506,9 @@ func runRandTest(rt randTest) bool { case opHash: tr.Hash() case opCommit: - root, nodes := tr.Commit(true) + root, nodes, _ := tr.Commit(true) if nodes != nil { - triedb.Update(root, origin, trienode.NewWithNodeSet(nodes)) + triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil) } newtr, err := New(TrieID(root), triedb) if err != nil { @@ -521,7 +526,7 @@ func runRandTest(rt randTest) bool { origin = root case opItercheckhash: checktr := NewEmpty(triedb) - it := NewIterator(tr.NodeIterator(nil)) + it := NewIterator(tr.MustNodeIterator(nil)) for it.Next() { checktr.MustUpdate(it.Key, it.Value) } @@ -530,8 +535,8 @@ func runRandTest(rt randTest) bool { } case opNodeDiff: var ( - origIter = origTrie.NodeIterator(nil) - curIter = tr.NodeIterator(nil) + origIter = origTrie.MustNodeIterator(nil) + curIter = tr.MustNodeIterator(nil) origSeen = make(map[string]struct{}) curSeen = make(map[string]struct{}) ) @@ -604,7 +609,7 @@ func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, 
binary.LittleEndian) } const benchElemCount = 20000 func benchGet(b *testing.B) { - triedb := NewDatabase(rawdb.NewMemoryDatabase()) + triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(triedb) k := make([]byte, 32) for i := 0; i < benchElemCount; i++ { @@ -621,7 +626,7 @@ func benchGet(b *testing.B) { } func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) k := make([]byte, 32) b.ReportAllocs() for i := 0; i < b.N; i++ { @@ -651,7 +656,7 @@ func BenchmarkHash(b *testing.B) { // entries, then adding N more. addresses, accounts := makeAccounts(2 * b.N) // Insert the accounts into the trie and hash it - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) i := 0 for ; i < len(addresses)/2; i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) @@ -682,7 +687,7 @@ func BenchmarkCommitAfterHash(b *testing.B) { func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) { // Make the random benchmark deterministic addresses, accounts := makeAccounts(b.N) - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -696,7 +701,7 @@ func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) { func TestTinyTrie(t *testing.T) { // Create a realistic account trie to hash _, accounts := makeAccounts(5) - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root { t.Errorf("1: got %x, exp %x", root, exp) @@ -709,8 +714,8 @@ func TestTinyTrie(t *testing.T) { if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root { t.Errorf("3: got %x, exp %x", root, exp) } - checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - it := NewIterator(trie.NodeIterator(nil)) + checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) + it := NewIterator(trie.MustNodeIterator(nil)) for it.Next() { checktr.MustUpdate(it.Key, it.Value) } @@ -722,7 +727,7 @@ func TestTinyTrie(t *testing.T) { func TestCommitAfterHash(t *testing.T) { // Create a realistic account trie to hash addresses, accounts := makeAccounts(1000) - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -734,7 +739,7 @@ func TestCommitAfterHash(t *testing.T) { if exp != root { t.Errorf("got %x, exp %x", root, exp) } - root, _ = trie.Commit(false) + root, _, _ = trie.Commit(false) if exp != root { t.Errorf("got %x, exp %x", root, exp) } @@ -788,11 +793,17 @@ func (s *spongeDb) Stat(property string) (string, error) { panic("implement func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") } func (s *spongeDb) Close() error { return nil } func (s *spongeDb) Put(key []byte, value []byte) error { - valbrief := value + var ( + keybrief = key + valbrief = value + ) + if len(keybrief) > 8 
{ + keybrief = keybrief[:8] + } if len(valbrief) > 8 { valbrief = valbrief[:8] } - s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, key[:8], len(value), valbrief)) + s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief)) s.sponge.Write(key) s.sponge.Write(value) return nil @@ -830,15 +841,15 @@ func TestCommitSequence(t *testing.T) { addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Fill the trie with elements for i := 0; i < tc.count; i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Flush trie -> database - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) // Flush memdb -> disk (sponge) db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { @@ -861,7 +872,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { prng := rand.New(rand.NewSource(int64(i))) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Fill the trie with elements for i := 0; i < tc.count; i++ { @@ -878,8 +889,8 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { trie.MustUpdate(key, val) } // Flush trie -> database - root, nodes := trie.Commit(false) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) // Flush memdb -> disk (sponge) db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { @@ -893,7 +904,7 @@ func TestCommitSequenceStackTrie(t *testing.T) { prng := rand.New(rand.NewSource(int64(count))) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} @@ -917,9 +928,9 @@ func TestCommitSequenceStackTrie(t *testing.T) { stTrie.Update(key, val) } // Flush trie -> database - root, nodes := trie.Commit(false) + root, nodes, _ := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Commit(root, false) // And flush stacktrie -> disk stRoot, err := stTrie.Commit() @@ -952,7 +963,7 @@ func TestCommitSequenceStackTrie(t *testing.T) { // not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do. 
func TestCommitSequenceSmallRoot(t *testing.T) { s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} @@ -965,9 +976,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) { trie.Update(key, []byte{0x1}) stTrie.Update(key, []byte{0x1}) // Flush trie -> database - root, nodes := trie.Commit(false) + root, nodes, _ := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) db.Commit(root, false) // And flush stacktrie -> disk stRoot, err := stTrie.Commit() @@ -1029,7 +1040,7 @@ func BenchmarkHashFixedSize(b *testing.B) { func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { b.ReportAllocs() - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -1080,7 +1091,7 @@ func BenchmarkCommitAfterHashFixedSize(b *testing.B) { func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { b.ReportAllocs() - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -1132,14 +1143,14 @@ func BenchmarkDerefRootFixedSize(b *testing.B) { func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { b.ReportAllocs() - triedb := NewDatabase(rawdb.NewMemoryDatabase()) + triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(triedb) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } h := trie.Hash() - root, nodes := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + root, nodes, _ := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) b.StartTimer() triedb.Dereference(h) b.StopTimer() diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go index 1e5a464e2..6ae5eaa3f 100644 --- a/trie/triedb/hashdb/database.go +++ b/trie/triedb/hashdb/database.go @@ -18,6 +18,7 @@ package hashdb import ( "errors" + "fmt" "reflect" "sync" "time" @@ -31,30 +32,31 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" ) var ( - memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) - memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) - memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) - memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) + memcacheCleanHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil) + memcacheCleanMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil) + memcacheCleanReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil) + memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", 
nil) - memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) - memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) - memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) - memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + memcacheDirtyHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil) + memcacheDirtyMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil) + memcacheDirtyReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil) + memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil) - memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) - memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) - memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) + memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil) + memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil) + memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil) - memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) - memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) - memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) + memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil) + memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil) + memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil) - memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) - memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) - memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) + memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil) + memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil) + memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil) ) // ChildResolver defines the required method to decode the provided @@ -63,6 +65,20 @@ type ChildResolver interface { ForEach(node []byte, onChild func(common.Hash)) } +// Config contains the settings for database. +type Config struct { + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes +} + +// Defaults is the default setting for database if it's not specified. +// Notably, clean cache is disabled explicitly, +var Defaults = &Config{ + // Explicitly set clean cache size to 0 to avoid creating fastcache, + // otherwise database must be closed when it's no longer needed to + // prevent memory leak. + CleanCacheSize: 0, +} + // Database is an intermediate write layer between the trie data structures and // the disk database. The aim is to accumulate trie writes in-memory and only // periodically flush a couple tries to disk, garbage collecting the remainder. @@ -120,7 +136,14 @@ func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash commo } // New initializes the hash-based node database. 
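The constructor now takes this Config rather than a pre-built cache, so callers opt into the clean cache by size alone. A minimal sketch of the new call shape — `resolver` here is a stand-in for whatever ChildResolver implementation the caller already has, not something defined in this diff:

```go
db := hashdb.New(rawdb.NewMemoryDatabase(), &hashdb.Config{
	CleanCacheSize: 256 * 1024 * 1024, // any non-zero size allocates a fastcache
}, resolver)
defer db.Close() // required once a clean cache exists, to release its memory
```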
-func New(diskdb ethdb.Database, cleans *fastcache.Cache, resolver ChildResolver) *Database { +func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database { + if config == nil { + config = Defaults + } + var cleans *fastcache.Cache + if config.CleanCacheSize > 0 { + cleans = fastcache.New(config.CleanCacheSize) + } return &Database{ diskdb: diskdb, resolver: resolver, @@ -268,7 +291,7 @@ func (db *Database) Dereference(root common.Hash) { db.gctime += time.Since(start) memcacheGCTimeTimer.Update(time.Since(start)) - memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize)) memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), @@ -389,7 +412,7 @@ func (db *Database) Cap(limit common.StorageSize) error { db.flushtime += time.Since(start) memcacheFlushTimeTimer.Update(time.Since(start)) - memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize)) memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), @@ -440,7 +463,7 @@ func (db *Database) Commit(node common.Hash, report bool) error { // Reset the storage counters and bumped metrics memcacheCommitTimeTimer.Update(time.Since(start)) - memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize)) memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) logger := log.Info @@ -557,7 +580,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Update inserts the dirty nodes in provided nodeset into database and link the // account trie with multiple storage tries if necessary. -func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { +func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { // Ensure the parent state is present and signal a warning if not. if parent != types.EmptyRootHash { if blob, _ := db.Node(parent); len(blob) == 0 { @@ -610,7 +633,10 @@ func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode // Size returns the current storage size of the memory cache in front of the // persistent database layer. -func (db *Database) Size() common.StorageSize { +// +// The first return will always be 0, representing the memory stored in unbounded +// diff layers above the dirty cache. This is only available in pathdb. +func (db *Database) Size() (common.StorageSize, common.StorageSize) { db.lock.RLock() defer db.lock.RUnlock() @@ -618,11 +644,17 @@ func (db *Database) Size() common.StorageSize { // the total memory consumption, the maintenance metadata is also needed to be // counted. var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize) - return db.dirtiesSize + db.childrenSize + metadataSize + return 0, db.dirtiesSize + db.childrenSize + metadataSize } // Close closes the trie database and releases all held resources. -func (db *Database) Close() error { return nil } +func (db *Database) Close() error { + if db.cleans != nil { + db.cleans.Reset() + db.cleans = nil + } + return nil +} // Scheme returns the node scheme used in the database. 
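Since Size grows a second return value, every call site needs a two-value assignment; under the hash scheme the first value is always zero, as only pathdb maintains unbounded diff layers. A hedged sketch of an adjusted caller:

```go
diffs, dirty := db.Size() // diffs is always 0 in hashdb, meaningful only in pathdb
log.Info("Trie database memory", "diffs", diffs, "dirty", dirty)
```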
func (db *Database) Scheme() string { @@ -630,8 +662,12 @@ func (db *Database) Scheme() string { } // Reader retrieves a node reader belonging to the given state root. -func (db *Database) Reader(root common.Hash) *reader { - return &reader{db: db} +// An error will be returned if the requested state is not available. +func (db *Database) Reader(root common.Hash) (*reader, error) { + if _, err := db.Node(root); err != nil { + return nil, fmt.Errorf("state %#x is not available, %v", root, err) + } + return &reader{db: db}, nil } // reader is a state reader of Database which implements the Reader interface. diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go new file mode 100644 index 000000000..18cc36ffc --- /dev/null +++ b/trie/triedb/pathdb/database.go @@ -0,0 +1,426 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +const ( + // maxDiffLayers is the maximum diff layers allowed in the layer tree. + maxDiffLayers = 128 + + // defaultCleanSize is the default memory allowance of clean cache. + defaultCleanSize = 16 * 1024 * 1024 + + // maxBufferSize is the maximum memory allowance of node buffer. + // Too large nodebuffer will cause the system to pause for a long + // time when write happens. Also, the largest batch that pebble can + // support is 4GB, node will panic if batch size exceeds this limit. + maxBufferSize = 256 * 1024 * 1024 + + // DefaultBufferSize is the default memory allowance of node buffer + // that aggregates the writes from above until it's flushed into the + // disk. It's meant to be used once the initial sync is finished. + // Do not increase the buffer size arbitrarily, otherwise the system + // pause time will increase when the database writes happen. + DefaultBufferSize = 64 * 1024 * 1024 +) + +// layer is the interface implemented by all state layers which includes some +// public methods and some additional methods for internal usage. +type layer interface { + // Node retrieves the trie node with the node info. An error will be returned + // if the read operation exits abnormally. For example, if the layer is already + // stale, or the associated state is regarded as corrupted. Notably, no error + // will be returned if the requested node is not found in database. 
+ Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) + + // rootHash returns the root hash for which this layer was made. + rootHash() common.Hash + + // stateID returns the associated state id of layer. + stateID() uint64 + + // parentLayer returns the subsequent layer of it, or nil if the disk was reached. + parentLayer() layer + + // update creates a new layer on top of the existing layer diff tree with + // the provided dirty trie nodes along with the state change set. + // + // Note, the maps are retained by the method to avoid copying everything. + update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer + + // journal commits an entire diff hierarchy to disk into a single journal entry. + // This is meant to be used during shutdown to persist the layer without + // flattening everything down (bad for reorgs). + journal(w io.Writer) error +} + +// Config contains the settings for database. +type Config struct { + StateHistory uint64 // Number of recent blocks to maintain state history for + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes + DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes + ReadOnly bool // Flag whether the database is opened in read only mode. +} + +// sanitize checks the provided user configurations and changes anything that's +// unreasonable or unworkable. +func (c *Config) sanitize() *Config { + conf := *c + if conf.DirtyCacheSize > maxBufferSize { + log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.DirtyCacheSize), "updated", common.StorageSize(maxBufferSize)) + conf.DirtyCacheSize = maxBufferSize + } + return &conf +} + +// Defaults contains default settings for Ethereum mainnet. +var Defaults = &Config{ + StateHistory: params.FullImmutabilityThreshold, + CleanCacheSize: defaultCleanSize, + DirtyCacheSize: DefaultBufferSize, +} + +// ReadOnly is the config in order to open database in read only mode. +var ReadOnly = &Config{ReadOnly: true} + +// Database is a multiple-layered structure for maintaining in-memory trie nodes. +// It consists of one persistent base layer backed by a key-value store, on top +// of which arbitrarily many in-memory diff layers are stacked. The memory diffs +// can form a tree with branching, but the disk layer is singleton and common to +// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can +// be applied to rollback. The deepest reorg that can be handled depends on the +// amount of state histories tracked in the disk. +// +// At most one readable and writable database can be opened at the same time in +// the whole system which ensures that only one database writer can operate disk +// state. Unexpected open operations can cause the system to panic. +type Database struct { + // readOnly is the flag whether the mutation is allowed to be applied. + // It will be set automatically when the database is journaled during + // the shutdown to reject all following unexpected mutations. 
+ readOnly bool // Indicator if database is opened in read only mode + bufferSize int // Memory allowance (in bytes) for caching dirty nodes + config *Config // Configuration for database + diskdb ethdb.Database // Persistent storage for matured trie nodes + tree *layerTree // The group for all known layers + freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests + lock sync.RWMutex // Lock to prevent mutations from happening at the same time +} + +// New attempts to load an already existing layer from a persistent key-value +// store (with a number of memory layers from a journal). If the journal is not +// matched with the base persistent layer, all the recorded diff layers are discarded. +func New(diskdb ethdb.Database, config *Config) *Database { + if config == nil { + config = Defaults + } + config = config.sanitize() + + db := &Database{ + readOnly: config.ReadOnly, + bufferSize: config.DirtyCacheSize, + config: config, + diskdb: diskdb, + } + // Construct the layer tree by resolving the in-disk singleton state + // and in-memory layer journal. + db.tree = newLayerTree(db.loadLayers()) + + // Open the freezer for state history if the passed database contains an + // ancient store. Otherwise, all the relevant functionalities are disabled. + // + // Because the freezer can only be opened once at the same time, this + // mechanism also ensures that at most one **non-readOnly** database + // is opened at the same time to prevent accidental mutation. + if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { + freezer, err := rawdb.NewStateFreezer(ancient, false) + if err != nil { + log.Crit("Failed to open state history freezer", "err", err) + } + db.freezer = freezer + + // Truncate the extra state histories above in freezer in case + // it's not aligned with the disk layer. + pruned, err := truncateFromHead(db.diskdb, freezer, db.tree.bottom().stateID()) + if err != nil { + log.Crit("Failed to truncate extra state histories", "err", err) + } + if pruned != 0 { + log.Warn("Truncated extra state histories", "number", pruned) + } + } + log.Warn("Path-based state scheme is an experimental feature") + return db +} + +// Reader retrieves a layer belonging to the given state root. +func (db *Database) Reader(root common.Hash) (layer, error) { + l := db.tree.get(root) + if l == nil { + return nil, fmt.Errorf("state %#x is not available", root) + } + return l, nil +} + +// Update adds a new layer into the tree, if that can be linked to an existing +// old parent. It is disallowed to insert a disk layer (the origin of all). Apart +// from that this function will flatten the extra diff layers at bottom into disk +// to only keep 128 diff layers in memory by default. +// +// The passed in maps(nodes, states) will be retained to avoid copying everything. +// Therefore, these maps must not be changed afterwards. +func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { + // Hold the lock to prevent concurrent mutations. + db.lock.Lock() + defer db.lock.Unlock() + + // Short circuit if the database is in read only mode. + if db.readOnly { + return errSnapshotReadOnly + } + if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { + return err + } + // Keep 128 diff layers in the memory, persistent layer is 129th. 
+	// - head layer is paired with HEAD state
+	// - head-1 layer is paired with HEAD-1 state
+	// - head-127 layer (bottom-most diff layer) is paired with HEAD-127 state
+	// - head-128 layer (disk layer) is paired with HEAD-128 state
+	return db.tree.cap(root, maxDiffLayers)
+}
+
+// Commit traverses the layer tree downwards from the layer with the provided
+// state root, flattening everything below it into the disk layer. It can be
+// used alone, mostly for testing purposes.
+func (db *Database) Commit(root common.Hash, report bool) error {
+	// Hold the lock to prevent concurrent mutations.
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	// Short circuit if the database is in read only mode.
+	if db.readOnly {
+		return errSnapshotReadOnly
+	}
+	return db.tree.cap(root, 0)
+}
+
+// Reset rebuilds the database with the specified state as the base.
+//
+// - if target state is empty, clear the stored state and all layers on top
+// - if target state is non-empty, ensure the stored state matches it and
+//   clear all other layers on top.
+func (db *Database) Reset(root common.Hash) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	// Short circuit if the database is in read only mode.
+	if db.readOnly {
+		return errSnapshotReadOnly
+	}
+	batch := db.diskdb.NewBatch()
+	root = types.TrieRootHash(root)
+	if root == types.EmptyRootHash {
+		// Empty state is requested as the target, nuke out
+		// the root node and leave all others as dangling.
+		rawdb.DeleteAccountTrieNode(batch, nil)
+	} else {
+		// Ensure the requested state exists before any
+		// action is applied.
+		_, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+		if hash != root {
+			return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
+		}
+	}
+	// Mark the disk layer as stale before applying any mutation.
+	db.tree.bottom().markStale()
+
+	// Drop the stale state journal in persistent database and
+	// reset the persistent state id back to zero.
+	rawdb.DeleteTrieJournal(batch)
+	rawdb.WritePersistentStateID(batch, 0)
+	if err := batch.Write(); err != nil {
+		return err
+	}
+	// Clean up all state histories in freezer. Theoretically
+	// all root->id mappings should be removed as well. Since
+	// mappings can be huge and might take a while to clear,
+	// just leave them on disk to be overwritten.
+	if db.freezer != nil {
+		if err := db.freezer.Reset(); err != nil {
+			return err
+		}
+	}
+	// Re-construct a new disk layer backed by persistent state
+	// with **empty clean cache and node buffer**.
+	dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+	db.tree.reset(dl)
+	log.Info("Rebuilt trie database", "root", root)
+	return nil
+}
+
+// Recover rolls the database back to a specified historical point. The state
+// is supported as the rollback destination only if it's a canonical state and
+// the corresponding state histories exist.
+func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	// Short circuit if rollback operation is not supported.
+	if db.readOnly || db.freezer == nil {
+		return errors.New("state rollback is non-supported")
+	}
+	// Short circuit if the target state is not recoverable.
+	root = types.TrieRootHash(root)
+	if !db.Recoverable(root) {
+		return errStateUnrecoverable
+	}
+	// Apply the state histories upon the disk layer in order.
+	var (
+		start = time.Now()
+		dl    = db.tree.bottom()
+	)
+	for dl.rootHash() != root {
+		h, err := readHistory(db.freezer, dl.stateID())
+		if err != nil {
+			return err
+		}
+		dl, err = dl.revert(h, loader)
+		if err != nil {
+			return err
+		}
+		// Reset the layer tree with the newly created disk layer. It must
+		// be done after each revert operation, otherwise the new disk
+		// layer won't be accessible from outside.
+		db.tree.reset(dl)
+	}
+	rawdb.DeleteTrieJournal(db.diskdb)
+	_, err := truncateFromHead(db.diskdb, db.freezer, dl.stateID())
+	if err != nil {
+		return err
+	}
+	log.Debug("Recovered state", "root", root, "elapsed", common.PrettyDuration(time.Since(start)))
+	return nil
+}
+
+// Recoverable returns whether the specified state is recoverable.
+func (db *Database) Recoverable(root common.Hash) bool {
+	// Ensure the requested state is a known state.
+	root = types.TrieRootHash(root)
+	id := rawdb.ReadStateID(db.diskdb, root)
+	if id == nil {
+		return false
+	}
+	// A recoverable state must be below the disk layer. The term only
+	// refers to a state that is currently not available, but can be
+	// restored by applying state history.
+	dl := db.tree.bottom()
+	if *id >= dl.stateID() {
+		return false
+	}
+	// Ensure the requested state is a canonical state and all state
+	// histories in range [id+1, disklayer.ID] are present and complete.
+	parent := root
+	return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
+		if m.parent != parent {
+			return errors.New("unexpected state history")
+		}
+		if len(m.incomplete) > 0 {
+			return errors.New("incomplete state history")
+		}
+		parent = m.root
+		return nil
+	}) == nil
+}
+
+// Close closes the trie database and the held freezer.
+func (db *Database) Close() error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	// Set the database to read-only mode to prevent all
+	// following mutations.
+	db.readOnly = true
+
+	// Release the memory held by clean cache.
+	db.tree.bottom().resetCache()
+
+	// Close the attached state history freezer.
+	if db.freezer == nil {
+		return nil
+	}
+	return db.freezer.Close()
+}
+
+// Size returns the current storage size of the memory cache in front of the
+// persistent database layer.
+func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize) {
+	db.tree.forEach(func(layer layer) {
+		if diff, ok := layer.(*diffLayer); ok {
+			diffs += common.StorageSize(diff.memory)
+		}
+		if disk, ok := layer.(*diskLayer); ok {
+			nodes += disk.size()
+		}
+	})
+	return diffs, nodes
+}
+
+// Initialized returns whether the state data is already initialized in the
+// path-based scheme.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+	var inited bool
+	db.tree.forEach(func(layer layer) {
+		if layer.rootHash() != types.EmptyRootHash {
+			inited = true
+		}
+	})
+	return inited
+}
+
+// SetBufferSize sets the node buffer size to the provided value (in bytes).
+func (db *Database) SetBufferSize(size int) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	if size > maxBufferSize {
+		log.Info("Capped node buffer size", "provided", common.StorageSize(size), "adjusted", common.StorageSize(maxBufferSize))
+		size = maxBufferSize
+	}
+	db.bufferSize = size
+	return db.tree.bottom().setBufferSize(db.bufferSize)
+}
+
+// Scheme returns the node scheme used in the database.
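Recoverable and Recover are designed to be used together: probe first, then revert layer by layer. A hedged sketch of the intended flow, where `root` and `loader` (a triestate.TrieLoader) are supplied by the caller:

```go
target := types.TrieRootHash(root) // canonical root strictly below the disk layer
if db.Recoverable(target) {
	if err := db.Recover(target, loader); err != nil {
		log.Error("State rollback failed", "root", target, "err", err)
	}
}
```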
+func (db *Database) Scheme() string { + return rawdb.PathScheme +} diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go new file mode 100644 index 000000000..6d346d20e --- /dev/null +++ b/trie/triedb/pathdb/database_test.go @@ -0,0 +1,574 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) { + h, err := newTestHasher(addrHash, root, cleans) + if err != nil { + panic(fmt.Errorf("failed to create hasher, err: %w", err)) + } + for key, val := range dirties { + if len(val) == 0 { + h.Delete(key.Bytes()) + } else { + h.Update(key.Bytes(), val) + } + } + root, nodes, _ := h.Commit(false) + return root, nodes +} + +func generateAccount(storageRoot common.Hash) types.StateAccount { + return types.StateAccount{ + Nonce: uint64(rand.Intn(100)), + Balance: big.NewInt(rand.Int63()), + CodeHash: testutil.RandBytes(32), + Root: storageRoot, + } +} + +const ( + createAccountOp int = iota + modifyAccountOp + deleteAccountOp + opLen +) + +type genctx struct { + accounts map[common.Hash][]byte + storages map[common.Hash]map[common.Hash][]byte + accountOrigin map[common.Address][]byte + storageOrigin map[common.Address]map[common.Hash][]byte + nodes *trienode.MergedNodeSet +} + +func newCtx() *genctx { + return &genctx{ + accounts: make(map[common.Hash][]byte), + storages: make(map[common.Hash]map[common.Hash][]byte), + accountOrigin: make(map[common.Address][]byte), + storageOrigin: make(map[common.Address]map[common.Hash][]byte), + nodes: trienode.NewMergedNodeSet(), + } +} + +type tester struct { + db *Database + roots []common.Hash + preimages map[common.Hash]common.Address + accounts map[common.Hash][]byte + storages map[common.Hash]map[common.Hash][]byte + + // state snapshots + snapAccounts map[common.Hash]map[common.Hash][]byte + snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte +} + +func newTester(t *testing.T) *tester { + var ( + disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) + db = New(disk, &Config{CleanCacheSize: 256 * 1024, DirtyCacheSize: 256 * 1024}) + obj = &tester{ + db: db, + preimages: make(map[common.Hash]common.Address), + accounts: make(map[common.Hash][]byte), + storages: 
make(map[common.Hash]map[common.Hash][]byte), + snapAccounts: make(map[common.Hash]map[common.Hash][]byte), + snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte), + } + ) + for i := 0; i < 2*128; i++ { + var parent = types.EmptyRootHash + if len(obj.roots) != 0 { + parent = obj.roots[len(obj.roots)-1] + } + root, nodes, states := obj.generate(parent) + if err := db.Update(root, parent, uint64(i), nodes, states); err != nil { + panic(fmt.Errorf("failed to update state changes, err: %w", err)) + } + obj.roots = append(obj.roots, root) + } + return obj +} + +func (t *tester) release() { + t.db.Close() + t.db.diskdb.Close() +} + +func (t *tester) randAccount() (common.Address, []byte) { + for addrHash, account := range t.accounts { + return t.preimages[addrHash], account + } + return common.Address{}, nil +} + +func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for i := 0; i < 10; i++ { + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) + hash := testutil.RandomHash() + + storage[hash] = v + origin[hash] = nil + } + root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil) + + ctx.storages[addrHash] = storage + ctx.storageOrigin[addr] = origin + ctx.nodes.Merge(set) + return root +} + +func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for hash, val := range t.storages[addrHash] { + origin[hash] = val + storage[hash] = nil + + if len(origin) == 3 { + break + } + } + for i := 0; i < 3; i++ { + v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) + hash := testutil.RandomHash() + + storage[hash] = v + origin[hash] = nil + } + root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash]) + + ctx.storages[addrHash] = storage + ctx.storageOrigin[addr] = origin + ctx.nodes.Merge(set) + return root +} + +func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash { + var ( + addrHash = crypto.Keccak256Hash(addr.Bytes()) + storage = make(map[common.Hash][]byte) + origin = make(map[common.Hash][]byte) + ) + for hash, val := range t.storages[addrHash] { + origin[hash] = val + storage[hash] = nil + } + root, set := updateTrie(addrHash, root, storage, t.storages[addrHash]) + if root != types.EmptyRootHash { + panic("failed to clear storage trie") + } + ctx.storages[addrHash] = storage + ctx.storageOrigin[addr] = origin + ctx.nodes.Merge(set) + return root +} + +func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) { + var ( + ctx = newCtx() + dirties = make(map[common.Hash]struct{}) + ) + for i := 0; i < 20; i++ { + switch rand.Intn(opLen) { + case createAccountOp: + // account creation + addr := testutil.RandomAddress() + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if _, ok := t.accounts[addrHash]; ok { + continue + } + if _, ok := dirties[addrHash]; ok { + continue + } + dirties[addrHash] = struct{}{} + + root := t.generateStorage(ctx, addr) + ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root)) + ctx.accountOrigin[addr] = nil + t.preimages[addrHash] = addr + + case modifyAccountOp: + // account mutation + addr, account := 
t.randAccount() + if addr == (common.Address{}) { + continue + } + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if _, ok := dirties[addrHash]; ok { + continue + } + dirties[addrHash] = struct{}{} + + acct, _ := types.FullAccount(account) + stRoot := t.mutateStorage(ctx, addr, acct.Root) + newAccount := types.SlimAccountRLP(generateAccount(stRoot)) + + ctx.accounts[addrHash] = newAccount + ctx.accountOrigin[addr] = account + + case deleteAccountOp: + // account deletion + addr, account := t.randAccount() + if addr == (common.Address{}) { + continue + } + addrHash := crypto.Keccak256Hash(addr.Bytes()) + if _, ok := dirties[addrHash]; ok { + continue + } + dirties[addrHash] = struct{}{} + + acct, _ := types.FullAccount(account) + if acct.Root != types.EmptyRootHash { + t.clearStorage(ctx, addr, acct.Root) + } + ctx.accounts[addrHash] = nil + ctx.accountOrigin[addr] = account + } + } + root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts) + ctx.nodes.Merge(set) + + // Save state snapshot before commit + t.snapAccounts[parent] = copyAccounts(t.accounts) + t.snapStorages[parent] = copyStorages(t.storages) + + // Commit all changes to live state set + for addrHash, account := range ctx.accounts { + if len(account) == 0 { + delete(t.accounts, addrHash) + } else { + t.accounts[addrHash] = account + } + } + for addrHash, slots := range ctx.storages { + if _, ok := t.storages[addrHash]; !ok { + t.storages[addrHash] = make(map[common.Hash][]byte) + } + for sHash, slot := range slots { + if len(slot) == 0 { + delete(t.storages[addrHash], sHash) + } else { + t.storages[addrHash][sHash] = slot + } + } + } + return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil) +} + +// lastRoot returns the latest root hash, or empty if nothing is cached. +func (t *tester) lastHash() common.Hash { + if len(t.roots) == 0 { + return common.Hash{} + } + return t.roots[len(t.roots)-1] +} + +func (t *tester) verifyState(root common.Hash) error { + reader, err := t.db.Reader(root) + if err != nil { + return err + } + _, err = reader.Node(common.Hash{}, nil, root) + if err != nil { + return errors.New("root node is not available") + } + for addrHash, account := range t.snapAccounts[root] { + blob, err := reader.Node(common.Hash{}, addrHash.Bytes(), crypto.Keccak256Hash(account)) + if err != nil || !bytes.Equal(blob, account) { + return fmt.Errorf("account is mismatched: %w", err) + } + } + for addrHash, slots := range t.snapStorages[root] { + for hash, slot := range slots { + blob, err := reader.Node(addrHash, hash.Bytes(), crypto.Keccak256Hash(slot)) + if err != nil || !bytes.Equal(blob, slot) { + return fmt.Errorf("slot is mismatched: %w", err) + } + } + } + return nil +} + +func (t *tester) verifyHistory() error { + bottom := t.bottomIndex() + for i, root := range t.roots { + // The state history related to the state above disk layer should not exist. + if i > bottom { + _, err := readHistory(t.db.freezer, uint64(i+1)) + if err == nil { + return errors.New("unexpected state history") + } + continue + } + // The state history related to the state below or equal to the disk layer + // should exist. 
+ obj, err := readHistory(t.db.freezer, uint64(i+1)) + if err != nil { + return err + } + parent := types.EmptyRootHash + if i != 0 { + parent = t.roots[i-1] + } + if obj.meta.parent != parent { + return fmt.Errorf("unexpected parent, want: %x, got: %x", parent, obj.meta.parent) + } + if obj.meta.root != root { + return fmt.Errorf("unexpected root, want: %x, got: %x", root, obj.meta.root) + } + } + return nil +} + +// bottomIndex returns the index of current disk layer. +func (t *tester) bottomIndex() int { + bottom := t.db.tree.bottom() + for i := 0; i < len(t.roots); i++ { + if t.roots[i] == bottom.rootHash() { + return i + } + } + return -1 +} + +func TestDatabaseRollback(t *testing.T) { + // Verify state histories + tester := newTester(t) + defer tester.release() + + if err := tester.verifyHistory(); err != nil { + t.Fatalf("Invalid state history, err: %v", err) + } + // Revert database from top to bottom + for i := tester.bottomIndex(); i >= 0; i-- { + root := tester.roots[i] + parent := types.EmptyRootHash + if i > 0 { + parent = tester.roots[i-1] + } + loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root]) + if err := tester.db.Recover(parent, loader); err != nil { + t.Fatalf("Failed to revert db, err: %v", err) + } + tester.verifyState(parent) + } + if tester.db.tree.len() != 1 { + t.Fatal("Only disk layer is expected") + } +} + +func TestDatabaseRecoverable(t *testing.T) { + var ( + tester = newTester(t) + index = tester.bottomIndex() + ) + defer tester.release() + + var cases = []struct { + root common.Hash + expect bool + }{ + // Unknown state should be unrecoverable + {common.Hash{0x1}, false}, + + // Initial state should be recoverable + {types.EmptyRootHash, true}, + + // Initial state should be recoverable + {common.Hash{}, true}, + + // Layers below current disk layer are recoverable + {tester.roots[index-1], true}, + + // Disklayer itself is not recoverable, since it's + // available for accessing. + {tester.roots[index], false}, + + // Layers above current disk layer are not recoverable + // since they are available for accessing. 
+ {tester.roots[index+1], false}, + } + for i, c := range cases { + result := tester.db.Recoverable(c.root) + if result != c.expect { + t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result) + } + } +} + +func TestReset(t *testing.T) { + var ( + tester = newTester(t) + index = tester.bottomIndex() + ) + defer tester.release() + + // Reset database to unknown target, should reject it + if err := tester.db.Reset(testutil.RandomHash()); err == nil { + t.Fatal("Failed to reject invalid reset") + } + // Reset database to state persisted in the disk + if err := tester.db.Reset(types.EmptyRootHash); err != nil { + t.Fatalf("Failed to reset database %v", err) + } + // Ensure journal is deleted from disk + if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 { + t.Fatal("Failed to clean journal") + } + // Ensure all trie histories are removed + for i := 0; i <= index; i++ { + _, err := readHistory(tester.db.freezer, uint64(i+1)) + if err == nil { + t.Fatalf("Failed to clean state history, index %d", i+1) + } + } + // Verify layer tree structure, single disk layer is expected + if tester.db.tree.len() != 1 { + t.Fatalf("Extra layer kept %d", tester.db.tree.len()) + } + if tester.db.tree.bottom().rootHash() != types.EmptyRootHash { + t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash()) + } +} + +func TestCommit(t *testing.T) { + tester := newTester(t) + defer tester.release() + + if err := tester.db.Commit(tester.lastHash(), false); err != nil { + t.Fatalf("Failed to cap database, err: %v", err) + } + // Verify layer tree structure, single disk layer is expected + if tester.db.tree.len() != 1 { + t.Fatal("Layer tree structure is invalid") + } + if tester.db.tree.bottom().rootHash() != tester.lastHash() { + t.Fatal("Layer tree structure is invalid") + } + // Verify states + if err := tester.verifyState(tester.lastHash()); err != nil { + t.Fatalf("State is invalid, err: %v", err) + } + // Verify state histories + if err := tester.verifyHistory(); err != nil { + t.Fatalf("State history is invalid, err: %v", err) + } +} + +func TestJournal(t *testing.T) { + tester := newTester(t) + defer tester.release() + + if err := tester.db.Journal(tester.lastHash()); err != nil { + t.Errorf("Failed to journal, err: %v", err) + } + tester.db.Close() + tester.db = New(tester.db.diskdb, nil) + + // Verify states including disk layer and all diff on top. 
+ for i := 0; i < len(tester.roots); i++ { + if i >= tester.bottomIndex() { + if err := tester.verifyState(tester.roots[i]); err != nil { + t.Fatalf("Invalid state, err: %v", err) + } + continue + } + if err := tester.verifyState(tester.roots[i]); err == nil { + t.Fatal("Unexpected state") + } + } +} + +func TestCorruptedJournal(t *testing.T) { + tester := newTester(t) + defer tester.release() + + if err := tester.db.Journal(tester.lastHash()); err != nil { + t.Errorf("Failed to journal, err: %v", err) + } + tester.db.Close() + _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) + + // Mutate the journal in disk, it should be regarded as invalid + blob := rawdb.ReadTrieJournal(tester.db.diskdb) + blob[0] = 1 + rawdb.WriteTrieJournal(tester.db.diskdb, blob) + + // Verify states, all not-yet-written states should be discarded + tester.db = New(tester.db.diskdb, nil) + for i := 0; i < len(tester.roots); i++ { + if tester.roots[i] == root { + if err := tester.verifyState(root); err != nil { + t.Fatalf("Disk state is corrupted, err: %v", err) + } + continue + } + if err := tester.verifyState(tester.roots[i]); err == nil { + t.Fatal("Unexpected state") + } + } +} + +// copyAccounts returns a deep-copied account set of the provided one. +func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte { + copied := make(map[common.Hash][]byte, len(set)) + for key, val := range set { + copied[key] = common.CopyBytes(val) + } + return copied +} + +// copyStorages returns a deep-copied storage set of the provided one. +func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte { + copied := make(map[common.Hash]map[common.Hash][]byte, len(set)) + for addrHash, subset := range set { + copied[addrHash] = make(map[common.Hash][]byte, len(subset)) + for key, val := range subset { + copied[addrHash][key] = common.CopyBytes(val) + } + } + return copied +} diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go new file mode 100644 index 000000000..d25ac1c60 --- /dev/null +++ b/trie/triedb/pathdb/difflayer.go @@ -0,0 +1,174 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +// diffLayer represents a collection of modifications made to the in-memory tries +// along with associated state changes after running a block on top. +// +// The goal of a diff layer is to act as a journal, tracking recent modifications +// made to the state, that have not yet graduated into a semi-immutable state. 
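The struct below holds one block's worth of changes; layers then stack by construction. A toy sketch of that composition — `base` (the disk layer), the hashes and the change sets are assumed here, and the nodes maps are keyed owner → path, with the zero owner hash denoting the account trie:

```go
nodes := map[common.Hash]map[string]*trienode.Node{
	common.Hash{}: {string([]byte{0x01}): trienode.New(h1, blob1)}, // account trie subset
}
l1 := base.update(root1, 1, 1, nodes, states1) // first diff layer on the disk layer
l2 := l1.update(root2, 2, 2, nodes2, states2)  // one layer per block
blob, err := l2.Node(common.Hash{}, []byte{0x01}, h1) // walks l2 -> l1 -> base
```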
+type diffLayer struct {
+	// Immutables
+	root   common.Hash                               // Root hash to which this layer diff belongs
+	id     uint64                                    // Corresponding state id
+	block  uint64                                    // Associated block number
+	nodes  map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path
+	states *triestate.Set                            // Associated state change set for building history
+	memory uint64                                    // Approximate guess as to how much memory we use
+
+	parent layer        // Parent layer modified by this one, never nil, **can be changed**
+	lock   sync.RWMutex // Lock used to protect parent
+}
+
+// newDiffLayer creates a new diff layer on top of an existing layer.
+func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+	var (
+		size  int64
+		count int
+	)
+	dl := &diffLayer{
+		root:   root,
+		id:     id,
+		block:  block,
+		nodes:  nodes,
+		states: states,
+		parent: parent,
+	}
+	for _, subset := range nodes {
+		for path, n := range subset {
+			dl.memory += uint64(n.Size() + len(path))
+			size += int64(len(n.Blob) + len(path))
+		}
+		count += len(subset)
+	}
+	if states != nil {
+		dl.memory += uint64(states.Size())
+	}
+	dirtyWriteMeter.Mark(size)
+	diffLayerNodesMeter.Mark(int64(count))
+	diffLayerBytesMeter.Mark(int64(dl.memory))
+	log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory))
+	return dl
+}
+
+// rootHash implements the layer interface, returning the root hash of
+// corresponding state.
+func (dl *diffLayer) rootHash() common.Hash {
+	return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of the layer.
+func (dl *diffLayer) stateID() uint64 {
+	return dl.id
+}
+
+// parentLayer implements the layer interface, returning the parent layer of
+// the diff layer.
+func (dl *diffLayer) parentLayer() layer {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	return dl.parent
+}
+
+// node retrieves the node with the provided node information. It's the internal
+// version of Node which additionally tracks the depth of the accessed layer.
+// No error will be returned if the node is not found.
+func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, depth int) ([]byte, error) {
+	// Hold the lock, ensure the parent won't be changed during the
+	// state accessing.
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	// If the trie node is known locally, return it
+	subset, ok := dl.nodes[owner]
+	if ok {
+		n, ok := subset[string(path)]
+		if ok {
+			// If the trie node is not hash matched, or marked as removed,
+			// bubble up an error here. It shouldn't happen at all.
+			if n.Hash != hash {
+				dirtyFalseMeter.Mark(1)
+				log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+				return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+			}
+			dirtyHitMeter.Mark(1)
+			dirtyNodeHitDepthHist.Update(int64(depth))
+			dirtyReadMeter.Mark(int64(len(n.Blob)))
+			return n.Blob, nil
+		}
+	}
+	// Trie node unknown to this layer, resolve from parent
+	if diff, ok := dl.parent.(*diffLayer); ok {
+		return diff.node(owner, path, hash, depth+1)
+	}
+	// Failed to resolve through diff layers, fall back to the disk layer
+	return dl.parent.Node(owner, path, hash)
+}
+
+// Node implements the layer interface, retrieving the trie node blob with the
+// provided node information. No error will be returned if the node is not found.
+func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { + return dl.node(owner, path, hash, 0) +} + +// update implements the layer interface, creating a new layer on top of the +// existing layer tree with the specified data items. +func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { + return newDiffLayer(dl, root, id, block, nodes, states) +} + +// persist flushes the diff layer and all its parent layers to disk layer. +func (dl *diffLayer) persist(force bool) (layer, error) { + if parent, ok := dl.parentLayer().(*diffLayer); ok { + // Hold the lock to prevent any read operation until the new + // parent is linked correctly. + dl.lock.Lock() + + // The merging of diff layers starts at the bottom-most layer, + // therefore we recurse down here, flattening on the way up + // (diffToDisk). + result, err := parent.persist(force) + if err != nil { + dl.lock.Unlock() + return nil, err + } + dl.parent = result + dl.lock.Unlock() + } + return diffToDisk(dl, force) +} + +// diffToDisk merges a bottom-most diff into the persistent disk layer underneath +// it. The method will panic if called onto a non-bottom-most diff layer. +func diffToDisk(layer *diffLayer, force bool) (layer, error) { + disk, ok := layer.parentLayer().(*diskLayer) + if !ok { + panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer())) + } + return disk.commit(layer, force) +} diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go new file mode 100644 index 000000000..9b5907c3c --- /dev/null +++ b/trie/triedb/pathdb/difflayer_test.go @@ -0,0 +1,170 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
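persist and diffToDisk above flatten the stack bottom-first, so a caller only ever hands in the top of a layer stack. A rough driver sketch (not from this diff), assuming `top` is the current head layer:

```go
if dl, ok := top.(*diffLayer); ok {
	disk, err := dl.persist(true) // recurses to the oldest diff, then disk.commit
	if err == nil {
		_ = disk // the freshly committed disk layer, new bottom of the tree
	}
}
```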
+ +package pathdb + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +func emptyLayer() *diskLayer { + return &diskLayer{ + db: New(rawdb.NewMemoryDatabase(), nil), + buffer: newNodeBuffer(DefaultBufferSize, nil, 0), + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkSearch128Layers +// BenchmarkSearch128Layers-8 243826 4755 ns/op +func BenchmarkSearch128Layers(b *testing.B) { benchmarkSearch(b, 0, 128) } + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkSearch512Layers +// BenchmarkSearch512Layers-8 49686 24256 ns/op +func BenchmarkSearch512Layers(b *testing.B) { benchmarkSearch(b, 0, 512) } + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkSearch1Layer +// BenchmarkSearch1Layer-8 14062725 88.40 ns/op +func BenchmarkSearch1Layer(b *testing.B) { benchmarkSearch(b, 127, 128) } + +func benchmarkSearch(b *testing.B, depth int, total int) { + var ( + npath []byte + nhash common.Hash + nblob []byte + ) + // First, we set up 128 diff layers, with 3K items each + fill := func(parent layer, index int) *diffLayer { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = make(map[string]*trienode.Node) + for i := 0; i < 3000; i++ { + var ( + path = testutil.RandBytes(32) + node = testutil.RandomNode() + ) + nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + if npath == nil && depth == index { + npath = common.CopyBytes(path) + nblob = common.CopyBytes(node.Blob) + nhash = node.Hash + } + } + return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) + } + var layer layer + layer = emptyLayer() + for i := 0; i < total; i++ { + layer = fill(layer, i) + } + b.ResetTimer() + + var ( + have []byte + err error + ) + for i := 0; i < b.N; i++ { + have, err = layer.Node(common.Hash{}, npath, nhash) + if err != nil { + b.Fatal(err) + } + } + if !bytes.Equal(have, nblob) { + b.Fatalf("have %x want %x", have, nblob) + } +} + +// goos: darwin +// goarch: arm64 +// pkg: github.com/ethereum/go-ethereum/trie +// BenchmarkPersist +// BenchmarkPersist-8 10 111252975 ns/op +func BenchmarkPersist(b *testing.B) { + // First, we set up 128 diff layers, with 3K items each + fill := func(parent layer) *diffLayer { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = make(map[string]*trienode.Node) + for i := 0; i < 3000; i++ { + var ( + path = testutil.RandBytes(32) + node = testutil.RandomNode() + ) + nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + } + return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) + } + for i := 0; i < b.N; i++ { + b.StopTimer() + var layer layer + layer = emptyLayer() + for i := 1; i < 128; i++ { + layer = fill(layer) + } + b.StartTimer() + + dl, ok := layer.(*diffLayer) + if !ok { + break + } + dl.persist(false) + } +} + +// BenchmarkJournal benchmarks the performance for journaling the layers. 
+// +// BenchmarkJournal +// BenchmarkJournal-8 10 110969279 ns/op +func BenchmarkJournal(b *testing.B) { + b.SkipNow() + + // First, we set up 128 diff layers, with 3K items each + fill := func(parent layer) *diffLayer { + nodes := make(map[common.Hash]map[string]*trienode.Node) + nodes[common.Hash{}] = make(map[string]*trienode.Node) + for i := 0; i < 3000; i++ { + var ( + path = testutil.RandBytes(32) + node = testutil.RandomNode() + ) + nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) + } + // TODO(rjl493456442) a non-nil state set is expected. + return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) + } + var layer layer + layer = emptyLayer() + for i := 0; i < 128; i++ { + layer = fill(layer) + } + b.ResetTimer() + + for i := 0; i < b.N; i++ { + layer.journal(new(bytes.Buffer)) + } +} diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go new file mode 100644 index 000000000..87718290f --- /dev/null +++ b/trie/triedb/pathdb/disklayer.go @@ -0,0 +1,310 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pathdb + +import ( + "errors" + "fmt" + "sync" + + "github.com/VictoriaMetrics/fastcache" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/triestate" + "golang.org/x/crypto/sha3" +) + +// diskLayer is a low level persistent layer built on top of a key-value store. +type diskLayer struct { + root common.Hash // Immutable, root hash to which this layer was made for + id uint64 // Immutable, corresponding state id + db *Database // Path-based trie database + cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs + buffer *nodebuffer // Node buffer to aggregate writes + stale bool // Signals that the layer became stale (state progressed) + lock sync.RWMutex // Lock used to protect stale flag +} + +// newDiskLayer creates a new disk layer based on the passing arguments. +func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer { + // Initialize a clean cache if the memory allowance is not zero + // or reuse the provided cache if it is not nil (inherited from + // the original disk layer). + if cleans == nil && db.config.CleanCacheSize != 0 { + cleans = fastcache.New(db.config.CleanCacheSize) + } + return &diskLayer{ + root: root, + id: id, + db: db, + cleans: cleans, + buffer: buffer, + } +} + +// root implements the layer interface, returning root hash of corresponding state. +func (dl *diskLayer) rootHash() common.Hash { + return dl.root +} + +// stateID implements the layer interface, returning the state id of disk layer. 
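The accessors here are trivial; the substance of the disk layer is the read path in Node further below, which consults three tiers in order and treats a hash mismatch at any tier as corruption. In outline:

```go
// 1. dl.buffer - committed but not yet flushed nodes (lock free by design)
// 2. dl.cleans - fastcache of persisted RLPs, verified by re-hashing
// 3. rawdb     - ReadAccountTrieNode / ReadStorageTrieNode on the diskdb
blob, err := dl.Node(owner, path, hash) // a mismatch yields errUnexpectedNode
```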
+func (dl *diskLayer) stateID() uint64 {
+	return dl.id
+}
+
+// parentLayer implements the layer interface, returning nil as there's no
+// layer below the disk.
+func (dl *diskLayer) parentLayer() layer {
+	return nil
+}
+
+// isStale returns whether this layer has become stale (was flattened across)
+// or if it's still live.
+func (dl *diskLayer) isStale() bool {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	return dl.stale
+}
+
+// markStale sets the stale flag as true.
+func (dl *diskLayer) markStale() {
+	dl.lock.Lock()
+	defer dl.lock.Unlock()
+
+	if dl.stale {
+		panic("triedb disk layer is stale") // we've committed into the same base from two children, boom
+	}
+	dl.stale = true
+}
+
+// Node implements the layer interface, retrieving the trie node with the
+// provided node info. No error will be returned if the node is not found.
+func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	if dl.stale {
+		return nil, errSnapshotStale
+	}
+	// Try to retrieve the trie node from the not-yet-written
+	// node buffer first. Note the buffer is lock free since
+	// it's impossible to mutate the buffer before tagging the
+	// layer as stale.
+	n, err := dl.buffer.node(owner, path, hash)
+	if err != nil {
+		return nil, err
+	}
+	if n != nil {
+		dirtyHitMeter.Mark(1)
+		dirtyReadMeter.Mark(int64(len(n.Blob)))
+		return n.Blob, nil
+	}
+	dirtyMissMeter.Mark(1)
+
+	// Try to retrieve the trie node from the clean memory cache
+	key := cacheKey(owner, path)
+	if dl.cleans != nil {
+		if blob := dl.cleans.Get(nil, key); len(blob) > 0 {
+			h := newHasher()
+			defer h.release()
+
+			got := h.hash(blob)
+			if got == hash {
+				cleanHitMeter.Mark(1)
+				cleanReadMeter.Mark(int64(len(blob)))
+				return blob, nil
+			}
+			cleanFalseMeter.Mark(1)
+			log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got)
+		}
+		cleanMissMeter.Mark(1)
+	}
+	// Try to retrieve the trie node from the disk.
+	var (
+		nBlob []byte
+		nHash common.Hash
+	)
+	if owner == (common.Hash{}) {
+		nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path)
+	} else {
+		nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
+	}
+	if nHash != hash {
+		diskFalseMeter.Mark(1)
+		log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
+		return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+	}
+	if dl.cleans != nil && len(nBlob) > 0 {
+		dl.cleans.Set(key, nBlob)
+		cleanWriteMeter.Mark(int64(len(nBlob)))
+	}
+	return nBlob, nil
+}
+
+// update implements the layer interface, returning a new diff layer on top
+// with the given state set.
+func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+	return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// commit merges the given bottom-most diff layer into the node buffer
+// and returns a newly constructed disk layer. Note the current disk
+// layer must be tagged as stale first to prevent re-access.
+func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
+	dl.lock.Lock()
+	defer dl.lock.Unlock()
+
+	// Construct and store the state history first. If a crash happens
+	// after storing the state history but without flushing the
+	// corresponding states (journal), the stored state history will
+	// be truncated on the next restart.
+// commit merges the given bottom-most diff layer into the node buffer
+// and returns a newly constructed disk layer. Note the current disk
+// layer must be tagged as stale first to prevent re-access.
+func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ // Construct and store the state history first. If a crash happens
+ // after storing the state history but before flushing the
+ // corresponding states (journal), the stored state history will
+ // be truncated on the next restart.
+ if dl.db.freezer != nil {
+ err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateHistory)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.stale = true
+
+ // Store the root->id lookup afterwards. All stored lookups are
+ // identified by the **unique** state root. It's impossible that
+ // in the same chain blocks are not adjacent but have the same
+ // root.
+ if dl.id == 0 {
+ rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
+ }
+ rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
+
+ // Construct a new disk layer by merging the nodes from the provided
+ // diff layer, and flush the content of the disk layer if too many
+ // nodes are cached. The clean cache is inherited from the original
+ // disk layer for reuse.
+ ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
+ err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force)
+ if err != nil {
+ return nil, err
+ }
+ return ndl, nil
+}
+
+// revert applies the given state history and returns a reverted disk layer.
+func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) {
+ if h.meta.root != dl.rootHash() {
+ return nil, errUnexpectedHistory
+ }
+ // Reject if the provided state history is incomplete. This is caused
+ // by the SELF-DESTRUCT of a large contract whose storage changes could
+ // not be fully recorded due to memory limitations.
+ if len(h.meta.incomplete) > 0 {
+ return nil, errors.New("incomplete state history")
+ }
+ if dl.id == 0 {
+ return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
+ }
+ // Apply the reverse state changes upon the current state. This must
+ // be done before holding the lock in order to access state in "this"
+ // layer.
+ nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader)
+ if err != nil {
+ return nil, err
+ }
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ dl.stale = true
+
+ // The state changes may be applied to the node buffer or to the
+ // persistent state, depending on whether the node buffer is empty.
+ // If the node buffer is not empty, the state transition that needs
+ // to be reverted is not yet flushed and is still cached in the node
+ // buffer; otherwise, manipulate the persistent state directly.
+ if !dl.buffer.empty() {
+ err := dl.buffer.revert(dl.db.diskdb, nodes)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ batch := dl.db.diskdb.NewBatch()
+ writeNodes(batch, nodes, dl.cleans)
+ rawdb.WritePersistentStateID(batch, dl.id-1)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write states", "err", err)
+ }
+ }
+ return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil
+}
+
+// setBufferSize sets the node buffer size to the provided value.
+func (dl *diskLayer) setBufferSize(size int) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return errSnapshotStale
+ }
+ return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id)
+}
+
+// size returns the approximate size of cached nodes in the disk layer.
+func (dl *diskLayer) size() common.StorageSize {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return 0
+ }
+ return common.StorageSize(dl.buffer.size)
+}
+
+// resetCache releases the memory held by the clean cache to prevent a memory leak.
+func (dl *diskLayer) resetCache() {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Stale disk layer loses the ownership of clean cache.
+ if dl.stale {
+ return
+ }
+ if dl.cleans != nil {
+ dl.cleans.Reset()
+ }
+}
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
+}
diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go new file mode 100644 index 000000000..f503a9c49 --- /dev/null +++ b/trie/triedb/pathdb/errors.go @@ -0,0 +1,51 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ // errSnapshotReadOnly is returned if the database is opened in read only mode
+ // and mutation is requested.
+ errSnapshotReadOnly = errors.New("read only")
+
+ // errSnapshotStale is returned from data accessors if the underlying layer
+ // had been invalidated due to the chain progressing forward far enough
+ // to not maintain the layer's original state.
+ errSnapshotStale = errors.New("layer stale")
+
+ // errUnexpectedHistory is returned if an unmatched state history is applied
+ // to the database for state rollback.
+ errUnexpectedHistory = errors.New("unexpected state history")
+
+ // errStateUnrecoverable is returned if state is required to be reverted to
+ // a destination without associated state history available.
+ errStateUnrecoverable = errors.New("state is unrecoverable")
+
+ // errUnexpectedNode is returned if the hash of the requested node with the
+ // specified path does not match the expectation.
+ errUnexpectedNode = errors.New("unexpected node")
+)
+
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
+ return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+}
diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go new file mode 100644 index 000000000..ce8253250 --- /dev/null +++ b/trie/triedb/pathdb/history.go @@ -0,0 +1,641 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+ "golang.org/x/exp/slices"
+)
+
+// State history records the state changes involved in executing a block. The
+// state can be reverted to the previous version by applying the associated
+// history object (state reverse diff). State history objects are kept to
+// guarantee that the system can perform state rollbacks in case of deep reorg.
+//
+// Each state transition will generate a state history object. Note that not
+// every block has a corresponding state history object. If a block performs
+// no state changes whatsoever, no state history is created for it. Each state
+// history will have a sequentially increasing number acting as its unique
+// identifier.
+//
+// The state history is written to disk (ancient store) when the corresponding
+// diff layer is merged into the disk layer. At the same time, the system can
+// prune the oldest histories according to the config.
+//
+// Disk State
+// ^
+// |
+// +------------+ +---------+ +---------+ +---------+
+// | Init State |---->| State 1 |---->| ... |---->| State n |
+// +------------+ +---------+ +---------+ +---------+
+//
+// +-----------+ +------+ +-----------+
+// | History 1 |----> | ... |---->| History n |
+// +-----------+ +------+ +-----------+
+//
+// # Rollback
+//
+// If the system wants to roll back to a previous state n, it needs to ensure
+// all history objects from n+1 up to the current disk layer exist. The
+// history objects are applied to the state in reverse order, starting from the
+// current disk layer.
+
+const (
+ accountIndexSize = common.AddressLength + 13 // The length of encoded account index
+ slotIndexSize = common.HashLength + 5 // The length of encoded slot index
+ historyMetaSize = 9 + 2*common.HashLength // The length of fixed size part of meta object
+
+ stateHistoryVersion = uint8(0) // initial version of state history structure.
+)
+
+// Each state history entry consists of five elements:
+//
+// # metadata
+// This object contains a few meta fields, such as the associated state root,
+// block number, version tag and so on. This object may contain an extra
+// accountHash list, which means the storage changes belonging to these accounts
+// are incomplete due to large contract destruction. Incomplete histories
+// cannot be used for rollback or for serving archive state requests.
+//
+// # account index
+// This object contains index information for an account. For example, offset
+// and length indicate the location of the data belonging to the account. Besides,
+// storageOffset and storageSlots indicate the location of the storage
+// modifications belonging to the account.
+//
+// The size of each account index is *fixed*, and all indexes are sorted
+// lexicographically. Thus binary search can be performed to quickly locate a
+// specific account.
+//
+// # account data
+// Account data is a concatenated byte stream composed of all account data.
+// The account data can be resolved using the offset and length info in the
+// corresponding account index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | length
+// offset |----------------+
+// v v
+// +----------------+----------------+----------------+----------------+
+// | Account data 1 | Account data 2 | ... | Account data N |
+// +----------------+----------------+----------------+----------------+
+//
+// # storage index
+// This object is similar to the account index. It's also fixed size and
+// contains the location info of the storage slot data.
+//
+// # storage data
+// Storage data is a concatenated byte stream composed of all storage slot data.
+// The storage slot data can be resolved using the location info in the
+// corresponding account index and storage slot index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | storage slots
+// storage offset |-----------------------------------------------------+
+// v v
+// +-----------------+-----------------+-----------------+
+// | storage index 1 | storage index 2 | storage index 3 |
+// +-----------------+-----------------+-----------------+
+// | length
+// offset |-------------+
+// v v
+// +-------------+
+// | slot data 1 |
+// +-------------+
+
+// accountIndex describes the metadata belonging to an account.
+type accountIndex struct {
+ address common.Address // The address of account
+ length uint8 // The length of account data, size limited by 255
+ offset uint32 // The offset of item in account data table
+ storageOffset uint32 // The offset of storage index in storage index table
+ storageSlots uint32 // The number of mutated storage slots belonging to the account
+}
+
+// encode packs account index into byte stream.
+func (i *accountIndex) encode() []byte {
+ var buf [accountIndexSize]byte
+ copy(buf[:], i.address.Bytes())
+ buf[common.AddressLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
+ binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
+ binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
+ return buf[:]
+}
+
+// decode unpacks account index from byte stream.
+func (i *accountIndex) decode(blob []byte) {
+ i.address = common.BytesToAddress(blob[:common.AddressLength])
+ i.length = blob[common.AddressLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
+ i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
+ i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
+}
+
+// slotIndex describes the metadata belonging to a storage slot.
+type slotIndex struct {
+ hash common.Hash // The hash of slot key
+ length uint8 // The length of storage slot, up to 32 bytes defined in protocol
+ offset uint32 // The offset of item in storage slot data table
+}
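Editor's note: a standalone sketch (not patch code) that round-trips the fixed 33-byte account index layout described above. The struct is re-declared via local constants so the example runs on its own; field offsets mirror accountIndex.encode/decode.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// 20-byte address + 1-byte length + three big-endian uint32 fields.
const addrLen, accountIndexSize = 20, 20 + 13

func encodeIndex(addr [addrLen]byte, length uint8, offset, storageOffset, storageSlots uint32) []byte {
	var buf [accountIndexSize]byte
	copy(buf[:], addr[:])
	buf[addrLen] = length
	binary.BigEndian.PutUint32(buf[addrLen+1:], offset)
	binary.BigEndian.PutUint32(buf[addrLen+5:], storageOffset)
	binary.BigEndian.PutUint32(buf[addrLen+9:], storageSlots)
	return buf[:]
}

func main() {
	blob := encodeIndex([addrLen]byte{0xde, 0xad}, 85, 1024, 7, 3)
	// Decode the fixed-offset fields back out to verify the round trip.
	fmt.Println(blob[addrLen],
		binary.BigEndian.Uint32(blob[addrLen+1:]),
		binary.BigEndian.Uint32(blob[addrLen+5:]),
		binary.BigEndian.Uint32(blob[addrLen+9:])) // 85 1024 7 3
}
```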
+// encode packs slot index into byte stream.
+func (i *slotIndex) encode() []byte {
+ var buf [slotIndexSize]byte
+ copy(buf[:common.HashLength], i.hash.Bytes())
+ buf[common.HashLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
+ return buf[:]
+}
+
+// decode unpacks slot index from the byte stream.
+func (i *slotIndex) decode(blob []byte) {
+ i.hash = common.BytesToHash(blob[:common.HashLength])
+ i.length = blob[common.HashLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
+}
+
+// meta describes the metadata of a state history object.
+type meta struct {
+ version uint8 // version tag of history object
+ parent common.Hash // prev-state root before the state transition
+ root common.Hash // post-state root after the state transition
+ block uint64 // associated block number
+ incomplete []common.Address // list of addresses whose storage sets are incomplete
+}
+
+// encode packs the meta object into byte stream.
+func (m *meta) encode() []byte {
+ buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength)
+ buf[0] = m.version
+ copy(buf[1:1+common.HashLength], m.parent.Bytes())
+ copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
+ binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
+ for i, h := range m.incomplete {
+ copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
+ }
+ return buf[:]
+}
+
+// decode unpacks the meta object from byte stream.
+func (m *meta) decode(blob []byte) error {
+ if len(blob) < 1 {
+ return errors.New("no version tag")
+ }
+ switch blob[0] {
+ case stateHistoryVersion:
+ if len(blob) < historyMetaSize {
+ return fmt.Errorf("invalid state history meta, len: %d", len(blob))
+ }
+ if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
+ return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
+ }
+ m.version = blob[0]
+ m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
+ m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
+ m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
+ for pos := historyMetaSize; pos < len(blob); {
+ m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
+ pos += common.AddressLength
+ }
+ return nil
+ default:
+ return fmt.Errorf("unknown version %d", blob[0])
+ }
+}
+
+// history represents a set of state changes belonging to a block along with
+// the metadata including the state roots involved in the state transition.
+// State history objects on disk are linked with each other by a unique id
+// (an 8-byte integer); the oldest state history object can be pruned on
+// demand in order to control the storage size.
+type history struct {
+ meta *meta // Meta data of history
+ accounts map[common.Address][]byte // Account data keyed by address
+ accountList []common.Address // Sorted account address list
+ storages map[common.Address]map[common.Hash][]byte // Storage data keyed by address and slot hash
+ storageList map[common.Address][]common.Hash // Sorted slot hash list
+}
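Editor's note: a hedged, in-package sketch of assembling a history object from a block's change set. All roots, addresses and values are placeholders, `exampleHistory` is a hypothetical helper, and `newHistory` is the constructor defined next.

```go
// Hypothetical in-package example: record the previous values of one
// account and one of its storage slots, then build the history object.
func exampleHistory() *history {
	addr := common.HexToAddress("0x000000000000000000000000000000000000dEaD")
	accounts := map[common.Address][]byte{
		addr: {0x01}, // prev account value; nil would mean "did not exist"
	}
	storages := map[common.Address]map[common.Hash][]byte{
		addr: {common.HexToHash("0x01"): {0x02}}, // prev value of one mutated slot
	}
	states := triestate.New(accounts, storages, nil) // no incomplete storage sets

	// parent/root/block are placeholders; in practice they come from the
	// diff layer being flushed (see writeHistory below).
	return newHistory(common.HexToHash("0x02"), common.HexToHash("0x01"), 42, states)
}
```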
+// newHistory constructs the state history object with the provided state change set.
+func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history {
+ var (
+ accountList []common.Address
+ storageList = make(map[common.Address][]common.Hash)
+ incomplete []common.Address
+ )
+ for addr := range states.Accounts {
+ accountList = append(accountList, addr)
+ }
+ slices.SortFunc(accountList, common.Address.Cmp)
+
+ for addr, slots := range states.Storages {
+ slist := make([]common.Hash, 0, len(slots))
+ for slotHash := range slots {
+ slist = append(slist, slotHash)
+ }
+ slices.SortFunc(slist, common.Hash.Cmp)
+ storageList[addr] = slist
+ }
+ for addr := range states.Incomplete {
+ incomplete = append(incomplete, addr)
+ }
+ slices.SortFunc(incomplete, common.Address.Cmp)
+
+ return &history{
+ meta: &meta{
+ version: stateHistoryVersion,
+ parent: parent,
+ root: root,
+ block: block,
+ incomplete: incomplete,
+ },
+ accounts: states.Accounts,
+ accountList: accountList,
+ storages: states.Storages,
+ storageList: storageList,
+ }
+}
+
+// encode serializes the state history and returns four byte streams representing
+// the concatenated account/storage data and account/storage indexes, respectively.
+func (h *history) encode() ([]byte, []byte, []byte, []byte) {
+ var (
+ slotNumber uint32 // the number of processed slots
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+ )
+ for _, addr := range h.accountList {
+ accIndex := accountIndex{
+ address: addr,
+ length: uint8(len(h.accounts[addr])),
+ offset: uint32(len(accountData)),
+ }
+ slots, exist := h.storages[addr]
+ if exist {
+ // Encode storage slots in order
+ for _, slotHash := range h.storageList[addr] {
+ sIndex := slotIndex{
+ hash: slotHash,
+ length: uint8(len(slots[slotHash])),
+ offset: uint32(len(storageData)),
+ }
+ storageData = append(storageData, slots[slotHash]...)
+ storageIndexes = append(storageIndexes, sIndex.encode()...)
+ }
+ // Fill up the storage meta in account index
+ accIndex.storageOffset = slotNumber
+ accIndex.storageSlots = uint32(len(slots))
+ slotNumber += uint32(len(slots))
+ }
+ accountData = append(accountData, h.accounts[addr]...)
+ accountIndexes = append(accountIndexes, accIndex.encode()...)
+ }
+ return accountData, storageData, accountIndexes, storageIndexes
+}
+
+// decoder wraps the byte streams for decoding with extra meta fields.
+type decoder struct {
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+
+ lastAccount *common.Address // the address of last resolved account
+ lastAccountRead uint32 // the read-cursor position of account data
+ lastSlotIndexRead uint32 // the read-cursor position of storage slot index
+ lastSlotDataRead uint32 // the read-cursor position of storage slot data
+}
+
+// verify validates the provided byte streams for decoding state history. A few
+// checks will be performed to quickly detect data corruption. 
The byte stream
+// is regarded as corrupted if:
+//
+// - account indexes buffer is empty (an empty state set is invalid)
+// - account index/storage index buffer is not aligned
+//
+// note, these situations are allowed:
+//
+// - empty account data: all accounts were not present
+// - empty storage set: no slots are modified
+func (r *decoder) verify() error {
+ if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
+ return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
+ }
+ if len(r.storageIndexes)%slotIndexSize != 0 {
+ return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
+ }
+ return nil
+}
+
+// readAccount parses the account from the byte stream at the specified position.
+func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
+ // Decode account index from the index byte stream.
+ var index accountIndex
+ if (pos+1)*accountIndexSize > len(r.accountIndexes) {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
+
+ // Perform validation before parsing account data, ensure
+ // - accounts are sorted in order in the byte stream
+ // - account data is strictly encoded with no gap inside
+ // - account data is not out-of-slice
+ if r.lastAccount != nil { // zero address is possible
+ if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
+ return accountIndex{}, nil, errors.New("account is not in order")
+ }
+ }
+ if index.offset != r.lastAccountRead {
+ return accountIndex{}, nil, errors.New("account data buffer is gapped")
+ }
+ last := index.offset + uint32(index.length)
+ if uint32(len(r.accountData)) < last {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ data := r.accountData[index.offset:last]
+
+ r.lastAccount = &index.address
+ r.lastAccountRead = last
+
+ return index, data, nil
+}
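Editor's note: the fixed-size, lexicographically sorted index layout exists precisely so accounts can be located by binary search, as the format comment above promises. A standalone sketch of that lookup (not patch code; names and sizes are re-declared locally):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

const addrLen, indexSize = 20, 33 // 20-byte address + 13 bytes of offsets

// findAccount returns the position of the account index whose address
// equals target, or -1 if it is absent.
func findAccount(indexes []byte, target []byte) int {
	n := len(indexes) / indexSize
	// sort.Search finds the first index whose address is >= target.
	i := sort.Search(n, func(i int) bool {
		addr := indexes[i*indexSize : i*indexSize+addrLen]
		return bytes.Compare(addr, target) >= 0
	})
	if i < n && bytes.Equal(indexes[i*indexSize:i*indexSize+addrLen], target) {
		return i
	}
	return -1
}

func main() {
	// Two sorted dummy indexes: addresses 0x01... and 0x02..., zeroed offsets.
	indexes := make([]byte, 2*indexSize)
	indexes[0] = 0x01
	indexes[indexSize] = 0x02
	target := make([]byte, addrLen)
	target[0] = 0x02
	fmt.Println(findAccount(indexes, target)) // Output: 1
}
```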
+// readStorage parses the storage slots from the byte stream for the specified account.
+func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
+ var (
+ last common.Hash
+ list []common.Hash
+ storage = make(map[common.Hash][]byte)
+ )
+ for j := 0; j < int(accIndex.storageSlots); j++ {
+ var (
+ index slotIndex
+ start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
+ end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
+ )
+ // Perform validation before parsing storage slot data, ensure
+ // - slot index is not out-of-slice
+ // - slot data is not out-of-slice
+ // - slots are sorted in order in the byte stream
+ // - slot indexes are strictly encoded with no gap inside
+ // - slot data is strictly encoded with no gap inside
+ if start != r.lastSlotIndexRead {
+ return nil, nil, errors.New("storage index buffer is gapped")
+ }
+ if uint32(len(r.storageIndexes)) < end {
+ return nil, nil, errors.New("storage index buffer is corrupted")
+ }
+ index.decode(r.storageIndexes[start:end])
+
+ if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
+ return nil, nil, errors.New("storage slot is not in order")
+ }
+ if index.offset != r.lastSlotDataRead {
+ return nil, nil, errors.New("storage data buffer is gapped")
+ }
+ sEnd := index.offset + uint32(index.length)
+ if uint32(len(r.storageData)) < sEnd {
+ return nil, nil, errors.New("storage data buffer is corrupted")
+ }
+ storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
+ list = append(list, index.hash)
+
+ last = index.hash
+ r.lastSlotIndexRead = end
+ r.lastSlotDataRead = sEnd
+ }
+ return list, storage, nil
+}
+
+// decode deserializes the account and storage data from the provided byte stream.
+func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
+ var (
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ accountList []common.Address
+ storageList = make(map[common.Address][]common.Hash)
+
+ r = &decoder{
+ accountData: accountData,
+ storageData: storageData,
+ accountIndexes: accountIndexes,
+ storageIndexes: storageIndexes,
+ }
+ )
+ if err := r.verify(); err != nil {
+ return err
+ }
+ for i := 0; i < len(accountIndexes)/accountIndexSize; i++ {
+ // Resolve account first
+ accIndex, accData, err := r.readAccount(i)
+ if err != nil {
+ return err
+ }
+ accounts[accIndex.address] = accData
+ accountList = append(accountList, accIndex.address)
+
+ // Resolve storage slots
+ slotList, slotData, err := r.readStorage(accIndex)
+ if err != nil {
+ return err
+ }
+ if len(slotList) > 0 {
+ storageList[accIndex.address] = slotList
+ storages[accIndex.address] = slotData
+ }
+ }
+ h.accounts = accounts
+ h.accountList = accountList
+ h.storages = storages
+ h.storageList = storageList
+ return nil
+}
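Editor's note: a hedged sketch of the retention arithmetic enforced by writeHistory below. Once more than `limit` histories exist, the freezer tail is advanced to `stateID-limit` so that exactly `limit` histories remain; `retainedRange` is a hypothetical helper, not patch code.

```go
// retainedRange mirrors writeHistory's pruning: it reports which history
// ids survive after storing history `stateID` with a retention limit of
// `limit` (0 means "keep everything").
func retainedRange(stateID, limit uint64) (first, last uint64) {
	if limit == 0 || stateID <= limit {
		return 1, stateID // nothing pruned yet
	}
	// truncateFromTail(db, freezer, stateID-limit) drops ids <= stateID-limit.
	return stateID - limit + 1, stateID
}

// Example: retainedRange(1000, 128) == (873, 1000); histories 1..872 are pruned.
```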
+// readHistory reads and decodes the state history object by the given id.
+func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) {
+ blob := rawdb.ReadStateHistoryMeta(freezer, id)
+ if len(blob) == 0 {
+ return nil, fmt.Errorf("state history not found %d", id)
+ }
+ var m meta
+ if err := m.decode(blob); err != nil {
+ return nil, err
+ }
+ var (
+ dec = history{meta: &m}
+ accountData = rawdb.ReadStateAccountHistory(freezer, id)
+ storageData = rawdb.ReadStateStorageHistory(freezer, id)
+ accountIndexes = rawdb.ReadStateAccountIndex(freezer, id)
+ storageIndexes = rawdb.ReadStateStorageIndex(freezer, id)
+ )
+ if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+ return nil, err
+ }
+ return &dec, nil
+}
+
+// writeHistory writes the state history with the provided state set. After
+// storing the corresponding state history, it will also prune the stale
+// histories from the disk with the given threshold.
+func writeHistory(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error {
+ // Short circuit if the state set is not available.
+ if dl.states == nil {
+ return errors.New("state change set is not available")
+ }
+ var (
+ err error
+ n int
+ start = time.Now()
+ h = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states)
+ )
+ accountData, storageData, accountIndex, storageIndex := h.encode()
+ dataSize := common.StorageSize(len(accountData) + len(storageData))
+ indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))
+
+ // Write history data into the five freezer tables, respectively.
+ rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
+
+ // Prune stale state histories based on the config.
+ if limit != 0 && dl.stateID() > limit {
+ n, err = truncateFromTail(db, freezer, dl.stateID()-limit)
+ if err != nil {
+ return err
+ }
+ }
+ historyDataBytesMeter.Mark(int64(dataSize))
+ historyIndexBytesMeter.Mark(int64(indexSize))
+ historyBuildTimeMeter.UpdateSince(start)
+ log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
+
+// checkHistories retrieves a batch of meta objects in the specified range
+// and performs the callback on each item.
+func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error {
+ for count > 0 {
+ number := count
+ if number > 10000 {
+ number = 10000 // split the big read into small chunks
+ }
+ blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number)
+ if err != nil {
+ return err
+ }
+ for _, blob := range blobs {
+ var dec meta
+ if err := dec.decode(blob); err != nil {
+ return err
+ }
+ if err := check(&dec); err != nil {
+ return err
+ }
+ }
+ count -= uint64(len(blobs))
+ start += uint64(len(blobs))
+ }
+ return nil
+}
+
+// truncateFromHead removes the extra state histories from the head with the given
+// parameters. It returns the number of items removed from the head.
+func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) { + ohead, err := freezer.Ancients() + if err != nil { + return 0, err + } + if ohead <= nhead { + return 0, nil + } + // Load the meta objects in range [nhead+1, ohead] + blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead) + if err != nil { + return 0, err + } + batch := db.NewBatch() + for _, blob := range blobs { + var m meta + if err := m.decode(blob); err != nil { + return 0, err + } + rawdb.DeleteStateID(batch, m.root) + } + if err := batch.Write(); err != nil { + return 0, err + } + ohead, err = freezer.TruncateHead(nhead) + if err != nil { + return 0, err + } + return int(ohead - nhead), nil +} + +// truncateFromTail removes the extra state histories from the tail with the given +// parameters. It returns the number of items removed from the tail. +func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) { + otail, err := freezer.Tail() + if err != nil { + return 0, err + } + if otail >= ntail { + return 0, nil + } + // Load the meta objects in range [otail+1, ntail] + blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail) + if err != nil { + return 0, err + } + batch := db.NewBatch() + for _, blob := range blobs { + var m meta + if err := m.decode(blob); err != nil { + return 0, err + } + rawdb.DeleteStateID(batch, m.root) + } + if err := batch.Write(); err != nil { + return 0, err + } + otail, err = freezer.TruncateTail(ntail) + if err != nil { + return 0, err + } + return int(ntail - otail), nil +} diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go new file mode 100644 index 000000000..677103e2b --- /dev/null +++ b/trie/triedb/pathdb/history_test.go @@ -0,0 +1,290 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package pathdb + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/testutil" + "github.com/ethereum/go-ethereum/trie/triestate" +) + +// randomStateSet generates a random state change set. 
+func randomStateSet(n int) *triestate.Set {
+ var (
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ )
+ for i := 0; i < n; i++ {
+ addr := testutil.RandomAddress()
+ storages[addr] = make(map[common.Hash][]byte)
+ for j := 0; j < 3; j++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ storages[addr][testutil.RandomHash()] = v
+ }
+ account := generateAccount(types.EmptyRootHash)
+ accounts[addr] = types.SlimAccountRLP(account)
+ }
+ return triestate.New(accounts, storages, nil)
+}
+
+func makeHistory() *history {
+ return newHistory(testutil.RandomHash(), types.EmptyRootHash, 0, randomStateSet(3))
+}
+
+func makeHistories(n int) []*history {
+ var (
+ parent = types.EmptyRootHash
+ result []*history
+ )
+ for i := 0; i < n; i++ {
+ root := testutil.RandomHash()
+ h := newHistory(root, parent, uint64(i), randomStateSet(3))
+ parent = root
+ result = append(result, h)
+ }
+ return result
+}
+
+func TestEncodeDecodeHistory(t *testing.T) {
+ var (
+ m meta
+ dec history
+ obj = makeHistory()
+ )
+ // check if the metadata can be correctly encoded/decoded
+ blob := obj.meta.encode()
+ if err := m.decode(blob); err != nil {
+ t.Fatalf("Failed to decode %v", err)
+ }
+ if !reflect.DeepEqual(&m, obj.meta) {
+ t.Fatal("meta is mismatched")
+ }
+
+ // check if the account/storage data can be correctly encoded/decoded
+ accountData, storageData, accountIndexes, storageIndexes := obj.encode()
+ if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+ t.Fatalf("Failed to decode, err: %v", err)
+ }
+ if !compareSet(dec.accounts, obj.accounts) {
+ t.Fatal("account data is mismatched")
+ }
+ if !compareStorages(dec.storages, obj.storages) {
+ t.Fatal("storage data is mismatched")
+ }
+ if !compareList(dec.accountList, obj.accountList) {
+ t.Fatal("account list is mismatched")
+ }
+ if !compareStorageList(dec.storageList, obj.storageList) {
+ t.Fatal("storage list is mismatched")
+ }
+}
+
+func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, id uint64, root common.Hash, exist bool) {
+ blob := rawdb.ReadStateHistoryMeta(freezer, id)
+ if exist && len(blob) == 0 {
+ t.Fatalf("Failed to load trie history, %d", id)
+ }
+ if !exist && len(blob) != 0 {
+ t.Fatalf("Unexpected trie history, %d", id)
+ }
+ if exist && rawdb.ReadStateID(db, root) == nil {
+ t.Fatalf("Root->ID mapping is not found, %d", id)
+ }
+ if !exist && rawdb.ReadStateID(db, root) != nil {
+ t.Fatalf("Unexpected root->ID mapping, %d", id)
+ }
+}
+
+func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, from, to uint64, roots []common.Hash, exist bool) {
+ for i, j := from, 0; i <= to; i, j = i+1, j+1 {
+ checkHistory(t, db, freezer, i, roots[j], exist)
+ }
+}
+
+func TestTruncateHeadHistory(t *testing.T) {
+ var (
+ roots []common.Hash
+ hs = makeHistories(10)
+ db = rawdb.NewMemoryDatabase()
+ freezer, _ = openFreezer(t.TempDir(), false)
+ )
+ defer freezer.Close()
+
+ for i := 0; i < len(hs); i++ {
+ accountData, storageData, accountIndex, storageIndex := hs[i].encode()
+ rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
+ roots = append(roots, hs[i].meta.root)
+ }
+ for size := len(hs); size > 0; size-- {
+ pruned, err := truncateFromHead(db, freezer, uint64(size-1))
+ if err != nil {
+ t.Fatalf("Failed to truncate from 
head %v", err) + } + if pruned != 1 { + t.Error("Unexpected pruned items", "want", 1, "got", pruned) + } + checkHistoriesInRange(t, db, freezer, uint64(size), uint64(10), roots[size-1:], false) + checkHistoriesInRange(t, db, freezer, uint64(1), uint64(size-1), roots[:size-1], true) + } +} + +func TestTruncateTailHistory(t *testing.T) { + var ( + roots []common.Hash + hs = makeHistories(10) + db = rawdb.NewMemoryDatabase() + freezer, _ = openFreezer(t.TempDir(), false) + ) + defer freezer.Close() + + for i := 0; i < len(hs); i++ { + accountData, storageData, accountIndex, storageIndex := hs[i].encode() + rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1)) + roots = append(roots, hs[i].meta.root) + } + for newTail := 1; newTail < len(hs); newTail++ { + pruned, _ := truncateFromTail(db, freezer, uint64(newTail)) + if pruned != 1 { + t.Error("Unexpected pruned items", "want", 1, "got", pruned) + } + checkHistoriesInRange(t, db, freezer, uint64(1), uint64(newTail), roots[:newTail], false) + checkHistoriesInRange(t, db, freezer, uint64(newTail+1), uint64(10), roots[newTail:], true) + } +} + +func TestTruncateTailHistories(t *testing.T) { + var cases = []struct { + limit uint64 + expPruned int + maxPruned uint64 + minUnpruned uint64 + empty bool + }{ + { + 1, 9, 9, 10, false, + }, + { + 0, 10, 10, 0 /* no meaning */, true, + }, + { + 10, 0, 0, 1, false, + }, + } + for i, c := range cases { + var ( + roots []common.Hash + hs = makeHistories(10) + db = rawdb.NewMemoryDatabase() + freezer, _ = openFreezer(t.TempDir()+fmt.Sprintf("%d", i), false) + ) + defer freezer.Close() + + for i := 0; i < len(hs); i++ { + accountData, storageData, accountIndex, storageIndex := hs[i].encode() + rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1)) + roots = append(roots, hs[i].meta.root) + } + pruned, _ := truncateFromTail(db, freezer, uint64(10)-c.limit) + if pruned != c.expPruned { + t.Error("Unexpected pruned items", "want", c.expPruned, "got", pruned) + } + if c.empty { + checkHistoriesInRange(t, db, freezer, uint64(1), uint64(10), roots, false) + } else { + tail := 10 - int(c.limit) + checkHistoriesInRange(t, db, freezer, uint64(1), c.maxPruned, roots[:tail], false) + checkHistoriesInRange(t, db, freezer, c.minUnpruned, uint64(10), roots[tail:], true) + } + } +} + +// openFreezer initializes the freezer instance for storing state histories. 
+func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) {
+ return rawdb.NewStateFreezer(datadir, readOnly)
+}
+
+func compareSet[k comparable](a, b map[k][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for key, valA := range a {
+ valB, ok := b[key]
+ if !ok {
+ return false
+ }
+ if !bytes.Equal(valA, valB) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareList[k comparable](a, b []k) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for h, subA := range a {
+ subB, ok := b[h]
+ if !ok {
+ return false
+ }
+ if !compareSet(subA, subB) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareStorageList(a, b map[common.Address][]common.Hash) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for h, la := range a {
+ lb, ok := b[h]
+ if !ok {
+ return false
+ }
+ if !compareList(la, lb) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go new file mode 100644 index 000000000..ea90207f2 --- /dev/null +++ b/trie/triedb/pathdb/journal.go @@ -0,0 +1,387 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+var (
+ errMissJournal = errors.New("journal not found")
+ errMissVersion = errors.New("version not found")
+ errUnexpectedVersion = errors.New("unexpected journal version")
+ errMissDiskRoot = errors.New("disk layer root not found")
+ errUnmatchedJournal = errors.New("unmatched journal")
+)
+
+const journalVersion uint64 = 0
+
+// journalNode represents a trie node persisted in the journal.
+type journalNode struct {
+ Path []byte // Path of the node in the trie
+ Blob []byte // RLP-encoded trie node blob, nil means the node is deleted
+}
+
+// journalNodes represents a list of trie nodes belonging to a single account
+// or to the main account trie.
+type journalNodes struct {
+ Owner common.Hash
+ Nodes []journalNode
+}
+
+// journalAccounts represents a list of accounts belonging to the layer.
+type journalAccounts struct {
+ Addresses []common.Address
+ Accounts [][]byte
+}
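Editor's note: a hedged summary (not part of the patch) of the journal wire format implied by Database.Journal and the layer journal/load methods that follow; the type names refer to the definitions above and below.

```go
// The journal is a single RLP stream, written in this order (per the
// journal/loadJournal implementations in this file):
//
//	journalVersion                     // uint64 format version
//	diskroot                           // hash of the persisted state on disk
//	root, id, []journalNodes           // disk layer: root, state id, buffered nodes
//	root, block, []journalNodes,       // one group per diff layer, bottom-up,
//	journalAccounts, []journalStorage  // ending at the in-memory head layer
```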
+// journalStorage represents a list of storage slots belonging to an account.
+type journalStorage struct {
+ Incomplete bool
+ Account common.Address
+ Hashes []common.Hash
+ Slots [][]byte
+}
+
+// loadJournal tries to parse the layer journal from the disk.
+func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
+ journal := rawdb.ReadTrieJournal(db.diskdb)
+ if len(journal) == 0 {
+ return nil, errMissJournal
+ }
+ r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+ // First, resolve the first element as the journal version
+ version, err := r.Uint64()
+ if err != nil {
+ return nil, errMissVersion
+ }
+ if version != journalVersion {
+ return nil, fmt.Errorf("%w want %d got %d", errUnexpectedVersion, journalVersion, version)
+ }
+ // Second, resolve the disk layer root, ensuring it's continuous with
+ // the disk layer. Note that at this point the journal version is known
+ // to be correct, so everything else should resolve properly.
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ return nil, errMissDiskRoot
+ }
+ // The journal does not match the persistent state, discard it. This
+ // can happen when geth crashes without persisting the journal.
+ if !bytes.Equal(root.Bytes(), diskRoot.Bytes()) {
+ return nil, fmt.Errorf("%w want %x got %x", errUnmatchedJournal, root, diskRoot)
+ }
+ // Load the disk layer from the journal
+ base, err := db.loadDiskLayer(r)
+ if err != nil {
+ return nil, err
+ }
+ // Load all the diff layers from the journal
+ head, err := db.loadDiffLayer(base, r)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("Loaded layer journal", "diskroot", diskRoot, "diffhead", head.rootHash())
+ return head, nil
+}
+
+// loadLayers loads a pre-existing state layer backed by a key-value store.
+func (db *Database) loadLayers() layer {
+ // Retrieve the root node of the persistent state.
+ _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ root = types.TrieRootHash(root)
+
+ // Load the layers by resolving the journal
+ head, err := db.loadJournal(root)
+ if err == nil {
+ return head
+ }
+ // The journal does not match (or is missing from) the persistent
+ // state, discard it. Log the discarded journal, but avoid showing
+ // useless information when the db is created from scratch.
+ if !(root == types.EmptyRootHash && errors.Is(err, errMissJournal)) {
+ log.Info("Failed to load journal, discard it", "err", err)
+ }
+ // Return single layer with persistent state.
+ return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+}
+
+// loadDiskLayer reads the binary blob from the layer journal, reconstructing
+// a new disk layer from it.
+func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
+ // Resolve disk layer root
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ return nil, fmt.Errorf("load disk root: %v", err)
+ }
+ // Resolve the state id of the disk layer; it can differ from the
+ // persistent id tracked on disk, the difference being the number
+ // of transitions aggregated in the disk layer.
+ var id uint64
+ if err := r.Decode(&id); err != nil {
+ return nil, fmt.Errorf("load state id: %v", err)
+ }
+ stored := rawdb.ReadPersistentStateID(db.diskdb)
+ if stored > id {
+ return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id)
+ }
+ // Resolve nodes cached in node buffer
+ var encoded []journalNodes
+ if err := r.Decode(&encoded); err != nil {
+ return nil, fmt.Errorf("load disk nodes: %v", err)
+ }
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ for _, entry := range encoded {
+ subset := make(map[string]*trienode.Node)
+ for _, n := range entry.Nodes {
+ if len(n.Blob) > 0 {
+ subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+ } else {
+ subset[string(n.Path)] = trienode.NewDeleted()
+ }
+ }
+ nodes[entry.Owner] = subset
+ }
+ // Calculate the internal state transitions by id difference.
+ base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored))
+ return base, nil
+}
+
+// loadDiffLayer reads the next sections of a layer journal, reconstructing a new
+// diff and verifying that it can be linked to the requested parent.
+func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
+ // Read the next diff journal entry
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ // The first read may fail with EOF, marking the end of the journal
+ if err == io.EOF {
+ return parent, nil
+ }
+ return nil, fmt.Errorf("load diff root: %v", err)
+ }
+ var block uint64
+ if err := r.Decode(&block); err != nil {
+ return nil, fmt.Errorf("load block number: %v", err)
+ }
+ // Read in-memory trie nodes from journal
+ var encoded []journalNodes
+ if err := r.Decode(&encoded); err != nil {
+ return nil, fmt.Errorf("load diff nodes: %v", err)
+ }
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ for _, entry := range encoded {
+ subset := make(map[string]*trienode.Node)
+ for _, n := range entry.Nodes {
+ if len(n.Blob) > 0 {
+ subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+ } else {
+ subset[string(n.Path)] = trienode.NewDeleted()
+ }
+ }
+ nodes[entry.Owner] = subset
+ }
+ // Read state changes from journal
+ var (
+ jaccounts journalAccounts
+ jstorages []journalStorage
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ incomplete = make(map[common.Address]struct{})
+ )
+ if err := r.Decode(&jaccounts); err != nil {
+ return nil, fmt.Errorf("load diff accounts: %v", err)
+ }
+ for i, addr := range jaccounts.Addresses {
+ accounts[addr] = jaccounts.Accounts[i]
+ }
+ if err := r.Decode(&jstorages); err != nil {
+ return nil, fmt.Errorf("load diff storages: %v", err)
+ }
+ for _, entry := range jstorages {
+ set := make(map[common.Hash][]byte)
+ for i, h := range entry.Hashes {
+ if len(entry.Slots[i]) > 0 {
+ set[h] = entry.Slots[i]
+ } else {
+ set[h] = nil
+ }
+ }
+ if entry.Incomplete {
+ incomplete[entry.Account] = struct{}{}
+ }
+ storages[entry.Account] = set
+ }
+ return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r)
+}
+
+// journal implements the layer interface, marshaling the unflushed trie nodes
+// along with the layer metadata into the provided byte buffer.
+func (dl *diskLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Ensure the layer didn't get stale
+ if dl.stale {
+ return errSnapshotStale
+ }
+ // Step one, write the disk root into the journal.
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ // Step two, write the corresponding state id into the journal
+ if err := rlp.Encode(w, dl.id); err != nil {
+ return err
+ }
+ // Step three, write all unwritten nodes into the journal
+ nodes := make([]journalNodes, 0, len(dl.buffer.nodes))
+ for owner, subset := range dl.buffer.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes))
+ return nil
+}
+
+// journal implements the layer interface, writing the memory layer contents
+// into a buffer to be stored in the database as the layer journal.
+func (dl *diffLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Journal the parent first
+ if err := dl.parent.journal(w); err != nil {
+ return err
+ }
+ // Everything below was journaled, persist this layer too
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ if err := rlp.Encode(w, dl.block); err != nil {
+ return err
+ }
+ // Write the accumulated trie nodes into buffer
+ nodes := make([]journalNodes, 0, len(dl.nodes))
+ for owner, subset := range dl.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ // Write the accumulated state changes into buffer
+ var jacct journalAccounts
+ for addr, account := range dl.states.Accounts {
+ jacct.Addresses = append(jacct.Addresses, addr)
+ jacct.Accounts = append(jacct.Accounts, account)
+ }
+ if err := rlp.Encode(w, jacct); err != nil {
+ return err
+ }
+ storage := make([]journalStorage, 0, len(dl.states.Storages))
+ for addr, slots := range dl.states.Storages {
+ entry := journalStorage{Account: addr}
+ if _, ok := dl.states.Incomplete[addr]; ok {
+ entry.Incomplete = true
+ }
+ for slotHash, slot := range slots {
+ entry.Hashes = append(entry.Hashes, slotHash)
+ entry.Slots = append(entry.Slots, slot)
+ }
+ storage = append(storage, entry)
+ }
+ if err := rlp.Encode(w, storage); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes))
+ return nil
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the layer without
+// flattening everything down (bad for reorgs). This function also marks the
+// database as read-only, preventing all subsequent mutations to disk.
+func (db *Database) Journal(root common.Hash) error {
+ // Retrieve the head layer to journal from.
+ l := db.tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ disk := db.tree.bottom()
+ if l, ok := l.(*diffLayer); ok {
+ log.Info("Persisting dirty state to disk", "head", l.block, "root", root, "layers", l.id-disk.id+disk.buffer.layers)
+ } else { // disk layer only on noop runs (likely) or deep reorgs (unlikely)
+ log.Info("Persisting dirty state to disk", "root", root, "layers", disk.buffer.layers)
+ }
+ start := time.Now()
+
+ // Run the journaling
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ // First, write out the journal metadata
+ journal := new(bytes.Buffer)
+ if err := rlp.Encode(journal, journalVersion); err != nil {
+ return err
+ }
+ // The stored state on disk might be empty; convert the root to the
+ // empty root in this case.
+ _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ diskroot = types.TrieRootHash(diskroot)
+
+ // Second, write out the state root on disk, ensuring all layers on
+ // top are continuous with it.
+ if err := rlp.Encode(journal, diskroot); err != nil {
+ return err
+ }
+ // Finally, write out the journal of each layer in reverse order.
+ if err := l.journal(journal); err != nil {
+ return err
+ }
+ // Store the journal into the database and return
+ rawdb.WriteTrieJournal(db.diskdb, journal.Bytes())
+
+ // Set the db in read-only mode to reject all following mutations
+ db.readOnly = true
+ log.Info("Persisted dirty state to disk", "size", common.StorageSize(journal.Len()), "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go new file mode 100644 index 000000000..d31477991 --- /dev/null +++ b/trie/triedb/pathdb/layertree.go @@ -0,0 +1,214 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+// layerTree is a group of state layers identified by the state root.
+// This structure defines a few basic operations for manipulating
+// state layers linked with each other in a tree structure. It's
+// thread-safe to use. However, callers need to ensure the thread-safety
+// of the referenced layer by themselves.
+type layerTree struct {
+ lock sync.RWMutex
+ layers map[common.Hash]layer
+}
+
+// newLayerTree constructs the layerTree with the given head layer.
+func newLayerTree(head layer) *layerTree {
+ tree := new(layerTree)
+ tree.reset(head)
+ return tree
+}
+
+// reset initializes the layerTree by the given head layer.
+// All the ancestors will be iterated over and linked into the tree.
+func (tree *layerTree) reset(head layer) {
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ var layers = make(map[common.Hash]layer)
+ for head != nil {
+ layers[head.rootHash()] = head
+ head = head.parentLayer()
+ }
+ tree.layers = layers
+}
+
+// get retrieves a layer belonging to the given state root.
+func (tree *layerTree) get(root common.Hash) layer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return tree.layers[types.TrieRootHash(root)]
+}
+
+// forEach iterates the stored layers inside and applies the
+// given callback on them.
+func (tree *layerTree) forEach(onLayer func(layer)) {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ for _, layer := range tree.layers {
+ onLayer(layer)
+ }
+}
+
+// len returns the number of layers cached.
+func (tree *layerTree) len() int {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return len(tree.layers)
+}
+
+// add inserts a new layer into the tree if it can be linked to an existing old parent.
+func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Reject noop updates to avoid self-loops. This is a special case that can
+ // happen for clique networks and proof-of-stake networks where empty blocks
+ // don't modify the state (0 block subsidy).
+ //
+ // Although we could silently ignore this internally, it should be the caller's
+ // responsibility to avoid even attempting to insert such a layer.
+ root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot)
+ if root == parentRoot {
+ return errors.New("layer cycle")
+ }
+ parent := tree.get(parentRoot)
+ if parent == nil {
+ return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot)
+ }
+ l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states)
+
+ tree.lock.Lock()
+ tree.layers[l.rootHash()] = l
+ tree.lock.Unlock()
+ return nil
+}
+
+// cap traverses downwards the diff tree until the number of allowed diff layers
+// is crossed. All diffs beyond the permitted number are flattened downwards.
+func (tree *layerTree) cap(root common.Hash, layers int) error {
+ // Retrieve the head layer to cap from
+ root = types.TrieRootHash(root)
+ l := tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ diff, ok := l.(*diffLayer)
+ if !ok {
+ return fmt.Errorf("triedb layer [%#x] is disk layer", root)
+ }
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ // If full commit was requested, flatten the diffs and merge onto disk
+ if layers == 0 {
+ base, err := diff.persist(true)
+ if err != nil {
+ return err
+ }
+ // Replace the entire layer tree with the flat base
+ tree.layers = map[common.Hash]layer{base.rootHash(): base}
+ return nil
+ }
+ // Dive until we run out of layers or reach the persistent database
+ for i := 0; i < layers-1; i++ {
+ // If we still have diff layers below, continue down
+ if parent, ok := diff.parentLayer().(*diffLayer); ok {
+ diff = parent
+ } else {
+ // Diff stack too shallow, return without modifications
+ return nil
+ }
+ }
+ // We're out of layers, flatten anything below, stopping if it's the disk or if
+ // the memory limit is not yet exceeded.
+ switch parent := diff.parentLayer().(type) {
+ case *diskLayer:
+ return nil
+
+ case *diffLayer:
+ // Hold the lock to prevent any read operations until the new
+ // parent is linked correctly.
+		diff.lock.Lock()
+
+		base, err := parent.persist(false)
+		if err != nil {
+			diff.lock.Unlock()
+			return err
+		}
+		tree.layers[base.rootHash()] = base
+		diff.parent = base
+
+		diff.lock.Unlock()
+
+	default:
+		panic(fmt.Sprintf("unknown data layer in triedb: %T", parent))
+	}
+	// Remove any layer that is stale or links into a stale layer
+	children := make(map[common.Hash][]common.Hash)
+	for root, layer := range tree.layers {
+		if dl, ok := layer.(*diffLayer); ok {
+			parent := dl.parentLayer().rootHash()
+			children[parent] = append(children[parent], root)
+		}
+	}
+	var remove func(root common.Hash)
+	remove = func(root common.Hash) {
+		delete(tree.layers, root)
+		for _, child := range children[root] {
+			remove(child)
+		}
+		delete(children, root)
+	}
+	for root, layer := range tree.layers {
+		if dl, ok := layer.(*diskLayer); ok && dl.isStale() {
+			remove(root)
+		}
+	}
+	return nil
+}
+
+// bottom returns the bottom-most disk layer in this tree.
+func (tree *layerTree) bottom() *diskLayer {
+	tree.lock.RLock()
+	defer tree.lock.RUnlock()
+
+	if len(tree.layers) == 0 {
+		return nil // Shouldn't happen, empty tree
+	}
+	// pick a random one as the entry point
+	var current layer
+	for _, layer := range tree.layers {
+		current = layer
+		break
+	}
+	for current.parentLayer() != nil {
+		current = current.parentLayer()
+	}
+	return current.(*diskLayer)
+}
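The stale-layer cleanup in cap above is a small recursive cascade: build a reverse child index, then drop any stale root together with its entire subtree. A self-contained sketch of the same pruning logic over plain strings (hypothetical layer IDs, not the pathdb types):

package main

import "fmt"

func main() {
	// parent links for hypothetical layers: child -> parent
	parents := map[string]string{"b": "a", "c": "b", "d": "a"}
	layers := map[string]bool{"a": true, "b": true, "c": true, "d": true}

	// Build the reverse index, mirroring the children map in cap().
	children := make(map[string][]string)
	for child, parent := range parents {
		children[parent] = append(children[parent], child)
	}
	// Recursively drop a layer and everything built on top of it.
	var remove func(root string)
	remove = func(root string) {
		delete(layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	remove("b") // pretend "b" became stale

	fmt.Println(layers) // map[a:true d:true]
}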
diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go
new file mode 100644
index 000000000..9e2b1dcbf
--- /dev/null
+++ b/trie/triedb/pathdb/metrics.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+	cleanHitMeter   = metrics.NewRegisteredMeter("pathdb/clean/hit", nil)
+	cleanMissMeter  = metrics.NewRegisteredMeter("pathdb/clean/miss", nil)
+	cleanReadMeter  = metrics.NewRegisteredMeter("pathdb/clean/read", nil)
+	cleanWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/write", nil)
+
+	dirtyHitMeter         = metrics.NewRegisteredMeter("pathdb/dirty/hit", nil)
+	dirtyMissMeter        = metrics.NewRegisteredMeter("pathdb/dirty/miss", nil)
+	dirtyReadMeter        = metrics.NewRegisteredMeter("pathdb/dirty/read", nil)
+	dirtyWriteMeter       = metrics.NewRegisteredMeter("pathdb/dirty/write", nil)
+	dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+	cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+	dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+	diskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+
+	commitTimeTimer  = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
+	commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
+	commitBytesMeter = metrics.NewRegisteredMeter("pathdb/commit/bytes", nil)
+
+	gcNodesMeter = metrics.NewRegisteredMeter("pathdb/gc/nodes", nil)
+	gcBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/bytes", nil)
+
+	diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil)
+	diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil)
+
+	historyBuildTimeMeter  = metrics.NewRegisteredTimer("pathdb/history/time", nil)
+	historyDataBytesMeter  = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
+	historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
+)
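These meters are no-ops unless metrics collection is switched on before they are constructed. A minimal sketch of the same registration pattern outside geth's internals (the meter name here is made up, and enabling metrics via the package variable is an assumption; normally the --metrics CLI flag drives it):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Enable collection before any meter is created, otherwise
	// NewRegisteredMeter hands back an inert nil-meter.
	metrics.Enabled = true

	// Register a meter in the default registry, as the pathdb vars above do.
	hits := metrics.NewRegisteredMeter("example/cache/hit", nil)
	hits.Mark(3)              // record three cache hits
	fmt.Println(hits.Count()) // 3
}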
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
new file mode 100644
index 000000000..67de225b0
--- /dev/null
+++ b/trie/triedb/pathdb/nodebuffer.go
@@ -0,0 +1,275 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/VictoriaMetrics/fastcache"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// nodebuffer is a collection of modified trie nodes to aggregate the disk
+// write. The content of the nodebuffer must be checked before diving into
+// disk (since it basically is not-yet-written data).
+type nodebuffer struct {
+	layers uint64                                    // The number of diff layers aggregated inside
+	size   uint64                                    // The size of aggregated writes
+	limit  uint64                                    // The maximum memory allowance in bytes
+	nodes  map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path
+}
+
+// newNodeBuffer initializes the node buffer with the provided nodes.
+func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer {
+	if nodes == nil {
+		nodes = make(map[common.Hash]map[string]*trienode.Node)
+	}
+	var size uint64
+	for _, subset := range nodes {
+		for path, n := range subset {
+			size += uint64(len(n.Blob) + len(path))
+		}
+	}
+	return &nodebuffer{
+		layers: layers,
+		nodes:  nodes,
+		size:   size,
+		limit:  uint64(limit),
+	}
+}
+
+// node retrieves the trie node with the given node info.
+func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) {
+	subset, ok := b.nodes[owner]
+	if !ok {
+		return nil, nil
+	}
+	n, ok := subset[string(path)]
+	if !ok {
+		return nil, nil
+	}
+	if n.Hash != hash {
+		dirtyFalseMeter.Mark(1)
+		log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+		return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+	}
+	return n, nil
+}
+
+// commit merges the dirty nodes into the nodebuffer. This operation won't take
+// ownership of the nodes map, which belongs to the bottom-most diff layer.
+// It will just hold the node references from the given map, which are safe to
+// copy.
+func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer {
+	var (
+		delta         int64
+		overwrite     int64
+		overwriteSize int64
+	)
+	for owner, subset := range nodes {
+		current, exist := b.nodes[owner]
+		if !exist {
+			// Allocate a new map for the subset instead of claiming it directly
+			// from the passed map to avoid potential concurrent map read/write.
+			// The nodes belonging to the original diff layer are still accessible
+			// even after merging, thus the ownership of the nodes map should still
+			// belong to the original layer and any mutation on it should be prevented.
+			current = make(map[string]*trienode.Node)
+			for path, n := range subset {
+				current[path] = n
+				delta += int64(len(n.Blob) + len(path))
+			}
+			b.nodes[owner] = current
+			continue
+		}
+		for path, n := range subset {
+			if orig, exist := current[path]; !exist {
+				delta += int64(len(n.Blob) + len(path))
+			} else {
+				delta += int64(len(n.Blob) - len(orig.Blob))
+				overwrite++
+				overwriteSize += int64(len(orig.Blob) + len(path))
+			}
+			current[path] = n
+		}
+		b.nodes[owner] = current
+	}
+	b.updateSize(delta)
+	b.layers++
+	gcNodesMeter.Mark(overwrite)
+	gcBytesMeter.Mark(overwriteSize)
+	return b
+}
+
+// revert is the reverse operation of commit. It also merges the provided nodes
+// into the nodebuffer; the difference is that the provided node set should
+// revert the changes made by the last state transition.
+func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+	// Short circuit if there is no embedded state transition to revert.
+	if b.layers == 0 {
+		return errStateUnrecoverable
+	}
+	b.layers--
+
+	// Reset the entire buffer if only a single transition is left.
+	if b.layers == 0 {
+		b.reset()
+		return nil
+	}
+	var delta int64
+	for owner, subset := range nodes {
+		current, ok := b.nodes[owner]
+		if !ok {
+			panic(fmt.Sprintf("non-existent subset (%x)", owner))
+		}
+		for path, n := range subset {
+			orig, ok := current[path]
+			if !ok {
+				// There is a special case in MPT where one child is removed
+				// from a fullNode which only has two children, and then a new
+				// child at a different position is immediately inserted into
+				// the fullNode. In this case, the clean child of the fullNode
+				// will also be marked as dirty because of node collapse and
+				// expansion.
+				//
+				// In case of database rollback, don't panic if this "clean"
+				// node occurs which is not present in the buffer.
+				var nhash common.Hash
+				if owner == (common.Hash{}) {
+					_, nhash = rawdb.ReadAccountTrieNode(db, []byte(path))
+				} else {
+					_, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+				}
+				// Ignore the clean node in the case described above.
+				if nhash == n.Hash {
+					continue
+				}
+				panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+			}
+			current[path] = n
+			delta += int64(len(n.Blob)) - int64(len(orig.Blob))
+		}
+	}
+	b.updateSize(delta)
+	return nil
+}
+
+// updateSize updates the total cache size by the given delta.
+func (b *nodebuffer) updateSize(delta int64) {
+	size := int64(b.size) + delta
+	if size >= 0 {
+		b.size = uint64(size)
+		return
+	}
+	s := b.size
+	b.size = 0
+	log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta))
+}
+
+// reset cleans up the disk cache.
+func (b *nodebuffer) reset() {
+	b.layers = 0
+	b.size = 0
+	b.nodes = make(map[common.Hash]map[string]*trienode.Node)
+}
+
+// empty reports whether the nodebuffer contains any state transitions.
+func (b *nodebuffer) empty() bool {
+	return b.layers == 0
+}
+
+// setSize sets the buffer size to the provided number, and invokes a flush
+// operation if the current memory usage exceeds the new limit.
+func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error {
+	b.limit = uint64(size)
+	return b.flush(db, clean, id, false)
+}
+
+// flush persists the in-memory dirty trie nodes to disk if the configured
+// memory threshold is reached. Note, all data must be written atomically.
+func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
+	if b.size <= b.limit && !force {
+		return nil
+	}
+	// Ensure the target state id is aligned with the internal counter.
+	head := rawdb.ReadPersistentStateID(db)
+	if head+b.layers != id {
+		return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
+	}
+	var (
+		start = time.Now()
+		batch = db.NewBatchWithSize(int(b.size))
+	)
+	nodes := writeNodes(batch, b.nodes, clean)
+	rawdb.WritePersistentStateID(batch, id)
+
+	// Flush all mutations in a single batch
+	size := batch.ValueSize()
+	if err := batch.Write(); err != nil {
+		return err
+	}
+	commitBytesMeter.Mark(int64(size))
+	commitNodesMeter.Mark(int64(nodes))
+	commitTimeTimer.UpdateSince(start)
+	log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
+	b.reset()
+	return nil
+}
+
+// writeNodes writes the trie nodes into the provided database batch.
+// Note this function will also inject all the newly written nodes
+// into the clean cache.
+func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
+	for owner, subset := range nodes {
+		for path, n := range subset {
+			if n.IsDeleted() {
+				if owner == (common.Hash{}) {
+					rawdb.DeleteAccountTrieNode(batch, []byte(path))
+				} else {
+					rawdb.DeleteStorageTrieNode(batch, owner, []byte(path))
+				}
+				if clean != nil {
+					clean.Del(cacheKey(owner, []byte(path)))
+				}
+			} else {
+				if owner == (common.Hash{}) {
+					rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
+				} else {
+					rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
+				}
+				if clean != nil {
+					clean.Set(cacheKey(owner, []byte(path)), n.Blob)
+				}
+			}
+		}
+		total += len(subset)
+	}
+	return total
+}
+
+// cacheKey constructs the unique key of the clean cache.
+func cacheKey(owner common.Hash, path []byte) []byte {
+	if owner == (common.Hash{}) {
+		return path
+	}
+	return append(owner.Bytes(), path...)
+}
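cacheKey namespaces the shared clean cache: account-trie nodes are keyed by path alone, while storage-trie nodes are prefixed with the 32-byte owner hash, so the two tries can never collide. A standalone illustration of the same keying scheme (the hash values are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// cacheKey mirrors the helper above: account-trie nodes (zero owner) use the
// raw path; storage-trie nodes get a 32-byte owner prefix.
func cacheKey(owner common.Hash, path []byte) []byte {
	if owner == (common.Hash{}) {
		return path
	}
	return append(owner.Bytes(), path...)
}

func main() {
	path := []byte{0x01, 0x02}
	fmt.Printf("account key: %x\n", cacheKey(common.Hash{}, path))            // 0102
	fmt.Printf("storage key: %x\n", cacheKey(common.HexToHash("0x01"), path)) // 00..01 0102
}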
diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go
new file mode 100644
index 000000000..d6fdacb42
--- /dev/null
+++ b/trie/triedb/pathdb/testutils.go
@@ -0,0 +1,156 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/trie/trienode"
+	"github.com/ethereum/go-ethereum/trie/triestate"
+	"golang.org/x/exp/slices"
+)
+
+// testHasher is a test utility for computing the root hash of a batch of state
+// elements. The hash algorithm is to sort all the elements in lexicographical
+// order, concatenate the keys and values in turn, and perform a hash calculation
+// on the concatenated bytes. Besides the root hash, a nodeset will be returned
+// once Commit is called, containing all the changes made to the hasher.
+type testHasher struct {
+	owner   common.Hash            // owner identifier
+	root    common.Hash            // original root
+	dirties map[common.Hash][]byte // dirty states
+	cleans  map[common.Hash][]byte // clean states
+}
+
+// newTestHasher constructs a hasher object with the provided states.
+func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][]byte) (*testHasher, error) {
+	if cleans == nil {
+		cleans = make(map[common.Hash][]byte)
+	}
+	if got, _ := hash(cleans); got != root {
+		return nil, fmt.Errorf("state root mismatched, want: %x, got: %x", root, got)
+	}
+	return &testHasher{
+		owner:   owner,
+		root:    root,
+		dirties: make(map[common.Hash][]byte),
+		cleans:  cleans,
+	}, nil
+}
+
+// Get returns the value for key stored in the trie.
+func (h *testHasher) Get(key []byte) ([]byte, error) {
+	hash := common.BytesToHash(key)
+	val, ok := h.dirties[hash]
+	if ok {
+		return val, nil
+	}
+	return h.cleans[hash], nil
+}
+
+// Update associates key with value in the trie.
+func (h *testHasher) Update(key, value []byte) error {
+	h.dirties[common.BytesToHash(key)] = common.CopyBytes(value)
+	return nil
+}
+
+// Delete removes any existing value for key from the trie.
+func (h *testHasher) Delete(key []byte) error {
+	h.dirties[common.BytesToHash(key)] = nil
+	return nil
+}
+
+// Commit computes the new hash of the states and returns the set with all
+// state changes.
+func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
+	var (
+		nodes = make(map[common.Hash][]byte)
+		set   = trienode.NewNodeSet(h.owner)
+	)
+	for hash, val := range h.cleans {
+		nodes[hash] = val
+	}
+	for hash, val := range h.dirties {
+		nodes[hash] = val
+		if bytes.Equal(val, h.cleans[hash]) {
+			continue
+		}
+		if len(val) == 0 {
+			set.AddNode(hash.Bytes(), trienode.NewDeleted())
+		} else {
+			set.AddNode(hash.Bytes(), trienode.New(crypto.Keccak256Hash(val), val))
+		}
+	}
+	root, blob := hash(nodes)
+
+	// Include the dirty root node as well.
+	if root != types.EmptyRootHash && root != h.root {
+		set.AddNode(nil, trienode.New(root, blob))
+	}
+	if root == types.EmptyRootHash && h.root != types.EmptyRootHash {
+		set.AddNode(nil, trienode.NewDeleted())
+	}
+	return root, set, nil
+}
+
+// hash performs the hash computation upon the provided states.
+func hash(states map[common.Hash][]byte) (common.Hash, []byte) {
+	var hs []common.Hash
+	for hash := range states {
+		hs = append(hs, hash)
+	}
+	slices.SortFunc(hs, common.Hash.Cmp)
+
+	var input []byte
+	for _, hash := range hs {
+		if len(states[hash]) == 0 {
+			continue
+		}
+		input = append(input, hash.Bytes()...)
+		input = append(input, states[hash]...)
+	}
+	if len(input) == 0 {
+		return types.EmptyRootHash, nil
+	}
+	return crypto.Keccak256Hash(input), input
+}
+
+type hashLoader struct {
+	accounts map[common.Hash][]byte
+	storages map[common.Hash]map[common.Hash][]byte
+}
+
+func newHashLoader(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *hashLoader {
+	return &hashLoader{
+		accounts: accounts,
+		storages: storages,
+	}
+}
+
+// OpenTrie opens the main account trie.
+func (l *hashLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+	return newTestHasher(common.Hash{}, root, l.accounts)
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (l *hashLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+	return newTestHasher(addrHash, root, l.storages[addrHash])
+}
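The test hash above is deliberately trivial: sort the keys, concatenate key||value pairs, Keccak-256 the result. A standalone sketch of the same computation on two made-up entries, using the same helpers this file imports:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"golang.org/x/exp/slices"
)

func main() {
	// Two hypothetical state elements, keyed by hash.
	states := map[common.Hash][]byte{
		common.HexToHash("0x02"): []byte("world"),
		common.HexToHash("0x01"): []byte("hello"),
	}
	// Sort the keys lexicographically, as the test hasher does.
	var keys []common.Hash
	for k := range states {
		keys = append(keys, k)
	}
	slices.SortFunc(keys, common.Hash.Cmp)

	// Concatenate key || value in order and hash the whole input.
	var input []byte
	for _, k := range keys {
		input = append(input, k.Bytes()...)
		input = append(input, states[k]...)
	}
	fmt.Println(crypto.Keccak256Hash(input))
}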
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 8152eab6c..98d5588b6 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -25,8 +25,8 @@ import (
 )
 
 // Node is a wrapper which contains the encoded blob of the trie node and its
-// unique hash identifier. It is general enough that can be used to represent
-// trie nodes corresponding to different trie implementations.
+// node hash. It is general enough that it can be used to represent trie nodes
+// corresponding to different trie implementations.
 type Node struct {
 	Hash common.Hash // Node hash, empty for deleted node
 	Blob []byte      // Encoded node blob, nil for the deleted node
@@ -42,35 +42,13 @@ func (n *Node) IsDeleted() bool {
 	return n.Hash == (common.Hash{})
 }
 
-// WithPrev wraps the Node with the previous node value attached.
-type WithPrev struct {
-	*Node
-	Prev []byte // Encoded original value, nil means it's non-existent
-}
-
-// Unwrap returns the internal Node object.
-func (n *WithPrev) Unwrap() *Node {
-	return n.Node
-}
-
-// Size returns the total memory size used by this node. It overloads
-// the function in Node by counting the size of previous value as well.
-func (n *WithPrev) Size() int {
-	return n.Node.Size() + len(n.Prev)
-}
-
 // New constructs a node with provided node information.
 func New(hash common.Hash, blob []byte) *Node {
 	return &Node{Hash: hash, Blob: blob}
 }
 
-// NewWithPrev constructs a node with provided node information.
-func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev {
-	return &WithPrev{
-		Node: New(hash, blob),
-		Prev: prev,
-	}
-}
+// NewDeleted constructs a node which is deleted.
+func NewDeleted() *Node { return New(common.Hash{}, nil) }
 
 // leaf represents a trie leaf node
 type leaf struct {
@@ -83,7 +61,7 @@ type leaf struct {
 type NodeSet struct {
 	Owner   common.Hash
 	Leaves  []*leaf
-	Nodes   map[string]*WithPrev
+	Nodes   map[string]*Node
 	updates int // the count of updated and inserted nodes
 	deletes int // the count of deleted nodes
 }
@@ -93,26 +71,26 @@ type NodeSet struct {
 func NewNodeSet(owner common.Hash) *NodeSet {
 	return &NodeSet{
 		Owner: owner,
-		Nodes: make(map[string]*WithPrev),
+		Nodes: make(map[string]*Node),
 	}
 }
 
 // ForEachWithOrder iterates the nodes with the order from bottom to top,
 // right to left, nodes with the longest path will be iterated first.
 func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
-	var paths sort.StringSlice
+	var paths []string
 	for path := range set.Nodes {
 		paths = append(paths, path)
 	}
-	// Bottom-up, longest path first
-	sort.Sort(sort.Reverse(paths))
+	// Bottom-up, the longest path first
+	sort.Sort(sort.Reverse(sort.StringSlice(paths)))
 	for _, path := range paths {
-		callback(path, set.Nodes[path].Unwrap())
+		callback(path, set.Nodes[path])
 	}
 }
 
 // AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
+func (set *NodeSet) AddNode(path []byte, n *Node) {
 	if n.IsDeleted() {
 		set.deletes += 1
 	} else {
@@ -121,6 +99,26 @@ func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
 	set.Nodes[string(path)] = n
 }
 
+// Merge adds a set of nodes into the set.
+func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
+	if set.Owner != owner {
+		return fmt.Errorf("nodesets belonging to different owners are not mergeable %x-%x", set.Owner, owner)
+	}
+	for path, node := range nodes {
+		prev, ok := set.Nodes[path]
+		if ok {
+			// overwrite happens, revoke the counter
+			if prev.IsDeleted() {
+				set.deletes -= 1
+			} else {
+				set.updates -= 1
+			}
+		}
+		set.AddNode([]byte(path), node)
+	}
+	return nil
+}
+
 // AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
 // we get rid of it?
 func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
@@ -150,16 +148,11 @@ func (set *NodeSet) Summary() string {
 		for path, n := range set.Nodes {
 			// Deletion
 			if n.IsDeleted() {
-				fmt.Fprintf(out, "  [-]: %x prev: %x\n", path, n.Prev)
+				fmt.Fprintf(out, "  [-]: %x\n", path)
 				continue
 			}
-			// Insertion
-			if len(n.Prev) == 0 {
-				fmt.Fprintf(out, "  [+]: %x -> %v\n", path, n.Hash)
-				continue
-			}
-			// Update
-			fmt.Fprintf(out, "  [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev)
+			// Insertion or update
+			fmt.Fprintf(out, "  [+/*]: %x -> %v \n", path, n.Hash)
 		}
 	}
 	for _, n := range set.Leaves {
@@ -188,10 +181,19 @@ func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
 // Merge merges the provided dirty nodes of a trie into the set. The assumption
 // is held that no duplicated set belonging to the same trie will be merged twice.
 func (set *MergedNodeSet) Merge(other *NodeSet) error {
-	_, present := set.Sets[other.Owner]
+	subset, present := set.Sets[other.Owner]
 	if present {
-		return fmt.Errorf("duplicate trie for owner %#x", other.Owner)
+		return subset.Merge(other.Owner, other.Nodes)
 	}
 	set.Sets[other.Owner] = other
 	return nil
 }
+
+// Flatten returns a two-dimensional map for internal nodes.
+func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
+	nodes := make(map[common.Hash]map[string]*Node)
+	for owner, set := range set.Sets {
+		nodes[owner] = set.Nodes
+	}
+	return nodes
+}
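With WithPrev gone, a NodeSet carries only the current node values, and MergedNodeSet.Merge now tolerates repeated sets for the same owner by merging them instead of failing. A small usage sketch built on the exported API as defined in this diff (the path and hashes are placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

func main() {
	owner := common.Hash{} // account trie owner

	// First commit touches path 0x01, a second one deletes it again.
	a := trienode.NewNodeSet(owner)
	a.AddNode([]byte{0x01}, trienode.New(common.HexToHash("0xaa"), []byte{0xde, 0xad}))

	b := trienode.NewNodeSet(owner)
	b.AddNode([]byte{0x01}, trienode.NewDeleted())

	// Merging a second set for the same owner now overwrites the entry
	// rather than returning the old "duplicate trie" error.
	merged := trienode.NewMergedNodeSet()
	if err := merged.Merge(a); err != nil {
		panic(err)
	}
	if err := merged.Merge(b); err != nil {
		panic(err)
	}
	flat := merged.Flatten()
	fmt.Println(flat[owner]["\x01"].IsDeleted()) // true
}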
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
new file mode 100644
index 000000000..4c47e9c39
--- /dev/null
+++ b/trie/triestate/state.go
@@ -0,0 +1,276 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package triestate
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie/trienode"
+	"golang.org/x/crypto/sha3"
+)
+
+// Trie is an Ethereum state trie, which can be implemented by an Ethereum
+// Merkle Patricia tree or a Verkle tree.
+type Trie interface {
+	// Get returns the value for key stored in the trie.
+	Get(key []byte) ([]byte, error)
+
+	// Update associates key with value in the trie.
+	Update(key, value []byte) error
+
+	// Delete removes any existing value for key from the trie.
+	Delete(key []byte) error
+
+	// Commit commits the trie and returns a set of dirty nodes generated
+	// along with the new root hash.
+	Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
+}
+
+// TrieLoader wraps functions to load tries.
+type TrieLoader interface {
+	// OpenTrie opens the main account trie.
+	OpenTrie(root common.Hash) (Trie, error)
+
+	// OpenStorageTrie opens the storage trie of an account.
+	OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
+}
+
+// Set represents a collection of mutated states during a state transition.
+// The value refers to the original content of the state before the transition
+// is made. Nil means that the state was not present previously.
+type Set struct {
+	Accounts   map[common.Address][]byte                 // Mutated account set, nil means the account was not present
+	Storages   map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
+	Incomplete map[common.Address]struct{}               // Indicator whether the storage is incomplete due to large deletion
+	size       common.StorageSize                        // Approximate size of set
+}
+
+// New constructs the state set with provided data.
+func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
+	return &Set{
+		Accounts:   accounts,
+		Storages:   storages,
+		Incomplete: incomplete,
+	}
+}
+
+// Size returns the approximate memory size occupied by the set.
+func (s *Set) Size() common.StorageSize {
+	if s.size != 0 {
+		return s.size
+	}
+	for _, account := range s.Accounts {
+		s.size += common.StorageSize(common.AddressLength + len(account))
+	}
+	for _, slots := range s.Storages {
+		for _, val := range slots {
+			s.size += common.StorageSize(common.HashLength + len(val))
+		}
+		s.size += common.StorageSize(common.AddressLength)
+	}
+	s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
+	return s.size
+}
+
+// context wraps all fields for executing state diffs.
+type context struct {
+	prevRoot    common.Hash
+	postRoot    common.Hash
+	accounts    map[common.Address][]byte
+	storages    map[common.Address]map[common.Hash][]byte
+	accountTrie Trie
+	nodes       *trienode.MergedNodeSet
+}
+
+// Apply traverses the provided state diffs, applies them to the associated
+// post-state and returns the generated dirty trie nodes. The state can be
+// loaded via the provided trie loader.
+func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
+	tr, err := loader.OpenTrie(postRoot)
+	if err != nil {
+		return nil, err
+	}
+	ctx := &context{
+		prevRoot:    prevRoot,
+		postRoot:    postRoot,
+		accounts:    accounts,
+		storages:    storages,
+		accountTrie: tr,
+		nodes:       trienode.NewMergedNodeSet(),
+	}
+	for addr, account := range accounts {
+		var err error
+		if len(account) == 0 {
+			err = deleteAccount(ctx, loader, addr)
+		} else {
+			err = updateAccount(ctx, loader, addr)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("failed to revert state, err: %w", err)
+		}
+	}
+	root, result, err := tr.Commit(false)
+	if err != nil {
+		return nil, err
+	}
+	if root != prevRoot {
+		return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
+	}
+	if err := ctx.nodes.Merge(result); err != nil {
+		return nil, err
+	}
+	return ctx.nodes.Flatten(), nil
+}
+
+// updateAccount handles an account that was present in the prev-state and may
+// or may not exist in the post-state. Apply the reverse diff and verify that
+// the storage root matches the one in the prev-state account.
+func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+	// The account was present in the prev-state, decode it from the
+	// 'slim-rlp' format bytes.
+	h := newHasher()
+	defer h.release()
+
+	addrHash := h.hash(addr.Bytes())
+	prev, err := types.FullAccount(ctx.accounts[addr])
+	if err != nil {
+		return err
+	}
+	// The account may or may not exist in the post-state, try to
+	// load it and decode it if it's found.
+	blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	post := types.NewEmptyStateAccount()
+	if len(blob) != 0 {
+		if err := rlp.DecodeBytes(blob, &post); err != nil {
+			return err
+		}
+	}
+	// Apply all storage changes into the post-state storage trie.
+	st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+	if err != nil {
+		return err
+	}
+	for key, val := range ctx.storages[addr] {
+		var err error
+		if len(val) == 0 {
+			err = st.Delete(key.Bytes())
+		} else {
+			err = st.Update(key.Bytes(), val)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	root, result, err := st.Commit(false)
+	if err != nil {
+		return err
+	}
+	if root != prev.Root {
+		return errors.New("failed to reset storage trie")
+	}
+	// The returned set can be nil if the storage trie is not changed
+	// at all.
+	if result != nil {
+		if err := ctx.nodes.Merge(result); err != nil {
+			return err
+		}
+	}
+	// Write the prev-state account into the main trie
+	full, err := rlp.EncodeToBytes(prev)
+	if err != nil {
+		return err
+	}
+	return ctx.accountTrie.Update(addrHash.Bytes(), full)
+}
+
+// deleteAccount handles an account that was not present in the prev-state but
+// is expected to exist in the post-state. Apply the reverse diff and verify
+// that the account and storage are wiped out correctly.
+func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+	// The account must exist in the post-state, load the account.
+	h := newHasher()
+	defer h.release()
+
+	addrHash := h.hash(addr.Bytes())
+	blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	if len(blob) == 0 {
+		return fmt.Errorf("account is non-existent %#x", addrHash)
+	}
+	var post types.StateAccount
+	if err := rlp.DecodeBytes(blob, &post); err != nil {
+		return err
+	}
+	st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+	if err != nil {
+		return err
+	}
+	for key, val := range ctx.storages[addr] {
+		if len(val) != 0 {
+			return errors.New("expect storage deletion")
+		}
+		if err := st.Delete(key.Bytes()); err != nil {
+			return err
+		}
+	}
+	root, result, err := st.Commit(false)
+	if err != nil {
+		return err
+	}
+	if root != types.EmptyRootHash {
+		return errors.New("failed to clear storage trie")
+	}
+	// The returned set can be nil if the storage trie is not changed
+	// at all.
+	if result != nil {
+		if err := ctx.nodes.Merge(result); err != nil {
+			return err
+		}
+	}
+	// Delete the post-state account from the main trie.
+	return ctx.accountTrie.Delete(addrHash.Bytes())
+}
+
+// hasher is used to compute the Keccak-256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+	New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+	return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+	return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+	hasherPool.Put(h)
+}
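The pooled hasher at the end avoids re-allocating a Keccak state for every account hash. The same pattern works anywhere a hash.Hash is reused; here is a standalone sketch using only the standard library and x/crypto, as a simplified stand-in for the crypto.KeccakState variant above:

package main

import (
	"fmt"
	"hash"
	"sync"

	"golang.org/x/crypto/sha3"
)

// pool hands out reusable Keccak-256 states instead of allocating one per call.
var pool = sync.Pool{
	New: func() interface{} { return sha3.NewLegacyKeccak256() },
}

func keccak(data []byte) []byte {
	h := pool.Get().(hash.Hash)
	defer pool.Put(h)

	h.Reset() // states come back dirty from the pool
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", keccak([]byte("hello")))
}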