forked from cerc-io/plugeth
all: remove ethash pow, only retain shims needed for consensus and tests (#27178)
* all: remove ethash pow, only retain shims needed for consensus and tests
* all: thank you linter
* all: disallow launching Geth in legacy PoW mode
* cmd/evm/internal/t8ntool: remove dangling ethash flag
parent ac3418def6
commit dde2da0efb
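The commit message above is the whole story: proof-of-work mining is removed and ethash survives only as a verification shim for the beacon engine and the test suite. A minimal sketch of what that retained shim is still used for (assuming ethash.NewFaker and the beacon.New wrapper keep their current names, which this diff does not itself show):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// ethash.NewFaker returns the PoW-free shim retained for consensus and
	// tests: it accepts headers without doing any real proof-of-work.
	engine := beacon.New(ethash.NewFaker())
	defer engine.Close()
	fmt.Printf("test engine ready: %T\n", engine)
}
```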
@@ -6,6 +6,7 @@
    "eip155Block": 0,
    "eip158Block": 0,
    "byzantiumBlock": 0,
    "terminalTotalDifficultyPassed": true,
    "ethash": {}
  },
  "nonce": "0xdeadbeefdeadbeef",
@@ -28,7 +28,6 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/consensus/clique"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
@@ -74,11 +73,9 @@ type bbInput struct {
	Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
	Clique *cliqueInput `json:"clique,omitempty"`

	Ethash bool `json:"-"`
	EthashDir string `json:"-"`
	PowMode ethash.Mode `json:"-"`
	Txs []*types.Transaction `json:"-"`
	Ommers []*types.Header `json:"-"`
	Ethash bool `json:"-"`
	Txs []*types.Transaction `json:"-"`
	Ommers []*types.Header `json:"-"`
}

type cliqueInput struct {
@@ -162,8 +159,6 @@ func (i *bbInput) ToBlock() *types.Block {
// SealBlock seals the given block using the configured engine.
func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) {
	switch {
	case i.Ethash:
		return i.sealEthash(block)
	case i.Clique != nil:
		return i.sealClique(block)
	default:
@@ -171,33 +166,6 @@ func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) {
	}
}

// sealEthash seals the given block using ethash.
func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) {
	if i.Header.Nonce != nil {
		return nil, NewError(ErrorConfig, fmt.Errorf("sealing with ethash will overwrite provided nonce"))
	}
	ethashConfig := ethash.Config{
		PowMode: i.PowMode,
		DatasetDir: i.EthashDir,
		CacheDir: i.EthashDir,
		DatasetsInMem: 1,
		DatasetsOnDisk: 2,
		CachesInMem: 2,
		CachesOnDisk: 3,
	}
	engine := ethash.New(ethashConfig, nil, true)
	defer engine.Close()
	// Use a buffered chan for results.
	// If the testmode is used, the sealer will return quickly, and complain
	// "Sealing result is not read by miner" if it cannot write the result.
	results := make(chan *types.Block, 1)
	if err := engine.Seal(nil, block, results, nil); err != nil {
		panic(fmt.Sprintf("failed to seal block: %v", err))
	}
	found := <-results
	return block.WithSeal(found.Header()), nil
}

// sealClique seals the given block using clique.
func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {
	// If any clique value overwrites an explicit header value, fail
@@ -267,28 +235,8 @@ func readInput(ctx *cli.Context) (*bbInput, error) {
		withdrawalsStr = ctx.String(InputWithdrawalsFlag.Name)
		txsStr = ctx.String(InputTxsRlpFlag.Name)
		cliqueStr = ctx.String(SealCliqueFlag.Name)
		ethashOn = ctx.Bool(SealEthashFlag.Name)
		ethashDir = ctx.String(SealEthashDirFlag.Name)
		ethashMode = ctx.String(SealEthashModeFlag.Name)
		inputData = &bbInput{}
	)
	if ethashOn && cliqueStr != "" {
		return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen"))
	}
	if ethashOn {
		inputData.Ethash = ethashOn
		inputData.EthashDir = ethashDir
		switch ethashMode {
		case "normal":
			inputData.PowMode = ethash.ModeNormal
		case "test":
			inputData.PowMode = ethash.ModeTest
		case "fake":
			inputData.PowMode = ethash.ModeFake
		default:
			return nil, NewError(ErrorConfig, fmt.Errorf("unknown pow mode: %s, supported modes: test, fake, normal", ethashMode))
		}
	}
	if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector {
		decoder := json.NewDecoder(os.Stdin)
		if err := decoder.Decode(inputData); err != nil {
@@ -125,19 +125,6 @@ var (
		Name: "seal.clique",
		Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
	}
	SealEthashFlag = &cli.BoolFlag{
		Name: "seal.ethash",
		Usage: "Seal block with ethash.",
	}
	SealEthashDirFlag = &cli.StringFlag{
		Name: "seal.ethash.dir",
		Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.",
	}
	SealEthashModeFlag = &cli.StringFlag{
		Name: "seal.ethash.mode",
		Usage: "Defines the type and amount of PoW verification an ethash engine makes.",
		Value: "normal",
	}
	RewardFlag = &cli.Int64Flag{
		Name: "state.reward",
		Usage: "Mining reward. Set to -1 to disable",
@@ -179,9 +179,6 @@ var blockBuilderCommand = &cli.Command{
		t8ntool.InputWithdrawalsFlag,
		t8ntool.InputTxsRlpFlag,
		t8ntool.SealCliqueFlag,
		t8ntool.SealEthashFlag,
		t8ntool.SealEthashDirFlag,
		t8ntool.SealEthashModeFlag,
		t8ntool.VerbosityFlag,
	},
}
@@ -41,7 +41,9 @@ var customGenesisTests = []struct {
			"mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"timestamp" : "0x00",
			"config" : {}
			"config" : {
				"terminalTotalDifficultyPassed": true
			}
		}`,
		query: "eth.getBlock(0).nonce",
		result: "0x0000000000001338",
@@ -59,9 +61,10 @@ var customGenesisTests = []struct {
			"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
			"timestamp" : "0x00",
			"config" : {
				"homesteadBlock" : 42,
				"daoForkBlock" : 141,
				"daoForkSupport" : true
				"homesteadBlock" : 42,
				"daoForkBlock" : 141,
				"daoForkSupport" : true,
				"terminalTotalDifficultyPassed" : true
			}
		}`,
		query: "eth.getBlock(0).nonce",
@@ -111,8 +114,10 @@ func TestCustomBackend(t *testing.T) {
		"mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
		"parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
		"timestamp" : "0x00",
		"config" : {}
	}`
		"config" : {
			"terminalTotalDifficultyPassed": true
		}
	}`
	type backendTest struct {
		initArgs []string
		initExpect string
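The pattern in the test-genesis hunks above is that every custom genesis now has to declare the merge as done, since legacy PoW launch is disallowed. A rough programmatic equivalent, assuming the JSON key maps to the TerminalTotalDifficultyPassed field on params.ChainConfig (an assumption, not something shown in this diff):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// A minimal custom genesis for a post-PoW geth: the config has to state
	// that the terminal total difficulty has been passed.
	genesis := &core.Genesis{
		Nonce:      0x1338,
		GasLimit:   5000,
		Difficulty: big.NewInt(1),
		Config: &params.ChainConfig{
			ChainID:                       big.NewInt(1337),
			HomesteadBlock:                big.NewInt(42),
			DAOForkBlock:                  big.NewInt(141),
			DAOForkSupport:                true,
			TerminalTotalDifficultyPassed: true,
		},
	}
	fmt.Println("merge declared passed:", genesis.Config.TerminalTotalDifficultyPassed)
}
```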
@@ -66,14 +66,6 @@ var (
		utils.SmartCardDaemonPathFlag,
		utils.OverrideCancun,
		utils.EnablePersonal,
		utils.EthashCacheDirFlag,
		utils.EthashCachesInMemoryFlag,
		utils.EthashCachesOnDiskFlag,
		utils.EthashCachesLockMmapFlag,
		utils.EthashDatasetDirFlag,
		utils.EthashDatasetsInMemoryFlag,
		utils.EthashDatasetsOnDiskFlag,
		utils.EthashDatasetsLockMmapFlag,
		utils.TxPoolLocalsFlag,
		utils.TxPoolNoLocalsFlag,
		utils.TxPoolJournalFlag,
@@ -120,14 +112,11 @@ var (
		utils.MaxPeersFlag,
		utils.MaxPendingPeersFlag,
		utils.MiningEnabledFlag,
		utils.MinerThreadsFlag,
		utils.MinerNotifyFlag,
		utils.MinerGasLimitFlag,
		utils.MinerGasPriceFlag,
		utils.MinerEtherbaseFlag,
		utils.MinerExtraDataFlag,
		utils.MinerRecommitIntervalFlag,
		utils.MinerNoVerifyFlag,
		utils.MinerNewPayloadTimeout,
		utils.NATFlag,
		utils.NoDiscoverFlag,
@@ -142,13 +131,11 @@ var (
		utils.VMEnableDebugFlag,
		utils.NetworkIdFlag,
		utils.EthStatsURLFlag,
		utils.FakePoWFlag,
		utils.NoCompactionFlag,
		utils.GpoBlocksFlag,
		utils.GpoPercentileFlag,
		utils.GpoMaxGasPriceFlag,
		utils.GpoIgnoreGasPriceFlag,
		utils.MinerNotifyFullFlag,
		configFileFlag,
	}, utils.NetworkFlags, utils.DatabasePathFlags)
@@ -224,8 +211,6 @@ func init() {
		attachCommand,
		javascriptCommand,
		// See misccmd.go:
		makecacheCommand,
		makedagCommand,
		versionCommand,
		versionCheckCommand,
		licenseCommand,
@@ -438,9 +423,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
		// Set the gas price to the limits from the CLI and start mining
		gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
		ethBackend.TxPool().SetGasPrice(gasprice)
		// start mining
		threads := ctx.Int(utils.MinerThreadsFlag.Name)
		if err := ethBackend.StartMining(threads); err != nil {
		if err := ethBackend.StartMining(); err != nil {
			utils.Fatalf("Failed to start mining: %v", err)
		}
	}
@@ -20,11 +20,8 @@ import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/internal/version"
	"github.com/ethereum/go-ethereum/params"
	"github.com/urfave/cli/v2"
@@ -41,30 +38,6 @@ var (
		Usage: "Version to check",
		Value: version.ClientName(clientIdentifier),
	}
	makecacheCommand = &cli.Command{
		Action: makecache,
		Name: "makecache",
		Usage: "Generate ethash verification cache (for testing)",
		ArgsUsage: "<blockNum> <outputDir>",
		Description: `
The makecache command generates an ethash cache in <outputDir>.

This command exists to support the system testing project.
Regular users do not need to execute it.
`,
	}
	makedagCommand = &cli.Command{
		Action: makedag,
		Name: "makedag",
		Usage: "Generate ethash mining DAG (for testing)",
		ArgsUsage: "<blockNum> <outputDir>",
		Description: `
The makedag command generates an ethash DAG in <outputDir>.

This command exists to support the system testing project.
Regular users do not need to execute it.
`,
	}
	versionCommand = &cli.Command{
		Action: printVersion,
		Name: "version",
@@ -96,36 +69,6 @@ and displays information about any security vulnerabilities that affect the curr
	}
)

// makecache generates an ethash verification cache into the provided folder.
func makecache(ctx *cli.Context) error {
	args := ctx.Args().Slice()
	if len(args) != 2 {
		utils.Fatalf(`Usage: geth makecache <block number> <outputdir>`)
	}
	block, err := strconv.ParseUint(args[0], 0, 64)
	if err != nil {
		utils.Fatalf("Invalid block number: %v", err)
	}
	ethash.MakeCache(block, args[1])

	return nil
}

// makedag generates an ethash mining DAG into the provided folder.
func makedag(ctx *cli.Context) error {
	args := ctx.Args().Slice()
	if len(args) != 2 {
		utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`)
	}
	block, err := strconv.ParseUint(args[0], 0, 64)
	if err != nil {
		utils.Fatalf("Invalid block number: %v", err)
	}
	ethash.MakeDataset(block, args[1])

	return nil
}

func printVersion(ctx *cli.Context) error {
	git, _ := version.VCS()
@@ -39,7 +39,6 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/fdlimit"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/txpool"
@@ -325,54 +324,6 @@ var (
		Usage: "Enables serving light clients before syncing",
		Category: flags.LightCategory,
	}

	// Ethash settings
	EthashCacheDirFlag = &flags.DirectoryFlag{
		Name: "ethash.cachedir",
		Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
		Category: flags.EthashCategory,
	}
	EthashCachesInMemoryFlag = &cli.IntFlag{
		Name: "ethash.cachesinmem",
		Usage: "Number of recent ethash caches to keep in memory (16MB each)",
		Value: ethconfig.Defaults.Ethash.CachesInMem,
		Category: flags.EthashCategory,
	}
	EthashCachesOnDiskFlag = &cli.IntFlag{
		Name: "ethash.cachesondisk",
		Usage: "Number of recent ethash caches to keep on disk (16MB each)",
		Value: ethconfig.Defaults.Ethash.CachesOnDisk,
		Category: flags.EthashCategory,
	}
	EthashCachesLockMmapFlag = &cli.BoolFlag{
		Name: "ethash.cacheslockmmap",
		Usage: "Lock memory maps of recent ethash caches",
		Category: flags.EthashCategory,
	}
	EthashDatasetDirFlag = &flags.DirectoryFlag{
		Name: "ethash.dagdir",
		Usage: "Directory to store the ethash mining DAGs",
		Value: flags.DirectoryString(ethconfig.Defaults.Ethash.DatasetDir),
		Category: flags.EthashCategory,
	}
	EthashDatasetsInMemoryFlag = &cli.IntFlag{
		Name: "ethash.dagsinmem",
		Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
		Value: ethconfig.Defaults.Ethash.DatasetsInMem,
		Category: flags.EthashCategory,
	}
	EthashDatasetsOnDiskFlag = &cli.IntFlag{
		Name: "ethash.dagsondisk",
		Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
		Value: ethconfig.Defaults.Ethash.DatasetsOnDisk,
		Category: flags.EthashCategory,
	}
	EthashDatasetsLockMmapFlag = &cli.BoolFlag{
		Name: "ethash.dagslockmmap",
		Usage: "Lock memory maps for recent ethash mining DAGs",
		Category: flags.EthashCategory,
	}

	// Transaction pool settings
	TxPoolLocalsFlag = &cli.StringFlag{
		Name: "txpool.locals",
@@ -510,22 +461,6 @@ var (
		Usage: "Enable mining",
		Category: flags.MinerCategory,
	}
	MinerThreadsFlag = &cli.IntFlag{
		Name: "miner.threads",
		Usage: "Number of CPU threads to use for mining",
		Value: 0,
		Category: flags.MinerCategory,
	}
	MinerNotifyFlag = &cli.StringFlag{
		Name: "miner.notify",
		Usage: "Comma separated HTTP URL list to notify of new work packages",
		Category: flags.MinerCategory,
	}
	MinerNotifyFullFlag = &cli.BoolFlag{
		Name: "miner.notify.full",
		Usage: "Notify with pending block headers instead of work packages",
		Category: flags.MinerCategory,
	}
	MinerGasLimitFlag = &cli.Uint64Flag{
		Name: "miner.gaslimit",
		Usage: "Target gas ceiling for mined blocks",
@@ -554,11 +489,6 @@ var (
		Value: ethconfig.Defaults.Miner.Recommit,
		Category: flags.MinerCategory,
	}
	MinerNoVerifyFlag = &cli.BoolFlag{
		Name: "miner.noverify",
		Usage: "Disable remote sealing verification",
		Category: flags.MinerCategory,
	}
	MinerNewPayloadTimeout = &cli.DurationFlag{
		Name: "miner.newpayload-timeout",
		Usage: "Specify the maximum time allowance for creating a new payload",
@@ -648,11 +578,6 @@ var (
		Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)",
		Category: flags.MetricsCategory,
	}
	FakePoWFlag = &cli.BoolFlag{
		Name: "fakepow",
		Usage: "Disables proof-of-work verification",
		Category: flags.LoggingCategory,
	}
	NoCompactionFlag = &cli.BoolFlag{
		Name: "nocompaction",
		Usage: "Disables db compaction after import",
@@ -1604,38 +1529,7 @@ func setTxPool(ctx *cli.Context, cfg *txpool.Config) {
	}
}

func setEthash(ctx *cli.Context, cfg *ethconfig.Config) {
	if ctx.IsSet(EthashCacheDirFlag.Name) {
		cfg.Ethash.CacheDir = ctx.String(EthashCacheDirFlag.Name)
	}
	if ctx.IsSet(EthashDatasetDirFlag.Name) {
		cfg.Ethash.DatasetDir = ctx.String(EthashDatasetDirFlag.Name)
	}
	if ctx.IsSet(EthashCachesInMemoryFlag.Name) {
		cfg.Ethash.CachesInMem = ctx.Int(EthashCachesInMemoryFlag.Name)
	}
	if ctx.IsSet(EthashCachesOnDiskFlag.Name) {
		cfg.Ethash.CachesOnDisk = ctx.Int(EthashCachesOnDiskFlag.Name)
	}
	if ctx.IsSet(EthashCachesLockMmapFlag.Name) {
		cfg.Ethash.CachesLockMmap = ctx.Bool(EthashCachesLockMmapFlag.Name)
	}
	if ctx.IsSet(EthashDatasetsInMemoryFlag.Name) {
		cfg.Ethash.DatasetsInMem = ctx.Int(EthashDatasetsInMemoryFlag.Name)
	}
	if ctx.IsSet(EthashDatasetsOnDiskFlag.Name) {
		cfg.Ethash.DatasetsOnDisk = ctx.Int(EthashDatasetsOnDiskFlag.Name)
	}
	if ctx.IsSet(EthashDatasetsLockMmapFlag.Name) {
		cfg.Ethash.DatasetsLockMmap = ctx.Bool(EthashDatasetsLockMmapFlag.Name)
	}
}

func setMiner(ctx *cli.Context, cfg *miner.Config) {
	if ctx.IsSet(MinerNotifyFlag.Name) {
		cfg.Notify = strings.Split(ctx.String(MinerNotifyFlag.Name), ",")
	}
	cfg.NotifyFull = ctx.Bool(MinerNotifyFullFlag.Name)
	if ctx.IsSet(MinerExtraDataFlag.Name) {
		cfg.ExtraData = []byte(ctx.String(MinerExtraDataFlag.Name))
	}
@@ -1648,9 +1542,6 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
	if ctx.IsSet(MinerRecommitIntervalFlag.Name) {
		cfg.Recommit = ctx.Duration(MinerRecommitIntervalFlag.Name)
	}
	if ctx.IsSet(MinerNoVerifyFlag.Name) {
		cfg.Noverify = ctx.Bool(MinerNoVerifyFlag.Name)
	}
	if ctx.IsSet(MinerNewPayloadTimeout.Name) {
		cfg.NewPayloadTimeout = ctx.Duration(MinerNewPayloadTimeout.Name)
	}
@@ -1741,7 +1632,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
	setEtherbase(ctx, cfg)
	setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
	setTxPool(ctx, &cfg.TxPool)
	setEthash(ctx, cfg)
	setMiner(ctx, &cfg.Miner)
	setRequiredBlocks(ctx, cfg)
	setLes(ctx, cfg)
@@ -2226,15 +2116,14 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
		gspec = MakeGenesis(ctx)
		chainDb = MakeChainDatabase(ctx, stack, readonly)
	)
	cliqueConfig, err := core.LoadCliqueConfig(chainDb, gspec)
	config, err := core.LoadChainConfig(chainDb, gspec)
	if err != nil {
		Fatalf("%v", err)
	}
	ethashConfig := ethconfig.Defaults.Ethash
	if ctx.Bool(FakePoWFlag.Name) {
		ethashConfig.PowMode = ethash.ModeFake
	engine, err := ethconfig.CreateConsensusEngine(config, chainDb)
	if err != nil {
		Fatalf("%v", err)
	}
	engine := ethconfig.CreateConsensusEngine(stack, &ethashConfig, cliqueConfig, nil, false, chainDb)
	if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
		Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
	}
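The hunk above shows the new construction path: engine selection is driven purely by the chain config and the database, with no ethash or clique options threaded through. A minimal sketch of calling it, assuming the two-argument CreateConsensusEngine shown here takes the chain config and the chain database and returns (engine, error):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// With PoW gone, the engine is derived from the chain config alone:
	// roughly, a clique section selects clique, everything else gets the
	// ethash consensus shim, both wrapped by the beacon engine.
	db := rawdb.NewMemoryDatabase()
	engine, err := ethconfig.CreateConsensusEngine(params.MainnetChainConfig, db)
	if err != nil {
		panic(err)
	}
	defer engine.Close()
	fmt.Printf("engine type: %T\n", engine)
}
```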
@@ -78,13 +78,13 @@ func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {

// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine.
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
	reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
	if err != nil {
		return err
	}
	if !reached {
		return beacon.ethone.VerifyHeader(chain, header, seal)
		return beacon.ethone.VerifyHeader(chain, header)
	}
	// Short circuit if the parent is not known
	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
@@ -149,13 +149,13 @@ func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
// VerifyHeaders expect the headers to be ordered and continuous.
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
	preHeaders, postHeaders, err := beacon.splitHeaders(chain, headers)
	if err != nil {
		return make(chan struct{}), errOut(len(headers), err)
	}
	if len(postHeaders) == 0 {
		return beacon.ethone.VerifyHeaders(chain, headers, seals)
		return beacon.ethone.VerifyHeaders(chain, headers)
	}
	if len(preHeaders) == 0 {
		return beacon.verifyHeaders(chain, headers, nil)
@@ -171,7 +171,7 @@ func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
		old, new, out = 0, len(preHeaders), 0
		errors = make([]error, len(headers))
		done = make([]bool, len(headers))
		oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders, seals[:len(preHeaders)])
		oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders)
		newDone, newResult = beacon.verifyHeaders(chain, postHeaders, preHeaders[len(preHeaders)-1])
	)
	// Collect the results
@@ -214,14 +214,14 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) {
}

// VerifyHeader checks whether a header conforms to the consensus rules.
func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
	return c.verifyHeader(chain, header, nil)
}

// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
// method returns a quit channel to abort the operations and a results channel to
// retrieve the async verifications (the order is that of the input slice).
func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
	abort := make(chan struct{})
	results := make(chan error, len(headers))
@@ -66,15 +66,14 @@ type Engine interface {
	Author(header *types.Header) (common.Address, error)

	// VerifyHeader checks whether a header conforms to the consensus rules of a
	// given engine. Verifying the seal may be done optionally here, or explicitly
	// via the VerifySeal method.
	VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error
	// given engine.
	VerifyHeader(chain ChainHeaderReader, header *types.Header) error

	// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
	// concurrently. The method returns a quit channel to abort the operations and
	// a results channel to retrieve the async verifications (the order is that of
	// the input slice).
	VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
	VerifyHeaders(chain ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error)

	// VerifyUncles verifies that the given block's uncles conform to the consensus
	// rules of a given engine.
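With the seal parameters dropped from the Engine interface above, callers simply pass the chain reader and the ordered headers. A minimal sketch of driving batch verification through the new two-argument signature (using the ethash test shim as the engine is an assumption for illustration, not something this hunk shows):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
)

// verifyBatch exercises the post-change VerifyHeaders signature: no seal
// flags, just the chain reader and the batch of headers to check in order.
func verifyBatch(engine consensus.Engine, chain consensus.ChainHeaderReader, headers []*types.Header) error {
	abort, results := engine.VerifyHeaders(chain, headers)
	defer close(abort)
	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	engine := ethash.NewFaker()
	fmt.Printf("using %T with the two-argument VerifyHeaders\n", engine)
	_ = verifyBatch // wiring up a real ChainHeaderReader is out of scope for this sketch
}
```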
File diff suppressed because it is too large
@@ -1,815 +0,0 @@
|
||||
"a6c57197524ec9978d1bdf65633721ea2ccc25626dcb5e7f5e090b00e413c10a6d20b45fb8e98c22928de6dda184e856c86792c7cd09d38e4333a76882d363f1" +
|
||||
"7f4d773ba104b2d04fd81027da087258fb175bfa8005c035a4719bac5b9630ae57889fb3b52a0fd47ec4060137b0f95fa5d5684172d07ca91e91eaf20dbfdea8" +
|
||||
"a3e23937f33d8774f30c7e8e5d4b2d5371e5ea5e8d290970904c4c1ff33baf675ed79599653808f652ec4fd0088877f7dd7973023ccc8377d1ada2b80c07d077" +
|
||||
"d7208686354f511925a3514c9e93c13525353b3d9528ab678e3e783c290ead88c2c3d6230bd4cb3bf79fce6dc3e95bfebda41e5d994e61ab083d73408ff6b627" +
|
||||
"6996a263d2920170fff6869c2311441837a2fc190bee104328591b402defa38b421b972b01d020bd20b1b6a6ae884b23eb829fdf032a81d4f199a87ef125d4cc" +
|
||||
"8662e24deb93700980e6ebc6882bcbaaa0283492e81f81e76bbe2ce18df4fb665436310658918ee217b5da262f1a1adbd59eb3c555cfebb12280058c75b5b33f" +
|
||||
"8aa8c2d7cebf12ce46c5f49ecec5a865a9f0b65476793884f0021f8731b1bd288f55dfa1665776b2aee1007bcaa6d92a76a2ba9925bcfa68db7cc727b2a07ebc" +
|
||||
"e24c0314c96ee4d6164c699e585461388dd73476a1e0519d92f51b64eb2842a7b17bb55d512d52da802df63206ee926f6a6a8c32de7b30e7cd3f23e37e0fd82a" +
|
||||
"556323736ecd9de77494a2f8702463f40fb837c2a99270b9050b0cbbc2c305a32380ff5fa94bf9c101c667f36293c12ff9aaf6e0a810b75230caf915135cbe6e" +
|
||||
"63ffb2a0e8632d32f72a65aa965fc556e10ddf6d5e40be919066eebda09d581a32156e1675300f52c8b355e88696fc2a67dd8e350a6e902e082af28a9809ba11" +
|
||||
"ae0a5fd9c6627fb808d757147e5d59cffd9c45874478ab226e72909ccba6592a54391d072c7eb0221f1ff7be9924b9d037e4f8c31e94fdc814a8c4cc7ad4c9f6" +
|
||||
"eacd5af66dd76bb6222b2fd3ea50a828fc3a91ef8b084214bfdcca56348517be18ca472166dd7f18c8e444e3641486e7dada626ced8710fc73a2b09b6e9395b0" +
|
||||
"31ee2c48c9183851357d230204c911b345457de602824273193b795fc21e90a0c1cdaaba36787424b23ce73e2116947f143f9641d39a4c07c2e40e02f3bd7c68" +
|
||||
"6899fd57e3eb23c6f5615c9dbc279fca0d4218bc79d928e70018533a85b4646bdc78015149b4d41d77ec7b46900e7fd5250116ce978f825569bd887bf3fd0365" +
|
||||
"e1259a7514116fdcdd6da3ffdf432bbe8e59b9bca9222c5dca1eaa61caf29b8461ddced6f312838fe490f742db696fadddd19bab8de6bedaade878be07aca4ac" +
|
||||
"76d69b81a6890e66dccd702720c3bd5601c6abdab95fbe4ccde6e35385b75e1977d5085ace928adfa382ea2890889017b9c4c81d9ba4629771f84cced6280db7" +
|
||||
"a6cd83ff9375ffb0a75a6bebba9a209f048788ba39127c1036e4bd0aad9be40754fd75295611e455909a818a3541af32eae98df7222353a4405da0e7be9f1cf1" +
|
||||
"bcb823fdea7976a810e8a3c7bf93fd947f961a344a93aa1ba99bf2df48ec82769d8c08e7b14191050d5706a9467c9122f34e27f060dd4d6e936c414c4e551b9e" +
|
||||
"5d6b5b58347ed0012a8a323f41b43bf5e960b2806de59da85b998affdb490fbc965d569114223db3ca65df69a617f6808bea23017327ddaf32990070aaf5f444" +
|
||||
"a9db44a57b5c92bc27bc71c5f8a2b6929edfed8e182bf5942564ef045c75448450eb1a4e4e09a1875e8a4a74f229879ccb7a2f2cd0359abd91a782c2ec1f68bb" +
|
||||
"40ce0a63bcc014b198adc222fc957eec0483f5b93f0db91b7ab3b3e3c59841dae057eec97abb55fc42b2de124946e66ed2a7fe8cd047cb79051b55f82594ab45" +
|
||||
"711c92364f932a5fd274fe184c85583ac7cfaf258c57e296f9c18fd181308565315e27272cbad3b21cb4490ca0e5f675365caac42f299e22d8a74ca51a9d0883" +
|
||||
"bb376804e234502db66067e7a434d38c3dc075346e888e4558b1745d00458df99db02f0e4c37702fb0989387f74d002a924790a6b7351ee0f41684bef079be26" +
|
||||
"ee9d70b560c006cff4b08b9578afb5019c21ab9418ae4ecaa7a1cfed2d880a06a03c2c7711b601a2cb3d9193e1577b4f1d0e614c0be1f69205fa6524fee80bf1" +
|
||||
"e1f1906b50e75fea2d19b8a83071a460145e1730581e5e9538888d2e797ee3cbd3b31399ecb4d6244ee44362493802b142ea397c2e7a3c1bc86f0ea0546a38ce" +
|
||||
"574e1df0c27ad8a28dca70f659ae6a1369d8b3aee7d0dd24ea370cc2bc1b1a4dc9f63911b63e60fe4ed8552bbca10e01c82d11b0ddf748d234b4aa3b31683c09" +
|
||||
"86358fad680dd2178902beadc4646b3eceff572631ff9e6b64d8a622ad9f0308cc46b7d422ce792fe5573e9b9480e1ae9fedf31edaaac3b08c5a2c6c27d6b033" +
|
||||
"6b92a3da7b838bb0a2916ebb6ee72bf33a7fa70630491f49c67031ce4b9dec2315088d0a5cbf7473fd121e0ef5f4e92d43114014c9f8c6e671086a446eb1f66f" +
|
||||
"70f0cb0c668998ed96ee0ad2687946681fe40dc46cbd170e0cabb6f6216be61221f171fb2f4273f58c10d5c4eccafd1df62fdc8ac2c5c8f6d5eb637b71fa89e3" +
|
||||
"f8347343f89667a4450c5c6e3791034d2dc3a593185b55bb95d8f8f2984ef981e4b692c1383ace4cb2c4adb80d5d582857b5d0e3ccb12845a59587b47232ad20" +
|
||||
"926efa78e05a57b136e284401c516296b6b194d541ec165d11ef94f166cb52f45145d745ff3deaf643b5c45573ed0e69a22f0e0c9c5367f6d1398105516729b6" +
|
||||
"3f2edf1b01ad9633edf80efbba6555d4253fd99b45a36f16ba98ea0bb0d80533aed806544a084a398a692f698c78b9bcfc9b4d3328dd869dbf7085893b8dafe1" +
|
||||
"59e0517c2f6a3ddfd4a8c670072b30c96b90f81fcc08523e4fd75919752bfa52a1db7c374debbd83ca8e311b98b0d8275bedad215847fa8984cb50e108f69550" +
|
||||
"f6517d719dbb5dade1d3c283357e14b6d9e85d61e33813546517e1262a7cbac814d79cf6b7e21b0fbbee9b6314f02b2d4e6995d2231670884c78cfd86a2acbcf" +
|
||||
"0a178ba64de2f13f022e22b9b968ceefaff374aff02b703811f3dc541a69a21d6e1c5d1aca48889b125ff1274e65413f61e42bb0194b60b65a3454c696033cc8" +
|
||||
"e3cc3613a52850296a0154bde0e2a81b7a6489bfce505dbe1bc44e0e1052f678297bb19cbdf7970bfa5268af8a54eee004063f9894118ddce7fae8bbba53a428" +
|
||||
"678cec8a2bf6cca2b1a5f4a2e95562437e4eae41167f39d2a150f7c46c1eb6da35587f7234d870b16ed91c7db548ddc99967381b4bb4f3a2b0a5ebcbc7ab1b06" +
|
||||
"7d5418768eaf7d526ca116e239ceb3ab393c45f3b32b713c11fa8e5ae8d7611e6008fa08d1305d5655315a72c85a04dc853da3e8ea9d46674194e15226f126c1" +
|
||||
"a233c26dd7d3cc04ae572320d0c351911b6fcdbc0b8450523e96022f4b964d4e479b6cb1c40a6d27699b57ed2952ef7fb3172c69ba7beb8c8633a01070ac4344" +
|
||||
"d4c401acf8ca7fcafeaa59e1d4c2ff251bb67dbe10a862103df1b416fd2097fe412b3da9d4095b48ea094fc3bbf2ca41e4452af3a179580e3bc11a7d97ba050c" +
|
||||
"ae1d6b8075da267b3ae2231a1fcfce0c976402f34963c007d4f85d9ca95646990d1bb09691ceea3b34211dc58409e052d0acf8c2296a7e8fb52d7c673506d89b" +
|
||||
"847c369daec7909da8657e8976f59f2ef4c8a049b46fdf30d6d223ea4175e4d60e469bcea0eb3bdcaa4d6024f2b43cf6de9bb40efa9172381291079dd82ac5b2" +
|
||||
"39f2051a7f1aabcb8d50333e8c160de19ce1c76ced8056a0724ac630dd45ec4e315437391158a633c179a3d1f364b475454fd29c1e539077b9d5f7227786a5d9" +
|
||||
"d8ec78e5615c25e517e9fcaf07611b85dff2c131a1b11a901a431a601854e5cb627cf7b8b0c5e66ad6cf60b7ffd6c6441f9ecd58f414013279e9de533d8d797b" +
|
||||
"936cfdbfcc78342b7ab586457541df5f3b7d1873612df200896e2929f44c6fe10d24f7e6dbe52b6c42c0a40c947c1cbda2a41437079eebcdc29716d80957c159" +
|
||||
"627e7366cc16df92cdedfa9f52edc848335f1c7152652fe24661a469fd503393229063c7ab20d8d895139a2f580dceac9f6dd4c4ac652b1d60c2b8a1b0b2923a" +
|
||||
"86c31742807549e6d523b3c88d31e8534b9e05a6c63f6c8fb8a1eb4dad733d92e7071e410f0087ca3074f4a2df511ae89cefe9ed09a8df603d61f23754e43cc2" +
|
||||
"e42bcdcbe58b0587aba9a62f32c7507116fdc8a9db3d65d6c0097c8f473eb7f3bcd11ab81d5b636b0812b7982201a63d0b8d40f2c38f65ffd953668eaa5751b3" +
|
||||
"dab7f038aa7adbcd1f1102267c9d55d43649f9b4f65f1851546c5a9ef2c7ef56e84b16f12641e9d5ddaa78ec778b5f113b2e06bad5821e1a5203b006a774e36f" +
|
||||
"56c9336d92c8cd8bddcf014b6d58c394e2a93554af6361fc1bbd13c359fed98bb5adfa4dd1266e2744e126e1bc029ab28fd68b648a2ab26ac23252171b298641" +
|
||||
"2621f2a8697a00ab3fdc1b3b04921390ee16d213601ab249a51830661051d34eb777f690fc2d8dfb8e0898567e388830bac8b0bc896f43003feadf34256a927e" +
|
||||
"b4d9293e32ca135351a19d1246cda30551c87de1e148ff5ea576b67e19e1a0389b88a5548b3b1a8cbee19eecc7de5c2333264c711d50d688a1c57eebc28dd6f3" +
|
||||
"3dc0e4cb857973c3d0f28683a6f3c09db9f54b8fabeca9e4f9b86d794ca55d6611858f0d48736adc10dd6763ed7199bad81369ab1f3de30f521d43382bcccb7b" +
|
||||
"be0178f716d5c3cb87488cebd7d9e2bbe671dfcf2512f1b815075777ea92a867f35e09ff0110e61db24423d0598eb6fd078dde0dc2b5d7f5e0bb6fee207da109" +
|
||||
"2e656b5c982866d5fe01e6db79809646559a6f2b9088e977789aac74435dc625b54296b25788bfbbda9bbb25247d428f5141b03172fa11f12339b91ca96c92e7" +
|
||||
"ea5a128c8046087dc7a7eba63e3bdb200565d8a103e7b3c292b088eb06aa27b43688c8516bbffcf123499574f00908ff43d66b79106cebaf16725f1dee600a29" +
|
||||
"7b3a3da878940867f9549e65c73ea798ca923b012fb8a7ef3e2ded1d2c4e85635219f627dc4feb90f884ae6436e7b44f9159f9889d8e194828e079cd2ee60a7a" +
|
||||
"6fbb6b8fc1f7355d7322709fabddd76e4283ddda3018b7882ad79b32bac133da415453eecd5bb1f0deb4f3b987a71a2f2e60194cde63a42b91b39bfe51b4aa8c" +
|
||||
"20952b601df11d170c65a7fe935915890849a367936e97bd242edf305eaf2f4f4fb9e5ee1464c51a899ba5cc69cf56731502c1b75d0d565b1dce15440b0de0f5" +
|
||||
"58bd4f810bf058af99c158a2be0dd02a01bb5317f55675f4d42c6766fc61271954b6988c33a84518bcedbac8de305946d060d19c4691c026953ebd680a4c9012" +
|
||||
"0e8bd54675d6c33cc86e65f5cd3c34cb1e6fd47784a64f39e95a1945b5c21df2b3288f963863b33366908b05c2bd499dd25c1b8e97329d7e435899afeaed174d" +
|
||||
"2a2471b6e8d6ad7a0b1b6a8b19fbd976362283e5abffcbd2cd310245092749b23e0d114e727622953487f373c833281a74a1b97742ca99e49cac14d9102e3680" +
|
||||
"404509889ace009c47d075ba9891e7f67b89aca3e213150f3c715cbab1869135601612d7dffda3cc104b6508f56eb8b7e7f379b21e1ce290ce5fb96f53e3a7eb" +
|
||||
"c7f7bddcbdfc266f23b775602d8d12527d30446cb4144df7fe3c2756e232a8ffca625d7b6ea2c8c0a92e6425ba67ab75160623c39f01fd96856b582e257a6930" +
|
||||
"224c6da90a6eac4249214c3b85aef52835d904a8a5e224d59eae0c80a33b3141ffb31a7d8e62833fa4c850fa6be135558fff5434777df45feed00316c475759f" +
|
||||
"ac6e014e9d3cf23e7322281ed75623ed69a81d6f05ee7de193f6b44ede4a94ced27aef5ab9056144593a836da80f5297875e7bd84d8ca6df95de8650b00b3528" +
|
||||
"123132f26aabf755d00450648e44f3beafa4dc746775958c6dd88bee825c29112a3af582bb2ebe628d70364fe9ad01b8a9961d5b71018690440151486114af1a" +
|
||||
"d85679bcd3eca510c6d6887e70e0d04b04fc2db5ab1eb21fff925b66f08f4fcbf31be3d743154056ba137727b63576e72f1756029c86bbcf9452fc6cfd89f3b5" +
|
||||
"9f243d84c410253ba7c9284191a0ed87b2513901a93606f1aeb736c90dfe40c0a343d45e9a992ea894b22ee5d49e0f7d55d9bddaa6c74bde8ca5839db67b77a9" +
|
||||
"ef740f9a47241f05e5dc1b9c95c459cc9db560b1db090daa3f4c6de46f695a158baaf357a1fc63ebc0d9db8144137ec4bd69c5af89cdf9cfa66e06bff6339d62" +
|
||||
"2c372fbe5a855d14fa7ff3726512f966e4da0556b29ca6d7517803f897d0e1911f9b46a291002a8320091aa7016cb7ac993e35c8b0f5aed3c94ff0b5dadd8b77" +
|
||||
"056d06d1bed59aaf7bca8516c3bba6b33e12df2e5ca4aa40664b3bf48c4dc2c57cfd74c765fe9f794f55b5df6ac6dd2b3592bbc71354c8dd9ae41b0a05e1c7c0" +
|
||||
"d3bd1a0ac6b671c48c01d4a0fec7a01ad11040f213461759f9e029c835ca1d22f9a661b69d72bc46e34b1be7ed85a21830fb87baa74d7ab145ac1647f5f2df68" +
|
||||
"671100d4d9e41082d3c81f3b5a6e603bb33fd56c1dbcbdce5e213c651da45d9d1dd7532d9a955202338387af6315137dc458fed62920a0e721aa7ff1660981c0" +
|
||||
"e4c3de0a4863f6f660a7c1b9745ea26036a25cfa37e1337ded405ebb0401d7041a7938800a97a032fcababcc06391a77a580b1a61de014db9d7e280ffa6b2381" +
|
||||
"ab6969ae5cfcca00a47ac2fa05be02aae7beb806d2afcc11dc0642d2a12ecde2d2926efd9fe790e1bee19f9114d22ca42f438ef656a1311e4931ab7fac93ed17" +
|
||||
"3f68ea0abed18cc2c8905bb2d599780690eabe4996e38872a3190fee361df9fecd5906f664106de4835f8fbb657366327871a2d38cbb671df04e0d14fe97e260" +
|
||||
"c42eb07bd1d70514913c7a64a51e405cc92e06845e5a78981fef9822fc79e9937ce0513138f6bbf247f5c457da708cf84e30d083b4ba48d2d43d70e7c31e9482" +
|
||||
"4472617910a3de4369217b4daf892c2c3250d1de0457e88b3bcb5c4568f9b26aa675c551a9a730fe9ea8145ce7f8e23ec825be9be3b9edd588c391295fe31ac5" +
|
||||
"bfc97d2e438ca9bf6551728b3be6d6c6ac064baca763e0eaa24f754f4bbc84a4377de45fb6a8f37150865df18749df1af4ea911b62f616dd4dd4b25b27c7b6fd" +
|
||||
"99d8c00ce8a53fde3ced091891e8daf43cade10086be046ee5607003de24101db49b1a4fb0ac270d05bab12583e263e903e94dab8bba7c785e40499ab01ff92b" +
|
||||
"b82c2e5342dce84881adedf77cab593f541e4c963f4f9ffc80a16bd4eb7f20ef4bf3f57abc7cbd86332d8be80f0794fc82767d13c71d8ee20468ee35c13308b0" +
|
||||
"dc29ebe8c6a81e02ee9a21807ff57e4d932edcaf59ae9e76f7cdad46b32f94a74982f0887d7083c90ff54058e873b10cec67fba1b717deba5356e170dec1a40d" +
|
||||
"36c57674ad8d43c5c98022b553fe060251b994271585f702de3e71fb1c8e36293dd44a4b99a1baf33f6205e9fbc9acdfe8cfdf007224f93a7104e7803454fdc0" +
|
||||
"9fc5a20be59f600ee734847257a5ad62c599a7fa836d1174a6291e61c1be4b310bd4d7b7cb9be976dbdfdd2b99340a9863c8c0e5009165d7097317e6c3a29cfc" +
|
||||
"dc84b19bc68f38694998f626567b80ce6699124b12bae4bb9e661c2484f5109517318341287e142a849d61d0d7b11d4996547e7325f28842dcaed26367f7a888" +
|
||||
"e58c24c857da2f48a9fb91c78cf351a23e82ae443223580a9fe15a6a778f6c13be66888219e3e15971170712b6c356520cc15e4e75167993b66e6f125799cd40" +
|
||||
"86c72588a85f68361f1c2f09e87f9a4de95ef9a3b92c3313664a706cb72916b96a9cb50771f6917ddcf696ce8d7f2525745fb6edc30bf3fdaad66ca5b013300a" +
|
||||
"7ec7cd274327b1b9cd931c068d8fa9fd6336d59f6ac79b84a24b34c47e408b3bcb8ead49428c123922e54bfcaec7e39c4d6ef79e5645a35f715d151e679ef5c6" +
|
||||
"6f86cd013fdaab978ee4e52eea5e2753e693271344a1f215e1c690de06f29c856c469ccb484d445bacb16694f4def1537cdb32260705e8a50fd65e98a24967a2" +
|
||||
"456af6cf90643638999389a35de6e192068fd2e2ec29aced58611560c792ea5c7fa37583ebd5452a8d94cbf1898937dd8aa6656047e6e03f84dfd0bded514a6c" +
|
||||
"b47ca71c2cf1e76f606c04374663712fd96925eecf0ac1c38392390c8cb095f39e1814252ded78b55ebeb9915dc5e2ec14fe99e3a075bd389ac601681f154286" +
|
||||
"885289e568a8646d94abc806b4637492e3a407cde582d42764eef0d56ab14b00e9aa1f64d8fdd533d4314145c8255c44d0c746af6da844d285eb044d57e8cafb" +
|
||||
"ab6c3b962e0177f11a839f4a5c0d2c2e8d5f76375ac115e0a89f460ea1be238f974a68e0693d15790117106c1a65ab5f7aa08e738aa888d5b56be39d2078837d" +
|
||||
"fb2357d86f5be85a9af41aed611b231495564493e46acc90c6a3e67d5b055320290aef508aa6a1896f19cb5633edc0fec023216726e50960a44d81e0614ce748" +
|
||||
"6ccfdaf620eaac0517e8cdeb1095d55f3a60d61dd27d967eb26128b84c9ea8418779e074cee8961c5dac811ce5ee8134d3910a47de7a1344293f5c64ae8f1b38" +
|
||||
"9d6c457dc74e7005c339394f5f24630f5e40cf270640d1e4c27cb6a74fb440f3203026acfcd31f39cd4844ede7e785290878fac8770f930e96c3edf61748dc6f" +
|
||||
"b7476832cf77ddfbe8eb8e12fd002038630301439ef8a7659bb10593a92cb84018e1ec78856f403e1eb9d6343aa0bbd77a63d776f1d12838f27f3cf6296ab0b3" +
|
||||
"b4436f0ec545a5a1e92a5351fb273b3ed56a40e5a5d25e0057f4077bfeba2e2d8cb17a553b157609b20bfb5cd2699af9936f50d823bb59a950a24b8fd15ed705" +
|
||||
"b1628663f0eb5b5c2b18f000ab039bc425ebafb2010e1a2264c38fa2bbd0f77e38eac8acd670565490fd60cac7fd28d988c8dc0658505dd98425f22c94647d44" +
|
||||
"5d0236b97ea58b3c71feee90be0055ce1fabda5ebfced9d9bf5efaeac8408c4b6bcdb39851cfe038d88ada5211de2f0f69e9e3c62453106c366cf0c40971c0e8" +
|
||||
"e8f2a790aa66999a0cb4cdb57a8c2d812e9e4a66df2f001a57e291864339257ec26c9bc2dc6cb2eb5c3301c167e1ed0387f9ce9f76c6759ebe5c68e8be378c42" +
|
||||
"e0350b344acbc8b40c95cee9e82bb43cef5e91a32a6be8a727d5fbe089321ede3abee4da6b9f41775d7e9abc36f6a5d26ab88ba32978b5ea0ad63f0ce8a772be" +
|
||||
"5aa51143bcd00d78bdcbd69beb652139ad658dc7ad242b2057eacee092aab4940d6ff993a8c7d8fbe93c08c93c45d5f3a01058f3c75c94be9da1a19a97754734" +
|
||||
"b713e1ad6b7cd472619ec1abd4cf42f50b0648661c2b8dbe8976037c094c7176090ba94618e1918db44f5d2c367a0c7f911132d9a8b2398b9417542c7ad99b53" +
|
||||
"a7ca48253bab8382a1a24d35b9b9818bda513f4b52fc576a71fa63e72aa8042ee1fc806c6fd3fc16e07ed2caf9f82bd3bb6b393b2708c051c24c2e05aaf72531" +
|
||||
"d865888db06f719314d6094b2c4f0718c151c88958d2d6c8a6f49464f81cc46709dde026f4e05325ea4ca2dddf9a79bf98bff3aa5eb412434f0b7457b4ed47ab" +
|
||||
"85a212e0c7720c78c961d56141bff0f964622d4d3984c1017de6f5846c72fae0c771a819ba6c111bd739fcf16f4b85f8101e7c3f0daefe753ec130a6f34c7697" +
|
||||
"4dc531f83715ecae28bf2e55111778ae42aef17fa95340584cfae3d4599af9dbd10211baf3aafa8ac8a07edf8243daffd6a6486b1e3be4b60711194261e2b646" +
|
||||
"e2667554cc0bb2fc07054b653231cede43154c9002890ca20b0ac81c4788847c6ecf7c174e528f36f8cfc53f3366fa9ce07b1843939cf6d318ed11f7ba6eb791" +
|
||||
"ce25e75cbe37d2ee3d45bea487d969de041011959c0fed4e6c86802a7485fad70059ece14a29b03d4df41677acf71419ee63f1101060ca5e4ca0ab2edd71fe77" +
|
||||
"46c6bd9f36bdbbf0a9956eaaf974f7bef982cd34881abd686fe77b536c85d042d77dadd00c5cb0130737e5318a025e6ae6af96ca28cbd41094d86a85765ff891" +
|
||||
"af825793910c406470cc61be5d9282911d2faf82abfb309598fce0101ca64abe3920701a958c20ac35927733466a23de809afa2bdf331f68c3ab0cfa08b0c549" +
|
||||
"a20e9b50dbe85d22d215d0e5fef854ba271a4c0f95e6abca19018bdd4a042721887418136b4a60cf291bf06ec47a5a4d2f9b29f988733c6bf6f65da5a95f8939" +
|
||||
"fe0f2bab0bdce98569a81f861014e532f6a995542db02b6bdf3169191d300fb0429c1cae1d2dd4d29e0b61751576e04b558d38d3afcce8326c2871e969c1492a" +
|
||||
"8391c0becec29edcf7f038a8093471763db9f13b97114acf7a979f5ba3bf6f990317890ee0705850fb97bfacf306a0ad621b2c3b633af01fc5aa059c0e22ed17" +
|
||||
"23584dde6cf140bd1d0087ca9090ca9f07d3b93c60938af8df976555455bafbd8cc986ba32fc3f15b5962dbb2d37b6ae55a7de0c0c6f2366be0278e26bf9a725" +
|
||||
"f61f2bcb545d66f79261783f7f03395f2a5d27e56af62a01ffcf778c3c686e244bd9b7e5029d1d40dd2250705c6825bf78e83730212640cb5ba54191b61fce33" +
|
||||
"ce6df7721b15662162b631d99e6431efd24ec35639c2b97f10374fa5b9e2ca4231f523195206fb9695ec7721c98d74f29533cf714866adae8edbe8ed2d0969c4" +
|
||||
"9ed36200c4b8b75131e6d1efa913106bb0759aa8255bd6a9dc2b00407358f4523486575b111676730094f46d0a7b95427df74f053c6611b4c465efa5310f760c" +
|
||||
"5ff081e841e5f90c2de35855d45a7f35ce73d7c7f9f61fbfa953398e042c3946aaa4b7a2094d95410b8a5ef76c8b57d49f77311192b3f4578f37bda1a426de7c" +
|
||||
"7cc54b5400bd16bb30cd8d1b7b42ff31c5e3759e3c9a7668174c02bc5a08f1bcc7e3ba145fa5f5c41e48877b41b0ef8fffd0f75c6547047c2e7b7c7e1aef2cda" +
|
||||
"c4a778adbf71257618b4eb3c6dbd8211f829c1d6373415b969cc48f33d586d2678e7c1b441364a9fe2bb426a33b2a132741fac547766d196df3505fdb17977de" +
|
||||
"7853cdcd8d9932eb9452620aa4921b4416f65055d77573b132a40795bf142815b655e670bf2c4464adb5d826a1744c8049d7a6cfbc8a4634e66eb32f0cb6fa17" +
|
||||
"ffa8925131c3a253101733406a2a3a0dc61ec3ca1448623b6295791d4e2d65d303f78038e15d0ef75d823759bcb4b277b51410c37d5efbbb2e3a9e0cd78a8475" +
|
||||
"05d44bb1fed7f72b1bf1a96ad0148e816d34c66b1b5faf172b8141ba007bf2e5dbbfee4b09ef66656ea3cde54f086040d14116aa7f3584ab6773f6091a2fbcee" +
|
||||
"f59d6ea115f88ef9fcb358c87c35caf7c1a6022e141a3c688beef17da5a619e733d854218b30d5edc39b933b19dedd6750acabc52234934b08f930b608a18008" +
|
||||
"838cb0fb73d4c78af0c468d9fa4fc5852135ae91ae00a99a6c603371d09b031ee37f87586cdc83897d8fd8ee2e07b9d0478a812d3f7eddca08860386e3ad9521" +
|
||||
"98d5fc04fd0aba4b3da6ab8bdd9eca8e0399a2012d6158ed75ced5f432a223449b4e3db3fd4b19c494a69e9f2670833f8a88f7b2873319e9495f03fc69b6d098" +
|
||||
"6006e3ffd8cdb9c1b98f72345848deea1b98ff6ef766f4398e642e5f2b217c1a87a608c1dc701bbb79d75a4433ca1d600061836888a220ee262124d145d371f6" +
|
||||
"576f04cf71701133787a97aaa615ca98138c2be1046604d885e2f274b0de8743af50ad5dfc4c3a09164448e102be577eecf77ffaec1724f91f00f908ff6af41e" +
|
||||
"57056dfa8f5dbcade85a66c10e524bae55922b4084407fb36ca8d6b7322f76a8139be9455a34440c719d0db8a36385efa48841170c8d35046407b586f5bbd169" +
|
||||
"7cbaf6819b663fb17d0f0ae89691a099a8ccf47fa61fb6dbb22b3298e5cf2465e4b93c49da70fa76924fdf29389194cc5c61cb4b3084d0851bc3018270d1a24c" +
|
||||
"b4b04e8af927d9fec9ea1c9ce18d4dbe61f7aac0ffd4e7c2e9729b49ed9874b883ec644864c6d9ad0422c4d89f87df1dfb2c96314b6a3e19afd21783f365445f" +
|
||||
"bef10562a26b48df42dc344ddd63fcc03220dbde98f1109cade221027c40f0f996f4beb29513c3979ba374c4c6a2c2dc6276ca6be66eecf1dcb245d6efe78aea" +
|
||||
"e49ece37f87894bef3c0cb1b993d974685564e2476c12c8d8f63a1aaf142fe34a6840be340b64f96d441f4537dff434ddce630101ed9f78e807881f6b7590697" +
|
||||
"bc97e60accd7a135d8915781f4fc22e437145154dad0a39e5e306c117b11deb10462ba74d58e81de7674ef0bcb20b38511991447f63ad906b11abd4ba88df3c9" +
|
||||
"e6931f87fece49f48543fed0439c88ad78f82aabb32dea03d030bdd76efef6b737daac2de2db1cce10e2ec74565b0a609606cbb6aa259ba88715229b8176c874" +
|
||||
"db3fc4f6db9f167e7b2d55b33a261f9eecb69a0d36ecf9ec4f8f9cee5b74bcdc5d77b02ada89f56259edeec0d9ea866ccf454b9abd29d5d21041179912a5c302" +
|
||||
"1862d850c3ff483e09479957df5bde03a29504b4a43e1fd40af2b8a2653a37cae89c8d917aecdec3959fecd32b7fd313a61e134abc15ad008aa993aba9629a5f" +
|
||||
"0af0ec713f742bee096e171729e70530b60f910ef83746a61580f0cc6d67723792c0e0e94775d5b1edf37864a50678d197bb34a97e84d7f764c0bfe05f4b2d0c" +
|
||||
"dc431d1f4410500dbe2758eb05bb6b19b154707c255a97cedc6aec1841f1817f6bcba0b9a9c1d3aebf747bec4423c71309fb8b4ada90dd9f7adbcffebbc905de" +
|
||||
"74ce531403df33457c4d0b970fea5df4f85732e3c33c5b8242b041141a8c51a62f0bb14dbe07b14d3f5ce646d76e87b258e9b62128f9c0c0a8014f2c5b3d3dbd" +
|
||||
"a3a77be6222419cd3fbbd3b842c46c099f142bcd36442961e8205ec5d7fd159befdbff12693953307026f1e06fd57b6447dd3cb52df466f0352cc46f27d1fc56" +
|
||||
"56e06f68ca2847d291421bc9e0af6bbcfb7b3ce07600827809506ba3f96f40ca22766f8cba32d4461488f6596082a52c11e9ac908922075a7b443c41e55b719d" +
|
||||
"9cac9fb587cf02432e1accf3cb7a16de0d5bc3a1c0aeff5a1795680b4551316e3d7b5a9bc63a09c6f75b0f00eb69fb6ef5130c1ec40c7a7d5d6ccce364b74f63" +
|
||||
"a836a4a711027e592d6a70e10e573cc6d730a0def4a7a2d4dfcf3b0aba37fa2060ae6935710191c023a0b8e123a67ee811ed43b5127a1c4cf82d52ad6c40fd66" +
|
||||
"1160f77dc320bbed349c8b6d08b2a7a6234a8dc88e4744b51d2d7c56e02f1c3bea9e6c2c3d5522ca02ec7e0b8160555eaf28797ed30b5c931a73562791f5f0a1" +
|
||||
"b7ce83824bae17de449cff41312bd441f34df62904f4a0265d6fb9b8a352895ac6f0025d6b2074570970b4e679c559d03ef40794708eae36567008d9c33f7fc3" +
|
||||
"5f8df7e901c27f408cc7fcc52631f1178695ea660d07df541e5a32721d145a32e8d32f06301b5073149e8798371fdb1a2daa5e1d02c24da07682a2cbacf5af55" +
|
||||
"64810e479e5966dc6bfe14b4472c42cb154e19f7b8659d42de5ac926224cc6b0d8f3fa797058fd6e21ea85146838c4612760d84e24341825b6931a6417327394" +
|
||||
"0154125254d4e11ac80e475a178605e851e1be39695cdc0781da241f232cedb32a04b1cc7352882fb635162ec3f5fc5004cfa7d03780753c14173ae7b12a71cd" +
|
||||
"b40d4835023a00a4803bdfb6916956ade9f687af567e6f29981120d306084ad061ca1585f0e9497fdb27f9d54cbac8fecff176145114ebfc17e3f346b246ab91" +
|
||||
"094dac0e684a708b45dcea16378fc29683dd033310068339b13d995dce77a50f9ee9cb4cf564566b05ce352a21159ad21e720e05ce6069a5ef4e9fe8ffd28516" +
|
||||
"8356a0b80c4d1da547776f486a117f6f7ff6557edae7d68834cd71973517cfe4af045ad0fecdead68edc8017000958b69410247a51bd9bd3152dd57389f25223" +
|
||||
"d5e88c0d343ab3aeb89b763eeb7ee48b3966fee147a4614e436c9a1a398487c80a001700666251b3dd6a2e5dc96814d21e6498e75811ba4c51160cbecb7d5510" +
|
||||
"62697171a25a6abbc41fd806c3dfc83daaa10d7ce47f5a29ef0d85dda5a61429c637520e6a4048624cbb25f53977254cf803848ad81f25eda07690fe7a0466e4" +
|
||||
"d18a2fd145dda1c94a994bc4ba5ce1aa1b50c38151febee757afceeaa91c7b35e57b90ff7b62efa929dcb962d32dde5a0bc3159524728621a3d7487eb7c3edd8" +
|
||||
"6df3f8a18e590039bfc84a22b23b11468c90dcdc8506187233d8a6b3dc9785ddf6f341709fefccde91a7a0925a8446b1896aabd6a6826ef88b756a9711cb3b78" +
|
||||
"1ab1f4df4d0515070e41fd5b0c5483270307e60eaaaf0b3a48f6bb96eef6141075445285675bb12f2ce38b42c91c1e063400d7bba9b322a0783e7d2f5d3f8874" +
|
||||
"52ceb65bdedaa032336d969d2e0e3007d2ac07bcf054b9d0330f2e26c486c054bfa709fdabe283ed9a4ae67cee24f40f2a5c4e70160e6ceead208ca400959922" +
|
||||
"70bc35be104c9ad94cdbe288b1c599db1758331340c9e927bc9d688e4186d5badd463bd3ba116bdc22a39c604778cd95503ce4ce642041e89bcdeb86fb19ab25" +
|
||||
"e1f94ed2a2f857b228ed4a582ad411d7273a0d5189bf7a2b87a135753e03383033b989ea532041ab9ed397ecb3ce61e01923b3729068f6828ffd12e2ab1d28db" +
|
||||
"6ed7423d458decb00476657a0580b4af3aa5615bf07df55beaa2bec71447aeb39791477dd09349bf573e29e9c4fd454b4bbf1e19591bf38dc47c83bf39babdc8" +
|
||||
"737d69ce4b586cd10ed406426b88e686c11072f04c680e8b6275166e2dbe91f701642b1b4ed92d23d6fe14f39ff7f5a09401f3a398eb4bb742f6cf10aa35e767" +
|
||||
"7e6e92aec791e94f8122e8c9cc9d0bc979e3eac6562ab614ff20330b00d9cdbd08e9deaeaf5cd67b49164f550c5f5c2d7523fe5ad71a2bd03fe2a97329980cb3" +
|
||||
"049ecf6d677d815e56f7cc27407cf73528549ea98265ef90277c14763d5acb3572f5a482432cd8288972af580fdd3418889bfa6a373c4813c4c45e933ea4ec72" +
|
||||
"cdd068089c2a30897dd57031445834de9f23faf506ad930d843b1cabad2c0aa8965d1b5e57032c969f9f55fe2a3049f4e63d5b5c6f5f760da5ba44e3bb9307e9" +
|
||||
"ea39973d05a74a49e15bb71eaecf62373153ca316fdd40b1c64ce2896c95a7b5df970c2bec85edbd5ed84fa7949c08d5ec4d987052fffe357d444e2408a22295" +
|
||||
"6ac1fb272f5023740b381e00dae9f09751a33bc6ad673c4221ce3f932164deb99f1da3eb3581caf475e385bcc56d47a7a1615a9543403750f0121d5482c4ea5c" +
|
||||
"94fa3428178f6a4deea08d754ba2abb3d1aa48c3e06f06ffcdf4571579d398cd991e60599e9633fae6a0c07e10e538aebb7d33aa127c830f14b083728f6ad7eb" +
|
||||
"c9a60a0ba222f47780eaa82a21393a49defee97aa8c3aa2fa53a2af86059a7587074128c2fee7060f398ae70b156d53aba0bf1af4bff10a966ce7e6382cec4b6" +
|
||||
"054a8f6b9ef0e8729ee182f86c862f9b7d5ea36ef7e15bed10ed41b25738c380e58cf42795e3202749074fe5cb6e8fcb49a116f54d84734a834609a3443b8b42" +
|
||||
"97c05ced428f5756ba59bfc1535bc7e16d370d81b72b1f3f78ba75c820b22e485dc042e4f38e93cc2918a491750c92998f03aee571cbe9abce4d00fdf9801f9e" +
|
||||
"8e0fa276822e1e5349945f1d337e656b431c48c1a2e9d4142ea14e9427881bf201ad8cd8effaa6fe6a7e07c8112299db1b327a0cc34c9fbd35596f4ee25caeee" +
|
||||
"221afad93ce7df64aa6ac766cf4fe1660446dcbafdfb86b4e0fea78c29c3e84ce42da4a503178bd250a6dbc4fc65e397062229001da05d5be118dea7ca5ce67f" +
|
||||
"b4ee07a8b01e408aebef2c913434921df686a242b7d015a559f9efdc54ad62d7f31ceb72463041843d7fb60f948fed03ff143fe24ab81bd4ef6bdabb856ef1b7" +
|
||||
"174cc987436322271bf48423114e05758a08cdbf300931fc7e950830b7ee920f7033541f1db9b0d2b91cad80d06c049b05fd0a76d6dc211bef2a08d53b1d16d4" +
|
||||
"2232fb263941daac4e004542517807341bde98e9990a97739ef86d66c7a51324f1f6911cce4c3db37ebacb6e58eb02d8f7d6ea31338b56a99649c4e730a01bca" +
|
||||
"deb6fc87cabe00addf1bf76b83927de26bc2bb3f0cd5945d863b0c31cfe8fd4b60462000a911755cbecdb6a98139041d52df498aa99aa3876836ce5b5bb426e7" +
|
||||
"c22b5977902e0b3425fdbdb8f44e8758b207b469c3e5363f552c89fbf778e95e8b7ff6566ab591fb68a8bde38d8169c708a321b669c08d9ecf1a06c5321bb1cc" +
|
||||
"9c8a585b6381645edfbd1ea4a2ad7e7eb8be6c431958add393c0a257aeb283644c6fe97580aef613f1b9d83e5b009f7a4d059025c11e0a0a67801be511dc097e" +
|
||||
"4e7c065684effcafab83e0e716e2d0862e83b295f82089ed3ba4f6897c8d8eb2b358231f95eef840e1fe22e9065de2b3dfb3633e2968135756cd9c109e8acbb3" +
|
||||
"172bbb6680c2e4fd69e179916a7849315c9f4dc86991d75cc6358617454694b3fcf2471ec7fca6ea2d99f704b9aa37a25a3b3183c5e32e3711346ba2336d6001" +
|
||||
"489afb9cbd8822dbe4f0323ebf7cfa9367b6548213d473c0f07b1bb6d16e1c66fd2bfa1ca623e03149fc81eb6f71c12e7b4b76ca588548bb4872469687f334f9" +
|
||||
"7e114a16a0a58ec70ed74ef69dd96666a85aa52d1ca812235796d90b9af4296247f4c1ab632effeaaef6acbb637f1aa9379195b3b668ca541bc6eb595bbc430b" +
|
||||
"28adc5d1a26fd4cc2239516ac9ca9c0c028110926a2f88881a5886554c31539f4c8260e16364f4ac27710d2becdadf573f4a2b7b55d76ef059432c91c6f5beb1" +
|
||||
"56686a620bdb4aea50df564cc0c5ccd8a93c454e06b8969a0f59d63ae5a29105149c08a5de65e87b0dc445dc5d86db8788ba77b83e22125c69621140d7f17906" +
|
||||
"4ec0157a877cc51ce3c0d565bdf6c884f69b0ca631d6863770f6db30eff847e33c8b30d5714668a38a09f454ee44ff2b7f97207f10efcba74325378f6f272ef9" +
|
||||
"9f09c501c12bd0a4155f559a604204b36751ce8d4c0af35a8b445a9290c305d5d3ea21f944e31df9a711ee90bd16a37850e2a87c3bd3fdecdc6e2f261a5d6d0d" +
|
||||
"580990fcab9228cbb39f8c79608d821ce27c10b0ee0b5a96474759f67970cbe03cec9fe594765bc935abccf867b9717a4087465c8604eae89497c8ffae7e46f7" +
|
||||
"ade2848916b54aa796809cb98a4364b7b44c17944dbc408909a92d4cbb24a514b72fe8de7d1cbba0a101973fa9b29d97dcf1f4ed8a05d5e0cb38849dc6e2d041" +
|
||||
"16892ce649e0a553a727bfbb1d5794a059d6a411e43876e561d83bd22c054680cc8fa928f5f4be2d849f02ddf9c6d11ba35810b81553e1938ab013663f6ef35b" +
|
||||
"08f06260932d7acf99f57967eec57a61f03d880c3225e53102a672f5842da21aaaee02444d372ab8ed7096235a4926e3288912d9c736c2c4dc49918abdfdd6d6" +
|
||||
"d0df5be0133abd61b02a6f008909c5350f9077598ab2e612603431bddd3826e314feb280585b37eb89e597f7f0bdb738a9a93d9af224659d50c8f7479b240487" +
|
||||
"76c2a960eb18923fa2d3b31b3d20ef538759cf22f5b415d19bdae689f2bab651d79ff99f77a721bc1b2233da12c12be0c9881ad82fc97a6343b3af8207dda8b6" +
|
||||
"5c600644d741b8a16750964e341e060260c4de26f991f3a1f6a606d1153565f1c9cfee58eef327edc0e9cfaa206ce930b191f521be2344949bc75d583a413a96" +
|
||||
"ee4edac424cbf9bdad2883c96a1306b96ee059d8044e3b7af4e7138697f142774ed6409a86a3c6c456600d4e405e6117ec759f4b22d7e5a185b0f9c67ad987bc" +
|
||||
"58d2e8c929d4a487e5b77201d7c1416878e8d63258b2f58727cb631494cf1d68b99c28493b99b0409ccc1f9c218a2b95c45ff36563f0045ae5c3098f641ea6a9" +
|
||||
"b48a3e1489831b2d176a1e0cb2afe6bd8cc5e797de01391e47e798c1aa945d33d5e7dd607aa73c9efe93f0646adcd7e211303ac7deea4d02c80370e8e867e2ad" +
|
||||
"9942bfd5a66143560a1f59e5be1f3aeecd7eab689a4a481aec78045ae0604f69d9eea550152f6e2bc692529357b509d60e5a497bd94e63dc698cdfa2a3a55976" +
|
||||
"0b2d072a2fe9c1fd41f31518aae0edaab532591476a9c5a61cd76937575cef71ff5dd66e158e7820b4b6bff4067cc26ee9aa66f41b80f078645b920512b5efd8" +
|
||||
"88b3644601a72e3f665b9c8f0ee246593667379b8fa043718f2d75c21d2a11640c328971c32d5743c11ada6c95cfabf1c6b66e0b09342afc899e1f272ec48a7f" +
|
||||
"ba5a51943763bf969cbac879363e14dad1952517d8f4b463511adccf25e655bced7cd9666d01dd4f2a0a21729ac4f44970c9c478a995d1c3b358a244110f1db9" +
|
||||
"fe6335685701e0c2660ae69d33a93a75e44f5374b979a5af140200db43ff612be2728548192ebfd0a3860a9e135b910fe3fb249926d334167622bf4123bdf0d5" +
|
||||
"38e9ff2a3bb67a44f8407328e3c94b47d92e0401aa1db85459967699804df245a7808f972683afded9cad8fbce15c1be38fd10c62c7abc302eb0537d5cc573ec" +
|
||||
"245513a87c1a8d386f7ef0c4a91ec3c602b14a14ae395da13284df3391b929c7379e181c5d3d4597e3c955ef6e3dc2fc55890df04785bdd4e3fa35ac775f44ef" +
|
||||
"9d7813cc036d6bcc316e869eeddf7b30e4b837e9285eb20397b4d7e0d12080c502c750268bcd6ffc323cb094afbe8304ae840d37be833878697f2cf931faf06d" +
|
||||
"28dc6c7e1b5df69327127b47eddd0237f1bb5942ee5903d8cbfe1b11484199e90fe7c8e7f2f725deb2293630bd8c8a377d539736e2ccc2b90c08b97abd8c5ce4" +
|
||||
"ea91a6219ab06c61c31eb48a35587b3c1719f387bd8c2063c5a79d041ca8a9ffac2e3c728f74efdb74ee0730f84cb3a8aefff7c8a1b570127cfc93eb6d3327f5" +
|
||||
"ba7f886dee8be0548f710d6bfb18cbe5910bf61aed2c95028006f419241d968933aa00bb0760a41d2693465827a00837a84cadaf8a8e804d175adc5915c6cb6e" +
|
||||
"fefb2cf70db063f2f3812da17586436c176aa0a815dfc7983eb88bfb1b6d1db7ab119cd3058c0db4d1910034f70f6eedfee8b742ea45af9780f415be2f851061" +
|
||||
"313a218ad48e992b75afaa07c33ca47ee0155fe72e13d7e5736e512c5e5a45d351f7c7902d8b0fa31b34569a9aea31b018d63d572a9898c389d07caa427f114d" +
|
||||
"251263d56cf5d6663159c2b32683b266fb909ba9d4caadaeda6700c03b25307cdea597a3287fd76082dbf33f073482872fdb494b892112c594d7f265d2799b5e" +
|
||||
"5ec46a30fbf1557fa344a664a7af457a4e8ce2c014a270215d3f95d47a42d8f86a61d6d6b363d04a99a0d8f06c5b15cd803d951aea0ca185a807ca4c677db789" +
|
||||
"fca64f0c5ba95b8c64f930eda658f9f773a9e1c8669589a7d98ade8dbc2c2c4cbbaf6ea2bbc6e762d4098f4db0d3f055958ae9da15ae57ee0b60fb9513dacf5a" +
|
||||
"d65e34613570186acecf9e165bfa470aabcd35f22620497fbcabf220c53cff84eec12cf9965297b364f0e9122895c175d213fc2a9c9cbf27ebe1cf96fdacaf1c" +
|
||||
"1c79ede66cfaa5057d33e09b31b43869812e5a0ce730663c18c4333141ae9565e437d99ade6b2cbe005214e8b3392c55bf4d7b38ef16e7f84b4ba3c85e1dfd1a" +
|
||||
"ca8da1a5c75fd190e7752926533327880aed1461c7e9de2443ba0a2d094f4a14d5fffd3b102ea78acd34d162e82ab78fbb82bfbc8a9708ab532aa861643c39cd" +
|
||||
"2bc89f2be53c583f9930fb2da14f1c5d5f218384b1740a76bd8b7ddd2c9888c8d7e7e78cc7a3304fa41995c7c1c3316894296caeeb9711f0e6bf16abc380bd41" +
|
||||
"10448be3cb03cc3246ee7b9559c858307001033c84ecf89690526544c05c146f206d4a21e710597bb57759d232154a1f9d88eb3f3440374bad1e901da7a154b8" +
|
||||
"39a6d1b1b6b2ab0be872ff036a9f9f769a169fbf91bada732d8f28c453b9be49011b211155fa5c588b43018775f99e3b92b322a4c41282326b79fd26541ccafc" +
|
||||
"c0e2f09797e3217fb0e5785b72e654dbcde8ba14b2d56faa2218748c6789c158bb635d43c9a64690b004ab70f457e9fd959b2d90875966968c7ac44b103283e7" +
|
||||
"50b60deeb1f89444aee25fbdb7fa3a96d70c3dce38246f111e466cdfa3b807c54ed584f5b1a64456e923dcf37f45b36cea3d602ba3a55a4fd883ebb6dc198650" +
|
||||
"b522461614656897b9b7d408d48b12e594af06c91f715b32a4ed65a379f0ab461acb9b8b20d1f1b12e9f7fea422c0c7d545eff4152e06002cbd120fd74b483d3" +
|
||||
"a0ee30cfd851c98e9aa8fb19b60528de4a75b412bed656933ae8ab600aeaef5befdcca4d35fa472ed38ffb91a9017c19c5d500426f262ba379034c45cf5d1627" +
|
||||
"48da223207721b4bc4504b79309f3d622c53dfe3c83ff8866dd7614a2e90a85c077b2e18bf1cb6008f0d785d6a0ffd5f15a83a343036f3fdd25314bfe47b5a12" +
|
||||
"58a7c89475f39a58a671d0a17f6fd100a8928181b94d8d53149316d5addf14bd398b538e2593273f02cf296fd73ff92d02230de939dae94e03d44ce93dd4dfa1" +
|
||||
"b9219fd369c854ec409d7bf94b316e5e9c16e1ba525a783e24bd3fc0ecc949be245c402efae8ea77aaca74c78703506cfd5a5a614793e04c76652b4f344f79fd" +
|
||||
"f2da1e34f650fc1094116ead723813d204ffe375d20707fd94d90f21c009194201c88d22afaee83a8a6be7518dc915331b863664e033d397c64e1516c0fd9324" +
|
||||
"11614a1bdf2feb86e0d0ae21e784a55086c596c7eedd44d3afd7295455450f507f1c1a33c9ba94d50931ec054d8740510ea23990c266f30678a74fdd485b482b" +
|
||||
"cbfb4070e3f10b66c65a4210794a3137adabe887ffb9bcf2a30c625138f840b2666610e76e5a0abc183088a94930c025836653eddbc440621bbf94761c74e108" +
|
||||
"3672c6a914a753fd452e8e7a02c54b21d7bab4b705b4509b9b5b27e2e5144289eece950c3634b410b5e3cf8c5a5f74d98b55d17d45d7014390cf696a7e693777" +
|
||||
"4c028517062a69276910cf5f139078e8ef6e77aa8b35aa55fd4f53e48ae6b4875d1732b286ffe8bf852b73af7b964fdf1aa4c4f16d9f14485a2b1a704c2615ac" +
|
||||
"8ac74eaaacec7e8e4e506e1b418d377e4d5a271dfab47b3d3c11a809beda596fdf37935dfe06c147dfe7d5be696ffb2a0cff907d1eb2a88477c261d5a7aba06c" +
|
||||
"d70dc52d00b9a9d851e849f86e1cba91b4c40d1ae3d4f21a2763369dde34d084adfc09d2a6cb5f09114cd8d6fa26d15f1ec428adc245064e5b8e80f21b0b3ff2" +
|
||||
"6690398d3080f5355fc082bc4bf3a38576c7da00efbc80839dc9a06fab2b998a152553c36fab42e03e3e4b54456ed954e53bd63902d89e2617a263e70146d1eb" +
|
||||
"71557baf43aeb0a681f600a784778c895afce26fe17e3ac33990c54cd96fcb2432de79d4f95ab2fb96effdd37f4e4247ae5b4c1fa461ca3269d45a90af090333" +
|
||||
"fc3ab5152bd5aed4445eab93466701382ba76fc8745abd911bdb45a494e1c62647670380c04377bcdb5e631318dfa79850469a988094acd48a4110bbc7289617" +
|
||||
"ce436294ff242302d154ad75437ae2f551df5b84f884c87497de0bb2ef7bd41a8c758e4b158084c78ef047389d88974faafa00ce7396e849509d39c403fdcca6" +
|
||||
"8f47e1d0fc294e5510a07af24c165e1a4b4ba9498e7b333c4e8624c552801079775fc684b6e98b72ff133164a2052c2aadcef168d9cdeab8a935c98f08e23b95" +
|
||||
"859277381a2ce23ea61fbe9ec1439a489523161ed370b0069aa6a5c7981e4a80c04e304ff2fd85f80b51e3de3484b53084f376cc72a390aaefc49baddf4d2545" +
|
||||
"93dcb5a49326c9c15c3d1c0e0709c9879d68bee07b956d018a995bf1e7f8fa03ef2079d01e0bec601519704cced98854c94f1f0ae837653f14c0221e12f2cbdb" +
|
||||
"1564066062bc1d4dcf7ed8b2c980b90e8101842d5844375cb370f402d858dffd9eb52572f8420d4d246462230ca0dbd567250e4f065730a6aecbd804b1acf949" +
|
||||
"30e2890a39fdd4c1eb693f7e345504dbad5ac207f1a649968c3a7b416bd972b6a6bfde04375337a93b0ac08f6fae62c0fa7df8ae9deeee421f7ac62d8cf5ecf3" +
|
||||
"b5ff39877ee4abaeb9db03d8a8f13f7925e54267a2651c55ecf580d5cbb24bf504fb01291e3e97ad1696ed995608fceda79f2441ca67bfe3c31f4f4bf0fffcd6" +
|
||||
"55408744524412cd4d3cbdbdd216694aa7477e88b25f7efeb34abf491a50695ff686829a8fea9e999877bcb37291b8dbeeddfd44465a2c28a215aa532590c487" +
|
||||
"d4747b6ece4e1aeaef725cb305d11b965a9647bef36a5c2fb45cc334d35ff4e308cd8813b6de3953b35a4ef6a3ae07794f8b54ef6365a573135320612bd1acfe" +
|
||||
"6cac5524c0e98b6f2a33a790b94f5134f0cba075a6fa93c191f4176ca62ea2e365557d6b3363a17b9ee52f3c347c82cd19f8432d16a934ae9c5d4d4505e7d20e" +
|
||||
"1ae31bb64ccb084f7a59744b27d58c2388d449ff4b63604878ae858240348ecfcb51761678265bd60d5dd7d51e25e91668fdf80f6b726b29ef6c3f0f229d8af4" +
|
||||
"b2cdadc3ca7fbadab49b28819b9c9b92b49cbe9a281e5891f4eae7616013777605a0623dd7a650baf9a9dad66ca9aae3c76ef1e27db32bd9514a2776eb0c8d05" +
|
||||
"65eec06fc4c8a69c417efa336842e248e5a51e3b5f3ba3227e3f78f1bd12d81595e03a01f4259c772fd481ab5f3d7a945e1c95fe0dc3c4742eeb7e15c9426ec3" +
|
||||
"ed4c90ee07d56acc78fecfd7c5ce1e04e7db1a970091f15c90f0aae2865d135395d27787aaf68c6a179064d82691e0b6c795f61875f317dc6d2e8feea55a28f2" +
|
||||
"461d74e14e350351720b6f536adfe3addd4111f08e3a84da2656fd4bc83989b329b383da9f01cf2392aa0b19577884d1281f2e6c106df451c078a472b36057d3" +
|
||||
"065dfc4bbb47ce4e5dce4acf6da095bdd10322f3ae12bcdd1f805e73b303f1fc7a7e16cf3ffd822bd8b25fbc93be065019e394113182713f1ad299ae6537f6bf" +
|
||||
"57116e8dc9ec775519b797ab4107c2ac5129ba85188852c3bc5f116044bbd8985b6dc8b8da4659589bf9d2351c4c3adaed87fe2ea20ef6bf62224c7af86fe8b4" +
|
||||
"973e558f39465dff43bf23cf1f78957514e4e82a3009d40d97bf8d8442a11deabde806e2fa84c1ba75273da75ce8dad3b2a34786b2958ac4bfd248ebe604a173" +
|
||||
"83c727b11dd922b1f72476af700b663fbd7033d0ac74b463d40a92d26c938b69f96fb4a9cb7a9ca2bd9496251270c0c5fcae6b3c2eda5377b897891648a97125" +
|
||||
"8ac71fed8dce8e02c30961a299cb7f3145845dbe8f4dfaaaf4baf0ca3fb730abdd258e98215f072a943d5aee8d8bc4c86023524f7b69186d99ad88ccdfc0b4bd" +
|
||||
"7db422bbad7eaa0824ce24b5c186e172c8c584f1cc5c126c901a69ebee8dbd230a653a3643b7875672d22fd86079daf8d834ba33664f5ad0b6eec767b4f58b45" +
|
||||
"e67b776b90e0a5e130aa5365003eb7fd78b757b1cf9133f6a1d51064b293cb42c8c41b15b7e95e2a39fa5dae19c6e20031d2bfa4632c37779163fdecc6b45624" +
|
||||
"4d6bfd01a8877f6fe7739591917a86e7dd795ad85cc3f256cff5961e8b62e92a0754a51f2c6d59819446eec8bdd08b87cf9f4fb5373e809d52240d2dd691cd50" +
|
||||
"37fc79d35b61d63851917cfdba164868a3f79e2061bd4610c1f5216ed77df00baa75f949ad37142db4fd282a5c7d2e2636ca590f92fc4781d4f51efa69f53947" +
|
||||
"d4fca1dc7dd2429837b6d7e5c9528effdecf6f731f676587785e5c4096bdb6f1f44e72f5f77d9025813e848881506f65bfb0f2b2d3ae6f9e00731929b5ac083d" +
|
||||
"b1c9c324703e63fef6319e1d8150aa0ff7d9a2049961df9158f3e1f2e540a91feb742625d2a859a452186d2ccaa3ec2ba086ee0868a4dc24ae6818fc02f9c1df" +
|
||||
"dc326cc31c46feefda97265238592f638968627ec24903b97513ab05ed58ce5b516decda0e2fbf01a70e6cb2e53c3e7b8855f80cd7e007b78da727ef0893e099" +
|
||||
"592ba684d62ae2d1f06ad148fa7f34cfc724d804149cda21aee7eac064ad20d29132b260c2c2867fa6a2e747739fc30df2f002c2a99da6c7e64ee51e806af7d9" +
|
||||
"768aec456b93a05002666cb61b2229c99f2cdef9afc9cea1c4ee3a85dd189823399781ee33cde2abedff09c47960c035e075a29156005d75845a11fa06abcc50" +
|
||||
"5f7f849a0caaf683f334e9e7bbbef90fe6cf94cbf87767219440d31713daef6ad1e4a1cc720ce59fee4cf7731e46bbba9ec1648908ea345030aa8f10ade10ffa" +
|
||||
"3d2acfd480b0b11eadc4fb2b740596b204e911456cb2f35ad9993ab7dd6a48b35ba0c207625384bb3c2ff24437810bd13c7ee96cd6f97f19ffd537ad182a3657" +
|
||||
"b0e83d42fd6e2ebac6cbf5ea1bde97465b7cec6954ff5b5be049e59a49ea25ed6667dbace765401bde12031e5cfabf2df7afb728d2a0b2a38b24d79bf23a313c" +
|
||||
"40fe5adef97487641c6088dd8712c0c352708e474b02c08fd2d71b6d44f16d82f291ccd61c43a339408379a8de54cfdbbae5e421e084112fbc17fb5561e084d1" +
|
||||
"4149bf4bb06fd161878d8574f856867cff974d5898e161923d55bdac8699c9df6a220bcb6c800d3ae7f107b8c4acab206d780aafaf6c2e2379de8c900700d9c9" +
|
||||
"c87d464772514c5aa3e5f5bfc00fb54f2b74702838b4731c5ac8a070b50783e81dd97fa8d55c739d026b607a2a78aba1bb79b1a7a3c22c78368672ac020061e6" +
|
||||
"d9683d57d6989c6c6f08b8d5d74720f5cb25505fbe81d2bf53a68e972a54784705b20f83fd1ab5afff30764ef89dba4465b56f48b325ab3810bf8dcbf4faa61f" +
|
||||
"676e2043ac8540df9e3af4c0f51d816e89c09bf67253be45fc5f75f64be97f6c7dc0c6392af6fa8e75aab58eda976b36773cd37d315771400a2cb846fdef3d8a" +
|
||||
"a15bce5dfda0379e526f87cf67767a2ab93d41c85b4ed016ed0a89d2f94737433a3ebade813def29eaa18a1fb925fca7d08d1020f64caebc562cb4ad2fb241e2" +
|
||||
"94923b2f2df5e6c4953c4b73be0f5568defe57ce49d16e2a205323e46cbb5a3e77fff1557671503bd7b5de5320f1fb951fbe26400cfa854af2d12fab0215310c" +
|
||||
"f070add34dc4565d1757d7e10a03e3bb73a607ed7e10861b1274ddee76183cf7e56c1de7162c805c2dba0e0331d36f3a4e2019a2e0705ee2747ed1e52bc3a6fb" +
|
||||
"3b061f784348204cdf8d643ff6c271fa72b56900edcc2f77201f3bd4fc296ad6534a7029ea66761bb9a3589a1f6ef566504c70297b98fbb603214fed2e4b7ca1" +
|
||||
"06e3f0e993118897fa641fb9722d4667fa98d07a6837e5ab2144e5ec1548a7dbca28c559f2a9a99d54b8e55f56d0e59bcef1ac45e2046835b60579da0d2261e7" +
|
||||
"30dab9009d138421c6458d146e870358b0b3fa20257e50b58f167c6b47edf7053513d58f33547d06ce52458baaa4dcf15f77b103565c66a81f183c827801b455" +
|
||||
"b61b6287a46a37a96884075a7eada9ba7f0ddcc14654bf87a26d2e27a978b415257773796923a220e06b25af16fb5aaded9b2d081a4c64106df460ddce9c3b2a" +
|
||||
"c8553e1521e501ad29a4b7f7681c9b60576a127087a5237c4c2bacf9b163dd590e63f2bc80e7f1e613773f87d034313064710404739d63363d204be7b14800c4" +
|
||||
"d8c1b6a2a21da70223be51d281fee302ef806454f9d7d28244ba537c1d9f8f1bcc5d47038d986a8f95ca48437ffe94fd44a90bb03014a259112a97508adb3db4" +
|
||||
"34f72a5268c1af6bc6d5801e579aab2228ca33600ebbf1a1959081c3a4ca99e444f97409f5e0ca4779241c9aacad1f4ee7fb4369bd6ae076378e4f63000b9a5c" +
|
||||
"849ba6e72e47e2454a44659149338ac0767cd25d8693c0d143e354bd600f1c1d3a44eeb024923ea659060665d5cd9a4ca1ca86162819556535fd59b9fde90caa" +
|
||||
"29920efe99479fe7e4b4e5371e13ccb43a1419cf023433239d840900d31bab37fabc3fd20e31bb7dbcb3ae8df66f67e2844944bcf544b658364f9e3d0b6d84b4" +
|
||||
"63ad4eb621644fd7d774b501407a1178814b15149345d551b474347188067db2ab4d7f4d1abd3027133039e855e129f3f5649550da8c04fe2db57cb89bf1bf4f" +
|
||||
"72eb35ccfe31afb92f6136d4c2a1c115b07b721b2da43151f11c356256230408979c5d95243714429e2c9500e7b043b20dac8a9763e5b487d1cbdb34ac379b9c" +
|
||||
"6409454c79385b6e562459c4fdaad1b7f9297c1473e9b90fffe6d1c5390e241a187a4cefa2eb0cb0c11f4ca6c5b961c18ceb57892295290dbc991692556bffa3" +
|
||||
"b8c405cf285e6bcb8a90246ad0ac15122f4cf73adc129d23aa2240733404beb6d74bf698e5589288a522573c774ce9f514b5d5c086382ea1dd4e89ff5facbf23" +
|
||||
"d36bc3d203941e17747ede4b82820351f4df278ddb787ce8f6f1cc468ef953399efb072ce706e253f1bab76444bb70be6443cd0db633e958dc57bd223e00418e" +
|
||||
"915a7c2e4d94c0623f9788276480cdfe798387d35e2ea2d304d066aec7627794cdd4200a44208d6c87f242c76e2d4a3f966b6fb96eaa63d892c1a177bef249b4" +
|
||||
"fdd1a4c06c791f677dd9919f739ccf318bd77835330b0219786249c9c9736161dac771a838724f2dd70afba46a6782fd27601cf8a7126ae95a66e526131a68d7" +
|
||||
"7a809e513533ed8021eecdbc5851dfcf95e10f1bbe47b5c7f079275a1837836245266b66d89fab25ac4bd6c1225560bea3259b67bf50a58ee056754d574da79e" +
|
||||
"f9a1a0df3a5defed0f74fe74ce0bf65a04086f17e94a8451828c723c97932f26f9349f1a2c7866c617a528602721de4f3cc8916bcfc66cdc106bafa26ea87a13" +
|
||||
"94dfa37e396365fb7f92df007b46a50ff04c7f85bfa679230ebedf18c2fb876fc7098dd1c4328adf85de71c31d94687a308053bfcdc158cfa7772170fbed63f5" +
|
||||
"37dda41f65196dfacdd1186b5de0f3369d841ce6502192292d05a19ce7464f5bcab3015c721cac13ddca561b92dc1ee25d3068dc1945a1b4e2bd1e6604c42e4c" +
|
||||
"3c04b490f6365828957990007394557854a903e19feb06906e41cbc8766bf37bd7dece90f4cdc987709b1129e84bfdc502543b5bfa887bf78553a5ec10ad68c5" +
|
||||
"d10eff75f7aa495e7d934a55577fdc0aead31aee4522db0259d7d4ea8438a7996d80a787465a2980457193d1c4bf1a0a1e01741d72e5fc4dfe58475c1c01026b" +
|
||||
"5a3bc973b902280753e9c3226db9cc778e2506c56ee86ae85b4d54dbf05394107329b2d1ee56522cb1ce562fb1aa4e592199d9c29f64cc3ab1d757531e209eec" +
|
||||
"aa138d8388169b5e28c45f5aba267eeaa57f69869f0b6855d82b0eafcde63895251f41e8e676a0ab12ef3f569bb7de91b79fa46ad9637da01ca004f4d30259c1" +
|
||||
"f5b00761f6ca9c17721a6718390624a10a11f7f52d7afb71ee5f8338828910e48f94a1347761abac87897b2dd0e23f1d325aec5031ef58f2972e8b402e05f8c1" +
|
||||
"ae7053a90380a1ae0d4d06645548c23e13afa31aac8ff83b10f8341418af4114632f6406d6e33076391696c9161d63c8bcfd1c625fc737f68198046212d1638a" +
|
||||
"d2d2d42ff7029c1fcc682a046edc4d4f24862d82c600180b1e8f57ff6a3865dfe9274f9886d00efa523a1b3b3757c4489200fec3dc5583854c955492336253dd" +
|
||||
"767f2a60ce3d224afcff9cdc19e9b28830d33affda6af99942a8fe39562055f3e884fd6c1ebc1908ac159061f35e9b0da80434ce9673d9c6b87265170077c670" +
|
||||
"743e37474d7605cd01c44af600f16d9ffaf24acf87fbe5ccf39bac41047a810d210051c87f06147a0bb8f1427a406700483679638f1af23f1dafb7aa0c468669" +
|
||||
"71c3a82f535c26cf6cb335e8e915fda393799d3dbe0e04b907ed3612d12ac95783a6876cd986d2a13b82192532e02c250eaa42f891d2481655fa4494c723fe00" +
|
||||
"87d224444245eb5b0eade5f741b025db1992a8ad0dce51b0c1af4a18a9e244f9f755891adf0f19179c7baa6c32bffc91e0b03c4ed3aaee1978b6a1f03b87ac6a" +
|
||||
"fc3b9e7030bb212b17de198edfccde29d04224798c1204e47ea235f048724fac62d637d1ba0ee3922048fcf79c746b6c0c036d882e3491fd72bad6e009c6403e" +
|
||||
"55876f4d31330caa02aedd0b0c121c3c41e736853a08071f0dd4ddc7412db0bbe274a9ac2932552bb37c40e72c2ef1d7cca8236942e480d709d3ea9d5ae0a1b7",
			),
		},
	}
	for i, tt := range tests {
		cache := make([]uint32, tt.cacheSize/4)
		generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1))

		dataset := make([]uint32, tt.datasetSize/4)
		generateDataset(dataset, tt.epoch, cache)

		want := make([]uint32, tt.datasetSize/4)
		prepare(want, tt.dataset)

		if !reflect.DeepEqual(dataset, want) {
			t.Errorf("dataset %d: content mismatch: have %x, want %x", i, dataset, want)
		}
	}
}

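// Illustrative sketch, not part of this commit: the loop above assumes a small
// test helper named prepare that expands the expected byte blob into the uint32
// words used by the in-memory dataset. A minimal version of such a helper
// (assuming little-endian word layout and an "encoding/binary" import) could be:
func prepareSketch(dest []uint32, src []byte) {
	// Each dataset word is read as a little-endian uint32 from the reference blob.
	for i := 0; i < len(dest); i++ {
		dest[i] = binary.LittleEndian.Uint32(src[i*4:])
	}
}
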
// Tests whether the hashimoto lookup works for both light as well as the full
// datasets.
func TestHashimoto(t *testing.T) {
	// Create the verification cache and mining dataset
	cache := make([]uint32, 1024/4)
	generateCache(cache, 0, make([]byte, 32))

	dataset := make([]uint32, 32*1024/4)
	generateDataset(dataset, 0, cache)

	// Create a block to verify
	hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
	nonce := uint64(0)

	wantDigest := hexutil.MustDecode("0xe4073cffaef931d37117cefd9afd27ea0f1cad6a981dd2605c4a1ac97c519800")
	wantResult := hexutil.MustDecode("0xd3539235ee2e6f8db665c0a72169f55b7f6c605712330b778ec3944f0eb5a557")

	digest, result := hashimotoLight(32*1024, cache, hash, nonce)
	if !bytes.Equal(digest, wantDigest) {
		t.Errorf("light hashimoto digest mismatch: have %x, want %x", digest, wantDigest)
	}
	if !bytes.Equal(result, wantResult) {
		t.Errorf("light hashimoto result mismatch: have %x, want %x", result, wantResult)
	}
	digest, result = hashimotoFull(dataset, hash, nonce)
	if !bytes.Equal(digest, wantDigest) {
		t.Errorf("full hashimoto digest mismatch: have %x, want %x", digest, wantDigest)
	}
	if !bytes.Equal(result, wantResult) {
		t.Errorf("full hashimoto result mismatch: have %x, want %x", result, wantResult)
	}
}

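// Illustrative sketch, not part of this commit: the test above relies on the
// light and full paths agreeing byte for byte. The assumption encoded here is
// that hashimotoLight derives dataset items from the cache on demand, while
// hashimotoFull indexes a precomputed dataset, so a verifier that only has the
// cache can stay on the light path. The size constant mirrors the toy dataset
// used by TestHashimoto.
func verifyWithCacheSketch(cache []uint32, hash []byte, nonce uint64, wantDigest, wantResult []byte) bool {
	digest, result := hashimotoLight(32*1024, cache, hash, nonce)
	return bytes.Equal(digest, wantDigest) && bytes.Equal(result, wantResult)
}
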
// Tests that caches generated on disk may be done concurrently.
func TestConcurrentDiskCacheGeneration(t *testing.T) {
	// Create a temp folder to generate the caches into
	// TODO: t.TempDir fails to remove the directory on Windows
	// \AppData\Local\Temp\1\TestConcurrentDiskCacheGeneration2382060137\001\cache-R23-1dca8a85e74aa763: Access is denied.
	cachedir, err := os.MkdirTemp("", "")
	if err != nil {
		t.Fatalf("Failed to create temporary cache dir: %v", err)
	}
	defer os.RemoveAll(cachedir)

	// Define a heavy enough block, one from mainnet should do
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(3311058),
		ParentHash:  common.HexToHash("0xd783efa4d392943503f28438ad5830b2d5964696ffc285f338585e9fe0a37a05"),
		UncleHash:   types.EmptyUncleHash,
		Coinbase:    common.HexToAddress("0xc0ea08a2d404d3172d2add29a45be56da40e2949"),
		Root:        common.HexToHash("0x77d14e10470b5850332524f8cd6f69ad21f070ce92dca33ab2858300242ef2f1"),
		TxHash:      types.EmptyTxsHash,
		ReceiptHash: types.EmptyReceiptsHash,
		Difficulty:  big.NewInt(167925187834220),
		GasLimit:    4015682,
		GasUsed:     0,
		Time:        1488928920,
		Extra:       []byte("www.bw.com"),
		MixDigest:   common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
		Nonce:       types.EncodeNonce(0xf400cd0006070c49),
	})
	// Simulate multiple processes sharing the same datadir
	var pend sync.WaitGroup

	for i := 0; i < 3; i++ {
		pend.Add(1)
		go func(idx int) {
			defer pend.Done()

			config := Config{
				CacheDir:     cachedir,
				CachesOnDisk: 1,
			}
			ethash := New(config, nil, false)
			defer ethash.Close()
			if err := ethash.verifySeal(nil, block.Header(), false); err != nil {
				t.Errorf("proc %d: block verification failed: %v", idx, err)
			}
		}(i)
	}
	pend.Wait()
}

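// Illustrative sketch, not part of this commit: a natural follow-up assertion
// for the concurrency test above would be to confirm that the shared datadir
// really ended up with at least one on-disk cache once all workers are done.
// The helper name is hypothetical.
func assertCacheOnDiskSketch(t *testing.T, cachedir string) {
	entries, err := os.ReadDir(cachedir)
	if err != nil {
		t.Fatalf("failed to list cache dir: %v", err)
	}
	if len(entries) == 0 {
		t.Errorf("expected at least one generated cache file in %s", cachedir)
	}
}
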
// Benchmarks the cache generation performance.
func BenchmarkCacheGeneration(b *testing.B) {
	for i := 0; i < b.N; i++ {
		cache := make([]uint32, cacheSize(1)/4)
		generateCache(cache, 0, make([]byte, 32))
	}
}

// Benchmarks the dataset (small) generation performance.
func BenchmarkSmallDatasetGeneration(b *testing.B) {
	cache := make([]uint32, 65536/4)
	generateCache(cache, 0, make([]byte, 32))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		dataset := make([]uint32, 32*65536/4)
		generateDataset(dataset, 0, cache)
	}
}

// Benchmarks the light verification performance.
func BenchmarkHashimotoLight(b *testing.B) {
	cache := make([]uint32, cacheSize(1)/4)
	generateCache(cache, 0, make([]byte, 32))

	hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		hashimotoLight(datasetSize(1), cache, hash, 0)
	}
}

// Benchmarks the full (small) verification performance.
func BenchmarkHashimotoFullSmall(b *testing.B) {
	cache := make([]uint32, 65536/4)
	generateCache(cache, 0, make([]byte, 32))

	dataset := make([]uint32, 32*65536/4)
	generateDataset(dataset, 0, cache)

	hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		hashimotoFull(dataset, hash, 0)
	}
}

func benchmarkHashimotoFullMmap(b *testing.B, name string, lock bool) {
	b.Run(name, func(b *testing.B) {
		tmpdir := b.TempDir()

		d := &dataset{epoch: 0}
		d.generate(tmpdir, 1, lock, false)
		var hash [common.HashLength]byte
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			binary.PutVarint(hash[:], int64(i))
			hashimotoFull(d.dataset, hash[:], 0)
		}
	})
}

// Benchmarks the full verification performance for mmap
func BenchmarkHashimotoFullMmap(b *testing.B) {
	benchmarkHashimotoFullMmap(b, "WithLock", true)
	benchmarkHashimotoFullMmap(b, "WithoutLock", false)
}
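
// Illustrative sketch, not part of this commit: the benchmarks above share one
// pattern -- expensive one-off setup (cache/dataset generation) runs before
// b.ResetTimer so that only the hashimoto work inside the loop is measured.
// A stripped-down template of that pattern, with hypothetical parameter names:
func benchmarkWithSetupSketch(b *testing.B, setup func() []uint32, work func([]uint32)) {
	data := setup() // excluded from the measurement
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		work(data)
	}
}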
@@ -1,113 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethash

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

var errEthashStopped = errors.New("ethash stopped")

// API exposes ethash related methods for the RPC interface.
type API struct {
	ethash *Ethash
}

// GetWork returns a work package for an external miner.
//
// The work package consists of 4 strings:
//
//	result[0] - 32 bytes hex encoded current block header pow-hash
//	result[1] - 32 bytes hex encoded seed hash used for DAG
//	result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
//	result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
	if api.ethash.remote == nil {
		return [4]string{}, errors.New("not supported")
	}

	var (
		workCh = make(chan [4]string, 1)
		errc   = make(chan error, 1)
	)
	select {
	case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
	case <-api.ethash.remote.exitCh:
		return [4]string{}, errEthashStopped
	}
	select {
	case work := <-workCh:
		return work, nil
	case err := <-errc:
		return [4]string{}, err
	}
}

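// Illustrative sketch, not part of this commit: the doc comment above states
// that the boundary in result[2] is 2^256 divided by the difficulty, i.e. the
// 256-bit target a sealed header hash must fall below. The helper name is
// hypothetical and assumes a non-zero difficulty and a "math/big" import.
func workBoundarySketch(difficulty *big.Int) *big.Int {
	two256 := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	return new(big.Int).Div(two256, difficulty)
}
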
// SubmitWork can be used by an external miner to submit its PoW solution.
// It returns an indication of whether the work was accepted.
// Note that an invalid solution, a stale work package or a non-existent work package all return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
	if api.ethash.remote == nil {
		return false
	}

	var errc = make(chan error, 1)
	select {
	case api.ethash.remote.submitWorkCh <- &mineResult{
		nonce:     nonce,
		mixDigest: digest,
		hash:      hash,
		errc:      errc,
	}:
	case <-api.ethash.remote.exitCh:
		return false
	}
	err := <-errc
	return err == nil
}

// SubmitHashrate can be used for remote miners to submit their hash rate.
// This enables the node to report the combined hash rate of all miners
// which submit work through this node.
//
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashrate(rate hexutil.Uint64, id common.Hash) bool {
	if api.ethash.remote == nil {
		return false
	}

	var done = make(chan struct{}, 1)
	select {
	case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
	case <-api.ethash.remote.exitCh:
		return false
	}

	// Block until hash rate submitted successfully.
	<-done
	return true
}

// GetHashrate returns the current hashrate for local CPU miner and remote miner.
func (api *API) GetHashrate() uint64 {
	return uint64(api.ethash.Hashrate())
}
|
@ -17,11 +17,9 @@
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
@ -84,14 +82,11 @@ var (
|
||||
// codebase, inherently breaking if the engine is swapped out. Please put common
|
||||
// error types into the consensus package.
|
||||
var (
|
||||
errOlderBlockTime = errors.New("timestamp older than parent")
|
||||
errTooManyUncles = errors.New("too many uncles")
|
||||
errDuplicateUncle = errors.New("duplicate uncle")
|
||||
errUncleIsAncestor = errors.New("uncle is ancestor")
|
||||
errDanglingUncle = errors.New("uncle's parent is not ancestor")
|
||||
errInvalidDifficulty = errors.New("non-positive difficulty")
|
||||
errInvalidMixDigest = errors.New("invalid mix digest")
|
||||
errInvalidPoW = errors.New("invalid proof-of-work")
|
||||
errOlderBlockTime = errors.New("timestamp older than parent")
|
||||
errTooManyUncles = errors.New("too many uncles")
|
||||
errDuplicateUncle = errors.New("duplicate uncle")
|
||||
errUncleIsAncestor = errors.New("uncle is ancestor")
|
||||
errDanglingUncle = errors.New("uncle's parent is not ancestor")
|
||||
)
|
||||
|
||||
// Author implements consensus.Engine, returning the header's coinbase as the
|
||||
@ -102,11 +97,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
|
||||
|
||||
// VerifyHeader checks whether a header conforms to the consensus rules of the
|
||||
// stock Ethereum ethash engine.
|
||||
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
|
||||
// If we're running a full engine faking, accept any input as valid
|
||||
if ethash.config.PowMode == ModeFullFake {
|
||||
return nil
|
||||
}
|
||||
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
|
||||
// Short circuit if the header is known, or its parent not
|
||||
number := header.Number.Uint64()
|
||||
if chain.GetHeader(header.Hash(), number) != nil {
|
||||
@ -117,93 +108,54 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *ty
|
||||
return consensus.ErrUnknownAncestor
|
||||
}
|
||||
// Sanity checks passed, do a proper verification
|
||||
return ethash.verifyHeader(chain, header, parent, false, seal, time.Now().Unix())
|
||||
return ethash.verifyHeader(chain, header, parent, false, time.Now().Unix())
|
||||
}
|
||||
|
||||
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
|
||||
// concurrently. The method returns a quit channel to abort the operations and
|
||||
// a results channel to retrieve the async verifications.
|
||||
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
|
||||
// If we're running a full engine faking, accept any input as valid
|
||||
if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
|
||||
if ethash.fakeFull || len(headers) == 0 {
|
||||
abort, results := make(chan struct{}), make(chan error, len(headers))
|
||||
for i := 0; i < len(headers); i++ {
|
||||
results <- nil
|
||||
}
|
||||
return abort, results
|
||||
}
|
||||
abort := make(chan struct{})
|
||||
results := make(chan error, len(headers))
|
||||
unixNow := time.Now().Unix()
|
||||
|
||||
// Spawn as many workers as allowed threads
|
||||
workers := runtime.GOMAXPROCS(0)
|
||||
if len(headers) < workers {
|
||||
workers = len(headers)
|
||||
}
|
||||
|
||||
// Create a task channel and spawn the verifiers
|
||||
var (
|
||||
inputs = make(chan int)
|
||||
done = make(chan int, workers)
|
||||
errors = make([]error, len(headers))
|
||||
abort = make(chan struct{})
|
||||
unixNow = time.Now().Unix()
|
||||
)
|
||||
for i := 0; i < workers; i++ {
|
||||
go func() {
|
||||
for index := range inputs {
|
||||
errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index, unixNow)
|
||||
done <- index
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
errorsOut := make(chan error, len(headers))
|
||||
go func() {
|
||||
defer close(inputs)
|
||||
var (
|
||||
in, out = 0, 0
|
||||
checked = make([]bool, len(headers))
|
||||
inputs = inputs
|
||||
)
|
||||
for {
|
||||
for i, header := range headers {
|
||||
var parent *types.Header
|
||||
if i == 0 {
|
||||
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
|
||||
} else if headers[i-1].Hash() == headers[i].ParentHash {
|
||||
parent = headers[i-1]
|
||||
}
|
||||
var err error
|
||||
if parent == nil {
|
||||
err = consensus.ErrUnknownAncestor
|
||||
} else {
|
||||
err = ethash.verifyHeader(chain, header, parent, false, unixNow)
|
||||
}
|
||||
select {
|
||||
case inputs <- in:
|
||||
if in++; in == len(headers) {
|
||||
// Reached end of headers. Stop sending to workers.
|
||||
inputs = nil
|
||||
}
|
||||
case index := <-done:
|
||||
for checked[index] = true; checked[out]; out++ {
|
||||
errorsOut <- errors[out]
|
||||
if out == len(headers)-1 {
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-abort:
|
||||
return
|
||||
case results <- err:
|
||||
}
|
||||
}
|
||||
}()
|
||||
return abort, errorsOut
|
||||
}
|
||||
|
||||
func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int, unixNow int64) error {
|
||||
var parent *types.Header
|
||||
if index == 0 {
|
||||
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
|
||||
} else if headers[index-1].Hash() == headers[index].ParentHash {
|
||||
parent = headers[index-1]
|
||||
}
|
||||
if parent == nil {
|
||||
return consensus.ErrUnknownAncestor
|
||||
}
|
||||
return ethash.verifyHeader(chain, headers[index], parent, false, seals[index], unixNow)
|
||||
return abort, results
|
||||
}
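The removed lines above implemented an ordered fan-out/fan-in: headers are dispatched to GOMAXPROCS workers over an inputs channel, completions come back on a done channel, and errors are released strictly in input order. A self-contained sketch of that pattern (generic items instead of headers, hypothetical names) for readers who want the shape without the engine details:

package main

import (
	"fmt"
	"runtime"
)

// verifyAll checks items concurrently but delivers the per-item errors on the
// returned channel in the original input order.
func verifyAll(items []int, verify func(int) error) <-chan error {
	workers := runtime.GOMAXPROCS(0)
	if len(items) < workers {
		workers = len(items)
	}
	var (
		inputs = make(chan int)
		done   = make(chan int, workers)
		errs   = make([]error, len(items))
		out    = make(chan error, len(items))
	)
	for w := 0; w < workers; w++ {
		go func() {
			for i := range inputs {
				errs[i] = verify(items[i])
				done <- i
			}
		}()
	}
	go func() {
		defer close(inputs)
		var (
			in, next = 0, 0
			checked  = make([]bool, len(items))
			feed     = inputs
		)
		for next < len(items) {
			select {
			case feed <- in:
				if in++; in == len(items) {
					feed = nil // everything dispatched, stop feeding workers
				}
			case i := <-done:
				for checked[i] = true; next < len(items) && checked[next]; next++ {
					out <- errs[next] // release results strictly in order
				}
			}
		}
		close(out)
	}()
	return out
}

func main() {
	for err := range verifyAll([]int{1, 2, 3, 4, 5}, func(int) error { return nil }) {
		fmt.Println(err)
	}
}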
|
||||
|
||||
// VerifyUncles verifies that the given block's uncles conform to the consensus
|
||||
// rules of the stock Ethereum ethash engine.
|
||||
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
|
||||
// If we're running a full engine faking, accept any input as valid
|
||||
if ethash.config.PowMode == ModeFullFake {
|
||||
if ethash.fakeFull {
|
||||
return nil
|
||||
}
|
||||
// Verify that there are at most 2 uncles included in this block
|
||||
@ -255,7 +207,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
||||
if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
|
||||
return errDanglingUncle
|
||||
}
|
||||
if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true, time.Now().Unix()); err != nil {
|
||||
if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, time.Now().Unix()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -265,7 +217,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
||||
// verifyHeader checks whether a header conforms to the consensus rules of the
|
||||
// stock Ethereum ethash engine.
|
||||
// See YP section 4.3.4. "Block Header Validity"
|
||||
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool, unixNow int64) error {
|
||||
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, unixNow int64) error {
|
||||
// Ensure that the header's extra-data section is of a reasonable size
|
||||
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
|
||||
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
|
||||
@ -316,11 +268,12 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
|
||||
if chain.Config().IsCancun(header.Time) {
|
||||
return fmt.Errorf("ethash does not support cancun fork")
|
||||
}
|
||||
// Verify the engine specific seal securing the block
|
||||
if seal {
|
||||
if err := ethash.verifySeal(chain, header, false); err != nil {
|
||||
return err
|
||||
}
|
||||
// Add some fake checks for tests
|
||||
if ethash.fakeDelay != nil {
|
||||
time.Sleep(*ethash.fakeDelay)
|
||||
}
|
||||
if ethash.fakeFail != nil && *ethash.fakeFail == header.Number.Uint64() {
|
||||
return errors.New("invalid tester pow")
|
||||
}
|
||||
// If all checks passed, validate any special fields for hard forks
|
||||
if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
|
||||
@ -521,72 +474,6 @@ var FrontierDifficultyCalculator = calcDifficultyFrontier
|
||||
var HomesteadDifficultyCalculator = calcDifficultyHomestead
|
||||
var DynamicDifficultyCalculator = makeDifficultyCalculator
|
||||
|
||||
// verifySeal checks whether a block satisfies the PoW difficulty requirements,
|
||||
// either using the usual ethash cache for it, or alternatively using a full DAG
|
||||
// to make remote mining fast.
|
||||
func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error {
|
||||
// If we're running a fake PoW, accept any seal as valid
|
||||
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
||||
time.Sleep(ethash.fakeDelay)
|
||||
if ethash.fakeFail == header.Number.Uint64() {
|
||||
return errInvalidPoW
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// If we're running a shared PoW, delegate verification to it
|
||||
if ethash.shared != nil {
|
||||
return ethash.shared.verifySeal(chain, header, fulldag)
|
||||
}
|
||||
// Ensure that we have a valid difficulty for the block
|
||||
if header.Difficulty.Sign() <= 0 {
|
||||
return errInvalidDifficulty
|
||||
}
|
||||
// Recompute the digest and PoW values
|
||||
number := header.Number.Uint64()
|
||||
|
||||
var (
|
||||
digest []byte
|
||||
result []byte
|
||||
)
|
||||
// If fast-but-heavy PoW verification was requested, use an ethash dataset
|
||||
if fulldag {
|
||||
dataset := ethash.dataset(number, true)
|
||||
if dataset.generated() {
|
||||
digest, result = hashimotoFull(dataset.dataset, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
|
||||
|
||||
// Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
|
||||
// until after the call to hashimotoFull so it's not unmapped while being used.
|
||||
runtime.KeepAlive(dataset)
|
||||
} else {
|
||||
// Dataset not yet generated, don't hang, use a cache instead
|
||||
fulldag = false
|
||||
}
|
||||
}
|
||||
// If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache
|
||||
if !fulldag {
|
||||
cache := ethash.cache(number)
|
||||
|
||||
size := datasetSize(number)
|
||||
if ethash.config.PowMode == ModeTest {
|
||||
size = 32 * 1024
|
||||
}
|
||||
digest, result = hashimotoLight(size, cache.cache, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
|
||||
|
||||
// Caches are unmapped in a finalizer. Ensure that the cache stays alive
|
||||
// until after the call to hashimotoLight so it's not unmapped while being used.
|
||||
runtime.KeepAlive(cache)
|
||||
}
|
||||
// Verify the calculated values against the ones provided in the header
|
||||
if !bytes.Equal(header.MixDigest[:], digest) {
|
||||
return errInvalidMixDigest
|
||||
}
|
||||
target := new(big.Int).Div(two256, header.Difficulty)
|
||||
if new(big.Int).SetBytes(result).Cmp(target) > 0 {
|
||||
return errInvalidPoW
|
||||
}
|
||||
return nil
|
||||
}
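The heart of the check above is the final boundary comparison: the 256-bit hashimoto result, interpreted as an integer, must not exceed 2^256 divided by the header difficulty. A minimal, standalone sketch of just that comparison (the all-zero result is a placeholder; real values come from hashimotoLight or hashimotoFull):

package main

import (
	"fmt"
	"math/big"
)

// meetsTarget reports whether a 32-byte PoW result satisfies the difficulty,
// i.e. result <= 2^256 / difficulty.
func meetsTarget(result []byte, difficulty *big.Int) bool {
	two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	target := new(big.Int).Div(two256, difficulty)
	return new(big.Int).SetBytes(result).Cmp(target) <= 0
}

func main() {
	// An all-zero result trivially meets any positive difficulty's target.
	fmt.Println(meetsTarget(make([]byte, 32), big.NewInt(1_000_000))) // true
}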
|
||||
|
||||
// Prepare implements consensus.Engine, initializing the difficulty field of a
|
||||
// header to conform to the ethash protocol. The changes are done inline.
|
||||
func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
|
||||
|
@ -18,503 +18,26 @@
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/edsrzf/mmap-go"
|
||||
lrupkg "github.com/ethereum/go-ethereum/common/lru"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
|
||||
|
||||
var (
|
||||
// two256 is a big integer representing 2^256
|
||||
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
|
||||
// sharedEthash is a full instance that can be shared between multiple users.
|
||||
sharedEthash *Ethash
|
||||
|
||||
// algorithmRevision is the data structure version used for file naming.
|
||||
algorithmRevision = 23
|
||||
|
||||
// dumpMagic is a dataset dump header to sanity check a data dump.
|
||||
dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
|
||||
)
|
||||
|
||||
func init() {
|
||||
sharedConfig := Config{
|
||||
PowMode: ModeNormal,
|
||||
CachesInMem: 3,
|
||||
DatasetsInMem: 1,
|
||||
}
|
||||
sharedEthash = New(sharedConfig, nil, false)
|
||||
}
|
||||
|
||||
// isLittleEndian returns whether the local system is running in little or big
|
||||
// endian byte order.
|
||||
func isLittleEndian() bool {
|
||||
n := uint32(0x01020304)
|
||||
return *(*byte)(unsafe.Pointer(&n)) == 0x04
|
||||
}
|
||||
|
||||
// memoryMap tries to memory map a file of uint32s for read only access.
|
||||
func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
|
||||
file, err := os.OpenFile(path, os.O_RDONLY, 0644)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
mem, buffer, err := memoryMapFile(file, false)
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
for i, magic := range dumpMagic {
|
||||
if buffer[i] != magic {
|
||||
mem.Unmap()
|
||||
file.Close()
|
||||
return nil, nil, nil, ErrInvalidDumpMagic
|
||||
}
|
||||
}
|
||||
if lock {
|
||||
if err := mem.Lock(); err != nil {
|
||||
mem.Unmap()
|
||||
file.Close()
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
}
|
||||
return file, mem, buffer[len(dumpMagic):], err
|
||||
}
|
||||
|
||||
// memoryMapFile tries to memory map an already opened file descriptor.
|
||||
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
|
||||
// Try to memory map the file
|
||||
flag := mmap.RDONLY
|
||||
if write {
|
||||
flag = mmap.RDWR
|
||||
}
|
||||
mem, err := mmap.Map(file, flag, 0)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// The file is now memory-mapped. Create a []uint32 view of the file.
|
||||
var view []uint32
|
||||
header := (*reflect.SliceHeader)(unsafe.Pointer(&view))
|
||||
header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&mem)).Data
|
||||
header.Cap = len(mem) / 4
|
||||
header.Len = header.Cap
|
||||
return mem, view, nil
|
||||
}
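The reflect.SliceHeader manipulation above reinterprets the mmap'ed byte slice as a []uint32 without copying. A sketch of the same trick written with unsafe.Slice (available since Go 1.17), shown only as an illustration of the technique, not as the package's code:

package main

import (
	"fmt"
	"unsafe"
)

// uint32View returns the given byte slice reinterpreted as a []uint32,
// sharing the same backing memory.
func uint32View(b []byte) []uint32 {
	if len(b) < 4 {
		return nil
	}
	return unsafe.Slice((*uint32)(unsafe.Pointer(&b[0])), len(b)/4)
}

func main() {
	// These happen to be the little-endian encodings of the dumpMagic words.
	buf := []byte{0xfe, 0xca, 0xdd, 0xba, 0xad, 0xde, 0xe1, 0xfe}
	fmt.Printf("%#x\n", uint32View(buf)) // [0xbaddcafe 0xfee1dead] on little-endian hosts
}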
|
||||
|
||||
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
|
||||
// access, fill it with the data from a generator and then move it into the final
|
||||
// path requested.
|
||||
func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
|
||||
// Ensure the data folder exists
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
// Create a huge temporary empty file to fill with data
|
||||
temp := path + "." + strconv.Itoa(rand.Int())
|
||||
|
||||
dump, err := os.Create(temp)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
|
||||
dump.Close()
|
||||
os.Remove(temp)
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
// Memory map the file for writing and fill it with the generator
|
||||
mem, buffer, err := memoryMapFile(dump, true)
|
||||
if err != nil {
|
||||
dump.Close()
|
||||
os.Remove(temp)
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
copy(buffer, dumpMagic)
|
||||
|
||||
data := buffer[len(dumpMagic):]
|
||||
generator(data)
|
||||
|
||||
if err := mem.Unmap(); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := dump.Close(); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := os.Rename(temp, path); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return memoryMap(path, lock)
|
||||
}
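Stripped of the mmap details, the function above follows a generate-into-a-temp-file-then-rename pattern, so a half-written cache or DAG never appears under its final name. A minimal sketch of that pattern with placeholder file names:

package main

import (
	"math/rand"
	"os"
	"path/filepath"
	"strconv"
)

// writeAtomically fills a temporary file next to path and only renames it into
// place once the content has been written and closed successfully.
func writeAtomically(path string, fill func(f *os.File) error) error {
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	temp := path + "." + strconv.Itoa(rand.Int())
	f, err := os.Create(temp)
	if err != nil {
		return err
	}
	if err := fill(f); err != nil {
		f.Close()
		os.Remove(temp)
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(temp)
		return err
	}
	return os.Rename(temp, path) // atomic within the same filesystem
}

func main() {
	path := filepath.Join(os.TempDir(), "ethash-demo.bin") // placeholder path
	_ = writeAtomically(path, func(f *os.File) error {
		_, err := f.WriteString("generated data")
		return err
	})
}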
|
||||
|
||||
type cacheOrDataset interface {
|
||||
*cache | *dataset
|
||||
}
|
||||
|
||||
// lru tracks caches or datasets by their last use time, keeping at most N of them.
|
||||
type lru[T cacheOrDataset] struct {
|
||||
what string
|
||||
new func(epoch uint64) T
|
||||
mu sync.Mutex
|
||||
// Items are kept in a LRU cache, but there is a special case:
|
||||
// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
|
||||
cache lrupkg.BasicLRU[uint64, T]
|
||||
future uint64
|
||||
futureItem T
|
||||
}
|
||||
|
||||
// newlru creates a new least-recently-used cache for either the verification caches
|
||||
// or the mining datasets.
|
||||
func newlru[T cacheOrDataset](maxItems int, new func(epoch uint64) T) *lru[T] {
|
||||
var what string
|
||||
switch any(T(nil)).(type) {
|
||||
case *cache:
|
||||
what = "cache"
|
||||
case *dataset:
|
||||
what = "dataset"
|
||||
default:
|
||||
panic("unknown type")
|
||||
}
|
||||
return &lru[T]{
|
||||
what: what,
|
||||
new: new,
|
||||
cache: lrupkg.NewBasicLRU[uint64, T](maxItems),
|
||||
}
|
||||
}
|
||||
|
||||
// get retrieves or creates an item for the given epoch. The first return value is always
|
||||
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
|
||||
// the near future.
|
||||
func (lru *lru[T]) get(epoch uint64) (item, future T) {
|
||||
lru.mu.Lock()
|
||||
defer lru.mu.Unlock()
|
||||
|
||||
// Get or create the item for the requested epoch.
|
||||
item, ok := lru.cache.Get(epoch)
|
||||
if !ok {
|
||||
if lru.future > 0 && lru.future == epoch {
|
||||
item = lru.futureItem
|
||||
} else {
|
||||
log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
|
||||
item = lru.new(epoch)
|
||||
}
|
||||
lru.cache.Add(epoch, item)
|
||||
}
|
||||
// Update the 'future item' if epoch is larger than previously seen.
|
||||
if epoch < maxEpoch-1 && lru.future < epoch+1 {
|
||||
log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
|
||||
future = lru.new(epoch + 1)
|
||||
lru.future = epoch + 1
|
||||
lru.futureItem = future
|
||||
}
|
||||
return item, future
|
||||
}
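The "future item" bookkeeping above means that asking for epoch N also hands back a freshly created item for N+1, so the caller can start generating it in the background and never block at the epoch boundary. A toy, package-independent sketch of that idea (hypothetical types, strings standing in for caches):

package main

import "fmt"

type epochStore struct {
	items      map[uint64]string
	future     uint64
	futureItem string
}

// get returns the item for epoch and, when epoch advances, a pre-built item
// for epoch+1 that the caller is expected to finish preparing in the background.
func (s *epochStore) get(epoch uint64, build func(uint64) string) (string, string) {
	item, ok := s.items[epoch]
	if !ok {
		if s.future != 0 && s.future == epoch {
			item = s.futureItem // the previously pre-built item becomes current
		} else {
			item = build(epoch)
		}
		s.items[epoch] = item
	}
	var future string
	if s.future < epoch+1 {
		future = build(epoch + 1)
		s.future, s.futureItem = epoch+1, future
	}
	return item, future
}

func main() {
	s := &epochStore{items: map[uint64]string{}}
	build := func(e uint64) string { return fmt.Sprintf("cache-%d", e) }
	fmt.Println(s.get(1, build)) // cache-1 cache-2
	fmt.Println(s.get(2, build)) // cache-2 cache-3  (the future item was reused)
}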
|
||||
|
||||
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
|
||||
type cache struct {
|
||||
epoch uint64 // Epoch for which this cache is relevant
|
||||
dump *os.File // File descriptor of the memory mapped cache
|
||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
||||
cache []uint32 // The actual cache data content (may be memory mapped)
|
||||
once sync.Once // Ensures the cache is generated only once
|
||||
}
|
||||
|
||||
// newCache creates a new ethash verification cache.
|
||||
func newCache(epoch uint64) *cache {
|
||||
return &cache{epoch: epoch}
|
||||
}
|
||||
|
||||
// generate ensures that the cache content is generated before use.
|
||||
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
|
||||
c.once.Do(func() {
|
||||
size := cacheSize(c.epoch*epochLength + 1)
|
||||
seed := seedHash(c.epoch*epochLength + 1)
|
||||
if test {
|
||||
size = 1024
|
||||
}
|
||||
// If we don't store anything on disk, generate and return.
|
||||
if dir == "" {
|
||||
c.cache = make([]uint32, size/4)
|
||||
generateCache(c.cache, c.epoch, seed)
|
||||
return
|
||||
}
|
||||
// Disk storage is needed, this will get fancy
|
||||
var endian string
|
||||
if !isLittleEndian() {
|
||||
endian = ".be"
|
||||
}
|
||||
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
||||
logger := log.New("epoch", c.epoch)
|
||||
|
||||
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
||||
// cache becomes unused.
|
||||
runtime.SetFinalizer(c, (*cache).finalizer)
|
||||
|
||||
// Try to load the file from disk and memory map it
|
||||
var err error
|
||||
c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
|
||||
if err == nil {
|
||||
logger.Debug("Loaded old ethash cache from disk")
|
||||
return
|
||||
}
|
||||
logger.Debug("Failed to load old ethash cache", "err", err)
|
||||
|
||||
// No previous cache available, create a new cache file to fill
|
||||
c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
|
||||
if err != nil {
|
||||
logger.Error("Failed to generate mapped ethash cache", "err", err)
|
||||
|
||||
c.cache = make([]uint32, size/4)
|
||||
generateCache(c.cache, c.epoch, seed)
|
||||
}
|
||||
// Iterate over all previous instances and delete old ones
|
||||
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
|
||||
seed := seedHash(uint64(ep)*epochLength + 1)
|
||||
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s*", algorithmRevision, seed[:8], endian))
|
||||
files, _ := filepath.Glob(path) // find also the temp files that are generated.
|
||||
for _, file := range files {
|
||||
os.Remove(file)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// finalizer unmaps the memory and closes the file.
|
||||
func (c *cache) finalizer() {
|
||||
if c.mmap != nil {
|
||||
c.mmap.Unmap()
|
||||
c.dump.Close()
|
||||
c.mmap, c.dump = nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
|
||||
type dataset struct {
|
||||
epoch uint64 // Epoch for which this cache is relevant
|
||||
dump *os.File // File descriptor of the memory mapped cache
|
||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
||||
dataset []uint32 // The actual cache data content
|
||||
once sync.Once // Ensures the cache is generated only once
|
||||
done atomic.Bool // Atomic flag to determine generation status
|
||||
}
|
||||
|
||||
// newDataset creates a new ethash mining dataset and returns it as a plain Go
|
||||
// interface to be usable in an LRU cache.
|
||||
func newDataset(epoch uint64) *dataset {
|
||||
return &dataset{epoch: epoch}
|
||||
}
|
||||
|
||||
// generate ensures that the dataset content is generated before use.
|
||||
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
|
||||
d.once.Do(func() {
|
||||
// Mark the dataset generated after we're done. This is needed for remote mining.
|
||||
defer d.done.Store(true)
|
||||
|
||||
csize := cacheSize(d.epoch*epochLength + 1)
|
||||
dsize := datasetSize(d.epoch*epochLength + 1)
|
||||
seed := seedHash(d.epoch*epochLength + 1)
|
||||
if test {
|
||||
csize = 1024
|
||||
dsize = 32 * 1024
|
||||
}
|
||||
// If we don't store anything on disk, generate and return
|
||||
if dir == "" {
|
||||
cache := make([]uint32, csize/4)
|
||||
generateCache(cache, d.epoch, seed)
|
||||
|
||||
d.dataset = make([]uint32, dsize/4)
|
||||
generateDataset(d.dataset, d.epoch, cache)
|
||||
|
||||
return
|
||||
}
|
||||
// Disk storage is needed, this will get fancy
|
||||
var endian string
|
||||
if !isLittleEndian() {
|
||||
endian = ".be"
|
||||
}
|
||||
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
||||
logger := log.New("epoch", d.epoch)
|
||||
|
||||
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
||||
// cache becomes unused.
|
||||
runtime.SetFinalizer(d, (*dataset).finalizer)
|
||||
|
||||
// Try to load the file from disk and memory map it
|
||||
var err error
|
||||
d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
|
||||
if err == nil {
|
||||
logger.Debug("Loaded old ethash dataset from disk")
|
||||
return
|
||||
}
|
||||
logger.Debug("Failed to load old ethash dataset", "err", err)
|
||||
|
||||
// No previous dataset available, create a new dataset file to fill
|
||||
cache := make([]uint32, csize/4)
|
||||
generateCache(cache, d.epoch, seed)
|
||||
|
||||
d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
|
||||
if err != nil {
|
||||
logger.Error("Failed to generate mapped ethash dataset", "err", err)
|
||||
|
||||
d.dataset = make([]uint32, dsize/4)
|
||||
generateDataset(d.dataset, d.epoch, cache)
|
||||
}
|
||||
// Iterate over all previous instances and delete old ones
|
||||
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
|
||||
seed := seedHash(uint64(ep)*epochLength + 1)
|
||||
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
||||
os.Remove(path)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// generated returns whether this particular dataset finished generating already
|
||||
// or not (it may not have been started at all). This is useful for remote miners
|
||||
// to default to verification caches instead of blocking on DAG generations.
|
||||
func (d *dataset) generated() bool {
|
||||
return d.done.Load()
|
||||
}
|
||||
|
||||
// finalizer closes any open file handles and memory maps.
|
||||
func (d *dataset) finalizer() {
|
||||
if d.mmap != nil {
|
||||
d.mmap.Unmap()
|
||||
d.dump.Close()
|
||||
d.mmap, d.dump = nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// MakeCache generates a new ethash cache and optionally stores it to disk.
|
||||
func MakeCache(block uint64, dir string) {
|
||||
c := cache{epoch: block / epochLength}
|
||||
c.generate(dir, math.MaxInt32, false, false)
|
||||
}
|
||||
|
||||
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
|
||||
func MakeDataset(block uint64, dir string) {
|
||||
d := dataset{epoch: block / epochLength}
|
||||
d.generate(dir, math.MaxInt32, false, false)
|
||||
}
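These two helpers backed the old `geth makecache` / `geth makedag` commands: they pre-generate the verification cache and mining DAG for the epoch containing the given block number. A sketch of driving them directly, assuming the pre-removal package is still importable at this path; the output directory is a placeholder and the DAG call is left disabled because a full DAG is several gigabytes:

package main

import (
	"os"
	"path/filepath"

	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	dir := filepath.Join(os.TempDir(), "ethash-demo") // hypothetical output directory

	ethash.MakeCache(30_000, dir) // verification cache for epoch 1
	// ethash.MakeDataset(30_000, dir) // full DAG for epoch 1 (several GiB), left disabled
}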
|
||||
|
||||
// Mode defines the type and amount of PoW verification an ethash engine makes.
|
||||
type Mode uint
|
||||
|
||||
const (
|
||||
ModeNormal Mode = iota
|
||||
ModeShared
|
||||
ModeTest
|
||||
ModeFake
|
||||
ModeFullFake
|
||||
)
|
||||
|
||||
// Config are the configuration parameters of the ethash.
|
||||
type Config struct {
|
||||
CacheDir string
|
||||
CachesInMem int
|
||||
CachesOnDisk int
|
||||
CachesLockMmap bool
|
||||
DatasetDir string
|
||||
DatasetsInMem int
|
||||
DatasetsOnDisk int
|
||||
DatasetsLockMmap bool
|
||||
PowMode Mode
|
||||
|
||||
// When set, notifications sent by the remote sealer will
|
||||
// be block header JSON objects instead of work package arrays.
|
||||
NotifyFull bool
|
||||
|
||||
Log log.Logger `toml:"-"`
|
||||
}
|
||||
|
||||
// Ethash is a consensus engine based on proof-of-work implementing the ethash
|
||||
// algorithm.
|
||||
type Ethash struct {
|
||||
config Config
|
||||
|
||||
caches *lru[*cache] // In memory caches to avoid regenerating too often
|
||||
datasets *lru[*dataset] // In memory datasets to avoid regenerating too often
|
||||
|
||||
// Mining related fields
|
||||
rand *rand.Rand // Properly seeded random source for nonces
|
||||
threads int // Number of threads to mine on if mining
|
||||
update chan struct{} // Notification channel to update mining parameters
|
||||
hashrate metrics.Meter // Meter tracking the average hashrate
|
||||
remote *remoteSealer
|
||||
|
||||
// The fields below are hooks for testing
|
||||
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
||||
fakeFail uint64 // Block number which fails PoW check even in fake mode
|
||||
fakeDelay time.Duration // Time delay to sleep for before returning from verify
|
||||
|
||||
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
|
||||
closeOnce sync.Once // Ensures exit channel will not be closed twice.
|
||||
fakeFail *uint64 // Block number which fails PoW check even in fake mode
|
||||
fakeDelay *time.Duration // Time delay to sleep for before returning from verify
|
||||
fakeFull bool // Accepts everything as valid
|
||||
}
|
||||
|
||||
// New creates a full sized ethash PoW scheme and starts a background thread for
|
||||
// remote mining, also optionally notifying a batch of remote services of new work
|
||||
// packages.
|
||||
func New(config Config, notify []string, noverify bool) *Ethash {
|
||||
if config.Log == nil {
|
||||
config.Log = log.Root()
|
||||
}
|
||||
if config.CachesInMem <= 0 {
|
||||
config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
||||
config.CachesInMem = 1
|
||||
}
|
||||
if config.CacheDir != "" && config.CachesOnDisk > 0 {
|
||||
config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
|
||||
}
|
||||
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
|
||||
config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
|
||||
}
|
||||
ethash := &Ethash{
|
||||
config: config,
|
||||
caches: newlru(config.CachesInMem, newCache),
|
||||
datasets: newlru(config.DatasetsInMem, newDataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeterForced(),
|
||||
}
|
||||
if config.PowMode == ModeShared {
|
||||
ethash.shared = sharedEthash
|
||||
}
|
||||
ethash.remote = startRemoteSealer(ethash, notify, noverify)
|
||||
return ethash
|
||||
}
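For the tests that keep using the heavy engine, construction before this change looked like the sketch below: ModeTest shrinks the cache and dataset to the tiny 1 KiB / 32 KiB sizes used elsewhere in this diff, and Close shuts the remote sealer down again. The import path assumes the pre-removal package layout.

package main

import "github.com/ethereum/go-ethereum/consensus/ethash"

func main() {
	engine := ethash.New(ethash.Config{PowMode: ethash.ModeTest}, nil, false)
	defer engine.Close()

	// Equivalent shorthand used throughout the tests:
	tester := ethash.NewTester(nil, false)
	defer tester.Close()
}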
|
||||
|
||||
// NewTester creates a small sized ethash PoW scheme useful only for testing
|
||||
// purposes.
|
||||
func NewTester(notify []string, noverify bool) *Ethash {
|
||||
return New(Config{PowMode: ModeTest}, notify, noverify)
|
||||
}
|
||||
|
||||
// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
|
||||
// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
|
||||
// all blocks' seal as valid, though they still have to conform to the Ethereum
|
||||
// consensus rules.
|
||||
func NewFaker() *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
}
|
||||
return new(Ethash)
|
||||
}
|
||||
|
||||
// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
|
||||
@ -522,11 +45,7 @@ func NewFaker() *Ethash {
|
||||
// still have to conform to the Ethereum consensus rules.
|
||||
func NewFakeFailer(fail uint64) *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
fakeFail: fail,
|
||||
fakeFail: &fail,
|
||||
}
|
||||
}
|
||||
|
||||
@ -535,11 +54,7 @@ func NewFakeFailer(fail uint64) *Ethash {
|
||||
// they still have to conform to the Ethereum consensus rules.
|
||||
func NewFakeDelayer(delay time.Duration) *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
fakeDelay: delay,
|
||||
fakeDelay: &delay,
|
||||
}
|
||||
}
|
||||
|
||||
@ -547,154 +62,24 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
|
||||
// accepts all blocks as valid, without checking any consensus rules whatsoever.
|
||||
func NewFullFaker() *Ethash {
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFullFake,
|
||||
Log: log.Root(),
|
||||
},
|
||||
fakeFull: true,
|
||||
}
|
||||
}
|
||||
|
||||
// NewShared creates a full sized ethash PoW shared between all requesters running
|
||||
// in the same process.
|
||||
func NewShared() *Ethash {
|
||||
return &Ethash{shared: sharedEthash}
|
||||
}
|
||||
|
||||
// Close closes the exit channel to notify all backend threads exiting.
|
||||
func (ethash *Ethash) Close() error {
|
||||
return ethash.StopRemoteSealer()
|
||||
}
|
||||
|
||||
// StopRemoteSealer stops the remote sealer
|
||||
func (ethash *Ethash) StopRemoteSealer() error {
|
||||
ethash.closeOnce.Do(func() {
|
||||
// Short circuit if the exit channel is not allocated.
|
||||
if ethash.remote == nil {
|
||||
return
|
||||
}
|
||||
close(ethash.remote.requestExit)
|
||||
<-ethash.remote.exitCh
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// cache tries to retrieve a verification cache for the specified block number
|
||||
// by first checking against a list of in-memory caches, then against caches
|
||||
// stored on disk, and finally generating one if none can be found.
|
||||
func (ethash *Ethash) cache(block uint64) *cache {
|
||||
epoch := block / epochLength
|
||||
current, future := ethash.caches.get(epoch)
|
||||
|
||||
// Wait for generation finish.
|
||||
current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
|
||||
|
||||
// If we need a new future cache, now's a good time to regenerate it.
|
||||
if future != nil {
|
||||
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
// dataset tries to retrieve a mining dataset for the specified block number
|
||||
// by first checking against a list of in-memory datasets, then against DAGs
|
||||
// stored on disk, and finally generating one if none can be found.
|
||||
//
|
||||
// If async is specified, not only the future but the current DAG is also
|
||||
// generated on a background thread.
|
||||
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
|
||||
// Retrieve the requested ethash dataset
|
||||
epoch := block / epochLength
|
||||
current, future := ethash.datasets.get(epoch)
|
||||
|
||||
// If async is specified, generate everything in a background thread
|
||||
if async && !current.generated() {
|
||||
go func() {
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
|
||||
if future != nil {
|
||||
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
// Either blocking generation was requested, or already done
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
|
||||
if future != nil {
|
||||
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
// Threads returns the number of mining threads currently enabled. This doesn't
|
||||
// necessarily mean that mining is running!
|
||||
func (ethash *Ethash) Threads() int {
|
||||
ethash.lock.Lock()
|
||||
defer ethash.lock.Unlock()
|
||||
|
||||
return ethash.threads
|
||||
}
|
||||
|
||||
// SetThreads updates the number of mining threads currently enabled. Calling
|
||||
// this method does not start mining, only sets the thread count. If zero is
|
||||
// specified, the miner will use all cores of the machine. Setting a thread
|
||||
// count below zero is allowed and will cause the miner to idle, without any
|
||||
// work being done.
|
||||
func (ethash *Ethash) SetThreads(threads int) {
|
||||
ethash.lock.Lock()
|
||||
defer ethash.lock.Unlock()
|
||||
|
||||
// If we're running a shared PoW, set the thread count on that instead
|
||||
if ethash.shared != nil {
|
||||
ethash.shared.SetThreads(threads)
|
||||
return
|
||||
}
|
||||
// Update the threads and ping any running seal to pull in any changes
|
||||
ethash.threads = threads
|
||||
select {
|
||||
case ethash.update <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
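A short usage sketch of the thread-count semantics described above, continuing from a hypothetical test-mode engine: zero means "use every core", while a negative count parks local mining entirely and leaves only remote work distribution running.

package main

import (
	"runtime"

	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	engine := ethash.NewTester(nil, false) // hypothetical small test-mode engine
	defer engine.Close()

	engine.SetThreads(runtime.NumCPU()) // explicit core count
	engine.SetThreads(0)                // zero also means "all cores"
	engine.SetThreads(-1)               // negative: idle locally, remote mining only
}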
|
||||
|
||||
// Hashrate implements PoW, returning the measured rate of the search invocations
|
||||
// per second over the last minute.
|
||||
// Note the returned hashrate includes local hashrate, but also includes the total
|
||||
// hashrate of all remote miners.
|
||||
func (ethash *Ethash) Hashrate() float64 {
|
||||
// Short circuit if we are not running ethash in normal/test mode.
|
||||
if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
|
||||
return ethash.hashrate.Rate1()
|
||||
}
|
||||
var res = make(chan uint64, 1)
|
||||
|
||||
select {
|
||||
case ethash.remote.fetchRateCh <- res:
|
||||
case <-ethash.remote.exitCh:
|
||||
// Return local hashrate only if ethash is stopped.
|
||||
return ethash.hashrate.Rate1()
|
||||
}
|
||||
|
||||
// Gather total submitted hash rate of remote sealers.
|
||||
return ethash.hashrate.Rate1() + float64(<-res)
|
||||
}
|
||||
|
||||
// APIs implements consensus.Engine, returning the user facing RPC APIs.
|
||||
// APIs implements consensus.Engine, returning no APIs as ethash is an empty
|
||||
// shell in the post-merge world.
|
||||
func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
|
||||
// In order to ensure backward compatibility, we expose ethash RPC APIs
|
||||
// to both eth and ethash namespaces.
|
||||
return []rpc.API{
|
||||
{
|
||||
Namespace: "eth",
|
||||
Service: &API{ethash},
|
||||
},
|
||||
{
|
||||
Namespace: "ethash",
|
||||
Service: &API{ethash},
|
||||
},
|
||||
}
|
||||
return []rpc.API{}
|
||||
}
|
||||
|
||||
// SeedHash is the seed to use for generating a verification cache and the mining
|
||||
// dataset.
|
||||
func SeedHash(block uint64) []byte {
|
||||
return seedHash(block)
}
|
||||
// Seal generates a new sealing request for the given input block and pushes
|
||||
// the result into the given channel. For the ethash engine, this method will
|
||||
// just panic as sealing is not supported anymore.
|
||||
func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
|
||||
panic("ethash (pow) sealing not supported any more")
|
||||
}
|
||||
|
@ -1,177 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// Tests that ethash works correctly in test mode.
|
||||
func TestTestMode(t *testing.T) {
|
||||
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
|
||||
|
||||
ethash := NewTester(nil, false)
|
||||
defer ethash.Close()
|
||||
|
||||
results := make(chan *types.Block)
|
||||
err := ethash.Seal(nil, types.NewBlockWithHeader(header), results, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to seal block: %v", err)
|
||||
}
|
||||
select {
|
||||
case block := <-results:
|
||||
header.Nonce = types.EncodeNonce(block.Nonce())
|
||||
header.MixDigest = block.MixDigest()
|
||||
if err := ethash.verifySeal(nil, header, false); err != nil {
|
||||
t.Fatalf("unexpected verification error: %v", err)
|
||||
}
|
||||
case <-time.NewTimer(4 * time.Second).C:
|
||||
t.Error("sealing result timeout")
|
||||
}
|
||||
}
|
||||
|
||||
// This test checks that cache lru logic doesn't crash under load.
|
||||
// It reproduces https://github.com/ethereum/go-ethereum/issues/14943
|
||||
func TestCacheFileEvict(t *testing.T) {
|
||||
// TODO: t.TempDir fails to remove the directory on Windows
|
||||
// \AppData\Local\Temp\1\TestCacheFileEvict2179435125\001\cache-R23-0000000000000000: Access is denied.
|
||||
tmpdir, err := os.MkdirTemp("", "ethash-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
config := Config{
|
||||
CachesInMem: 3,
|
||||
CachesOnDisk: 10,
|
||||
CacheDir: tmpdir,
|
||||
PowMode: ModeTest,
|
||||
}
|
||||
e := New(config, nil, false)
|
||||
defer e.Close()
|
||||
|
||||
workers := 8
|
||||
epochs := 100
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(workers)
|
||||
for i := 0; i < workers; i++ {
|
||||
go verifyTest(&wg, e, i, epochs)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) {
|
||||
defer wg.Done()
|
||||
|
||||
const wiggle = 4 * epochLength
|
||||
r := rand.New(rand.NewSource(int64(workerIndex)))
|
||||
for epoch := 0; epoch < epochs; epoch++ {
|
||||
block := int64(epoch)*epochLength - wiggle/2 + r.Int63n(wiggle)
|
||||
if block < 0 {
|
||||
block = 0
|
||||
}
|
||||
header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)}
|
||||
e.verifySeal(nil, header, false)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteSealer(t *testing.T) {
|
||||
ethash := NewTester(nil, false)
|
||||
defer ethash.Close()
|
||||
|
||||
api := &API{ethash}
|
||||
if _, err := api.GetWork(); err != errNoMiningWork {
|
||||
t.Error("expect to return an error indicate there is no mining work")
|
||||
}
|
||||
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
sealhash := ethash.SealHash(header)
|
||||
|
||||
// Push new work.
|
||||
results := make(chan *types.Block)
|
||||
ethash.Seal(nil, block, results, nil)
|
||||
|
||||
var (
|
||||
work [4]string
|
||||
err error
|
||||
)
|
||||
if work, err = api.GetWork(); err != nil || work[0] != sealhash.Hex() {
|
||||
t.Error("expect to return a mining work has same hash")
|
||||
}
|
||||
|
||||
if res := api.SubmitWork(types.BlockNonce{}, sealhash, common.Hash{}); res {
|
||||
t.Error("expect to return false when submit a fake solution")
|
||||
}
|
||||
// Push new block with same block number to replace the original one.
|
||||
header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)}
|
||||
block = types.NewBlockWithHeader(header)
|
||||
sealhash = ethash.SealHash(header)
|
||||
ethash.Seal(nil, block, results, nil)
|
||||
|
||||
if work, err = api.GetWork(); err != nil || work[0] != sealhash.Hex() {
|
||||
t.Error("expect to return the latest pushed work")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashrate(t *testing.T) {
|
||||
var (
|
||||
hashrate = []hexutil.Uint64{100, 200, 300}
|
||||
expect uint64
|
||||
ids = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")}
|
||||
)
|
||||
ethash := NewTester(nil, false)
|
||||
defer ethash.Close()
|
||||
|
||||
if tot := ethash.Hashrate(); tot != 0 {
|
||||
t.Error("expect the result should be zero")
|
||||
}
|
||||
|
||||
api := &API{ethash}
|
||||
for i := 0; i < len(hashrate); i += 1 {
|
||||
if res := api.SubmitHashrate(hashrate[i], ids[i]); !res {
|
||||
t.Error("remote miner submit hashrate failed")
|
||||
}
|
||||
expect += uint64(hashrate[i])
|
||||
}
|
||||
if tot := ethash.Hashrate(); tot != float64(expect) {
|
||||
t.Error("expect total hashrate should be same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClosedRemoteSealer(t *testing.T) {
|
||||
ethash := NewTester(nil, false)
|
||||
time.Sleep(1 * time.Second) // ensure exit channel is listening
|
||||
ethash.Close()
|
||||
|
||||
api := &API{ethash}
|
||||
if _, err := api.GetWork(); err != errEthashStopped {
|
||||
t.Error("expect to return an error to indicate ethash is stopped")
|
||||
}
|
||||
|
||||
if res := api.SubmitHashrate(hexutil.Uint64(100), common.HexToHash("a")); res {
|
||||
t.Error("expect to return false when submit hashrate to a stopped ethash")
|
||||
}
|
||||
}
|
@ -1,35 +0,0 @@
|
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// ensureSize expands the file to the given size. This is to prevent runtime
|
||||
// errors later on, if the underlying file expands beyond the disk capacity,
|
||||
// even though it ostensibly is already expanded, but due to being sparse
|
||||
// does not actually occupy the full declared size on disk.
|
||||
func ensureSize(f *os.File, size int64) error {
|
||||
// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
|
||||
return unix.Fallocate(int(f.Fd()), 0, 0, size)
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// ensureSize expands the file to the given size. This is to prevent runtime
|
||||
// errors later on, if the underlying file expands beyond the disk capacity,
|
||||
// even though it ostensibly is already expanded, but due to being sparse
|
||||
// does not actually occupy the full declared size on disk.
|
||||
func ensureSize(f *os.File, size int64) error {
|
||||
// On systems which do not support fallocate, we merely truncate it.
|
||||
// More robust alternatives would be to
|
||||
// - Use posix_fallocate, or
|
||||
// - explicitly fill the file with zeroes.
|
||||
return f.Truncate(size)
|
||||
}
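The comment above names two more robust alternatives to a plain Truncate; a sketch of the second one, explicitly writing zeroes so the blocks are really allocated rather than left sparse (hypothetical helper, not part of the package):

package main

import (
	"io"
	"os"
)

// ensureSizeByZeroFill writes size zero bytes to f so the space is actually
// allocated on disk, unlike a sparse truncate.
func ensureSizeByZeroFill(f *os.File, size int64) error {
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return err
	}
	_, err := io.CopyN(f, zeroReader{}, size)
	return err
}

type zeroReader struct{}

func (zeroReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func main() {
	f, err := os.CreateTemp("", "zerofill")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	if err := ensureSizeByZeroFill(f, 1<<20); err != nil { // 1 MiB of real zero bytes
		panic(err)
	}
}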
|
@ -1,451 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// staleThreshold is the maximum depth of the acceptable stale but valid ethash solution.
|
||||
staleThreshold = 7
|
||||
)
|
||||
|
||||
var (
|
||||
errNoMiningWork = errors.New("no mining work available yet")
|
||||
errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
|
||||
)
|
||||
|
||||
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
|
||||
// the block's difficulty requirements.
|
||||
func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
|
||||
// If we're running a fake PoW, simply return a 0 nonce immediately
|
||||
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
||||
header := block.Header()
|
||||
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
|
||||
select {
|
||||
case results <- block.WithSeal(header):
|
||||
default:
|
||||
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// If we're running a shared PoW, delegate sealing to it
|
||||
if ethash.shared != nil {
|
||||
return ethash.shared.Seal(chain, block, results, stop)
|
||||
}
|
||||
// Create a runner and the multiple search threads it directs
|
||||
abort := make(chan struct{})
|
||||
|
||||
ethash.lock.Lock()
|
||||
threads := ethash.threads
|
||||
if ethash.rand == nil {
|
||||
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
ethash.lock.Unlock()
|
||||
return err
|
||||
}
|
||||
ethash.rand = rand.New(rand.NewSource(seed.Int64()))
|
||||
}
|
||||
ethash.lock.Unlock()
|
||||
if threads == 0 {
|
||||
threads = runtime.NumCPU()
|
||||
}
|
||||
if threads < 0 {
|
||||
threads = 0 // Allows disabling local mining without extra logic around local/remote
|
||||
}
|
||||
// Push new work to remote sealer
|
||||
if ethash.remote != nil {
|
||||
ethash.remote.workCh <- &sealTask{block: block, results: results}
|
||||
}
|
||||
var (
|
||||
pend sync.WaitGroup
|
||||
locals = make(chan *types.Block)
|
||||
)
|
||||
for i := 0; i < threads; i++ {
|
||||
pend.Add(1)
|
||||
go func(id int, nonce uint64) {
|
||||
defer pend.Done()
|
||||
ethash.mine(block, id, nonce, abort, locals)
|
||||
}(i, uint64(ethash.rand.Int63()))
|
||||
}
|
||||
// Wait until sealing is terminated or a nonce is found
|
||||
go func() {
|
||||
var result *types.Block
|
||||
select {
|
||||
case <-stop:
|
||||
// Outside abort, stop all miner threads
|
||||
close(abort)
|
||||
case result = <-locals:
|
||||
// One of the threads found a block, abort all others
|
||||
select {
|
||||
case results <- result:
|
||||
default:
|
||||
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
|
||||
}
|
||||
close(abort)
|
||||
case <-ethash.update:
|
||||
// Thread count was changed on user request, restart
|
||||
close(abort)
|
||||
if err := ethash.Seal(chain, block, results, stop); err != nil {
|
||||
ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
|
||||
}
|
||||
}
|
||||
// Wait for all miners to terminate and return the block
|
||||
pend.Wait()
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// mine is the actual proof-of-work miner that searches for a nonce starting from
|
||||
// seed that results in a correct final block difficulty.
|
||||
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
|
||||
// Extract some data from the header
|
||||
var (
|
||||
header = block.Header()
|
||||
hash = ethash.SealHash(header).Bytes()
|
||||
target = new(big.Int).Div(two256, header.Difficulty)
|
||||
number = header.Number.Uint64()
|
||||
dataset = ethash.dataset(number, false)
|
||||
)
|
||||
// Start generating random nonces until we abort or find a good one
|
||||
var (
|
||||
attempts = int64(0)
|
||||
nonce = seed
|
||||
powBuffer = new(big.Int)
|
||||
)
|
||||
logger := ethash.config.Log.New("miner", id)
|
||||
logger.Trace("Started ethash search for new nonces", "seed", seed)
|
||||
search:
|
||||
for {
|
||||
select {
|
||||
case <-abort:
|
||||
// Mining terminated, update stats and abort
|
||||
logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
|
||||
ethash.hashrate.Mark(attempts)
|
||||
break search
|
||||
|
||||
default:
|
||||
// We don't have to update the hash rate on every nonce, so update after 2^X nonces
|
||||
attempts++
|
||||
if (attempts % (1 << 15)) == 0 {
|
||||
ethash.hashrate.Mark(attempts)
|
||||
attempts = 0
|
||||
}
|
||||
// Compute the PoW value of this nonce
|
||||
digest, result := hashimotoFull(dataset.dataset, hash, nonce)
|
||||
if powBuffer.SetBytes(result).Cmp(target) <= 0 {
|
||||
// Correct nonce found, create a new header with it
|
||||
header = types.CopyHeader(header)
|
||||
header.Nonce = types.EncodeNonce(nonce)
|
||||
header.MixDigest = common.BytesToHash(digest)
|
||||
|
||||
// Seal and return a block (if still needed)
|
||||
select {
|
||||
case found <- block.WithSeal(header):
|
||||
logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
|
||||
case <-abort:
|
||||
logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
|
||||
}
|
||||
break search
|
||||
}
|
||||
nonce++
|
||||
}
|
||||
}
|
||||
// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
|
||||
// during sealing so it's not unmapped while being read.
|
||||
runtime.KeepAlive(dataset)
|
||||
}
// This is the timeout for HTTP requests to notify external miners.
const remoteSealerTimeout = 1 * time.Second

type remoteSealer struct {
	works        map[common.Hash]*types.Block
	rates        map[common.Hash]hashrate
	currentBlock *types.Block
	currentWork  [4]string
	notifyCtx    context.Context
	cancelNotify context.CancelFunc // cancels all notification requests
	reqWG        sync.WaitGroup     // tracks notification request goroutines

	ethash       *Ethash
	noverify     bool
	notifyURLs   []string
	results      chan<- *types.Block
	workCh       chan *sealTask   // Notification channel to push new work and relative result channel to remote sealer
	fetchWorkCh  chan *sealWork   // Channel used for remote sealer to fetch mining work
	submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
	fetchRateCh  chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
	submitRateCh chan *hashrate   // Channel used for remote sealer to submit their mining hashrate
	requestExit  chan struct{}
	exitCh       chan struct{}
}

// sealTask wraps a seal block with relative result channel for remote sealer thread.
type sealTask struct {
	block   *types.Block
	results chan<- *types.Block
}

// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
	nonce     types.BlockNonce
	mixDigest common.Hash
	hash      common.Hash

	errc chan error
}

// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
	id   common.Hash
	ping time.Time
	rate uint64

	done chan struct{}
}

// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
	errc chan error
	res  chan [4]string
}
func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
	ctx, cancel := context.WithCancel(context.Background())
	s := &remoteSealer{
		ethash:       ethash,
		noverify:     noverify,
		notifyURLs:   urls,
		notifyCtx:    ctx,
		cancelNotify: cancel,
		works:        make(map[common.Hash]*types.Block),
		rates:        make(map[common.Hash]hashrate),
		workCh:       make(chan *sealTask),
		fetchWorkCh:  make(chan *sealWork),
		submitWorkCh: make(chan *mineResult),
		fetchRateCh:  make(chan chan uint64),
		submitRateCh: make(chan *hashrate),
		requestExit:  make(chan struct{}),
		exitCh:       make(chan struct{}),
	}
	go s.loop()
	return s
}

func (s *remoteSealer) loop() {
	defer func() {
		s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
		s.cancelNotify()
		s.reqWG.Wait()
		close(s.exitCh)
	}()

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case work := <-s.workCh:
			// Update current work with new received block.
			// Note the same work can be passed twice, happens when changing CPU threads.
			s.results = work.results
			s.makeWork(work.block)
			s.notifyWork()

		case work := <-s.fetchWorkCh:
			// Return current mining work to remote miner.
			if s.currentBlock == nil {
				work.errc <- errNoMiningWork
			} else {
				work.res <- s.currentWork
			}

		case result := <-s.submitWorkCh:
			// Verify submitted PoW solution based on maintained mining blocks.
			if s.submitWork(result.nonce, result.mixDigest, result.hash) {
				result.errc <- nil
			} else {
				result.errc <- errInvalidSealResult
			}

		case result := <-s.submitRateCh:
			// Trace remote sealer's hash rate by submitted value.
			s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
			close(result.done)

		case req := <-s.fetchRateCh:
			// Gather all hash rate submitted by remote sealer.
			var total uint64
			for _, rate := range s.rates {
				// this could overflow
				total += rate.rate
			}
			req <- total

		case <-ticker.C:
			// Clear stale submitted hash rate.
			for id, rate := range s.rates {
				if time.Since(rate.ping) > 10*time.Second {
					delete(s.rates, id)
				}
			}
			// Clear stale pending blocks
			if s.currentBlock != nil {
				for hash, block := range s.works {
					if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
						delete(s.works, hash)
					}
				}
			}

		case <-s.requestExit:
			return
		}
	}
}
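The ticker branch above drops any submitted hashrate entry that has not been refreshed within ten seconds, so an external miner must keep re-submitting to stay counted in the aggregate. A rough client-side sketch, assuming the standard eth_submitHashrate RPC method and a placeholder endpoint:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	rigID := common.HexToHash("0x01") // arbitrary identifier for this rig
	for {
		var accepted bool
		// Re-submit well inside the 10 second expiry window used by the loop
		// above, otherwise the entry is dropped from s.rates.
		if err := client.Call(&accepted, "eth_submitHashrate", hexutil.Uint64(1_000_000), rigID); err != nil {
			fmt.Println("submit failed:", err)
		}
		time.Sleep(5 * time.Second)
	}
}
```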
// makeWork creates a work package for external miner.
//
// The work package consists of 4 strings:
//
//	result[0], 32 bytes hex encoded current block header pow-hash
//	result[1], 32 bytes hex encoded seed hash used for DAG
//	result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
//	result[3], hex encoded block number
func (s *remoteSealer) makeWork(block *types.Block) {
	hash := s.ethash.SealHash(block.Header())
	s.currentWork[0] = hash.Hex()
	s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
	s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
	s.currentWork[3] = hexutil.EncodeBig(block.Number())

	// Trace the seal work fetched by remote sealer.
	s.currentBlock = block
	s.works[hash] = block
}

// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
func (s *remoteSealer) notifyWork() {
	work := s.currentWork

	// Encode the JSON payload of the notification. When NotifyFull is set,
	// this is the complete block header, otherwise it is a JSON array.
	var blob []byte
	if s.ethash.config.NotifyFull {
		blob, _ = json.Marshal(s.currentBlock.Header())
	} else {
		blob, _ = json.Marshal(work)
	}

	s.reqWG.Add(len(s.notifyURLs))
	for _, url := range s.notifyURLs {
		go s.sendNotification(s.notifyCtx, url, blob, work)
	}
}
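An external miner consumes exactly the four-element array assembled by makeWork, conventionally through the eth_getWork / eth_submitWork RPC pair. A hedged sketch of that round trip with the go-ethereum rpc client; the endpoint is a placeholder and the submitted solution is a dummy that a live node would reject:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Fetch the current work package assembled by makeWork.
	var work [4]string
	if err := client.Call(&work, "eth_getWork"); err != nil {
		panic(err)
	}
	fmt.Println("pow-hash:", work[0])
	fmt.Println("seed:    ", work[1])
	fmt.Println("target:  ", work[2])
	fmt.Println("number:  ", work[3])

	// After grinding a nonce externally, hand the solution back. The nonce and
	// mix digest below are dummies, so a real node will return false.
	var accepted bool
	if err := client.Call(&accepted, "eth_submitWork",
		types.BlockNonce{}, common.HexToHash(work[0]), common.Hash{}); err != nil {
		fmt.Println("submit failed:", err)
	}
	fmt.Println("accepted:", accepted)
}
```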
func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
	defer s.reqWG.Done()

	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(json))
	if err != nil {
		s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
		return
	}
	ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
	defer cancel()
	req = req.WithContext(ctx)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
	} else {
		s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
		resp.Body.Close()
	}
}

// submitWork verifies the submitted pow solution, returning whether the solution
// was accepted or not (a rejection can be caused by a bad pow as well as any
// other error, like no pending work or a stale mining result).
func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
	if s.currentBlock == nil {
		s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
		return false
	}
	// Make sure the work submitted is present
	block := s.works[sealhash]
	if block == nil {
		s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
		return false
	}
	// Verify the correctness of submitted result.
	header := block.Header()
	header.Nonce = nonce
	header.MixDigest = mixDigest

	start := time.Now()
	if !s.noverify {
		if err := s.ethash.verifySeal(nil, header, true); err != nil {
			s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
			return false
		}
	}
	// Make sure the result channel is assigned.
	if s.results == nil {
		s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
		return false
	}
	s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))

	// The solution seems to be valid, return it to the miner and notify acceptance.
	solution := block.WithSeal(header)

	// The submitted solution is within the scope of acceptance.
	if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
		select {
		case s.results <- solution:
			s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
			return true
		default:
			s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
			return false
		}
	}
	// The submitted block is too old to accept, drop it.
	s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
	return false
}
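The acceptance window at the end of submitWork is a plain block-number comparison against staleThreshold; the value of seven used below is inferred from the stale-submission test cases later in this diff (a solution at number 3 against current work at 9 is accepted, 10 against 17 is not), not read from the package:

```go
package main

import "fmt"

// staleThreshold mirrors the sealer constant: a solution may trail the current
// work package by at most this many blocks. Seven is an assumption derived
// from TestStaleSubmission.
const staleThreshold = 7

// acceptable reproduces the final check in submitWork.
func acceptable(solutionNumber, currentNumber uint64) bool {
	return solutionNumber+staleThreshold > currentNumber
}

func main() {
	fmt.Println(acceptable(10, 10)) // true: solution for the current package
	fmt.Println(acceptable(3, 9))   // true: stale but still inside the window
	fmt.Println(acceptable(10, 17)) // false: too old, dropped with a warning
}
```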
|
@ -1,298 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/internal/testlog"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// Tests whether remote HTTP servers are correctly notified of new work.
|
||||
func TestRemoteNotify(t *testing.T) {
|
||||
// Start a simple web server to capture notifications.
|
||||
sink := make(chan [3]string)
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work [3]string
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Errorf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create the custom ethash engine.
|
||||
ethash := NewTester([]string{server.URL}, false)
|
||||
defer ethash.Close()
|
||||
|
||||
// Stream a work task and ensure the notification bubbles out.
|
||||
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
|
||||
ethash.Seal(nil, block, nil, nil)
|
||||
select {
|
||||
case work := <-sink:
|
||||
if want := ethash.SealHash(header).Hex(); work[0] != want {
|
||||
t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want)
|
||||
}
|
||||
if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want {
|
||||
t.Errorf("work packet seed mismatch: have %s, want %s", work[1], want)
|
||||
}
|
||||
target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty)
|
||||
if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
|
||||
t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
|
||||
}
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatalf("notification timed out")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests whether remote HTTP servers are correctly notified of new work. (Full pending block body / --miner.notify.full)
|
||||
func TestRemoteNotifyFull(t *testing.T) {
|
||||
// Start a simple web server to capture notifications.
|
||||
sink := make(chan map[string]interface{})
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work map[string]interface{}
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Errorf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create the custom ethash engine.
|
||||
config := Config{
|
||||
PowMode: ModeTest,
|
||||
NotifyFull: true,
|
||||
Log: testlog.Logger(t, log.LvlWarn),
|
||||
}
|
||||
ethash := New(config, []string{server.URL}, false)
|
||||
defer ethash.Close()
|
||||
|
||||
// Stream a work task and ensure the notification bubbles out.
|
||||
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
|
||||
ethash.Seal(nil, block, nil, nil)
|
||||
select {
|
||||
case work := <-sink:
|
||||
if want := "0x" + strconv.FormatUint(header.Number.Uint64(), 16); work["number"] != want {
|
||||
t.Errorf("pending block number mismatch: have %v, want %v", work["number"], want)
|
||||
}
|
||||
if want := "0x" + header.Difficulty.Text(16); work["difficulty"] != want {
|
||||
t.Errorf("pending block difficulty mismatch: have %s, want %s", work["difficulty"], want)
|
||||
}
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatalf("notification timed out")
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that pushing work packages fast to the miner doesn't cause any data race
|
||||
// issues in the notifications.
|
||||
func TestRemoteMultiNotify(t *testing.T) {
|
||||
// Start a simple web server to capture notifications.
|
||||
sink := make(chan [3]string, 64)
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work [3]string
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Errorf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create the custom ethash engine.
|
||||
ethash := NewTester([]string{server.URL}, false)
|
||||
ethash.config.Log = testlog.Logger(t, log.LvlWarn)
|
||||
defer ethash.Close()
|
||||
|
||||
// Provide a results reader.
|
||||
// Otherwise the unread results will be logged asynchronously
|
||||
// and this can happen after the test is finished, causing a panic.
|
||||
results := make(chan *types.Block, cap(sink))
|
||||
|
||||
// Stream a lot of work tasks and ensure all the notifications bubble out.
|
||||
for i := 0; i < cap(sink); i++ {
|
||||
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
ethash.Seal(nil, block, results, nil)
|
||||
}
|
||||
|
||||
for i := 0; i < cap(sink); i++ {
|
||||
select {
|
||||
case <-sink:
|
||||
<-results
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("notification %d timed out", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that pushing work packages fast to the miner doesn't cause any data race
|
||||
// issues in the notifications. (Full pending block body / --miner.notify.full)
|
||||
func TestRemoteMultiNotifyFull(t *testing.T) {
|
||||
// Start a simple web server to capture notifications.
|
||||
sink := make(chan map[string]interface{}, 64)
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
blob, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
t.Errorf("failed to read miner notification: %v", err)
|
||||
}
|
||||
var work map[string]interface{}
|
||||
if err := json.Unmarshal(blob, &work); err != nil {
|
||||
t.Errorf("failed to unmarshal miner notification: %v", err)
|
||||
}
|
||||
sink <- work
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Create the custom ethash engine.
|
||||
config := Config{
|
||||
PowMode: ModeTest,
|
||||
NotifyFull: true,
|
||||
Log: testlog.Logger(t, log.LvlWarn),
|
||||
}
|
||||
ethash := New(config, []string{server.URL}, false)
|
||||
defer ethash.Close()
|
||||
|
||||
// Provide a results reader.
|
||||
// Otherwise the unread results will be logged asynchronously
|
||||
// and this can happen after the test is finished, causing a panic.
|
||||
results := make(chan *types.Block, cap(sink))
|
||||
|
||||
// Stream a lot of work tasks and ensure all the notifications bubble out.
|
||||
for i := 0; i < cap(sink); i++ {
|
||||
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
|
||||
block := types.NewBlockWithHeader(header)
|
||||
ethash.Seal(nil, block, results, nil)
|
||||
}
|
||||
|
||||
for i := 0; i < cap(sink); i++ {
|
||||
select {
|
||||
case <-sink:
|
||||
<-results
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("notification %d timed out", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests whether stale solutions are correctly processed.
|
||||
func TestStaleSubmission(t *testing.T) {
|
||||
ethash := NewTester(nil, true)
|
||||
defer ethash.Close()
|
||||
api := &API{ethash}
|
||||
|
||||
fakeNonce, fakeDigest := types.BlockNonce{0x01, 0x02, 0x03}, common.HexToHash("deadbeef")
|
||||
|
||||
testcases := []struct {
|
||||
headers []*types.Header
|
||||
submitIndex int
|
||||
submitRes bool
|
||||
}{
|
||||
// Case1: submit solution for the latest mining package
|
||||
{
|
||||
[]*types.Header{
|
||||
{ParentHash: common.BytesToHash([]byte{0xa}), Number: big.NewInt(1), Difficulty: big.NewInt(100000000)},
|
||||
},
|
||||
0,
|
||||
true,
|
||||
},
|
||||
// Case2: submit solution for the previous package but have same parent.
|
||||
{
|
||||
[]*types.Header{
|
||||
{ParentHash: common.BytesToHash([]byte{0xb}), Number: big.NewInt(2), Difficulty: big.NewInt(100000000)},
|
||||
{ParentHash: common.BytesToHash([]byte{0xb}), Number: big.NewInt(2), Difficulty: big.NewInt(100000001)},
|
||||
},
|
||||
0,
|
||||
true,
|
||||
},
|
||||
// Case3: submit stale but acceptable solution
|
||||
{
|
||||
[]*types.Header{
|
||||
{ParentHash: common.BytesToHash([]byte{0xc}), Number: big.NewInt(3), Difficulty: big.NewInt(100000000)},
|
||||
{ParentHash: common.BytesToHash([]byte{0xd}), Number: big.NewInt(9), Difficulty: big.NewInt(100000000)},
|
||||
},
|
||||
0,
|
||||
true,
|
||||
},
|
||||
// Case4: submit very old solution
|
||||
{
|
||||
[]*types.Header{
|
||||
{ParentHash: common.BytesToHash([]byte{0xe}), Number: big.NewInt(10), Difficulty: big.NewInt(100000000)},
|
||||
{ParentHash: common.BytesToHash([]byte{0xf}), Number: big.NewInt(17), Difficulty: big.NewInt(100000000)},
|
||||
},
|
||||
0,
|
||||
false,
|
||||
},
|
||||
}
|
||||
results := make(chan *types.Block, 16)
|
||||
|
||||
for id, c := range testcases {
|
||||
for _, h := range c.headers {
|
||||
ethash.Seal(nil, types.NewBlockWithHeader(h), results, nil)
|
||||
}
|
||||
if res := api.SubmitWork(fakeNonce, ethash.SealHash(c.headers[c.submitIndex]), fakeDigest); res != c.submitRes {
|
||||
t.Errorf("case %d submit result mismatch, want %t, get %t", id+1, c.submitRes, res)
|
||||
}
|
||||
if !c.submitRes {
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case res := <-results:
|
||||
if res.Header().Nonce != fakeNonce {
|
||||
t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce)
|
||||
}
|
||||
if res.Header().MixDigest != fakeDigest {
|
||||
t.Errorf("case %d block digest mismatch, want %x, get %x", id+1, fakeDigest, res.Header().MixDigest)
|
||||
}
|
||||
if res.Header().Difficulty.Uint64() != c.headers[c.submitIndex].Difficulty.Uint64() {
|
||||
t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty, res.Header().Difficulty)
|
||||
}
|
||||
if res.Header().Number.Uint64() != c.headers[c.submitIndex].Number.Uint64() {
|
||||
t.Errorf("case %d block number mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Number.Uint64(), res.Header().Number.Uint64())
|
||||
}
|
||||
if res.Header().ParentHash != c.headers[c.submitIndex].ParentHash {
|
||||
t.Errorf("case %d block parent hash mismatch, want %s, get %s", id+1, c.headers[c.submitIndex].ParentHash.Hex(), res.Header().ParentHash.Hex())
|
||||
}
|
||||
case <-time.NewTimer(time.Second).C:
|
||||
t.Errorf("case %d fetch ethash result timeout", id+1)
|
||||
}
|
||||
}
|
||||
}
|
@ -26,7 +26,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/console/prompt"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
@ -99,9 +98,6 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
|
||||
Miner: miner.Config{
|
||||
Etherbase: common.HexToAddress(testAddress),
|
||||
},
|
||||
Ethash: ethash.Config{
|
||||
PowMode: ethash.ModeTest,
|
||||
},
|
||||
}
|
||||
if confOverride != nil {
|
||||
confOverride(ethConf)
|
||||
|
@ -55,10 +55,10 @@ func TestHeaderVerification(t *testing.T) {
|
||||
|
||||
if valid {
|
||||
engine := ethash.NewFaker()
|
||||
_, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
|
||||
_, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]})
|
||||
} else {
|
||||
engine := ethash.NewFakeFailer(headers[i].Number.Uint64())
|
||||
_, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
|
||||
_, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]})
|
||||
}
|
||||
// Wait for the verification result
|
||||
select {
|
||||
@ -164,7 +164,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
|
||||
|
||||
// Verify the blocks before the merging
|
||||
for i := 0; i < len(preBlocks); i++ {
|
||||
_, results := engine.VerifyHeaders(chain, []*types.Header{preHeaders[i]}, []bool{true})
|
||||
_, results := engine.VerifyHeaders(chain, []*types.Header{preHeaders[i]})
|
||||
// Wait for the verification result
|
||||
select {
|
||||
case result := <-results:
|
||||
@ -189,7 +189,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
|
||||
|
||||
// Verify the blocks after the merging
|
||||
for i := 0; i < len(postBlocks); i++ {
|
||||
_, results := engine.VerifyHeaders(chain, []*types.Header{postHeaders[i]}, []bool{true})
|
||||
_, results := engine.VerifyHeaders(chain, []*types.Header{postHeaders[i]})
|
||||
// Wait for the verification result
|
||||
select {
|
||||
case result := <-results:
|
||||
@ -209,19 +209,14 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
|
||||
}
|
||||
|
||||
// Verify the blocks with pre-merge blocks and post-merge blocks
|
||||
var (
|
||||
headers []*types.Header
|
||||
seals []bool
|
||||
)
|
||||
var headers []*types.Header
|
||||
for _, block := range preBlocks {
|
||||
headers = append(headers, block.Header())
|
||||
seals = append(seals, true)
|
||||
}
|
||||
for _, block := range postBlocks {
|
||||
headers = append(headers, block.Header())
|
||||
seals = append(seals, true)
|
||||
}
|
||||
_, results := engine.VerifyHeaders(chain, headers, seals)
|
||||
_, results := engine.VerifyHeaders(chain, headers)
|
||||
for i := 0; i < len(headers); i++ {
|
||||
select {
|
||||
case result := <-results:
|
||||
@ -252,11 +247,8 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
|
||||
_, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 8, nil)
|
||||
)
|
||||
headers := make([]*types.Header, len(blocks))
|
||||
seals := make([]bool, len(blocks))
|
||||
|
||||
for i, block := range blocks {
|
||||
headers[i] = block.Header()
|
||||
seals[i] = true
|
||||
}
|
||||
// Set the number of threads to verify on
|
||||
old := runtime.GOMAXPROCS(threads)
|
||||
@ -269,11 +261,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
|
||||
|
||||
if valid {
|
||||
chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
|
||||
_, results = chain.engine.VerifyHeaders(chain, headers)
|
||||
chain.Stop()
|
||||
} else {
|
||||
chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
|
||||
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
|
||||
_, results = chain.engine.VerifyHeaders(chain, headers)
|
||||
chain.Stop()
|
||||
}
|
||||
// Wait for all the verification results
|
||||
@ -322,11 +314,8 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
|
||||
_, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, nil)
|
||||
)
|
||||
headers := make([]*types.Header, len(blocks))
|
||||
seals := make([]bool, len(blocks))
|
||||
|
||||
for i, block := range blocks {
|
||||
headers[i] = block.Header()
|
||||
seals[i] = true
|
||||
}
|
||||
// Set the number of threads to verify on
|
||||
old := runtime.GOMAXPROCS(threads)
|
||||
@ -336,7 +325,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
|
||||
chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
|
||||
defer chain.Stop()
|
||||
|
||||
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
|
||||
abort, results := chain.engine.VerifyHeaders(chain, headers)
|
||||
close(abort)
|
||||
|
||||
// Deplete the results channel
|
||||
|
@ -365,7 +365,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
|
||||
// The first thing the node will do is reconstruct the verification data for
|
||||
// the head block (ethash cache or clique voting snapshot). Might as well do
|
||||
// it in advance.
|
||||
bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
|
||||
bc.engine.VerifyHeader(bc, bc.CurrentHeader())
|
||||
|
||||
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
|
||||
for hash := range BadHashes {
|
||||
@ -1522,7 +1522,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||
return 0, errChainStopped
|
||||
}
|
||||
defer bc.chainmu.Unlock()
|
||||
return bc.insertChain(chain, true, true)
|
||||
return bc.insertChain(chain, true)
|
||||
}
|
||||
|
||||
// insertChain is the internal implementation of InsertChain, which assumes that
|
||||
@ -1533,7 +1533,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
||||
// racey behaviour. If a sidechain import is in progress, and the historic state
|
||||
// is imported, but then new canon-head is added before the actual sidechain
|
||||
// completes, then the historic state could be pruned again
|
||||
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) (int, error) {
|
||||
func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) {
|
||||
// If the chain is terminating, don't even bother starting up.
|
||||
if bc.insertStopped() {
|
||||
return 0, nil
|
||||
@ -1554,13 +1554,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
|
||||
}()
|
||||
// Start the parallel header verifier
|
||||
headers := make([]*types.Header, len(chain))
|
||||
seals := make([]bool, len(chain))
|
||||
|
||||
for i, block := range chain {
|
||||
headers[i] = block.Header()
|
||||
seals[i] = verifySeals
|
||||
}
|
||||
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
|
||||
abort, results := bc.engine.VerifyHeaders(bc, headers)
|
||||
defer close(abort)
|
||||
|
||||
// Peek the error for the first block to decide the directing import logic
|
||||
@ -1981,7 +1978,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
|
||||
// memory here.
|
||||
if len(blocks) >= 2048 || memory > 64*1024*1024 {
|
||||
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
|
||||
if _, err := bc.insertChain(blocks, false, true); err != nil {
|
||||
if _, err := bc.insertChain(blocks, true); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
blocks, memory = blocks[:0], 0
|
||||
@ -1995,7 +1992,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
|
||||
}
|
||||
if len(blocks) > 0 {
|
||||
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
|
||||
return bc.insertChain(blocks, false, true)
|
||||
return bc.insertChain(blocks, true)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
@ -2038,7 +2035,7 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error)
|
||||
} else {
|
||||
b = bc.GetBlock(hashes[i], numbers[i])
|
||||
}
|
||||
if _, err := bc.insertChain(types.Blocks{b}, false, false); err != nil {
|
||||
if _, err := bc.insertChain(types.Blocks{b}, false); err != nil {
|
||||
return b.ParentHash(), err
|
||||
}
|
||||
}
|
||||
@ -2242,7 +2239,7 @@ func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
|
||||
}
|
||||
defer bc.chainmu.Unlock()
|
||||
|
||||
_, err := bc.insertChain(types.Blocks{block}, true, false)
|
||||
_, err := bc.insertChain(types.Blocks{block}, false)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -2462,17 +2459,12 @@ Receipts: %v
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
//
|
||||
// The verify parameter can be used to fine tune whether nonce verification
|
||||
// should be done or not. The reason behind the optional check is because some
|
||||
// of the header retrieval mechanisms already need to verify nonces, as well as
|
||||
// because nonces can be verified sparsely, not needing to check each.
|
||||
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
||||
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header) (int, error) {
|
||||
if len(chain) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
start := time.Now()
|
||||
if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
|
||||
if i, err := bc.hc.ValidateHeaderChain(chain); err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *G
|
||||
}
|
||||
// Header-only chain requested
|
||||
genDb, headers := makeHeaderChainWithGenesis(genesis, n, engine, canonicalSeed)
|
||||
_, err := blockchain.InsertHeaderChain(headers, 1)
|
||||
_, err := blockchain.InsertHeaderChain(headers)
|
||||
return genDb, genesis, blockchain, err
|
||||
}
|
||||
|
||||
@ -115,7 +115,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
|
||||
}
|
||||
} else {
|
||||
headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
|
||||
if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
|
||||
if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
|
||||
t.Fatalf("failed to insert forking chain: %v", err)
|
||||
}
|
||||
}
|
||||
@ -148,7 +148,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
|
||||
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
||||
for _, block := range chain {
|
||||
// Try and process the block
|
||||
err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
|
||||
err := blockchain.engine.VerifyHeader(blockchain, block.Header())
|
||||
if err == nil {
|
||||
err = blockchain.validator.ValidateBody(block)
|
||||
}
|
||||
@ -187,7 +187,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
|
||||
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
|
||||
for _, header := range chain {
|
||||
// Try and validate the header
|
||||
if err := blockchain.engine.VerifyHeader(blockchain, header, false); err != nil {
|
||||
if err := blockchain.engine.VerifyHeader(blockchain, header); err != nil {
|
||||
return err
|
||||
}
|
||||
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
|
||||
@ -252,7 +252,7 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
|
||||
}
|
||||
} else {
|
||||
headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
|
||||
if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
|
||||
if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
|
||||
t.Fatalf("failed to insert forking chain: %v", err)
|
||||
}
|
||||
if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() {
|
||||
@ -549,10 +549,10 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
|
||||
for i, block := range diffBlocks {
|
||||
diffHeaders[i] = block.Header()
|
||||
}
|
||||
if _, err := blockchain.InsertHeaderChain(easyHeaders, 1); err != nil {
|
||||
if _, err := blockchain.InsertHeaderChain(easyHeaders); err != nil {
|
||||
t.Fatalf("failed to insert easy chain: %v", err)
|
||||
}
|
||||
if _, err := blockchain.InsertHeaderChain(diffHeaders, 1); err != nil {
|
||||
if _, err := blockchain.InsertHeaderChain(diffHeaders); err != nil {
|
||||
t.Fatalf("failed to insert difficult chain: %v", err)
|
||||
}
|
||||
}
|
||||
@ -613,7 +613,7 @@ func testBadHashes(t *testing.T, full bool) {
|
||||
BadHashes[headers[2].Hash()] = true
|
||||
defer func() { delete(BadHashes, headers[2].Hash()) }()
|
||||
|
||||
_, err = blockchain.InsertHeaderChain(headers, 1)
|
||||
_, err = blockchain.InsertHeaderChain(headers)
|
||||
}
|
||||
if !errors.Is(err, ErrBannedHash) {
|
||||
t.Errorf("error mismatch: have: %v, want: %v", err, ErrBannedHash)
|
||||
@ -645,7 +645,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
|
||||
BadHashes[blocks[3].Header().Hash()] = true
|
||||
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
|
||||
} else {
|
||||
if _, err = blockchain.InsertHeaderChain(headers, 1); err != nil {
|
||||
if _, err = blockchain.InsertHeaderChain(headers); err != nil {
|
||||
t.Errorf("failed to import headers: %v", err)
|
||||
}
|
||||
if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
|
||||
@ -711,7 +711,7 @@ func testInsertNonceError(t *testing.T, full bool) {
|
||||
|
||||
blockchain.engine = ethash.NewFakeFailer(failNum)
|
||||
blockchain.hc.engine = blockchain.engine
|
||||
failRes, err = blockchain.InsertHeaderChain(headers, 1)
|
||||
failRes, err = blockchain.InsertHeaderChain(headers)
|
||||
}
|
||||
// Check that the returned error indicates the failure
|
||||
if failRes != failAt {
|
||||
@ -785,7 +785,7 @@ func TestFastVsFullChains(t *testing.T) {
|
||||
for i, block := range blocks {
|
||||
headers[i] = block.Header()
|
||||
}
|
||||
if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
|
||||
if n, err := fast.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
|
||||
@ -800,7 +800,7 @@ func TestFastVsFullChains(t *testing.T) {
|
||||
ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||
defer ancient.Stop()
|
||||
|
||||
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
||||
if n, err := ancient.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
|
||||
@ -931,7 +931,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
for i, block := range blocks {
|
||||
headers[i] = block.Header()
|
||||
}
|
||||
if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
|
||||
if n, err := fast.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
|
||||
@ -947,7 +947,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||
defer ancient.Stop()
|
||||
|
||||
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
||||
if n, err := ancient.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
|
||||
@ -964,7 +964,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
|
||||
lightDb := makeDb()
|
||||
defer lightDb.Close()
|
||||
light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
|
||||
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
|
||||
if n, err := light.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
defer light.Stop()
|
||||
@ -1781,7 +1781,7 @@ func TestBlockchainRecovery(t *testing.T) {
|
||||
for i, block := range blocks {
|
||||
headers[i] = block.Header()
|
||||
}
|
||||
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
|
||||
if n, err := ancient.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
|
||||
@ -1850,7 +1850,7 @@ func TestInsertReceiptChainRollback(t *testing.T) {
|
||||
for i, block := range canonblocks {
|
||||
canonHeaders[i] = block.Header()
|
||||
}
|
||||
if _, err = ancientChain.InsertHeaderChain(canonHeaders, 1); err != nil {
|
||||
if _, err = ancientChain.InsertHeaderChain(canonHeaders); err != nil {
|
||||
t.Fatal("can't import canon headers:", err)
|
||||
}
|
||||
|
||||
@ -2128,7 +2128,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
|
||||
for _, block := range blocks {
|
||||
headers = append(headers, block.Header())
|
||||
}
|
||||
_, err := chain.InsertHeaderChain(headers, 1)
|
||||
_, err := chain.InsertHeaderChain(headers)
|
||||
return err
|
||||
}
|
||||
asserter = func(t *testing.T, block *types.Block) {
|
||||
@ -2142,7 +2142,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
|
||||
for _, block := range blocks {
|
||||
headers = append(headers, block.Header())
|
||||
}
|
||||
_, err := chain.InsertHeaderChain(headers, 1)
|
||||
_, err := chain.InsertHeaderChain(headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -2299,7 +2299,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
|
||||
for _, block := range blocks {
|
||||
headers = append(headers, block.Header())
|
||||
}
|
||||
i, err := chain.InsertHeaderChain(headers, 1)
|
||||
i, err := chain.InsertHeaderChain(headers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("index %d, number %d: %w", i, headers[i].Number, err)
|
||||
}
|
||||
@ -2316,7 +2316,7 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
|
||||
for _, block := range blocks {
|
||||
headers = append(headers, block.Header())
|
||||
}
|
||||
i, err := chain.InsertHeaderChain(headers, 1)
|
||||
i, err := chain.InsertHeaderChain(headers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("index %d: %w", i, err)
|
||||
}
|
||||
@ -2492,7 +2492,7 @@ func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
|
||||
for i, block := range canonblocks {
|
||||
canonHeaders[i] = block.Header()
|
||||
}
|
||||
if n, err := chain.InsertHeaderChain(canonHeaders, 0); err != nil {
|
||||
if n, err := chain.InsertHeaderChain(canonHeaders); err != nil {
|
||||
t.Fatalf("header %d: failed to insert into chain: %v", n, err)
|
||||
}
|
||||
canonNum := chain.CurrentHeader().Number.Uint64()
|
||||
@ -2501,7 +2501,7 @@ func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
|
||||
for i, block := range sideblocks {
|
||||
sideHeaders[i] = block.Header()
|
||||
}
|
||||
if n, err := chain.InsertHeaderChain(sideHeaders, 0); err != nil {
|
||||
if n, err := chain.InsertHeaderChain(sideHeaders); err != nil {
|
||||
t.Fatalf("header %d: failed to insert into chain: %v", n, err)
|
||||
}
|
||||
head := chain.CurrentHeader()
|
||||
@ -2692,7 +2692,7 @@ func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
|
||||
for i, block := range blocks {
|
||||
headers[i] = block.Header()
|
||||
}
|
||||
if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
|
||||
if n, err := chain.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to insert header %d: %v", n, err)
|
||||
}
|
||||
// The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
|
||||
|
@ -379,11 +379,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
|
||||
return newcfg, stored, nil
|
||||
}
|
||||
|
||||
// LoadCliqueConfig loads the stored clique config if the chain config
|
||||
// is already present in database, otherwise, return the config in the
|
||||
// provided genesis specification. Note the returned clique config can
|
||||
// be nil if we are not in the clique network.
|
||||
func LoadCliqueConfig(db ethdb.Database, genesis *Genesis) (*params.CliqueConfig, error) {
|
||||
// LoadChainConfig loads the stored chain config if it is already present in
|
||||
// database, otherwise, return the config in the provided genesis specification.
|
||||
func LoadChainConfig(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, error) {
|
||||
// Load the stored chain config from the database. It can be nil
|
||||
// in case the database is empty. Notably, we only care about the
|
||||
// chain config that corresponds to the canonical chain.
|
||||
@ -391,10 +389,10 @@ func LoadCliqueConfig(db ethdb.Database, genesis *Genesis) (*params.CliqueConfig
|
||||
if stored != (common.Hash{}) {
|
||||
storedcfg := rawdb.ReadChainConfig(db, stored)
|
||||
if storedcfg != nil {
|
||||
return storedcfg.Clique, nil
|
||||
return storedcfg, nil
|
||||
}
|
||||
}
|
||||
// Load the clique config from the provided genesis specification.
|
||||
// Load the config from the provided genesis specification
|
||||
if genesis != nil {
|
||||
// Reject invalid genesis spec without valid chain config
|
||||
if genesis.Config == nil {
|
||||
@ -407,12 +405,11 @@ func LoadCliqueConfig(db ethdb.Database, genesis *Genesis) (*params.CliqueConfig
|
||||
if stored != (common.Hash{}) && genesis.ToBlock().Hash() != stored {
|
||||
return nil, &GenesisMismatchError{stored, genesis.ToBlock().Hash()}
|
||||
}
|
||||
return genesis.Config.Clique, nil
|
||||
return genesis.Config, nil
|
||||
}
|
||||
// There is no stored chain config and no new config provided,
|
||||
// In this case the default chain config(mainnet) will be used,
|
||||
// namely ethash is the specified consensus engine, return nil.
|
||||
return nil, nil
|
||||
// In this case the default chain config(mainnet) will be used
|
||||
return params.MainnetChainConfig, nil
|
||||
}
|
||||
|
||||
func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
|
||||
|
@ -300,7 +300,7 @@ func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *F
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
||||
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header) (int, error) {
|
||||
// Do a sanity check that the provided chain is actually ordered and linked
|
||||
for i := 1; i < len(chain); i++ {
|
||||
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
|
||||
@ -322,23 +322,8 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
|
||||
return i, ErrBannedHash
|
||||
}
|
||||
}
|
||||
|
||||
// Generate the list of seal verification requests, and start the parallel verifier
|
||||
seals := make([]bool, len(chain))
|
||||
if checkFreq != 0 {
|
||||
// In case of checkFreq == 0 all seals are left false.
|
||||
for i := 0; i <= len(seals)/checkFreq; i++ {
|
||||
index := i*checkFreq + hc.rand.Intn(checkFreq)
|
||||
if index >= len(seals) {
|
||||
index = len(seals) - 1
|
||||
}
|
||||
seals[index] = true
|
||||
}
|
||||
// Last should always be verified to avoid junk.
|
||||
seals[len(seals)-1] = true
|
||||
}
|
||||
|
||||
abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
|
||||
// Start the parallel verifier
|
||||
abort, results := hc.engine.VerifyHeaders(hc, chain)
|
||||
defer close(abort)
|
||||
|
||||
// Iterate over the headers and ensure they all check out
|
||||
|
@ -24,7 +24,6 @@ import (
|
||||
"io"
|
||||
"math/big"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -86,11 +85,8 @@ func NewMinerAPI(e *Ethereum) *MinerAPI {
|
||||
// usable by this process. If mining is already running, this method adjusts the
|
||||
// number of threads allowed to use and updates the minimum price required by the
|
||||
// transaction pool.
|
||||
func (api *MinerAPI) Start(threads *int) error {
|
||||
if threads == nil {
|
||||
return api.e.StartMining(runtime.NumCPU())
|
||||
}
|
||||
return api.e.StartMining(*threads)
|
||||
func (api *MinerAPI) Start() error {
|
||||
return api.e.StartMining()
|
||||
}
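With the thread-count argument gone, starting the miner over RPC reduces to a bare miner_start call. A hedged sketch using the go-ethereum rpc client against a placeholder endpoint:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// The thread-count argument has been removed, so miner_start is now
	// invoked without parameters.
	if err := client.Call(nil, "miner_start"); err != nil {
		fmt.Println("start failed:", err)
	}
}
```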
|
||||
|
||||
// Stop terminates the miner, both at the consensus engine level as well as at
|
||||
|
@ -397,8 +397,8 @@ func (b *EthAPIBackend) Miner() *miner.Miner {
|
||||
return b.eth.Miner()
|
||||
}
|
||||
|
||||
func (b *EthAPIBackend) StartMining(threads int) error {
|
||||
return b.eth.StartMining(threads)
|
||||
func (b *EthAPIBackend) StartMining() error {
|
||||
return b.eth.StartMining()
|
||||
}
|
||||
|
||||
func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) {
|
||||
|
@ -134,14 +134,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
|
||||
log.Error("Failed to recover state", "error", err)
|
||||
}
|
||||
// Transfer mining-related config to the ethash config.
|
||||
ethashConfig := config.Ethash
|
||||
ethashConfig.NotifyFull = config.Miner.NotifyFull
|
||||
cliqueConfig, err := core.LoadCliqueConfig(chainDb, config.Genesis)
|
||||
chainConfig, err := core.LoadChainConfig(chainDb, config.Genesis)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
engine, err := ethconfig.CreateConsensusEngine(chainConfig, chainDb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
engine := ethconfig.CreateConsensusEngine(stack, ðashConfig, cliqueConfig, config.Miner.Notify, config.Miner.Noverify, chainDb)
|
||||
|
||||
eth := &Ethereum{
|
||||
config: config,
|
||||
merger: consensus.NewMerger(chainDb),
|
||||
@ -392,18 +392,7 @@ func (s *Ethereum) SetEtherbase(etherbase common.Address) {
|
||||
// StartMining starts the miner with the given number of CPU threads. If mining
|
||||
// is already running, this method adjusts the number of threads allowed to use
|
||||
// and updates the minimum price required by the transaction pool.
|
||||
func (s *Ethereum) StartMining(threads int) error {
|
||||
// Update the thread count within the consensus engine
|
||||
type threaded interface {
|
||||
SetThreads(threads int)
|
||||
}
|
||||
if th, ok := s.engine.(threaded); ok {
|
||||
log.Info("Updated mining threads", "threads", threads)
|
||||
if threads == 0 {
|
||||
threads = -1 // Disable the miner from within
|
||||
}
|
||||
th.SetThreads(threads)
|
||||
}
|
||||
func (s *Ethereum) StartMining() error {
|
||||
// If the miner was not running, initialize it
|
||||
if !s.IsMining() {
|
||||
// Propagate the initial price point to the transaction pool
|
||||
|
@ -440,7 +440,7 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block)
|
||||
t.Fatal("can't create node:", err)
|
||||
}
|
||||
|
||||
ethcfg := ðconfig.Config{Genesis: genesis, Ethash: ethash.Config{PowMode: ethash.ModeFake}, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256}
|
||||
ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256}
|
||||
ethservice, err := eth.New(n, ethcfg)
|
||||
if err != nil {
|
||||
t.Fatal("can't create eth service:", err)
|
||||
|
@ -53,11 +53,9 @@ var (
|
||||
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
|
||||
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
|
||||
|
||||
fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during snap sync
|
||||
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
|
||||
fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
|
||||
fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
|
||||
fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync
|
||||
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
|
||||
fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
|
||||
fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync
|
||||
)
|
||||
|
||||
var (
|
||||
@ -175,7 +173,7 @@ type LightChain interface {
|
||||
GetTd(common.Hash, uint64) *big.Int
|
||||
|
||||
// InsertHeaderChain inserts a batch of headers into the local chain.
|
||||
InsertHeaderChain([]*types.Header, int) (int, error)
|
||||
InsertHeaderChain([]*types.Header) (int, error)
|
||||
|
||||
// SetHead rewinds the local chain to a new head.
|
||||
SetHead(uint64) error
|
||||
@ -1381,19 +1379,6 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
|
||||
|
||||
// In case of header only syncing, validate the chunk immediately
|
||||
if mode == SnapSync || mode == LightSync {
|
||||
// If we're importing pure headers, verify based on their recentness
|
||||
var pivot uint64
|
||||
|
||||
d.pivotLock.RLock()
|
||||
if d.pivotHeader != nil {
|
||||
pivot = d.pivotHeader.Number.Uint64()
|
||||
}
|
||||
d.pivotLock.RUnlock()
|
||||
|
||||
frequency := fsHeaderCheckFrequency
|
||||
if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
|
||||
frequency = 1
|
||||
}
|
||||
// Although the received headers might be all valid, a legacy
|
||||
// PoW/PoA sync must not accept post-merge headers. Make sure
|
||||
// that any transition is rejected at this point.
|
||||
@ -1426,11 +1411,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
|
||||
}
|
||||
}
|
||||
if len(chunkHeaders) > 0 {
|
||||
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
|
||||
if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil {
|
||||
rollbackErr = err
|
||||
|
||||
// If some headers were inserted, track them as uncertain
|
||||
if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
|
||||
if mode == SnapSync && n > 0 && rollback == 0 {
|
||||
rollback = chunkHeaders[0].Number.Uint64()
|
||||
}
|
||||
log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
|
||||
|
@ -18,10 +18,7 @@
|
||||
package ethconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -34,9 +31,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
@ -62,16 +57,7 @@ var LightClientGPO = gasprice.Config{
|
||||
|
||||
// Defaults contains default settings for use on the Ethereum main net.
|
||||
var Defaults = Config{
|
||||
SyncMode: downloader.SnapSync,
|
||||
Ethash: ethash.Config{
|
||||
CacheDir: "ethash",
|
||||
CachesInMem: 2,
|
||||
CachesOnDisk: 3,
|
||||
CachesLockMmap: false,
|
||||
DatasetsInMem: 1,
|
||||
DatasetsOnDisk: 2,
|
||||
DatasetsLockMmap: false,
|
||||
},
|
||||
SyncMode: downloader.SnapSync,
|
||||
NetworkId: 1,
|
||||
TxLookupLimit: 2350000,
|
||||
LightPeers: 100,
|
||||
@ -92,27 +78,6 @@ var Defaults = Config{
|
||||
RPCTxFeeCap: 1, // 1 ether
|
||||
}
|
||||
|
||||
func init() {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" {
|
||||
if user, err := user.Current(); err == nil {
|
||||
home = user.HomeDir
|
||||
}
|
||||
}
|
||||
if runtime.GOOS == "darwin" {
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(home, "Library", "Ethash")
|
||||
} else if runtime.GOOS == "windows" {
|
||||
localappdata := os.Getenv("LOCALAPPDATA")
|
||||
if localappdata != "" {
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(localappdata, "Ethash")
|
||||
} else {
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "Ethash")
|
||||
}
|
||||
} else {
|
||||
Defaults.Ethash.DatasetDir = filepath.Join(home, ".ethash")
|
||||
}
|
||||
}
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
|
||||
|
||||
// Config contains configuration options for of the ETH and LES protocols.
|
||||
@ -173,9 +138,6 @@ type Config struct {
|
||||
// Mining options
|
||||
Miner miner.Config
|
||||
|
||||
// Ethash options
|
||||
Ethash ethash.Config
|
||||
|
||||
// Transaction pool options
|
||||
TxPool txpool.Config
|
||||
|
||||
@ -202,34 +164,19 @@ type Config struct {
|
||||
OverrideCancun *uint64 `toml:",omitempty"`
|
||||
}
|
||||
|
||||
// CreateConsensusEngine creates a consensus engine for the given chain configuration.
|
||||
func CreateConsensusEngine(stack *node.Node, ethashConfig *ethash.Config, cliqueConfig *params.CliqueConfig, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
|
||||
// CreateConsensusEngine creates a consensus engine for the given chain config.
|
||||
// Clique is allowed for now to live standalone, but ethash is forbidden and can
|
||||
// only exist on already merged networks.
|
||||
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) {
|
||||
// If proof-of-authority is requested, set it up
|
||||
var engine consensus.Engine
|
||||
if cliqueConfig != nil {
|
||||
engine = clique.New(cliqueConfig, db)
|
||||
} else {
|
||||
switch ethashConfig.PowMode {
|
||||
case ethash.ModeFake:
|
||||
log.Warn("Ethash used in fake mode")
|
||||
case ethash.ModeTest:
|
||||
log.Warn("Ethash used in test mode")
|
||||
case ethash.ModeShared:
|
||||
log.Warn("Ethash used in shared mode")
|
||||
}
|
||||
engine = ethash.New(ethash.Config{
|
||||
PowMode: ethashConfig.PowMode,
|
||||
CacheDir: stack.ResolvePath(ethashConfig.CacheDir),
|
||||
CachesInMem: ethashConfig.CachesInMem,
|
||||
CachesOnDisk: ethashConfig.CachesOnDisk,
|
||||
CachesLockMmap: ethashConfig.CachesLockMmap,
|
||||
DatasetDir: ethashConfig.DatasetDir,
|
||||
DatasetsInMem: ethashConfig.DatasetsInMem,
|
||||
DatasetsOnDisk: ethashConfig.DatasetsOnDisk,
|
||||
DatasetsLockMmap: ethashConfig.DatasetsLockMmap,
|
||||
NotifyFull: ethashConfig.NotifyFull,
|
||||
}, notify, noverify)
|
||||
engine.(*ethash.Ethash).SetThreads(-1) // Disable CPU mining
|
||||
if config.Clique != nil {
|
||||
return beacon.New(clique.New(config.Clique, db)), nil
|
||||
}
|
||||
return beacon.New(engine)
|
||||
// If defaulting to proof-of-work, enforce an already merged network since
|
||||
// we cannot run PoW algorithms anymore, so we cannot even follow a chain
|
||||
// not coordinated by a beacon node.
|
||||
if !config.TerminalTotalDifficultyPassed {
|
||||
return nil, errors.New("ethash is only supported as a historical component of already merged networks")
|
||||
}
|
||||
return beacon.New(ethash.NewFaker()), nil
|
||||
}
|
||||
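For reference, a minimal sketch (not part of this diff) of how callers consume the reworked constructor; chainConfig and chainDb are assumed to come from the usual genesis-setup path, mirroring the les/New hunk later in this change:
engine, err := ethconfig.CreateConsensusEngine(chainConfig, chainDb)
if err != nil {
	// A chain config that never passed the terminal total difficulty is now
	// rejected here instead of silently starting a real ethash instance.
	return nil, err
}
// engine is beacon-wrapped clique, or beacon-wrapped ethash.NewFaker() on
// already-merged ethash networks.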
|
@ -6,7 +6,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
@ -48,7 +47,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
Preimages bool
|
||||
FilterLogCacheSize int
|
||||
Miner miner.Config
|
||||
Ethash ethash.Config
|
||||
TxPool txpool.Config
|
||||
GPO gasprice.Config
|
||||
EnablePreimageRecording bool
|
||||
@ -90,7 +88,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
enc.Preimages = c.Preimages
|
||||
enc.FilterLogCacheSize = c.FilterLogCacheSize
|
||||
enc.Miner = c.Miner
|
||||
enc.Ethash = c.Ethash
|
||||
enc.TxPool = c.TxPool
|
||||
enc.GPO = c.GPO
|
||||
enc.EnablePreimageRecording = c.EnablePreimageRecording
|
||||
@ -136,7 +133,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
Preimages *bool
|
||||
FilterLogCacheSize *int
|
||||
Miner *miner.Config
|
||||
Ethash *ethash.Config
|
||||
TxPool *txpool.Config
|
||||
GPO *gasprice.Config
|
||||
EnablePreimageRecording *bool
|
||||
@ -243,9 +239,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
if dec.Miner != nil {
|
||||
c.Miner = *dec.Miner
|
||||
}
|
||||
if dec.Ethash != nil {
|
||||
c.Ethash = *dec.Ethash
|
||||
}
|
||||
if dec.TxPool != nil {
|
||||
c.TxPool = *dec.TxPool
|
||||
}
|
||||
|
@ -207,7 +207,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
|
||||
return errors.New("unexpected post-merge header")
|
||||
}
|
||||
}
|
||||
return h.chain.Engine().VerifyHeader(h.chain, header, true)
|
||||
return h.chain.Engine().VerifyHeader(h.chain, header)
|
||||
}
|
||||
heighter := func() uint64 {
|
||||
return h.chain.CurrentBlock().Number.Uint64()
|
||||
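As a small illustration of the interface change rippling through here, header verification no longer takes a seal flag; a caller (sketch only, with chain and header assumed to be the usual blockchain handle and candidate header) simply writes:
// Sketch: per-call seal toggling is gone; the beacon/clique shims decide
// internally how much of the header still needs checking.
if err := chain.Engine().VerifyHeader(chain, header); err != nil {
	return err
}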
|
@ -221,7 +221,6 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
|
||||
}
|
||||
// Create Ethereum Service
|
||||
config := &ethconfig.Config{Genesis: genesis}
|
||||
config.Ethash.PowMode = ethash.ModeFake
|
||||
ethservice, err := eth.New(n, config)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create new ethereum service: %v", err)
|
||||
|
@ -56,7 +56,6 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
|
||||
}
|
||||
// Create Ethereum Service
|
||||
config := &ethconfig.Config{Genesis: genesis}
|
||||
config.Ethash.PowMode = ethash.ModeFake
|
||||
ethservice, err := eth.New(n, config)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create new ethereum service: %v", err)
|
||||
|
@ -376,10 +376,7 @@ func createNode(t *testing.T) *node.Node {
|
||||
|
||||
func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) (*handler, []*types.Block) {
|
||||
ethConf := &ethconfig.Config{
|
||||
Genesis: gspec,
|
||||
Ethash: ethash.Config{
|
||||
PowMode: ethash.ModeFake,
|
||||
},
|
||||
Genesis: gspec,
|
||||
NetworkId: 1337,
|
||||
TrieCleanCache: 5,
|
||||
TrieCleanCacheJournal: "triecache",
|
||||
|
@ -34,7 +34,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
@ -2055,15 +2054,6 @@ func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, err
|
||||
return spew.Sdump(block), nil
|
||||
}
|
||||
|
||||
// SeedHash retrieves the seed hash of a block.
|
||||
func (api *DebugAPI) SeedHash(ctx context.Context, number uint64) (string, error) {
|
||||
block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
|
||||
if block == nil {
|
||||
return "", fmt.Errorf("block #%d not found", number)
|
||||
}
|
||||
return fmt.Sprintf("%#x", ethash.SeedHash(number)), nil
|
||||
}
|
||||
|
||||
// ChaindbProperty returns leveldb properties of the key-value database.
|
||||
func (api *DebugAPI) ChaindbProperty(property string) (string, error) {
|
||||
if property == "" {
|
||||
|
@ -30,7 +30,6 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
ethdownloader "github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
@ -495,7 +494,6 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []
|
||||
func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
|
||||
config := ethconfig.Defaults
|
||||
config.SyncMode = (ethdownloader.SyncMode)(downloader.LightSync)
|
||||
config.Ethash.PowMode = ethash.ModeFake
|
||||
return New(stack, &config)
|
||||
}
|
||||
|
||||
|
@ -223,7 +223,6 @@ func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Heade
|
||||
}
|
||||
ethcfg := &ethconfig.Config{
|
||||
Genesis: genesis,
|
||||
Ethash: ethash.Config{PowMode: ethash.ModeFake},
|
||||
SyncMode: downloader.LightSync,
|
||||
TrieDirtyCache: 256,
|
||||
TrieCleanCache: 256,
|
||||
@ -236,7 +235,7 @@ func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Heade
|
||||
if err := n.Start(); err != nil {
|
||||
t.Fatal("can't start node:", err)
|
||||
}
|
||||
if _, err := lesService.BlockChain().InsertHeaderChain(headers, 0); err != nil {
|
||||
if _, err := lesService.BlockChain().InsertHeaderChain(headers); err != nil {
|
||||
n.Close()
|
||||
t.Fatal("can't import test headers:", err)
|
||||
}
|
||||
|
@ -101,6 +101,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
|
||||
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
|
||||
return nil, genesisErr
|
||||
}
|
||||
engine, err := ethconfig.CreateConsensusEngine(chainConfig, chainDb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Info("")
|
||||
log.Info(strings.Repeat("-", 153))
|
||||
for _, line := range strings.Split(chainConfig.Description(), "\n") {
|
||||
@ -126,7 +130,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
|
||||
reqDist: newRequestDistributor(peers, &mclock.System{}),
|
||||
accountManager: stack.AccountManager(),
|
||||
merger: merger,
|
||||
engine: ethconfig.CreateConsensusEngine(stack, &config.Ethash, chainConfig.Clique, nil, false, chainDb),
|
||||
engine: engine,
|
||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||
bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
|
||||
p2pServer: stack.Server(),
|
||||
@ -185,11 +189,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
|
||||
leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)
|
||||
|
||||
leth.handler = newClientHandler(config.UltraLightServers, config.UltraLightFraction, leth)
|
||||
if leth.handler.ulc != nil {
|
||||
log.Warn("Ultra light client is enabled", "trustedNodes", len(leth.handler.ulc.keys), "minTrustedFraction", leth.handler.ulc.fraction)
|
||||
leth.blockchain.DisableCheckFreq()
|
||||
}
|
||||
|
||||
leth.netRPCService = ethapi.NewNetAPI(leth.p2pServer, leth.config.NetworkId)
|
||||
|
||||
// Register the backend on the node
|
||||
|
@ -58,11 +58,9 @@ var (
|
||||
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
|
||||
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
|
||||
|
||||
fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
|
||||
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
|
||||
fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
|
||||
fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
|
||||
fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync
|
||||
fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
|
||||
fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
|
||||
fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync
|
||||
)
|
||||
|
||||
var (
|
||||
@ -166,7 +164,7 @@ type LightChain interface {
|
||||
GetTd(common.Hash, uint64) *big.Int
|
||||
|
||||
// InsertHeaderChain inserts a batch of headers into the local chain.
|
||||
InsertHeaderChain([]*types.Header, int) (int, error)
|
||||
InsertHeaderChain([]*types.Header) (int, error)
|
||||
|
||||
// SetHead rewinds the local chain to a new head.
|
||||
SetHead(uint64) error
|
||||
@ -1604,24 +1602,11 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
|
||||
|
||||
// In case of header only syncing, validate the chunk immediately
|
||||
if mode == FastSync || mode == LightSync {
|
||||
// If we're importing pure headers, verify based on their recentness
|
||||
var pivot uint64
|
||||
|
||||
d.pivotLock.RLock()
|
||||
if d.pivotHeader != nil {
|
||||
pivot = d.pivotHeader.Number.Uint64()
|
||||
}
|
||||
d.pivotLock.RUnlock()
|
||||
|
||||
frequency := fsHeaderCheckFrequency
|
||||
if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
|
||||
frequency = 1
|
||||
}
|
||||
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
|
||||
if n, err := d.lightchain.InsertHeaderChain(chunk); err != nil {
|
||||
rollbackErr = err
|
||||
|
||||
// If some headers were inserted, track them as uncertain
|
||||
if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 {
|
||||
if mode == FastSync && n > 0 && rollback == 0 {
|
||||
rollback = chunk[0].Number.Uint64()
|
||||
}
|
||||
log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
|
||||
|
@ -254,7 +254,7 @@ func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
|
||||
}
|
||||
|
||||
// InsertHeaderChain injects a new batch of headers into the simulated chain.
|
||||
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
|
||||
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header) (i int, err error) {
|
||||
dl.lock.Lock()
|
||||
defer dl.lock.Unlock()
|
||||
// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
|
||||
|
@ -161,18 +161,11 @@ func newLightFetcher(chain *light.LightChain, engine consensus.Engine, peers *se
|
||||
// Construct the fetcher by offering all necessary APIs
|
||||
validator := func(header *types.Header) error {
|
||||
// Disable seal verification explicitly if we are running in ulc mode.
|
||||
return engine.VerifyHeader(chain, header, ulc == nil)
|
||||
return engine.VerifyHeader(chain, header)
|
||||
}
|
||||
heighter := func() uint64 { return chain.CurrentHeader().Number.Uint64() }
|
||||
dropper := func(id string) { peers.unregister(id) }
|
||||
inserter := func(headers []*types.Header) (int, error) {
|
||||
// Disable PoW checking explicitly if we are running in ulc mode.
|
||||
checkFreq := 1
|
||||
if ulc != nil {
|
||||
checkFreq = 0
|
||||
}
|
||||
return chain.InsertHeaderChain(headers, checkFreq)
|
||||
}
|
||||
inserter := func(headers []*types.Header) (int, error) { return chain.InsertHeaderChain(headers) }
|
||||
f := &lightFetcher{
|
||||
ulc: ulc,
|
||||
peerset: peers,
|
||||
|
@ -70,9 +70,8 @@ type LightChain struct {
|
||||
wg sync.WaitGroup
|
||||
|
||||
// Atomic boolean switches:
|
||||
stopped atomic.Bool // whether LightChain is stopped or running
|
||||
procInterrupt atomic.Bool // interrupts chain insert
|
||||
disableCheckFreq atomic.Bool // disables header verification
|
||||
stopped atomic.Bool // whether LightChain is stopped or running
|
||||
procInterrupt atomic.Bool // interrupts chain insert
|
||||
}
|
||||
|
||||
// NewLightChain returns a fully initialised light chain using information
|
||||
@ -345,7 +344,7 @@ func (lc *LightChain) Rollback(chain []common.Hash) {
|
||||
func (lc *LightChain) InsertHeader(header *types.Header) error {
|
||||
// Verify the header first before obtaining the lock
|
||||
headers := []*types.Header{header}
|
||||
if _, err := lc.hc.ValidateHeaderChain(headers, 100); err != nil {
|
||||
if _, err := lc.hc.ValidateHeaderChain(headers); err != nil {
|
||||
return err
|
||||
}
|
||||
// Make sure only one thread manipulates the chain at once
|
||||
@ -381,23 +380,15 @@ func (lc *LightChain) SetCanonical(header *types.Header) error {
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
//
|
||||
// The verify parameter can be used to fine tune whether nonce verification
|
||||
// should be done or not. The reason behind the optional check is because some
|
||||
// of the header retrieval mechanisms already need to verify nonces, as well as
|
||||
// because nonces can be verified sparsely, not needing to check each.
|
||||
//
|
||||
|
||||
// In the case of a light chain, InsertHeaderChain also creates and posts light
|
||||
// chain events when necessary.
|
||||
func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
||||
func (lc *LightChain) InsertHeaderChain(chain []*types.Header) (int, error) {
|
||||
if len(chain) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if lc.disableCheckFreq.Load() {
|
||||
checkFreq = 0
|
||||
}
|
||||
start := time.Now()
|
||||
if i, err := lc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
|
||||
if i, err := lc.hc.ValidateHeaderChain(chain); err != nil {
|
||||
return i, err
|
||||
}
|
||||
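A rough usage sketch of the simplified API (lightchain is assumed to be a *light.LightChain and headers a contiguous, pre-fetched batch): with the checkFreq parameter gone, every header is validated unconditionally and the call site shrinks to:
// Sketch: no frequency argument any more; ValidateHeaderChain is always strict.
if n, err := lightchain.InsertHeaderChain(headers); err != nil {
	log.Warn("Header batch rejected", "index", n, "err", err)
	return err
}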
|
||||
@ -538,13 +529,3 @@ func (lc *LightChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript
|
||||
func (lc *LightChain) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
|
||||
return lc.scope.Track(new(event.Feed).Subscribe(ch))
|
||||
}
|
||||
|
||||
// DisableCheckFreq disables header validation. This is used for ultralight mode.
|
||||
func (lc *LightChain) DisableCheckFreq() {
|
||||
lc.disableCheckFreq.Store(true)
|
||||
}
|
||||
|
||||
// EnableCheckFreq enables header validation.
|
||||
func (lc *LightChain) EnableCheckFreq() {
|
||||
lc.disableCheckFreq.Store(false)
|
||||
}
|
||||
|
@ -64,7 +64,7 @@ func newCanonical(n int) (ethdb.Database, *LightChain, error) {
|
||||
}
|
||||
// Header-only chain requested
|
||||
headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
|
||||
_, err := blockchain.InsertHeaderChain(headers, 1)
|
||||
_, err := blockchain.InsertHeaderChain(headers)
|
||||
return db, blockchain, err
|
||||
}
|
||||
|
||||
@ -99,7 +99,7 @@ func testFork(t *testing.T, LightChain *LightChain, i, n int, comparator func(td
|
||||
}
|
||||
// Extend the newly created chain
|
||||
headerChainB := makeHeaderChain(LightChain2.CurrentHeader(), n, db, forkSeed)
|
||||
if _, err := LightChain2.InsertHeaderChain(headerChainB, 1); err != nil {
|
||||
if _, err := LightChain2.InsertHeaderChain(headerChainB); err != nil {
|
||||
t.Fatalf("failed to insert forking chain: %v", err)
|
||||
}
|
||||
// Sanity check that the forked chain can be imported into the original
|
||||
@ -120,7 +120,7 @@ func testFork(t *testing.T, LightChain *LightChain, i, n int, comparator func(td
|
||||
func testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error {
|
||||
for _, header := range chain {
|
||||
// Try and validate the header
|
||||
if err := lightchain.engine.VerifyHeader(lightchain.hc, header, true); err != nil {
|
||||
if err := lightchain.engine.VerifyHeader(lightchain.hc, header); err != nil {
|
||||
return err
|
||||
}
|
||||
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
|
||||
@ -300,8 +300,8 @@ func testReorg(t *testing.T, first, second []int, td int64) {
|
||||
bc := newTestLightChain()
|
||||
|
||||
// Insert an easy and a difficult chain afterwards
|
||||
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, first, 11), 1)
|
||||
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, second, 22), 1)
|
||||
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, first, 11))
|
||||
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, second, 22))
|
||||
// Check that the chain is valid number and link wise
|
||||
prev := bc.CurrentHeader()
|
||||
for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
|
||||
@ -324,7 +324,7 @@ func TestBadHeaderHashes(t *testing.T) {
|
||||
var err error
|
||||
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10)
|
||||
core.BadHashes[headers[2].Hash()] = true
|
||||
if _, err = bc.InsertHeaderChain(headers, 1); !errors.Is(err, core.ErrBannedHash) {
|
||||
if _, err = bc.InsertHeaderChain(headers); !errors.Is(err, core.ErrBannedHash) {
|
||||
t.Errorf("error mismatch: have: %v, want %v", err, core.ErrBannedHash)
|
||||
}
|
||||
}
|
||||
@ -337,7 +337,7 @@ func TestReorgBadHeaderHashes(t *testing.T) {
|
||||
// Create a chain, import and ban afterwards
|
||||
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
|
||||
|
||||
if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
|
||||
if _, err := bc.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatalf("failed to import headers: %v", err)
|
||||
}
|
||||
if bc.CurrentHeader().Hash() != headers[3].Hash() {
|
||||
|
@ -292,7 +292,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
|
||||
for i, block := range gchain {
|
||||
headers[i] = block.Header()
|
||||
}
|
||||
if _, err := lightchain.InsertHeaderChain(headers, 1); err != nil {
|
||||
if _, err := lightchain.InsertHeaderChain(headers); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ func TestTxPool(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := lightchain.InsertHeaderChain([]*types.Header{block.Header()}, 1); err != nil {
|
||||
if _, err := lightchain.InsertHeaderChain([]*types.Header{block.Header()}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
|
@ -45,15 +45,12 @@ type Backend interface {
|
||||
|
||||
// Config is the configuration parameters of mining.
|
||||
type Config struct {
|
||||
Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards
|
||||
Notify []string `toml:",omitempty"` // HTTP URL list to be notified of new work packages (only useful in ethash).
|
||||
NotifyFull bool `toml:",omitempty"` // Notify with pending block headers instead of work packages
|
||||
ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner
|
||||
GasFloor uint64 // Target gas floor for mined blocks.
|
||||
GasCeil uint64 // Target gas ceiling for mined blocks.
|
||||
GasPrice *big.Int // Minimum gas price for mining a transaction
|
||||
Recommit time.Duration // The time interval for miner to re-create mining work.
|
||||
Noverify bool // Disable remote mining solution verification(only useful in ethash).
|
||||
Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards
|
||||
ExtraData hexutil.Bytes `toml:",omitempty"` // Block extra data set by the miner
|
||||
GasFloor uint64 // Target gas floor for mined blocks.
|
||||
GasCeil uint64 // Target gas ceiling for mined blocks.
|
||||
GasPrice *big.Int // Minimum gas price for mining a transaction
|
||||
Recommit time.Duration // The time interval for miner to re-create mining work.
|
||||
|
||||
NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload
|
||||
}
|
||||
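To make the trimmed struct concrete, a hypothetical configuration literal (values are illustrative, not defaults taken from this repository) now only carries the generic block-building knobs:
cfg := miner.Config{
	Etherbase: common.HexToAddress("0x0000000000000000000000000000000000000001"),
	GasCeil:   30_000_000,
	GasPrice:  big.NewInt(params.GWei),
	Recommit:  2 * time.Second,
	// Notify, NotifyFull and Noverify disappear together with remote ethash sealing.
}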
|
@ -1,264 +0,0 @@
|
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains a miner stress test for eip 1559.
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
crand "crypto/rand"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
var (
|
||||
londonBlock = big.NewInt(30) // Predefined london fork block for activating eip 1559.
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
fdlimit.Raise(2048)
|
||||
|
||||
// Generate a batch of accounts to seal and fund with
|
||||
faucets := make([]*ecdsa.PrivateKey, 128)
|
||||
for i := 0; i < len(faucets); i++ {
|
||||
faucets[i], _ = crypto.GenerateKey()
|
||||
}
|
||||
// Pre-generate the ethash mining DAG so we don't race
|
||||
ethash.MakeDataset(1, ethconfig.Defaults.Ethash.DatasetDir)
|
||||
|
||||
// Create an Ethash network
|
||||
genesis := makeGenesis(faucets)
|
||||
|
||||
// Handle interrupts.
|
||||
interruptCh := make(chan os.Signal, 5)
|
||||
signal.Notify(interruptCh, os.Interrupt)
|
||||
|
||||
var (
|
||||
stacks []*node.Node
|
||||
nodes []*eth.Ethereum
|
||||
enodes []*enode.Node
|
||||
)
|
||||
for i := 0; i < 4; i++ {
|
||||
// Start the node and wait until it's up
|
||||
stack, ethBackend, err := makeMiner(genesis)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer stack.Close()
|
||||
|
||||
for stack.Server().NodeInfo().Ports.Listener == 0 {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
// Connect the node to all the previous ones
|
||||
for _, n := range enodes {
|
||||
stack.Server().AddPeer(n)
|
||||
}
|
||||
// Start tracking the node and its enode
|
||||
nodes = append(nodes, ethBackend)
|
||||
enodes = append(enodes, stack.Server().Self())
|
||||
}
|
||||
|
||||
// Iterate over all the nodes and start mining
|
||||
time.Sleep(3 * time.Second)
|
||||
for _, node := range nodes {
|
||||
if err := node.StartMining(1); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Start injecting transactions from the faucets like crazy
|
||||
var (
|
||||
nonces = make([]uint64, len(faucets))
|
||||
|
||||
// The signer activates the 1559 features even before the fork,
|
||||
// so the new 1559 txs can be created with this signer.
|
||||
signer = types.LatestSignerForChainID(genesis.Config.ChainID)
|
||||
)
|
||||
for {
|
||||
// Stop when interrupted.
|
||||
select {
|
||||
case <-interruptCh:
|
||||
for _, node := range stacks {
|
||||
node.Close()
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Pick a random mining node
|
||||
index := rand.Intn(len(faucets))
|
||||
backend := nodes[index%len(nodes)]
|
||||
|
||||
headHeader := backend.BlockChain().CurrentHeader()
|
||||
baseFee := headHeader.BaseFee
|
||||
|
||||
// Create a self transaction and inject into the pool. The legacy
|
||||
// and 1559 transactions can all be created by random even if the
|
||||
// fork is not happened.
|
||||
tx := makeTransaction(nonces[index], faucets[index], signer, baseFee)
|
||||
if err := backend.TxPool().AddLocal(tx); err != nil {
|
||||
continue
|
||||
}
|
||||
nonces[index]++
|
||||
|
||||
// Wait if we're too saturated
|
||||
if pend, _ := backend.TxPool().Stats(); pend > 4192 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Wait if the basefee is raised too fast
|
||||
if baseFee != nil && baseFee.Cmp(new(big.Int).Mul(big.NewInt(100), big.NewInt(params.GWei))) > 0 {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeTransaction(nonce uint64, privKey *ecdsa.PrivateKey, signer types.Signer, baseFee *big.Int) *types.Transaction {
|
||||
// Generate legacy transaction
|
||||
if rand.Intn(2) == 0 {
|
||||
tx, err := types.SignTx(types.NewTransaction(nonce, crypto.PubkeyToAddress(privKey.PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), signer, privKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tx
|
||||
}
|
||||
// Generate eip 1559 transaction
|
||||
recipient := crypto.PubkeyToAddress(privKey.PublicKey)
|
||||
|
||||
// Feecap and feetip are limited to 32 bytes. Offer a sightly
|
||||
// larger buffer for creating both valid and invalid transactions.
|
||||
var buf = make([]byte, 32+5)
|
||||
crand.Read(buf)
|
||||
gasTipCap := new(big.Int).SetBytes(buf)
|
||||
|
||||
// If the given base fee is nil(the 1559 is still not available),
|
||||
// generate a fake base fee in order to create 1559 tx forcibly.
|
||||
if baseFee == nil {
|
||||
baseFee = new(big.Int).SetInt64(int64(rand.Int31()))
|
||||
}
|
||||
// Generate the feecap, 75% valid feecap and 25% unguaranteed.
|
||||
var gasFeeCap *big.Int
|
||||
if rand.Intn(4) == 0 {
|
||||
crand.Read(buf)
|
||||
gasFeeCap = new(big.Int).SetBytes(buf)
|
||||
} else {
|
||||
gasFeeCap = new(big.Int).Add(baseFee, gasTipCap)
|
||||
}
|
||||
return types.MustSignNewTx(privKey, signer, &types.DynamicFeeTx{
|
||||
ChainID: signer.ChainID(),
|
||||
Nonce: nonce,
|
||||
GasTipCap: gasTipCap,
|
||||
GasFeeCap: gasFeeCap,
|
||||
Gas: 21000,
|
||||
To: &recipient,
|
||||
Value: big.NewInt(100),
|
||||
Data: nil,
|
||||
AccessList: nil,
|
||||
})
|
||||
}
|
||||
|
||||
// makeGenesis creates a custom Ethash genesis block based on some pre-defined
|
||||
// faucet accounts.
|
||||
func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
|
||||
genesis := core.DefaultGenesisBlock()
|
||||
|
||||
genesis.Config = params.AllEthashProtocolChanges
|
||||
genesis.Config.LondonBlock = londonBlock
|
||||
genesis.Difficulty = params.MinimumDifficulty
|
||||
|
||||
// Small gaslimit for easier basefee moving testing.
|
||||
genesis.GasLimit = 8_000_000
|
||||
|
||||
genesis.Config.ChainID = big.NewInt(18)
|
||||
|
||||
genesis.Alloc = core.GenesisAlloc{}
|
||||
for _, faucet := range faucets {
|
||||
genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{
|
||||
Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil),
|
||||
}
|
||||
}
|
||||
if londonBlock.Sign() == 0 {
|
||||
log.Info("Enabled the eip 1559 by default")
|
||||
} else {
|
||||
log.Info("Registered the london fork", "number", londonBlock)
|
||||
}
|
||||
return genesis
|
||||
}
|
||||
|
||||
func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := os.MkdirTemp("", "")
|
||||
|
||||
config := &node.Config{
|
||||
Name: "geth",
|
||||
Version: params.Version,
|
||||
DataDir: datadir,
|
||||
P2P: p2p.Config{
|
||||
ListenAddr: "0.0.0.0:0",
|
||||
NoDiscovery: true,
|
||||
MaxPeers: 25,
|
||||
},
|
||||
UseLightweightKDF: true,
|
||||
}
|
||||
// Create the node and configure a full Ethereum node on it
|
||||
stack, err := node.New(config)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ethBackend, err := eth.New(stack, &ethconfig.Config{
|
||||
Genesis: genesis,
|
||||
NetworkId: genesis.Config.ChainID.Uint64(),
|
||||
SyncMode: downloader.FullSync,
|
||||
DatabaseCache: 256,
|
||||
DatabaseHandles: 256,
|
||||
TxPool: txpool.DefaultConfig,
|
||||
GPO: ethconfig.Defaults.GPO,
|
||||
Ethash: ethconfig.Defaults.Ethash,
|
||||
Miner: miner.Config{
|
||||
Etherbase: common.Address{1},
|
||||
GasCeil: genesis.GasLimit * 11 / 10,
|
||||
GasPrice: big.NewInt(1),
|
||||
Recommit: time.Second,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = stack.Start()
|
||||
return stack, ethBackend, err
|
||||
}
|
@ -1,558 +0,0 @@
|
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains a miner stress test for the eth1/2 transition
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/beacon/engine"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
type nodetype int
|
||||
|
||||
const (
|
||||
legacyMiningNode nodetype = iota
|
||||
legacyNormalNode
|
||||
eth2MiningNode
|
||||
eth2NormalNode
|
||||
eth2LightClient
|
||||
)
|
||||
|
||||
func (typ nodetype) String() string {
|
||||
switch typ {
|
||||
case legacyMiningNode:
|
||||
return "legacyMiningNode"
|
||||
case legacyNormalNode:
|
||||
return "legacyNormalNode"
|
||||
case eth2MiningNode:
|
||||
return "eth2MiningNode"
|
||||
case eth2NormalNode:
|
||||
return "eth2NormalNode"
|
||||
case eth2LightClient:
|
||||
return "eth2LightClient"
|
||||
default:
|
||||
return "undefined"
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// transitionDifficulty is the target total difficulty for transition
|
||||
transitionDifficulty = new(big.Int).Mul(big.NewInt(20), params.MinimumDifficulty)
|
||||
|
||||
// blockInterval is the time interval for creating a new eth2 block
|
||||
blockIntervalInt = 3
|
||||
blockInterval = time.Second * time.Duration(blockIntervalInt)
|
||||
|
||||
// finalizationDist is the block distance for finalizing block
|
||||
finalizationDist = 10
|
||||
)
|
||||
|
||||
type ethNode struct {
|
||||
typ nodetype
|
||||
stack *node.Node
|
||||
enode *enode.Node
|
||||
api *ethcatalyst.ConsensusAPI
|
||||
ethBackend *eth.Ethereum
|
||||
lapi *lescatalyst.ConsensusAPI
|
||||
lesBackend *les.LightEthereum
|
||||
}
|
||||
|
||||
func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode {
|
||||
var (
|
||||
err error
|
||||
api *ethcatalyst.ConsensusAPI
|
||||
lapi *lescatalyst.ConsensusAPI
|
||||
stack *node.Node
|
||||
ethBackend *eth.Ethereum
|
||||
lesBackend *les.LightEthereum
|
||||
)
|
||||
// Start the node and wait until it's up
|
||||
if typ == eth2LightClient {
|
||||
stack, lesBackend, lapi, err = makeLightNode(genesis)
|
||||
} else {
|
||||
stack, ethBackend, api, err = makeFullNode(genesis)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for stack.Server().NodeInfo().Ports.Listener == 0 {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
// Connect the node to all the previous ones
|
||||
for _, n := range enodes {
|
||||
stack.Server().AddPeer(n)
|
||||
}
|
||||
enode := stack.Server().Self()
|
||||
|
||||
// Inject the signer key and start sealing with it
|
||||
stack.AccountManager().AddBackend(keystore.NewPlaintextKeyStore("beacon-stress"))
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)
|
||||
if len(ks) == 0 {
|
||||
panic("Keystore is not available")
|
||||
}
|
||||
store := ks[0].(*keystore.KeyStore)
|
||||
if _, err := store.NewAccount(""); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &ethNode{
|
||||
typ: typ,
|
||||
api: api,
|
||||
ethBackend: ethBackend,
|
||||
lapi: lapi,
|
||||
lesBackend: lesBackend,
|
||||
stack: stack,
|
||||
enode: enode,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*engine.ExecutableData, error) {
|
||||
if n.typ != eth2MiningNode {
|
||||
return nil, errors.New("invalid node type")
|
||||
}
|
||||
timestamp := uint64(time.Now().Unix())
|
||||
if timestamp <= parentTimestamp {
|
||||
timestamp = parentTimestamp + 1
|
||||
}
|
||||
payloadAttribute := engine.PayloadAttributes{
|
||||
Timestamp: timestamp,
|
||||
Random: common.Hash{},
|
||||
SuggestedFeeRecipient: common.HexToAddress("0xdeadbeef"),
|
||||
}
|
||||
fcState := engine.ForkchoiceStateV1{
|
||||
HeadBlockHash: parentHash,
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
payload, err := n.api.ForkchoiceUpdatedV1(fcState, &payloadAttribute)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
time.Sleep(time.Second * 5) // give enough time for block creation
|
||||
return n.api.GetPayloadV1(*payload.PayloadID)
|
||||
}
|
||||
|
||||
func (n *ethNode) insertBlock(eb engine.ExecutableData) error {
|
||||
if !eth2types(n.typ) {
|
||||
return errors.New("invalid node type")
|
||||
}
|
||||
switch n.typ {
|
||||
case eth2NormalNode, eth2MiningNode:
|
||||
newResp, err := n.api.NewPayloadV1(eb)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if newResp.Status != "VALID" {
|
||||
return errors.New("failed to insert block")
|
||||
}
|
||||
return nil
|
||||
case eth2LightClient:
|
||||
newResp, err := n.lapi.ExecutePayloadV1(eb)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if newResp.Status != "VALID" {
|
||||
return errors.New("failed to insert block")
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return errors.New("undefined node")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed engine.ExecutableData) error {
|
||||
if !eth2types(n.typ) {
|
||||
return errors.New("invalid node type")
|
||||
}
|
||||
if err := n.insertBlock(ed); err != nil {
|
||||
return err
|
||||
}
|
||||
block, err := engine.ExecutableDataToBlock(ed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fcState := engine.ForkchoiceStateV1{
|
||||
HeadBlockHash: block.ParentHash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
switch n.typ {
|
||||
case eth2NormalNode, eth2MiningNode:
|
||||
if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case eth2LightClient:
|
||||
if _, err := n.lapi.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return errors.New("undefined node")
|
||||
}
|
||||
}
|
||||
|
||||
type nodeManager struct {
|
||||
genesis *core.Genesis
|
||||
genesisBlock *types.Block
|
||||
nodes []*ethNode
|
||||
enodes []*enode.Node
|
||||
close chan struct{}
|
||||
}
|
||||
|
||||
func newNodeManager(genesis *core.Genesis) *nodeManager {
|
||||
return &nodeManager{
|
||||
close: make(chan struct{}),
|
||||
genesis: genesis,
|
||||
genesisBlock: genesis.ToBlock(),
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *nodeManager) createNode(typ nodetype) {
|
||||
node := newNode(typ, mgr.genesis, mgr.enodes)
|
||||
mgr.nodes = append(mgr.nodes, node)
|
||||
mgr.enodes = append(mgr.enodes, node.enode)
|
||||
}
|
||||
|
||||
func (mgr *nodeManager) getNodes(typ nodetype) []*ethNode {
|
||||
var ret []*ethNode
|
||||
for _, node := range mgr.nodes {
|
||||
if node.typ == typ {
|
||||
ret = append(ret, node)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (mgr *nodeManager) startMining() {
|
||||
for _, node := range append(mgr.getNodes(eth2MiningNode), mgr.getNodes(legacyMiningNode)...) {
|
||||
if err := node.ethBackend.StartMining(1); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *nodeManager) shutdown() {
|
||||
close(mgr.close)
|
||||
for _, node := range mgr.nodes {
|
||||
node.stack.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (mgr *nodeManager) run() {
|
||||
if len(mgr.nodes) == 0 {
|
||||
return
|
||||
}
|
||||
chain := mgr.nodes[0].ethBackend.BlockChain()
|
||||
sink := make(chan core.ChainHeadEvent, 1024)
|
||||
sub := chain.SubscribeChainHeadEvent(sink)
|
||||
defer sub.Unsubscribe()
|
||||
|
||||
var (
|
||||
transitioned bool
|
||||
parentBlock *types.Block
|
||||
waitFinalise []*types.Block
|
||||
)
|
||||
timer := time.NewTimer(0)
|
||||
defer timer.Stop()
|
||||
<-timer.C // discard the initial tick
|
||||
|
||||
// Handle the by default transition.
|
||||
if transitionDifficulty.Sign() == 0 {
|
||||
transitioned = true
|
||||
parentBlock = mgr.genesisBlock
|
||||
timer.Reset(blockInterval)
|
||||
log.Info("Enable the transition by default")
|
||||
}
|
||||
|
||||
// Handle the block finalization.
|
||||
checkFinalise := func() {
|
||||
if parentBlock == nil {
|
||||
return
|
||||
}
|
||||
if len(waitFinalise) == 0 {
|
||||
return
|
||||
}
|
||||
oldest := waitFinalise[0]
|
||||
if oldest.NumberU64() > parentBlock.NumberU64() {
|
||||
return
|
||||
}
|
||||
distance := parentBlock.NumberU64() - oldest.NumberU64()
|
||||
if int(distance) < finalizationDist {
|
||||
return
|
||||
}
|
||||
nodes := mgr.getNodes(eth2MiningNode)
|
||||
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
|
||||
//nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
|
||||
for _, node := range nodes {
|
||||
fcState := engine.ForkchoiceStateV1{
|
||||
HeadBlockHash: parentBlock.Hash(),
|
||||
SafeBlockHash: oldest.Hash(),
|
||||
FinalizedBlockHash: oldest.Hash(),
|
||||
}
|
||||
node.api.ForkchoiceUpdatedV1(fcState, nil)
|
||||
}
|
||||
log.Info("Finalised eth2 block", "number", oldest.NumberU64(), "hash", oldest.Hash())
|
||||
waitFinalise = waitFinalise[1:]
|
||||
}
|
||||
|
||||
for {
|
||||
checkFinalise()
|
||||
select {
|
||||
case <-mgr.close:
|
||||
return
|
||||
|
||||
case ev := <-sink:
|
||||
if transitioned {
|
||||
continue
|
||||
}
|
||||
td := chain.GetTd(ev.Block.Hash(), ev.Block.NumberU64())
|
||||
if td.Cmp(transitionDifficulty) < 0 {
|
||||
continue
|
||||
}
|
||||
transitioned, parentBlock = true, ev.Block
|
||||
timer.Reset(blockInterval)
|
||||
log.Info("Transition difficulty reached", "td", td, "target", transitionDifficulty, "number", ev.Block.NumberU64(), "hash", ev.Block.Hash())
|
||||
|
||||
case <-timer.C:
|
||||
producers := mgr.getNodes(eth2MiningNode)
|
||||
if len(producers) == 0 {
|
||||
continue
|
||||
}
|
||||
hash, timestamp := parentBlock.Hash(), parentBlock.Time()
|
||||
if parentBlock.NumberU64() == 0 {
|
||||
timestamp = uint64(time.Now().Unix()) - uint64(blockIntervalInt)
|
||||
}
|
||||
ed, err := producers[0].assembleBlock(hash, timestamp)
|
||||
if err != nil {
|
||||
log.Error("Failed to assemble the block", "err", err)
|
||||
continue
|
||||
}
|
||||
block, _ := engine.ExecutableDataToBlock(*ed)
|
||||
|
||||
nodes := mgr.getNodes(eth2MiningNode)
|
||||
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
|
||||
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
|
||||
for _, node := range nodes {
|
||||
if err := node.insertBlockAndSetHead(parentBlock.Header(), *ed); err != nil {
|
||||
log.Error("Failed to insert block", "type", node.typ, "err", err)
|
||||
}
|
||||
}
|
||||
log.Info("Create and insert eth2 block", "number", ed.Number)
|
||||
parentBlock = block
|
||||
waitFinalise = append(waitFinalise, block)
|
||||
timer.Reset(blockInterval)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
fdlimit.Raise(2048)
|
||||
|
||||
// Generate a batch of accounts to seal and fund with
|
||||
faucets := make([]*ecdsa.PrivateKey, 16)
|
||||
for i := 0; i < len(faucets); i++ {
|
||||
faucets[i], _ = crypto.GenerateKey()
|
||||
}
|
||||
// Pre-generate the ethash mining DAG so we don't race
|
||||
ethash.MakeDataset(1, filepath.Join(os.Getenv("HOME"), ".ethash"))
|
||||
|
||||
// Create an Ethash network
|
||||
genesis := makeGenesis(faucets)
|
||||
manager := newNodeManager(genesis)
|
||||
defer manager.shutdown()
|
||||
|
||||
manager.createNode(eth2NormalNode)
|
||||
manager.createNode(eth2MiningNode)
|
||||
manager.createNode(legacyMiningNode)
|
||||
manager.createNode(legacyNormalNode)
|
||||
manager.createNode(eth2LightClient)
|
||||
|
||||
// Iterate over all the nodes and start mining
|
||||
time.Sleep(3 * time.Second)
|
||||
if transitionDifficulty.Sign() != 0 {
|
||||
manager.startMining()
|
||||
}
|
||||
go manager.run()
|
||||
|
||||
// Start injecting transactions from the faucets like crazy
|
||||
time.Sleep(3 * time.Second)
|
||||
nonces := make([]uint64, len(faucets))
|
||||
for {
|
||||
// Pick a random mining node
|
||||
nodes := manager.getNodes(eth2MiningNode)
|
||||
|
||||
index := rand.Intn(len(faucets))
|
||||
node := nodes[index%len(nodes)]
|
||||
|
||||
// Create a self transaction and inject into the pool
|
||||
tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(10_000_000_000+rand.Int63n(6_553_600_000)), nil), types.HomesteadSigner{}, faucets[index])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := node.ethBackend.TxPool().AddLocal(tx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
nonces[index]++
|
||||
|
||||
// Wait if we're too saturated
|
||||
if pend, _ := node.ethBackend.TxPool().Stats(); pend > 2048 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeGenesis creates a custom Ethash genesis block based on some pre-defined
|
||||
// faucet accounts.
|
||||
func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
|
||||
genesis := core.DefaultGenesisBlock()
|
||||
genesis.Difficulty = params.MinimumDifficulty
|
||||
genesis.GasLimit = 25000000
|
||||
|
||||
genesis.BaseFee = big.NewInt(params.InitialBaseFee)
|
||||
genesis.Config = params.AllEthashProtocolChanges
|
||||
genesis.Config.TerminalTotalDifficulty = transitionDifficulty
|
||||
|
||||
genesis.Alloc = core.GenesisAlloc{}
|
||||
for _, faucet := range faucets {
|
||||
genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{
|
||||
Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil),
|
||||
}
|
||||
}
|
||||
return genesis
|
||||
}
|
||||
|
||||
func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *ethcatalyst.ConsensusAPI, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := os.MkdirTemp("", "")
|
||||
|
||||
config := &node.Config{
|
||||
Name: "geth",
|
||||
Version: params.Version,
|
||||
DataDir: datadir,
|
||||
P2P: p2p.Config{
|
||||
ListenAddr: "0.0.0.0:0",
|
||||
NoDiscovery: true,
|
||||
MaxPeers: 25,
|
||||
},
|
||||
UseLightweightKDF: true,
|
||||
}
|
||||
// Create the node and configure a full Ethereum node on it
|
||||
stack, err := node.New(config)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
econfig := &ethconfig.Config{
|
||||
Genesis: genesis,
|
||||
NetworkId: genesis.Config.ChainID.Uint64(),
|
||||
SyncMode: downloader.FullSync,
|
||||
DatabaseCache: 256,
|
||||
DatabaseHandles: 256,
|
||||
TxPool: txpool.DefaultConfig,
|
||||
GPO: ethconfig.Defaults.GPO,
|
||||
Ethash: ethconfig.Defaults.Ethash,
|
||||
Miner: miner.Config{
|
||||
GasFloor: genesis.GasLimit * 9 / 10,
|
||||
GasCeil: genesis.GasLimit * 11 / 10,
|
||||
GasPrice: big.NewInt(1),
|
||||
Recommit: 1 * time.Second,
|
||||
},
|
||||
LightServ: 100,
|
||||
LightPeers: 10,
|
||||
LightNoSyncServe: true,
|
||||
}
|
||||
ethBackend, err := eth.New(stack, econfig)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
_, err = les.NewLesServer(stack, ethBackend, econfig)
|
||||
if err != nil {
|
||||
log.Crit("Failed to create the LES server", "err", err)
|
||||
}
|
||||
err = stack.Start()
|
||||
return stack, ethBackend, ethcatalyst.NewConsensusAPI(ethBackend), err
|
||||
}
|
||||
|
||||
func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *lescatalyst.ConsensusAPI, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := os.MkdirTemp("", "")
|
||||
|
||||
config := &node.Config{
|
||||
Name: "geth",
|
||||
Version: params.Version,
|
||||
DataDir: datadir,
|
||||
P2P: p2p.Config{
|
||||
ListenAddr: "0.0.0.0:0",
|
||||
NoDiscovery: true,
|
||||
MaxPeers: 25,
|
||||
},
|
||||
UseLightweightKDF: true,
|
||||
}
|
||||
// Create the node and configure a full Ethereum node on it
|
||||
stack, err := node.New(config)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
lesBackend, err := les.New(stack, &ethconfig.Config{
|
||||
Genesis: genesis,
|
||||
NetworkId: genesis.Config.ChainID.Uint64(),
|
||||
SyncMode: downloader.LightSync,
|
||||
DatabaseCache: 256,
|
||||
DatabaseHandles: 256,
|
||||
TxPool: txpool.DefaultConfig,
|
||||
GPO: ethconfig.Defaults.GPO,
|
||||
Ethash: ethconfig.Defaults.Ethash,
|
||||
LightPeers: 10,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
err = stack.Start()
|
||||
return stack, lesBackend, lescatalyst.NewConsensusAPI(lesBackend), err
|
||||
}
|
||||
|
||||
func eth2types(typ nodetype) bool {
|
||||
if typ == eth2LightClient || typ == eth2NormalNode || typ == eth2MiningNode {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
@ -104,7 +104,7 @@ func main() {
|
||||
// Iterate over all the nodes and start signing on them
|
||||
time.Sleep(3 * time.Second)
|
||||
for _, node := range nodes {
|
||||
if err := node.StartMining(1); err != nil {
|
||||
if err := node.StartMining(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
@ -1,194 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// This file contains a miner stress test based on the Ethash consensus engine.
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/txpool"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
fdlimit.Raise(2048)
|
||||
|
||||
// Generate a batch of accounts to seal and fund with
|
||||
faucets := make([]*ecdsa.PrivateKey, 128)
|
||||
for i := 0; i < len(faucets); i++ {
|
||||
faucets[i], _ = crypto.GenerateKey()
|
||||
}
|
||||
// Pre-generate the ethash mining DAG so we don't race
|
||||
ethash.MakeDataset(1, ethconfig.Defaults.Ethash.DatasetDir)
|
||||
|
||||
// Create an Ethash network
|
||||
genesis := makeGenesis(faucets)
|
||||
|
||||
// Handle interrupts.
|
||||
interruptCh := make(chan os.Signal, 5)
|
||||
signal.Notify(interruptCh, os.Interrupt)
|
||||
|
||||
var (
|
||||
stacks []*node.Node
|
||||
nodes []*eth.Ethereum
|
||||
enodes []*enode.Node
|
||||
)
|
||||
for i := 0; i < 4; i++ {
|
||||
// Start the node and wait until it's up
|
||||
stack, ethBackend, err := makeMiner(genesis)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer stack.Close()
|
||||
|
||||
for stack.Server().NodeInfo().Ports.Listener == 0 {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
// Connect the node to all the previous ones
|
||||
for _, n := range enodes {
|
||||
stack.Server().AddPeer(n)
|
||||
}
|
||||
// Start tracking the node and its enode
|
||||
stacks = append(stacks, stack)
|
||||
nodes = append(nodes, ethBackend)
|
||||
enodes = append(enodes, stack.Server().Self())
|
||||
}
|
||||
|
||||
// Iterate over all the nodes and start mining
|
||||
time.Sleep(3 * time.Second)
|
||||
for _, node := range nodes {
|
||||
if err := node.StartMining(1); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Start injecting transactions from the faucets like crazy
|
||||
nonces := make([]uint64, len(faucets))
|
||||
for {
|
||||
// Stop when interrupted.
|
||||
select {
|
||||
case <-interruptCh:
|
||||
for _, node := range stacks {
|
||||
node.Close()
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Pick a random mining node
|
||||
index := rand.Intn(len(faucets))
|
||||
backend := nodes[index%len(nodes)]
|
||||
|
||||
// Create a self transaction and inject into the pool
|
||||
tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), types.HomesteadSigner{}, faucets[index])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := backend.TxPool().AddLocal(tx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
nonces[index]++
|
||||
|
||||
// Wait if we're too saturated
|
||||
if pend, _ := backend.TxPool().Stats(); pend > 2048 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeGenesis creates a custom Ethash genesis block based on some pre-defined
|
||||
// faucet accounts.
|
||||
func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
|
||||
genesis := core.DefaultGenesisBlock()
|
||||
genesis.Difficulty = params.MinimumDifficulty
|
||||
genesis.GasLimit = 25000000
|
||||
|
||||
genesis.Config.ChainID = big.NewInt(18)
|
||||
|
||||
genesis.Alloc = core.GenesisAlloc{}
|
||||
for _, faucet := range faucets {
|
||||
genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{
|
||||
Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil),
|
||||
}
|
||||
}
|
||||
return genesis
|
||||
}
|
||||
|
||||
func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := os.MkdirTemp("", "")
|
||||
|
||||
config := &node.Config{
|
||||
Name: "geth",
|
||||
Version: params.Version,
|
||||
DataDir: datadir,
|
||||
P2P: p2p.Config{
|
||||
ListenAddr: "0.0.0.0:0",
|
||||
NoDiscovery: true,
|
||||
MaxPeers: 25,
|
||||
},
|
||||
UseLightweightKDF: true,
|
||||
}
|
||||
// Create the node and configure a full Ethereum node on it
|
||||
stack, err := node.New(config)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ethBackend, err := eth.New(stack, &ethconfig.Config{
|
||||
Genesis: genesis,
|
||||
NetworkId: genesis.Config.ChainID.Uint64(),
|
||||
SyncMode: downloader.FullSync,
|
||||
DatabaseCache: 256,
|
||||
DatabaseHandles: 256,
|
||||
TxPool: txpool.DefaultConfig,
|
||||
GPO: ethconfig.Defaults.GPO,
|
||||
Ethash: ethconfig.Defaults.Ethash,
|
||||
Miner: miner.Config{
|
||||
Etherbase: common.Address{1},
|
||||
GasCeil: genesis.GasLimit * 11 / 10,
|
||||
GasPrice: big.NewInt(1),
|
||||
Recommit: time.Second,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
err = stack.Start()
|
||||
return stack, ethBackend, err
|
||||
}
|
@ -208,10 +208,6 @@ func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consens
|
||||
return w, backend
|
||||
}
|
||||
|
||||
func TestGenerateBlockAndImportEthash(t *testing.T) {
|
||||
testGenerateBlockAndImport(t, false)
|
||||
}
|
||||
|
||||
func TestGenerateBlockAndImportClique(t *testing.T) {
|
||||
testGenerateBlockAndImport(t, true)
|
||||
}
|
||||
|
@ -152,7 +152,7 @@ var (
|
||||
CancunTime: nil,
|
||||
PragueTime: nil,
|
||||
TerminalTotalDifficulty: nil,
|
||||
TerminalTotalDifficultyPassed: false,
|
||||
TerminalTotalDifficultyPassed: true,
|
||||
Ethash: new(EthashConfig),
|
||||
Clique: nil,
|
||||
}
|
||||
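A short sketch of what the flipped default means downstream (assuming the updated params and ethconfig packages, with an in-memory database purely for illustration): test chains derived from AllEthashProtocolChanges are now treated as already merged, which is exactly the condition the new CreateConsensusEngine enforces:
cfg := params.AllEthashProtocolChanges
if cfg.TerminalTotalDifficultyPassed {
	// The historical-ethash shim can be built for such a test chain.
	engine, _ := ethconfig.CreateConsensusEngine(cfg, rawdb.NewMemoryDatabase())
	_ = engine // beacon.New(ethash.NewFaker()) under the hood
}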
|
@ -29,7 +29,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/beacon"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
@ -117,14 +116,8 @@ func (t *BlockTest) Run(snapshotter bool) error {
|
||||
if gblock.Root() != t.json.Genesis.StateRoot {
|
||||
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
|
||||
}
|
||||
var engine consensus.Engine
|
||||
if t.json.SealEngine == "NoProof" {
|
||||
engine = ethash.NewFaker()
|
||||
} else {
|
||||
engine = ethash.NewShared()
|
||||
}
|
||||
// Wrap the original engine within the beacon-engine
|
||||
engine = beacon.New(engine)
|
||||
engine := beacon.New(ethash.NewFaker())
|
||||
|
||||
cache := &core.CacheConfig{TrieCleanLimit: 0}
|
||||
if snapshotter {
|
||||
|