Initial commit of merge geth v1.11.5
commit e986bbfa6f

.github/CODEOWNERS (vendored, 2 lines changed)
@@ -10,7 +10,7 @@ core/ @karalabe @holiman @rjl493456442
 eth/          @karalabe @holiman @rjl493456442
 eth/catalyst/ @gballet
 eth/tracers/  @s1na
-graphql/      @gballet @s1na
+graphql/      @s1na
 les/          @zsfelfoldi @rjl493456442
 light/        @zsfelfoldi @rjl493456442
 node/         @fjl
.gitignore (vendored, 1 line changed)
@@ -47,3 +47,4 @@ profile.cov
 /dashboard/assets/package-lock.json
 
 **/yarn-error.log
+logs/
accounts/abi/bind/base.go:
@@ -34,6 +34,11 @@ import (
 
 const basefeeWiggleMultiplier = 2
 
+var (
+    errNoEventSignature       = errors.New("no event signature")
+    errEventSignatureMismatch = errors.New("event signature mismatch")
+)
+
 // SignerFn is a signer function callback when a contract requires a method to
 // sign the transaction before submission.
 type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
@@ -488,8 +493,12 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
 
 // UnpackLog unpacks a retrieved log into the provided output structure.
 func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
+    // Anonymous events are not supported.
+    if len(log.Topics) == 0 {
+        return errNoEventSignature
+    }
     if log.Topics[0] != c.abi.Events[event].ID {
-        return fmt.Errorf("event signature mismatch")
+        return errEventSignatureMismatch
    }
     if len(log.Data) > 0 {
         if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -507,8 +516,12 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
 
 // UnpackLogIntoMap unpacks a retrieved log into the provided map.
 func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
+    // Anonymous events are not supported.
+    if len(log.Topics) == 0 {
+        return errNoEventSignature
+    }
     if log.Topics[0] != c.abi.Events[event].ID {
-        return fmt.Errorf("event signature mismatch")
+        return errEventSignatureMismatch
     }
     if len(log.Data) > 0 {
         if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
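The two hunks above replace ad-hoc fmt.Errorf values with package-level sentinel errors and reject anonymous events up front. A minimal standalone sketch of why sentinels matter to callers follows; the error names mirror the diff, but the surrounding program is illustrative only (geth keeps these sentinels unexported inside the bind package):

    package main

    import (
        "errors"
        "fmt"
    )

    // Sentinel errors can be matched with errors.Is, unlike freshly
    // formatted fmt.Errorf values which only compare equal by message.
    var (
        errNoEventSignature       = errors.New("no event signature")
        errEventSignatureMismatch = errors.New("event signature mismatch")
    )

    // unpack mimics the shape of the patched UnpackLog: it returns a
    // sentinel instead of constructing a new error each time.
    func unpack(topics []string, want string) error {
        if len(topics) == 0 {
            return errNoEventSignature
        }
        if topics[0] != want {
            return errEventSignatureMismatch
        }
        return nil
    }

    func main() {
        err := unpack(nil, "Transfer")
        // The caller can branch on the exact failure mode.
        if errors.Is(err, errNoEventSignature) {
            fmt.Println("anonymous event: no signature topic present")
        }
    }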
accounts/abi/bind/base_test.go:
@@ -186,6 +186,23 @@ func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
     unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
 }
 
+func TestUnpackAnonymousLogIntoMap(t *testing.T) {
+    mockLog := newMockLog(nil, common.HexToHash("0x0"))
+
+    abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
+    parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
+    bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
+
+    var received map[string]interface{}
+    err := bc.UnpackLogIntoMap(received, "received", mockLog)
+    if err == nil {
+        t.Error("unpacking anonymous event is not supported")
+    }
+    if err.Error() != "no event signature" {
+        t.Errorf("expected error 'no event signature', got '%s'", err)
+    }
+}
+
 func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
     sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
     if err != nil {
build/checksums.txt:
@@ -1,19 +1,19 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-4d0e2850d197b4ddad3bdb0196300179d095bb3aefd4dfbc3b36702c3728f8ab go1.20.2.src.tar.gz
-c93b8ced9517d07e1cd4c362c6e2d5242cb139e29b417a328fbf19aded08764c go1.20.2.darwin-amd64.tar.gz
-7343c87f19e79c0063532e82e1c4d6f42175a32d99f7a4d15e658e88bf97f885 go1.20.2.darwin-arm64.tar.gz
-14f9be2004e042b3a64d0facb0c020756a9084a5c7333e33b0752b393b6016ea go1.20.2.freebsd-386.tar.gz
-b41b67b4f1b56797a7cecf6ee7f47fcf4f93960b2788a3683c07dd009d30b2a4 go1.20.2.freebsd-amd64.tar.gz
-ee240ed33ae57504c41f04c12236aeaa17fbeb6ea9fcd096cd9dc7a89d10d4db go1.20.2.linux-386.tar.gz
-4eaea32f59cde4dc635fbc42161031d13e1c780b87097f4b4234cfce671f1768 go1.20.2.linux-amd64.tar.gz
-78d632915bb75e9a6356a47a42625fd1a785c83a64a643fedd8f61e31b1b3bef go1.20.2.linux-arm64.tar.gz
-d79d56bafd6b52b8d8cbe3f8e967caaac5383a23d7a4fa9ac0e89778cd16a076 go1.20.2.linux-armv6l.tar.gz
-850564ddb760cb703db63bf20182dc4407abd2ff090a95fa66d6634d172fd095 go1.20.2.linux-ppc64le.tar.gz
-8da24c5c4205fe8115f594237e5db7bcb1d23df67bc1fa9a999954b1976896e8 go1.20.2.linux-s390x.tar.gz
-31838b291117495bbb93683603e98d5118bfabd2eb318b4d07540bfd524bab86 go1.20.2.windows-386.zip
-fe439f0e438f7555a7f5f7194ddb6f4a07b0de1fa414385d19f2aeb26d9f43db go1.20.2.windows-amd64.zip
-ac5010c8b8b22849228a8dea698d58b9c7be2195d30c6d778cce0f709858fa64 go1.20.2.windows-arm64.zip
+e447b498cde50215c4f7619e5124b0fc4e25fb5d16ea47271c47f278e7aa763a go1.20.3.src.tar.gz
+c1e1161d6d859deb576e6cfabeb40e3d042ceb1c6f444f617c3c9d76269c3565 go1.20.3.darwin-amd64.tar.gz
+86b0ed0f2b2df50fa8036eea875d1cf2d76cefdacf247c44639a1464b7e36b95 go1.20.3.darwin-arm64.tar.gz
+340e80abd047c597fdc0f50a6cc59617f06c297d62f7fc77f4a0164e2da6f7aa go1.20.3.freebsd-386.tar.gz
+2169fcd8b6c94c5fbe07c0b470ccfb6001d343f6548ad49f3d9ab78e3b5753c7 go1.20.3.freebsd-amd64.tar.gz
+e12384311403f1389d14cc1c1295bfb4e0dd5ab919403b80da429f671a223507 go1.20.3.linux-386.tar.gz
+979694c2c25c735755bf26f4f45e19e64e4811d661dd07b8c010f7a8e18adfca go1.20.3.linux-amd64.tar.gz
+eb186529f13f901e7a2c4438a05c2cd90d74706aaa0a888469b2a4a617b6ee54 go1.20.3.linux-arm64.tar.gz
+b421e90469a83671641f81b6e20df6500f033e9523e89cbe7b7223704dd1035c go1.20.3.linux-armv6l.tar.gz
+943c89aa1624ea544a022b31e3d6e16a037200e436370bdd5fd67f3fa60be282 go1.20.3.linux-ppc64le.tar.gz
+126cf823a5634ef2544b866db107b9d351d3ea70d9e240b0bdcfb46f4dcae54b go1.20.3.linux-s390x.tar.gz
+37e9146e1f9d681cfcaa6fee6c7b890c44c64bc50228c9588f3c4231346d33bd go1.20.3.windows-386.zip
+143a2837821c7dbacf7744cbb1a8421c1f48307c6fdfaeffc5f8c2f69e1b7932 go1.20.3.windows-amd64.zip
+158cb159e00bc979f473e0f5b5a561613129c5e51067967b72b8e072e5a4db81 go1.20.3.windows-arm64.zip
 
 fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
build/ci.go:
@@ -139,7 +139,7 @@ var (
     // This is the version of Go that will be downloaded by
     //
     //     go run ci.go install -dlgo
-    dlgoVersion = "1.20.2"
+    dlgoVersion = "1.20.3"
 
     // This is the version of Go that will be used to bootstrap the PPA builder.
     //
@@ -465,10 +465,6 @@ func maybeSkipArchive(env build.Environment) {
         log.Printf("skipping archive creation because this is a PR build")
         os.Exit(0)
     }
-    if env.IsCronJob {
-        log.Printf("skipping archive creation because this is a cron job")
-        os.Exit(0)
-    }
     if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
         log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag)
         os.Exit(0)
cmd/devp2p/crawl.go:
@@ -141,7 +141,7 @@ loop:
     "added", atomic.LoadUint64(&added),
     "updated", atomic.LoadUint64(&updated),
     "removed", atomic.LoadUint64(&removed),
-    "ignored(recent)", atomic.LoadUint64(&removed),
+    "ignored(recent)", atomic.LoadUint64(&recent),
     "ignored(incompatible)", atomic.LoadUint64(&skipped))
 }
 }
cmd/devp2p/dns_cloudflare.go:
@@ -144,7 +144,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
     _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
 } else if old.Content != val {
     // Entry already exists, only change its content.
-    log.Debug(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
+    log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
     updated++
     old.Content = val
     err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
cmd/devp2p/dns_route53.go:
@@ -221,7 +221,13 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
     }
     records = lrecords
 
-    var changes []types.Change
+    var (
+        changes []types.Change
+        inserts int
+        upserts int
+        skips   int
+    )
+
     for path, newValue := range records {
         prevRecords, exists := existing[path]
         prevValue := strings.Join(prevRecords.values, "")
@@ -237,20 +243,30 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
 
         if !exists {
             // Entry is unknown, push a new one
-            log.Info(fmt.Sprintf("Creating %s = %s", path, newValue))
+            log.Debug(fmt.Sprintf("Creating %s = %s", path, newValue))
             changes = append(changes, newTXTChange("CREATE", path, ttl, newValue))
+            inserts++
         } else if prevValue != newValue || prevRecords.ttl != ttl {
             // Entry already exists, only change its content.
             log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue))
             changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue))
+            upserts++
         } else {
             log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue))
+            skips++
         }
     }
 
     // Iterate over the old records and delete anything stale.
-    changes = append(changes, makeDeletionChanges(existing, records)...)
+    deletions := makeDeletionChanges(existing, records)
+    changes = append(changes, deletions...)
+
+    log.Info("Computed DNS changes",
+        "changes", len(changes),
+        "inserts", inserts,
+        "skips", skips,
+        "deleted", len(deletions),
+        "upserts", upserts)
     // Ensure changes are in the correct order.
     sortChanges(changes)
     return changes
@@ -263,7 +279,7 @@ func makeDeletionChanges(records map[string]recordSet, keep map[string]string) [
     if _, ok := keep[path]; ok {
         continue
     }
-    log.Info(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
+    log.Debug(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
     changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
 }
 return changes
cmd/evm/internal/t8ntool/execution.go:
@@ -174,7 +174,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
         return nil, nil, err
     }
     vmConfig.Tracer = tracer
-    vmConfig.Debug = (tracer != nil)
     statedb.SetTxContext(tx.Hash(), txIndex)
 
     var (
cmd/evm/internal/t8ntool/transition.go:
@@ -180,7 +180,6 @@ func Transition(ctx *cli.Context) error {
 
     vmConfig := vm.Config{
         Tracer: tracer,
-        Debug:  (tracer != nil),
     }
     // Construct the chainconfig
     var chainConfig *params.ChainConfig
@@ -250,9 +249,9 @@ func Transition(ctx *cli.Context) error {
     if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
         if prestate.Env.BaseFee != nil {
             // Already set, base fee has precedent over parent base fee.
-        } else if prestate.Env.ParentBaseFee != nil {
+        } else if prestate.Env.ParentBaseFee != nil && prestate.Env.Number != 0 {
             parent := &types.Header{
-                Number:   new(big.Int).SetUint64(prestate.Env.Number),
+                Number:   new(big.Int).SetUint64(prestate.Env.Number - 1),
                 BaseFee:  prestate.Env.ParentBaseFee,
                 GasUsed:  prestate.Env.ParentGasUsed,
                 GasLimit: prestate.Env.ParentGasLimit,
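This hunk fixes two bugs at once: block 0 has no parent, so the parent-base-fee branch must be skipped at genesis, and the synthetic parent header must carry number N-1, not N, or EIP-1559 fee math runs against the wrong block. A standalone sketch of the corrected derivation, assuming go-ethereum v1.11.x import paths and illustrative values only:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/consensus/misc"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        number := uint64(100) // current block; must be > 0 to have a parent
        if number == 0 {
            return // genesis has no parent, hence the added Number != 0 guard
        }
        parent := &types.Header{
            Number:   new(big.Int).SetUint64(number - 1), // parent is number-1, the off-by-one the hunk fixes
            BaseFee:  big.NewInt(params.InitialBaseFee),
            GasUsed:  10_000_000,
            GasLimit: 30_000_000,
        }
        fmt.Println("base fee:", misc.CalcBaseFee(params.MainnetChainConfig, parent))
    }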
cmd/evm/runner.go:
@@ -40,6 +40,7 @@ import (
     "github.com/ethereum/go-ethereum/internal/flags"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/params"
+    "github.com/ethereum/go-ethereum/trie"
     "github.com/urfave/cli/v2"
 )
 
@@ -125,6 +126,7 @@ func runCmd(ctx *cli.Context) error {
     sender        = common.BytesToAddress([]byte("sender"))
     receiver      = common.BytesToAddress([]byte("receiver"))
     genesisConfig *core.Genesis
+    preimages     = ctx.Bool(DumpFlag.Name)
 )
 if ctx.Bool(MachineFlag.Name) {
     tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@@ -139,10 +141,12 @@ func runCmd(ctx *cli.Context) error {
     genesisConfig = gen
     db := rawdb.NewMemoryDatabase()
     genesis := gen.MustCommit(db)
-    statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
+    sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})
+    statedb, _ = state.New(genesis.Root(), sdb, nil)
     chainConfig = gen.Config
 } else {
-    statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+    sdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})
+    statedb, _ = state.New(common.Hash{}, sdb, nil)
     genesisConfig = new(core.Genesis)
 }
 if ctx.String(SenderFlag.Name) != "" {
@@ -214,7 +218,6 @@ func runCmd(ctx *cli.Context) error {
     BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
     EVMConfig: vm.Config{
         Tracer: tracer,
-        Debug:  ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
     },
 }
 
cmd/evm/staterunner.go:
@@ -92,7 +92,6 @@ func stateTestCmd(ctx *cli.Context) error {
     // Iterate over all the tests, run them and aggregate the results
     cfg := vm.Config{
         Tracer: tracer,
-        Debug:  ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
     }
     results := make([]StatetestResult, 0, len(tests))
     for key, test := range tests {
cmd/geth/accountcmd.go:
@@ -301,7 +301,11 @@ func accountUpdate(ctx *cli.Context) error {
         utils.Fatalf("No accounts specified to update")
     }
     stack, _ := makeConfigNode(ctx)
-    ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+    backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+    if len(backends) == 0 {
+        utils.Fatalf("Keystore is not available")
+    }
+    ks := backends[0].(*keystore.KeyStore)
 
     for _, addr := range ctx.Args().Slice() {
         account, oldPassword := unlockAccount(ks, addr, 0, nil)
@@ -326,7 +330,11 @@ func importWallet(ctx *cli.Context) error {
     stack, _ := makeConfigNode(ctx)
     passphrase := utils.GetPassPhraseWithList("", false, 0, utils.MakePasswordList(ctx))
 
-    ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+    backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+    if len(backends) == 0 {
+        utils.Fatalf("Keystore is not available")
+    }
+    ks := backends[0].(*keystore.KeyStore)
     acct, err := ks.ImportPreSaleKey(keyJSON, passphrase)
     if err != nil {
         utils.Fatalf("%v", err)
@@ -347,7 +355,11 @@ func accountImport(ctx *cli.Context) error {
     stack, _ := makeConfigNode(ctx)
     passphrase := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
 
-    ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+    backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+    if len(backends) == 0 {
+        utils.Fatalf("Keystore is not available")
+    }
+    ks := backends[0].(*keystore.KeyStore)
     acct, err := ks.ImportECDSA(key, passphrase)
     if err != nil {
         utils.Fatalf("Could not create the account: %v", err)
cmd/geth/main.go:
@@ -480,7 +480,12 @@ func unlockAccounts(ctx *cli.Context, stack *node.Node) {
     if !stack.Config().InsecureUnlockAllowed && stack.Config().ExtRPCEnabled() {
         utils.Fatalf("Account unlock with HTTP access is forbidden!")
     }
-    ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+    backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+    if len(backends) == 0 {
+        log.Warn("Failed to unlock accounts, keystore is not available")
+        return
+    }
+    ks := backends[0].(*keystore.KeyStore)
     passwords := utils.MakePasswordList(ctx)
     for i, account := range unlocks {
         unlockAccount(ks, account, i, passwords)
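All four keystore hunks replace the same latent panic: indexing Backends(...)[0] blows up with "index out of range" when no keystore backend is configured. A generic sketch of the guard they introduce (the helper name and program are hypothetical, not geth code):

    package main

    import "fmt"

    // pickFirst returns the first element of backends, or an error instead
    // of panicking the way a bare backends[0] does on an empty slice.
    func pickFirst[T any](backends []T) (T, error) {
        var zero T
        if len(backends) == 0 {
            return zero, fmt.Errorf("keystore is not available")
        }
        return backends[0], nil
    }

    func main() {
        _, err := pickFirst([]string{}) // no keystore backend configured
        fmt.Println(err)                // keystore is not available
    }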
common/path.go:
@@ -17,19 +17,10 @@
 package common
 
 import (
     "fmt"
     "os"
-    "path/filepath"
     "runtime"
 )
 
 // MakeName creates a node name that follows the ethereum convention
 // for such names. It adds the operation system name and Go runtime version
 // the name.
 func MakeName(name, version string) string {
     return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version())
 }
 
 // FileExist checks if a file exists at filePath.
 func FileExist(filePath string) bool {
     _, err := os.Stat(filePath)
common/types.go:
@@ -400,7 +400,7 @@ func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error {
 }
 
 // MarshalJSON marshals the original value
-func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) {
+func (ma MixedcaseAddress) MarshalJSON() ([]byte, error) {
     if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") {
         return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:]))
     }
common/types_test.go:
@@ -154,6 +154,31 @@ func BenchmarkAddressHex(b *testing.B) {
     }
 }
 
+// Test checks if the customized json marshaller of MixedcaseAddress object
+// is invoked correctly. In golang the struct pointer will inherit the
+// non-pointer receiver methods, the reverse is not true. In the case of
+// MixedcaseAddress, it must define the MarshalJSON method in the object
+// but not the pointer level, so that this customized marshalled can be used
+// for both MixedcaseAddress object and pointer.
+func TestMixedcaseAddressMarshal(t *testing.T) {
+    var (
+        output string
+        input  = "0xae967917c465db8578ca9024c205720b1a3651A9"
+    )
+    addr, err := NewMixedcaseAddressFromString(input)
+    if err != nil {
+        t.Fatal(err)
+    }
+    blob, err := json.Marshal(*addr)
+    if err != nil {
+        t.Fatal(err)
+    }
+    json.Unmarshal(blob, &output)
+    if output != input {
+        t.Fatal("Failed to marshal/unmarshal MixedcaseAddress object")
+    }
+}
+
 func TestMixedcaseAccount_Address(t *testing.T) {
     // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
     // Note: 0X{checksum_addr} is not valid according to spec above
consensus/beacon/consensus.go:
@@ -263,11 +263,19 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
     // Verify existence / non-existence of withdrawalsHash.
     shanghai := chain.Config().IsShanghai(header.Time)
     if shanghai && header.WithdrawalsHash == nil {
-        return fmt.Errorf("missing withdrawalsHash")
+        return errors.New("missing withdrawalsHash")
     }
     if !shanghai && header.WithdrawalsHash != nil {
         return fmt.Errorf("invalid withdrawalsHash: have %x, expected nil", header.WithdrawalsHash)
     }
+    // Verify the existence / non-existence of excessDataGas
+    cancun := chain.Config().IsCancun(header.Time)
+    if cancun && header.ExcessDataGas == nil {
+        return errors.New("missing excessDataGas")
+    }
+    if !cancun && header.ExcessDataGas != nil {
+        return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas)
+    }
     return nil
 }
 
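Both the withdrawalsHash and excessDataGas checks follow the same shape: a header field must be present exactly when its fork is active, and absent otherwise. A compact sketch of that gating pattern (names are illustrative, not the geth API):

    package main

    import "fmt"

    // verifyForkField enforces "present iff the fork is active" for an
    // optional header field, mirroring the two check pairs in the hunk.
    func verifyForkField(active bool, field *uint64, name string) error {
        if active && field == nil {
            return fmt.Errorf("missing %s", name)
        }
        if !active && field != nil {
            return fmt.Errorf("invalid %s: have %d, expected nil", name, *field)
        }
        return nil
    }

    func main() {
        excess := uint64(0)
        // A pre-Cancun header carrying the field is rejected.
        fmt.Println(verifyForkField(false, &excess, "excessDataGas"))
    }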
consensus/clique/clique.go:
@@ -301,9 +301,8 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
     if chain.Config().IsShanghai(header.Time) {
         return fmt.Errorf("clique does not support shanghai fork")
     }
-    // If all checks passed, validate any special fields for hard forks
-    if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
-        return err
+    if chain.Config().IsCancun(header.Time) {
+        return fmt.Errorf("clique does not support cancun fork")
     }
     // All basic checks passed, verify cascading fields
     return c.verifyCascadingFields(chain, header, parents)
consensus/ethash/algorithm.go:
@@ -163,7 +163,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
     rows := int(size) / hashBytes
 
     // Start a monitoring goroutine to report progress on low end devices
-    var progress uint32
+    var progress atomic.Uint32
 
     done := make(chan struct{})
     defer close(done)
@@ -174,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
         case <-done:
             return
         case <-time.After(3 * time.Second):
-            logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
+            logger.Info("Generating ethash verification cache", "percentage", progress.Load()*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
         }
     }
 }()
@@ -185,7 +185,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
     keccak512(cache, seed)
     for offset := uint64(hashBytes); offset < size; offset += hashBytes {
         keccak512(cache[offset:], cache[offset-hashBytes:offset])
-        atomic.AddUint32(&progress, 1)
+        progress.Add(1)
     }
     // Use a low-round version of randmemohash
     temp := make([]byte, hashBytes)
@@ -200,7 +200,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
     bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
     keccak512(cache[dstOff:], temp)
 
-    atomic.AddUint32(&progress, 1)
+    progress.Add(1)
 }
 }
 // Swap the byte order on big endian systems and return
@@ -299,7 +299,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
     var pend sync.WaitGroup
     pend.Add(threads)
 
-    var progress uint64
+    var progress atomic.Uint64
     for i := 0; i < threads; i++ {
         go func(id int) {
             defer pend.Done()
@@ -323,7 +323,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
     }
     copy(dataset[index*hashBytes:], item)
 
-    if status := atomic.AddUint64(&progress, 1); status%percent == 0 {
+    if status := progress.Add(1); status%percent == 0 {
         logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
     }
 }
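These hunks are one instance of a migration that recurs through the rest of this commit: plain integers accessed via sync/atomic helper functions become Go 1.19 typed atomics (atomic.Uint32, atomic.Uint64, atomic.Bool, atomic.Int32/Int64), so the type system, rather than author discipline, guarantees every access is atomic. A minimal side-by-side sketch of the pattern:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    func main() {
        // Old style: a plain uint32 that is only safe if every single
        // access remembers to go through atomic.AddUint32/LoadUint32.
        var legacy uint32
        atomic.AddUint32(&legacy, 1)

        // New style: the type itself enforces atomic access.
        var progress atomic.Uint32
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                progress.Add(1)
            }()
        }
        wg.Wait()
        fmt.Println(atomic.LoadUint32(&legacy), progress.Load()) // 1 4
    }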
consensus/ethash/consensus.go:
@@ -313,6 +313,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
     if chain.Config().IsShanghai(header.Time) {
         return fmt.Errorf("ethash does not support shanghai fork")
     }
+    if chain.Config().IsCancun(header.Time) {
+        return fmt.Errorf("ethash does not support cancun fork")
+    }
     // Verify the engine specific seal securing the block
     if seal {
         if err := ethash.verifySeal(chain, header, false); err != nil {
@@ -323,9 +326,6 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
     if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
         return err
     }
-    if err := misc.VerifyForkHashes(chain.Config(), header, uncle); err != nil {
-        return err
-    }
     return nil
 }
 
consensus/ethash/ethash.go:
@@ -313,7 +313,7 @@ type dataset struct {
     mmap    mmap.MMap   // Memory map itself to unmap before releasing
     dataset []uint32    // The actual cache data content
     once    sync.Once   // Ensures the cache is generated only once
-    done    uint32      // Atomic flag to determine generation status
+    done    atomic.Bool // Atomic flag to determine generation status
 }
 
 // newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -326,7 +326,7 @@ func newDataset(epoch uint64) *dataset {
 func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
     d.once.Do(func() {
         // Mark the dataset generated after we're done. This is needed for remote
-        defer atomic.StoreUint32(&d.done, 1)
+        defer d.done.Store(true)
 
         csize := cacheSize(d.epoch*epochLength + 1)
         dsize := datasetSize(d.epoch*epochLength + 1)
@@ -390,7 +390,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 // or not (it may not have been started at all). This is useful for remote miners
 // to default to verification caches instead of blocking on DAG generations.
 func (d *dataset) generated() bool {
-    return atomic.LoadUint32(&d.done) == 1
+    return d.done.Load()
 }
 
 // finalizer closes any file handlers and memory maps open.
@@ -34,7 +34,6 @@ func copyConfig(original *params.ChainConfig) *params.ChainConfig {
     DAOForkBlock:   original.DAOForkBlock,
     DAOForkSupport: original.DAOForkSupport,
     EIP150Block:    original.EIP150Block,
-    EIP150Hash:     original.EIP150Hash,
     EIP155Block:    original.EIP155Block,
     EIP158Block:    original.EIP158Block,
     ByzantiumBlock: original.ByzantiumBlock,
consensus/misc/eip4844.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+    "math/big"
+
+    "github.com/ethereum/go-ethereum/params"
+)
+
+var (
+    minDataGasPrice            = big.NewInt(params.BlobTxMinDataGasprice)
+    dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction)
+)
+
+// CalcBlobFee calculates the blobfee from the header's excess data gas field.
+func CalcBlobFee(excessDataGas *big.Int) *big.Int {
+    // If this block does not yet have EIP-4844 enabled, return the starting fee
+    if excessDataGas == nil {
+        return big.NewInt(params.BlobTxMinDataGasprice)
+    }
+    return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction)
+}
+
+// fakeExponential approximates factor * e ** (numerator / denominator) using
+// Taylor expansion.
+func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
+    var (
+        output = new(big.Int)
+        accum  = new(big.Int).Mul(factor, denominator)
+    )
+    for i := 1; accum.Sign() > 0; i++ {
+        output.Add(output, accum)
+
+        accum.Mul(accum, numerator)
+        accum.Div(accum, denominator)
+        accum.Div(accum, big.NewInt(int64(i)))
+    }
+    return output.Div(output, denominator)
+}
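fakeExponential evaluates factor * e^(numerator/denominator) with the Taylor series e^x = sum over i of x^i/i!, entirely in integer arithmetic: accum holds the i-th term scaled by denominator, and each iteration multiplies in another x/i until the truncated term reaches zero. A standalone re-implementation checking one row of the test table below (a sketch for illustration; the real function is the one in the new file above):

    package main

    import (
        "fmt"
        "math"
        "math/big"
    )

    func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
        var (
            output = new(big.Int)
            accum  = new(big.Int).Mul(factor, denominator)
        )
        for i := 1; accum.Sign() > 0; i++ {
            output.Add(output, accum)
            accum.Mul(accum, numerator)
            accum.Div(accum, denominator)
            accum.Div(accum, big.NewInt(int64(i)))
        }
        return output.Div(output, denominator)
    }

    func main() {
        // factor=10, x = 8/2 = 4: the true value is 10*e^4, roughly 545.98.
        // Integer truncation in each division loses a little, which is why
        // the test table records 542 for this input.
        got := fakeExponential(big.NewInt(10), big.NewInt(8), big.NewInt(2))
        fmt.Println(got, math.Round(10*math.Exp(4))) // 542 546
    }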
consensus/misc/eip4844_test.go (new file, 85 lines)
@@ -0,0 +1,85 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+    "fmt"
+    "math/big"
+    "testing"
+
+    "github.com/ethereum/go-ethereum/params"
+)
+
+func TestCalcBlobFee(t *testing.T) {
+    tests := []struct {
+        excessDataGas int64
+        blobfee       int64
+    }{
+        {0, 1},
+        {1542706, 1},
+        {1542707, 2},
+        {10 * 1024 * 1024, 111},
+    }
+    have := CalcBlobFee(nil)
+    if have.Int64() != params.BlobTxMinDataGasprice {
+        t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice)
+    }
+    for i, tt := range tests {
+        have := CalcBlobFee(big.NewInt(tt.excessDataGas))
+        if have.Int64() != tt.blobfee {
+            t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
+        }
+    }
+}
+
+func TestFakeExponential(t *testing.T) {
+    tests := []struct {
+        factor      int64
+        numerator   int64
+        denominator int64
+        want        int64
+    }{
+        // When numerator == 0 the return value should always equal the value of factor
+        {1, 0, 1, 1},
+        {38493, 0, 1000, 38493},
+        {0, 1234, 2345, 0}, // should be 0
+        {1, 2, 1, 6},       // approximate 7.389
+        {1, 4, 2, 6},
+        {1, 3, 1, 16}, // approximate 20.09
+        {1, 6, 2, 18},
+        {1, 4, 1, 49}, // approximate 54.60
+        {1, 8, 2, 50},
+        {10, 8, 2, 542}, // approximate 540.598
+        {11, 8, 2, 596}, // approximate 600.58
+        {1, 5, 1, 136},  // approximate 148.4
+        {1, 5, 2, 11},   // approximate 12.18
+        {2, 5, 2, 23},   // approximate 24.36
+        {1, 50000000, 2225652, 5709098764},
+    }
+    for i, tt := range tests {
+        f, n, d := big.NewInt(tt.factor), big.NewInt(tt.numerator), big.NewInt(tt.denominator)
+        original := fmt.Sprintf("%d %d %d", f, n, d)
+        have := fakeExponential(f, n, d)
+        if have.Int64() != tt.want {
+            t.Errorf("test %d: fake exponential mismatch: have %v want %v", i, have, tt.want)
+        }
+        later := fmt.Sprintf("%d %d %d", f, n, d)
+        if original != later {
+            t.Errorf("test %d: fake exponential modified arguments: have\n%v\nwant\n%v", i, later, original)
+        }
+    }
+}
consensus/misc/forks.go (deleted, 43 lines)
@@ -1,43 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package misc
-
-import (
-    "fmt"
-
-    "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/core/types"
-    "github.com/ethereum/go-ethereum/params"
-)
-
-// VerifyForkHashes verifies that blocks conforming to network hard-forks do have
-// the correct hashes, to avoid clients going off on different chains. This is an
-// optional feature.
-func VerifyForkHashes(config *params.ChainConfig, header *types.Header, uncle bool) error {
-    // We don't care about uncles
-    if uncle {
-        return nil
-    }
-    // If the homestead reprice hash is set, validate it
-    if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
-        if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
-            return fmt.Errorf("homestead gas reprice fork: have %#x, want %#x", header.Hash(), config.EIP150Hash)
-        }
-    }
-    // All ok, return
-    return nil
-}
core/blockchain.go:
@@ -174,7 +174,7 @@ type BlockChain struct {
     triegc        *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
     gcproc        time.Duration                    // Accumulates canonical block processing for trie dumping
     lastWrite     uint64                           // Last block when the state was flushed
-    flushInterval int64                            // Time interval (processing time) after which to flush a state
+    flushInterval atomic.Int64                     // Time interval (processing time) after which to flush a state
     triedb        *trie.Database                   // The database handler for maintaining trie nodes.
     stateCache    state.Database                   // State database to reuse between imports (contains state cache)
 
@@ -215,8 +215,8 @@ type BlockChain struct {
 
     wg   sync.WaitGroup //
     quit chan struct{}  // shutdown signal, closed in Stop.
-    running       int32 // 0 if chain is running, 1 when stopped
-    procInterrupt int32 // interrupt signaler for block processing
+    stopping      atomic.Bool // false if chain is running, true when stopped
+    procInterrupt atomic.Bool // interrupt signaler for block processing
 
     engine    consensus.Engine
     validator Validator // Block and state validator interface
@@ -233,7 +233,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
     if cacheConfig == nil {
         cacheConfig = defaultCacheConfig
     }
-
     // Open trie database with provided config
     triedb := trie.NewDatabaseWithConfig(db, &trie.Config{
         Cache: cacheConfig.TrieCleanLimit,
@@ -260,7 +259,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
     cacheConfig: cacheConfig,
     db:          db,
     triedb:      triedb,
-    flushInterval: int64(cacheConfig.TrieTimeLimit),
     triegc:      prque.New[int64, common.Hash](nil),
     quit:        make(chan struct{}),
     chainmu:     syncx.NewClosableMutex(),
@@ -273,6 +271,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
     engine:   engine,
     vmConfig: vmConfig,
 }
+bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
 bc.forker = NewForkChoice(bc, shouldPreserve)
 bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
 bc.validator = NewBlockValidator(chainConfig, bc, engine)
@@ -909,14 +908,14 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
     headBlockGauge.Update(int64(block.NumberU64()))
 }
 
-// stop stops the blockchain service. If any imports are currently in progress
+// stopWithoutSaving stops the blockchain service. If any imports are currently in progress
 // it will abort them using the procInterrupt. This method stops all running
 // goroutines, but does not do all the post-stop work of persisting data.
 // OBS! It is generally recommended to use the Stop method!
 // This method has been exposed to allow tests to stop the blockchain while simulating
 // a crash.
 func (bc *BlockChain) stopWithoutSaving() {
-    if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
+    if !bc.stopping.CompareAndSwap(false, true) {
         return
     }
 
@@ -998,12 +997,12 @@ func (bc *BlockChain) Stop() {
 // errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
 // calling this method.
 func (bc *BlockChain) StopInsert() {
-    atomic.StoreInt32(&bc.procInterrupt, 1)
+    bc.procInterrupt.Store(true)
 }
 
 // insertStopped returns true after StopInsert has been called.
 func (bc *BlockChain) insertStopped() bool {
-    return atomic.LoadInt32(&bc.procInterrupt) == 1
+    return bc.procInterrupt.Load()
 }
 
 func (bc *BlockChain) procFutureBlocks() {
@@ -1382,7 +1381,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
     }
     // Find the next state trie we need to commit
     chosen := current - TriesInMemory
-    flushInterval := time.Duration(atomic.LoadInt64(&bc.flushInterval))
+    flushInterval := time.Duration(bc.flushInterval.Load())
     // If we exceeded time allowance, flush an entire trie to disk
 
     // begin PluGeth code injection
@@ -1752,68 +1751,69 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 
     // If we have a followup block, run that against the current state to pre-cache
     // transactions and probabilistically some of the account/storage trie nodes.
-    var followupInterrupt uint32
+    var followupInterrupt atomic.Bool
     if !bc.cacheConfig.TrieCleanNoPrefetch {
         if followup, err := it.peek(); followup != nil && err == nil {
             throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
 
-            go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
+            go func(start time.Time, followup *types.Block, throwaway *state.StateDB) {
                 bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
 
                 blockPrefetchExecuteTimer.Update(time.Since(start))
-                if atomic.LoadUint32(interrupt) == 1 {
+                if followupInterrupt.Load() {
                     blockPrefetchInterruptMeter.Mark(1)
                 }
-            }(time.Now(), followup, throwaway, &followupInterrupt)
+            }(time.Now(), followup, throwaway)
        }
     }
 
     // Process block using the parent state as reference point
-    substart := time.Now()
+    pstart := time.Now()
     receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
     if err != nil {
         bc.reportBlock(block, receipts, err)
-        atomic.StoreUint32(&followupInterrupt, 1)
+        followupInterrupt.Store(true)
        return it.index, err
     }
+    ptime := time.Since(pstart)
 
-    // Update the metrics touched during block processing
-    accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete, we can mark them
-    storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete, we can mark them
-    accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete, we can mark them
-    storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete, we can mark them
-    snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
-    snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
-    triehash := statedb.AccountHashes + statedb.StorageHashes     // Save to not double count in validation
-    trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
-    trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
-
-    blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
-
     // Validate the state using the default validator
-    substart = time.Now()
+    vstart := time.Now()
     if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
         bc.reportBlock(block, receipts, err)
-        atomic.StoreUint32(&followupInterrupt, 1)
+        followupInterrupt.Store(true)
        return it.index, err
     }
-    proctime := time.Since(start)
+    vtime := time.Since(vstart)
+    proctime := time.Since(start) // processing + validation
 
-    // Update the metrics touched during block validation
-    accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
-    storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
-    blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
+    // Update the metrics touched during block processing and validation
+    accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete(in processing)
+    storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete(in processing)
+    snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
+    snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing)
+    accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete(in validation)
+    storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete(in validation)
+    accountHashTimer.Update(statedb.AccountHashes)                // Account hashes are complete(in validation)
+    storageHashTimer.Update(statedb.StorageHashes)                // Storage hashes are complete(in validation)
+    triehash := statedb.AccountHashes + statedb.StorageHashes       // The time spent on tries hashing
+    trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates   // The time spent on tries update
+    trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
+    trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
+    blockExecutionTimer.Update(ptime - trieRead)                    // The time spent on EVM processing
+    blockValidationTimer.Update(vtime - (triehash + trieUpdate))    // The time spent on block validation
 
     // Write the block to the chain and get the status.
-    substart = time.Now()
-    var status WriteStatus
+    var (
+        wstart = time.Now()
+        status WriteStatus
+    )
     if !setHead {
         // Don't set the head, only insert the block
         err = bc.writeBlockWithState(block, receipts, statedb)
     } else {
         status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
     }
-    atomic.StoreUint32(&followupInterrupt, 1)
+    followupInterrupt.Store(true)
    if err != nil {
         return it.index, err
     }
@@ -1821,9 +1821,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
     accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
     storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
     snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
-    triedbCommitTimer.Update(statedb.TrieDBCommits)     // Triedb commits are complete, we can mark them
+    triedbCommitTimer.Update(statedb.TrieDBCommits)     // Trie database commits are complete, we can mark them
 
-    blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
+    blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
     blockInsertTimer.UpdateSince(start)
 
     // Report the import stats before returning the various results
@@ -2524,5 +2524,5 @@ func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Pro
 // The interval is in terms of block processing time, not wall clock.
 // It is thread-safe and can be called repeatedly without side effects.
 func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
-    atomic.StoreInt64(&bc.flushInterval, int64(interval))
+    bc.flushInterval.Store(int64(interval))
 }
 
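Beyond the ptime/vtime metric split, the flushInterval change shows how a time.Duration can live in an atomic.Int64 so one goroutine retunes it while the import loop reads it, with no mutex. A standalone sketch of that idiom (type and method names here are illustrative, not the BlockChain struct):

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    type chain struct {
        flushInterval atomic.Int64 // nanoseconds, i.e. a time.Duration
    }

    func (c *chain) SetTrieFlushInterval(d time.Duration) {
        c.flushInterval.Store(int64(d))
    }

    func (c *chain) interval() time.Duration {
        return time.Duration(c.flushInterval.Load())
    }

    func main() {
        var c chain
        c.SetTrieFlushInterval(time.Hour)
        fmt.Println(c.interval()) // 1h0m0s
    }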
core/blockchain_test.go:
@@ -3024,7 +3024,6 @@ func TestDeleteRecreateSlots(t *testing.T) {
     })
     // Import the canonical chain
     chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
-        Debug:  true,
         Tracer: logger.NewJSONLogger(nil, os.Stdout),
     }, nil, nil)
     if err != nil {
@@ -3102,7 +3101,6 @@ func TestDeleteRecreateAccount(t *testing.T) {
     })
     // Import the canonical chain
     chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
-        Debug:  true,
         Tracer: logger.NewJSONLogger(nil, os.Stdout),
     }, nil, nil)
     if err != nil {
core/bloombits/matcher.go:
@@ -83,7 +83,7 @@ type Matcher struct {
     retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
     deliveries chan *Retrieval      // Retriever processes waiting for task response deliveries
 
-    running uint32      // Atomic flag whether a session is live or not
+    running atomic.Bool // Atomic flag whether a session is live or not
 }
 
 // NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
@@ -146,10 +146,10 @@ func (m *Matcher) addScheduler(idx uint) {
 // channel is closed.
 func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
     // Make sure we're not creating concurrent sessions
-    if atomic.SwapUint32(&m.running, 1) == 1 {
+    if m.running.Swap(true) {
         return nil, errors.New("matcher already running")
     }
-    defer atomic.StoreUint32(&m.running, 0)
+    defer m.running.Store(false)
 
     // Initiate a new matching round
     session := &MatcherSession{
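The Swap(true) call both claims the running flag and reports whether it was already claimed, collapsing a read-then-write race into one atomic operation. A minimal sketch of that single-session guard (illustrative types, not the Matcher itself):

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
    )

    type matcher struct{ running atomic.Bool }

    func (m *matcher) start() error {
        // Swap returns the previous value: true means another session
        // already owns the flag, so we bail out without touching it.
        if m.running.Swap(true) {
            return errors.New("matcher already running")
        }
        defer m.running.Store(false)
        return nil // ... run the session ...
    }

    func main() {
        var m matcher
        fmt.Println(m.start()) // <nil>
    }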
core/bloombits/matcher_test.go:
@@ -160,7 +160,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
     }
 }
 // Track the number of retrieval requests made
-var requested uint32
+var requested atomic.Uint32
 
 // Start the matching session for the filter and the retriever goroutines
 quit := make(chan struct{})
@@ -208,15 +208,15 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
 session.Close()
 close(quit)
 
-if retrievals != 0 && requested != retrievals {
-    t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested, retrievals)
+if retrievals != 0 && requested.Load() != retrievals {
+    t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals)
 }
-return requested
+return requested.Load()
 }
 
 // startRetrievers starts a batch of goroutines listening for section requests
 // and serving them.
-func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) {
+func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) {
     requests := make(chan chan *Retrieval)
 
     for i := 0; i < 10; i++ {
@@ -238,7 +238,7 @@ func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *ui
     for i, section := range task.Sections {
         if rand.Int()%4 != 0 { // Handle occasional missing deliveries
             task.Bitsets[i] = generateBitset(task.Bit, section)
-            atomic.AddUint32(retrievals, 1)
+            retrievals.Add(1)
         }
     }
     request <- task
core/bloombits/scheduler_test.go:
@@ -45,13 +45,13 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
     fetch := make(chan *request, 16)
     defer close(fetch)
 
-    var delivered uint32
+    var delivered atomic.Uint32
     for i := 0; i < fetchers; i++ {
         go func() {
             defer fetchPend.Done()
 
             for req := range fetch {
-                atomic.AddUint32(&delivered, 1)
+                delivered.Add(1)
 
                 f.deliver([]uint64{
                     req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
@@ -97,7 +97,7 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
     }
     pend.Wait()
 
-    if have := atomic.LoadUint32(&delivered); int(have) != requests {
+    if have := delivered.Load(); int(have) != requests {
         t.Errorf("request count mismatch: have %v, want %v", have, requests)
     }
 }
core/chain_indexer.go:
@@ -75,7 +75,7 @@ type ChainIndexer struct {
     backend  ChainIndexerBackend // Background processor generating the index data content
     children []*ChainIndexer     // Child indexers to cascade chain updates to
 
-    active uint32          // Flag whether the event loop was started
+    active atomic.Bool     // Flag whether the event loop was started
     update chan struct{}   // Notification channel that headers should be processed
     quit   chan chan error // Quit channel to tear down running goroutines
     ctx    context.Context
@@ -166,7 +166,7 @@ func (c *ChainIndexer) Close() error {
         errs = append(errs, err)
     }
     // If needed, tear down the secondary event loop
-    if atomic.LoadUint32(&c.active) != 0 {
+    if c.active.Load() {
         c.quit <- errc
         if err := <-errc; err != nil {
             errs = append(errs, err)
@@ -196,7 +196,7 @@ func (c *ChainIndexer) Close() error {
 // queue.
 func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
     // Mark the chain indexer as active, requiring an additional teardown
-    atomic.StoreUint32(&c.active, 1)
+    c.active.Store(true)
 
     defer sub.Unsubscribe()
 
@@ -45,9 +45,10 @@ func (h *testHasher) Reset() {
     h.hasher.Reset()
 }
 
-func (h *testHasher) Update(key, val []byte) {
+func (h *testHasher) Update(key, val []byte) error {
     h.hasher.Write(key)
     h.hasher.Write(val)
+    return nil
 }
 
 func (h *testHasher) Hash() common.Hash {
core/rawdb/chain_freezer.go:
@@ -43,10 +43,7 @@ const (
 // The background thread will keep moving ancient chain segments from key-value
 // database to flat files for saving space on live database.
 type chainFreezer struct {
-    // WARNING: The `threshold` field is accessed atomically. On 32 bit platforms, only
-    // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
-    // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
-    threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+    threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
 
     *Freezer
     quit chan struct{}
@@ -60,12 +57,13 @@ func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFre
     if err != nil {
         return nil, err
     }
-    return &chainFreezer{
+    cf := chainFreezer{
         Freezer: freezer,
-        threshold: params.FullImmutabilityThreshold,
         quit:    make(chan struct{}),
         trigger: make(chan chan struct{}),
-    }, nil
+    }
+    cf.threshold.Store(params.FullImmutabilityThreshold)
+    return &cf, nil
 }
 
 // Close closes the chain freezer instance and terminates the background thread.
@@ -124,8 +122,8 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
     continue
 }
 number := ReadHeaderNumber(nfdb, hash)
-threshold := atomic.LoadUint64(&f.threshold)
-frozen := atomic.LoadUint64(&f.frozen)
+threshold := f.threshold.Load()
+frozen := f.frozen.Load()
 switch {
 case number == nil:
     log.Error("Current full block number unavailable", "hash", hash)
@@ -186,7 +184,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
 
 // Wipe out side chains also and track dangling side chains
 var dangling []common.Hash
-frozen = atomic.LoadUint64(&f.frozen) // Needs reload after during freezeRange
+frozen = f.frozen.Load() // Needs reload after during freezeRange
 for number := first; number < frozen; number++ {
     // Always keep the genesis block in active database
     if number != 0 {
core/rawdb/chain_iterator.go:
@@ -132,11 +132,12 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
     }
 }
 // process runs in parallel
-nThreadsAlive := int32(threads)
+var nThreadsAlive atomic.Int32
+nThreadsAlive.Store(int32(threads))
 process := func() {
     defer func() {
         // Last processor closes the result channel
-        if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+        if nThreadsAlive.Add(-1) == 0 {
             close(hashesCh)
         }
     }()
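The pattern this hunk migrates is worth spelling out: each worker decrements a shared live count on exit, and whichever decrement reaches zero closes the output channel exactly once, so the consumer's range loop terminates cleanly. A standalone sketch under illustrative names:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        const threads = 4
        out := make(chan int, threads) // buffered so sends never block

        var alive atomic.Int32
        alive.Store(threads)
        for i := 0; i < threads; i++ {
            go func(id int) {
                defer func() {
                    // Add returns the new value; only the last worker
                    // sees zero and closes the shared channel.
                    if alive.Add(-1) == 0 {
                        close(out)
                    }
                }()
                out <- id
            }(i)
        }
        for v := range out {
            fmt.Println(v)
        }
    }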
core/rawdb/database.go:
@@ -24,7 +24,6 @@ import (
     "path"
     "path/filepath"
     "strings"
-    "sync/atomic"
     "time"
 
     "github.com/ethereum/go-ethereum/common"
@@ -72,9 +71,9 @@ func (frdb *freezerdb) Freeze(threshold uint64) error {
     }
     // Set the freezer threshold to a temporary value
     defer func(old uint64) {
-        atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, old)
-    }(atomic.LoadUint64(&frdb.AncientStore.(*chainFreezer).threshold))
-    atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, threshold)
+        frdb.AncientStore.(*chainFreezer).threshold.Store(old)
+    }(frdb.AncientStore.(*chainFreezer).threshold.Load())
+    frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
 
     // Trigger a freeze cycle and block until it's done
     trigger := make(chan struct{}, 1)
@ -62,11 +62,8 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000
// reserving it for go-ethereum. This would also reduce the memory requirements
// of Geth, and thus also GC overhead.
type Freezer struct {
// WARNING: The `frozen` and `tail` fields are accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
frozen uint64 // Number of blocks already frozen
tail uint64 // Number of the first stored item in the freezer
frozen atomic.Uint64 // Number of blocks already frozen
tail atomic.Uint64 // Number of the first stored item in the freezer

// This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations.
@ -212,12 +209,12 @@ func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]

// Ancients returns the length of the frozen items.
func (f *Freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil
return f.frozen.Load(), nil
}

// Tail returns the number of first stored item in the freezer.
func (f *Freezer) Tail() (uint64, error) {
return atomic.LoadUint64(&f.tail), nil
return f.tail.Load(), nil
}

// AncientSize returns the ancient size of the specified category.
@ -251,7 +248,7 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
defer f.writeLock.Unlock()

// Roll back all tables to the starting position in case of error.
prevItem := atomic.LoadUint64(&f.frozen)
prevItem := f.frozen.Load()
defer func() {
if err != nil {
// The write operation has failed. Go back to the previous item position.
@ -287,7 +284,7 @@ func (f *Freezer) TruncateHead(items uint64) error {
f.writeLock.Lock()
defer f.writeLock.Unlock()

if atomic.LoadUint64(&f.frozen) <= items {
if f.frozen.Load() <= items {
return nil
}
for _, table := range f.tables {
@ -295,7 +292,7 @@ func (f *Freezer) TruncateHead(items uint64) error {
return err
}
}
atomic.StoreUint64(&f.frozen, items)
f.frozen.Store(items)
return nil
}

@ -307,7 +304,7 @@ func (f *Freezer) TruncateTail(tail uint64) error {
f.writeLock.Lock()
defer f.writeLock.Unlock()

if atomic.LoadUint64(&f.tail) >= tail {
if f.tail.Load() >= tail {
return nil
}
for _, table := range f.tables {
@ -315,7 +312,7 @@ func (f *Freezer) TruncateTail(tail uint64) error {
return err
}
}
atomic.StoreUint64(&f.tail, tail)
f.tail.Store(tail)
return nil
}

@ -346,22 +343,22 @@ func (f *Freezer) validate() error {
)
// Hack to get boundary of any table
for kind, table := range f.tables {
head = atomic.LoadUint64(&table.items)
tail = atomic.LoadUint64(&table.itemHidden)
head = table.items.Load()
tail = table.itemHidden.Load()
name = kind
break
}
// Now check every table against those boundaries.
for kind, table := range f.tables {
if head != atomic.LoadUint64(&table.items) {
return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, atomic.LoadUint64(&table.items), head)
if head != table.items.Load() {
return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, table.items.Load(), head)
}
if tail != atomic.LoadUint64(&table.itemHidden) {
return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, atomic.LoadUint64(&table.itemHidden), tail)
if tail != table.itemHidden.Load() {
return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, table.itemHidden.Load(), tail)
}
}
atomic.StoreUint64(&f.frozen, head)
atomic.StoreUint64(&f.tail, tail)
f.frozen.Store(head)
f.tail.Store(tail)
return nil
}

@ -372,11 +369,11 @@ func (f *Freezer) repair() error {
tail = uint64(0)
)
for _, table := range f.tables {
items := atomic.LoadUint64(&table.items)
items := table.items.Load()
if head > items {
head = items
}
hidden := atomic.LoadUint64(&table.itemHidden)
hidden := table.itemHidden.Load()
if hidden > tail {
tail = hidden
}
@ -389,8 +386,8 @@ func (f *Freezer) repair() error {
return err
}
}
atomic.StoreUint64(&f.frozen, head)
atomic.StoreUint64(&f.tail, tail)
f.frozen.Store(head)
f.tail.Store(tail)
return nil
}

@ -416,7 +413,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
// and that error will be returned.
forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
var (
items = atomic.LoadUint64(&t.items)
items = t.items.Load()
batchSize = uint64(1024)
maxBytes = uint64(1024 * 1024)
)
@ -439,7 +436,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
}
// TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
// process assumes no deletion at tail and needs to be modified to account for that.
if table.itemOffset > 0 || table.itemHidden > 0 {
if table.itemOffset.Load() > 0 || table.itemHidden.Load() > 0 {
return fmt.Errorf("migration not supported for tail-deleted freezers")
}
ancientsPath := filepath.Dir(table.index.Name())
@ -455,7 +452,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
out []byte
start = time.Now()
logged = time.Now()
offset = newTable.items
offset = newTable.items.Load()
)
if offset > 0 {
log.Info("found previous migration attempt", "migrated", offset)
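The before/after shape of these Freezer changes, condensed into a sketch with illustrative names:

	package main

	import "sync/atomic"

	// Before: a raw counter driven through atomic.LoadUint64/StoreUint64,
	// with a warning comment about 64-bit alignment on 32-bit platforms.
	type freezerOld struct {
		frozen uint64 // accessed atomically; must stay 64-bit aligned
	}

	func (f *freezerOld) ancients() uint64 { return atomic.LoadUint64(&f.frozen) }

	// After: atomic.Uint64 guarantees its own alignment (Go 1.19+), so the
	// warning comment and the package-level helper calls can both go.
	type freezerNew struct {
		frozen atomic.Uint64
	}

	func (f *freezerNew) ancients() uint64 { return f.frozen.Load() }

	func main() {
		f := new(freezerNew)
		f.frozen.Store(42)
		_ = f.ancients()
		_ = (&freezerOld{}).ancients()
	}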
@ -18,7 +18,6 @@ package rawdb

import (
"fmt"
"sync/atomic"

"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/rlp"
@ -113,7 +112,7 @@ func (t *freezerTable) newBatch() *freezerTableBatch {
func (batch *freezerTableBatch) reset() {
batch.dataBuffer = batch.dataBuffer[:0]
batch.indexBuffer = batch.indexBuffer[:0]
batch.curItem = atomic.LoadUint64(&batch.t.items)
batch.curItem = batch.t.items.Load()
batch.totalBytes = 0
}

@ -207,7 +206,7 @@ func (batch *freezerTableBatch) commit() error {

// Update headBytes of table.
batch.t.headBytes += dataSize
atomic.StoreUint64(&batch.t.items, batch.curItem)
batch.t.items.Store(batch.curItem)

// Update metrics.
batch.t.sizeGauge.Inc(dataSize + indexSize)
@ -88,18 +88,15 @@ func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uin
// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
// file (uncompressed 64 bit indices into the data file).
type freezerTable struct {
// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
items uint64 // Number of items stored in the table (including items removed from tail)
itemOffset uint64 // Number of items removed from the table
items atomic.Uint64 // Number of items stored in the table (including items removed from tail)
itemOffset atomic.Uint64 // Number of items removed from the table

// itemHidden is the number of items marked as deleted. Tail deletion is
// only supported at file level which means the actual deletion will be
// delayed until the entire data file is marked as deleted. Before that
// these items will be hidden to prevent being visited again. The value
// should never be lower than itemOffset.
itemHidden uint64
itemHidden atomic.Uint64

noCompression bool // if true, disables snappy compression. Note: does not work retroactively
readonly bool
@ -241,14 +238,14 @@ func (t *freezerTable) repair() error {
// which is not enough in theory but enough in practice.
// TODO: use uint64 to represent total removed items.
t.tailId = firstIndex.filenum
t.itemOffset = uint64(firstIndex.offset)
t.itemOffset.Store(uint64(firstIndex.offset))

// Load metadata from the file
meta, err := loadMetadata(t.meta, t.itemOffset)
meta, err := loadMetadata(t.meta, t.itemOffset.Load())
if err != nil {
return err
}
t.itemHidden = meta.VirtualTail
t.itemHidden.Store(meta.VirtualTail)

// Read the last index, use the default value in case the freezer is empty
if offsetsSize == indexEntrySize {
@ -331,7 +328,7 @@ func (t *freezerTable) repair() error {
}
}
// Update the item and byte counters and return
t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
t.items.Store(t.itemOffset.Load() + uint64(offsetsSize/indexEntrySize-1)) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum

@ -346,9 +343,9 @@ func (t *freezerTable) repair() error {
return err
}
if verbose {
t.logger.Info("Chain freezer table opened", "items", t.items, "size", t.headBytes)
t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "size", t.headBytes)
} else {
t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
t.logger.Debug("Chain freezer table opened", "items", t.items.Load(), "size", common.StorageSize(t.headBytes))
}
return nil
}
@ -382,11 +379,11 @@ func (t *freezerTable) truncateHead(items uint64) error {
defer t.lock.Unlock()

// Ensure the given truncate target falls in the correct range
existing := atomic.LoadUint64(&t.items)
existing := t.items.Load()
if existing <= items {
return nil
}
if items < atomic.LoadUint64(&t.itemHidden) {
if items < t.itemHidden.Load() {
return errors.New("truncation below tail")
}
// We need to truncate, save the old size for metrics tracking
@ -403,7 +400,7 @@ func (t *freezerTable) truncateHead(items uint64) error {

// Truncate the index file first, the tail position is also considered
// when calculating the new freezer table length.
length := items - atomic.LoadUint64(&t.itemOffset)
length := items - t.itemOffset.Load()
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
@ -438,7 +435,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
}
// All data files truncated, set internal counters and return
t.headBytes = int64(expected.offset)
atomic.StoreUint64(&t.items, items)
t.items.Store(items)

// Retrieve the new size and update the total size counter
newSize, err := t.sizeNolock()
@ -455,10 +452,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
defer t.lock.Unlock()

// Ensure the given truncate target falls in the correct range
if atomic.LoadUint64(&t.itemHidden) >= items {
if t.itemHidden.Load() >= items {
return nil
}
if atomic.LoadUint64(&t.items) < items {
if t.items.Load() < items {
return errors.New("truncation above head")
}
// Load the new tail index by the given new tail position
@ -466,10 +463,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
newTailId uint32
buffer = make([]byte, indexEntrySize)
)
if atomic.LoadUint64(&t.items) == items {
if t.items.Load() == items {
newTailId = t.headId
} else {
offset := items - atomic.LoadUint64(&t.itemOffset)
offset := items - t.itemOffset.Load()
if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
return err
}
@ -478,7 +475,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
newTailId = newTail.filenum
}
// Update the virtual tail marker and hidden these entries in table.
atomic.StoreUint64(&t.itemHidden, items)
t.itemHidden.Store(items)
if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
return err
}
@ -501,7 +498,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
// Count how many items can be deleted from the file.
var (
newDeleted = items
deleted = atomic.LoadUint64(&t.itemOffset)
deleted = t.itemOffset.Load()
)
for current := items - 1; current >= deleted; current -= 1 {
if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
@ -541,7 +538,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
}
// Release any files before the current tail
t.tailId = newTailId
atomic.StoreUint64(&t.itemOffset, newDeleted)
t.itemOffset.Store(newDeleted)
t.releaseFilesBefore(t.tailId, true)

// Retrieve the new size and update the total size counter
@ -654,7 +651,7 @@ func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
// it will return error.
func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
// Apply the table-offset
from = from - t.itemOffset
from = from - t.itemOffset.Load()
// For reading N items, we need N+1 indices.
buffer := make([]byte, (count+1)*indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@ -744,8 +741,8 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return nil, nil, errClosed
}
var (
items = atomic.LoadUint64(&t.items) // the total items(head + 1)
hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
items = t.items.Load() // the total items(head + 1)
hidden = t.itemHidden.Load() // the number of hidden items
)
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
@ -826,13 +823,16 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
break
}
}

// Update metrics.
t.readMeter.Mark(int64(totalSize))
return output[:outputSize], sizes, nil
}

// has returns an indicator whether the specified number data is still accessible
// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
return t.items.Load() > number && t.itemHidden.Load() <= number
}

// size returns the total data size in the freezer table.
@ -922,7 +922,7 @@ func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
return
}
fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", meta.Version,
atomic.LoadUint64(&t.items), atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load())

buf := make([]byte, indexEntrySize)
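A runnable restatement of the visibility window the table code enforces, namely that an item is retrievable iff itemHidden <= number < items:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func has(items, itemHidden *atomic.Uint64, number uint64) bool {
		return items.Load() > number && itemHidden.Load() <= number
	}

	func main() {
		var items, hidden atomic.Uint64
		items.Store(10) // head: next item number to be written
		hidden.Store(3) // virtual tail: lower items are hidden

		fmt.Println(has(&items, &hidden, 2))  // false: below the tail
		fmt.Println(has(&items, &hidden, 5))  // true: inside the window
		fmt.Println(has(&items, &hidden, 10)) // false: past the head
	}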
@ -24,7 +24,6 @@ import (
"os"
"path/filepath"
"reflect"
"sync/atomic"
"testing"
"testing/quick"

@ -191,7 +190,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
writeChunks(t, f, 255, 15)

// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
t.Fatal(err)
}
f.Close()
@ -317,7 +316,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
writeChunks(t, f, 9, 15)

// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
f.Close()
t.Fatal(err)
}
@ -350,8 +349,8 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
t.Fatal(err)
}
defer f.Close()
if f.items != 7 {
t.Fatalf("expected %d items, got %d", 7, f.items)
if f.items.Load() != 7 {
t.Fatalf("expected %d items, got %d", 7, f.items.Load())
}
if err := assertFileSize(fileToCrop, 15); err != nil {
t.Fatal(err)
@ -374,7 +373,7 @@ func TestFreezerTruncate(t *testing.T) {
writeChunks(t, f, 30, 15)

// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
t.Fatal(err)
}
f.Close()
@ -388,8 +387,8 @@ func TestFreezerTruncate(t *testing.T) {
}
defer f.Close()
f.truncateHead(10) // 150 bytes
if f.items != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items)
if f.items.Load() != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items.Load())
}
// 45, 45, 45, 15 -- bytes should be 15
if f.headBytes != 15 {
@ -444,9 +443,9 @@ func TestFreezerRepairFirstFile(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if f.items != 1 {
if f.items.Load() != 1 {
f.Close()
t.Fatalf("expected %d items, got %d", 0, f.items)
t.Fatalf("expected %d items, got %d", 0, f.items.Load())
}

// Write 40 bytes
@ -483,7 +482,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
writeChunks(t, f, 30, 15)

// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
t.Fatal(err)
}
f.Close()
@ -495,9 +494,9 @@ func TestFreezerReadAndTruncate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if f.items != 30 {
if f.items.Load() != 30 {
f.Close()
t.Fatalf("expected %d items, got %d", 0, f.items)
t.Fatalf("expected %d items, got %d", 0, f.items.Load())
}
for y := byte(0); y < 30; y++ {
f.Retrieve(uint64(y))
@ -1210,13 +1209,13 @@ func runRandTest(rt randTest) bool {
rt[i].err = fmt.Errorf("failed to reload table %v", err)
}
case opCheckAll:
tail := atomic.LoadUint64(&f.itemHidden)
head := atomic.LoadUint64(&f.items)
tail := f.itemHidden.Load()
head := f.items.Load()

if tail == head {
continue
}
got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
if err != nil {
rt[i].err = err
} else {
@ -1238,7 +1237,7 @@ func runRandTest(rt randTest) bool {
if len(step.items) == 0 {
continue
}
tail := atomic.LoadUint64(&f.itemHidden)
tail := f.itemHidden.Load()
for i := 0; i < len(step.items); i++ {
blobs = append(blobs, values[step.items[i]-tail])
}
@ -1254,7 +1253,7 @@ func runRandTest(rt randTest) bool {
case opTruncateHead:
f.truncateHead(step.target)

length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
length := f.items.Load() - f.itemHidden.Load()
values = values[:length]

case opTruncateHeadAll:
@ -1262,10 +1261,10 @@ func runRandTest(rt randTest) bool {
values = nil

case opTruncateTail:
prev := atomic.LoadUint64(&f.itemHidden)
prev := f.itemHidden.Load()
f.truncateTail(step.target)

truncated := atomic.LoadUint64(&f.itemHidden) - prev
truncated := f.itemHidden.Load() - prev
values = values[truncated:]

case opTruncateTailAll:
@ -273,10 +273,10 @@ func TestFreezerReadonlyValidate(t *testing.T) {
bBatch := f.tables["b"].newBatch()
require.NoError(t, bBatch.AppendRaw(0, item))
require.NoError(t, bBatch.commit())
if f.tables["a"].items != 3 {
if f.tables["a"].items.Load() != 3 {
t.Fatalf("unexpected number of items in table")
}
if f.tables["b"].items != 1 {
if f.tables["b"].items.Load() != 1 {
t.Fatalf("unexpected number of items in table")
}
require.NoError(t, f.Close())
@ -68,36 +68,36 @@ type Trie interface {
// TODO(fjl): remove this when StateTrie is removed
GetKey([]byte) []byte

// TryGet returns the value for key stored in the trie. The value bytes must
// not be modified by the caller. If a node was not found in the database, a
// trie.MissingNodeError is returned.
TryGet(key []byte) ([]byte, error)
// GetStorage returns the value for key stored in the trie. The value bytes
// must not be modified by the caller. If a node was not found in the database,
// a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error)

// TryGetAccount abstracts an account read from the trie. It retrieves the
// GetAccount abstracts an account read from the trie. It retrieves the
// account blob from the trie with provided account address and decodes it
// with associated decoding algorithm. If the specified account is not in
// the trie, nil will be returned. If the trie is corrupted(e.g. some nodes
// are missing or the account blob is incorrect for decoding), an error will
// be returned.
TryGetAccount(address common.Address) (*types.StateAccount, error)
GetAccount(address common.Address) (*types.StateAccount, error)

// TryUpdate associates key with value in the trie. If value has length zero, any
// existing value is deleted from the trie. The value bytes must not be modified
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
TryUpdate(key, value []byte) error
UpdateStorage(addr common.Address, key, value []byte) error

// TryUpdateAccount abstracts an account write to the trie. It encodes the
// UpdateAccount abstracts an account write to the trie. It encodes the
// provided account object with associated algorithm and then updates it
// in the trie with provided address.
TryUpdateAccount(address common.Address, account *types.StateAccount) error
UpdateAccount(address common.Address, account *types.StateAccount) error

// TryDelete removes any existing value for key from the trie. If a node was not
// found in the database, a trie.MissingNodeError is returned.
TryDelete(key []byte) error
// DeleteStorage removes any existing value for key from the trie. If a node
// was not found in the database, a trie.MissingNodeError is returned.
DeleteStorage(addr common.Address, key []byte) error

// TryDeleteAccount abstracts an account deletion from the trie.
TryDeleteAccount(address common.Address) error
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error

// Hash returns the root hash of the trie. It does not write to the database and
// can be used even if the trie doesn't have one.
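A minimal caller sketch against the renamed interface, assuming only the methods shown above; note that storage operations now take the account address explicitly:

	// readState is a hedged example, not geth code; tr is any
	// implementation of the Trie interface above.
	func readState(tr Trie, addr common.Address, slot []byte) ([]byte, *types.StateAccount, error) {
		val, err := tr.GetStorage(addr, slot) // formerly TryGet(slot)
		if err != nil {
			return nil, nil, err
		}
		acct, err := tr.GetAccount(addr) // formerly TryGetAccount(addr)
		return val, acct, err
	}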
@ -371,7 +371,7 @@ func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash
}
t := trie.NewStackTrieWithOwner(nodeWriter, owner)
for leaf := range in {
t.TryUpdate(leaf.key[:], leaf.value)
t.Update(leaf.key[:], leaf.value)
}
var root common.Hash
if db == nil {
@ -103,7 +103,7 @@ type diffLayer struct {
memory uint64 // Approximate guess as to how much memory we use

root common.Hash // Root hash to which this snapshot diff belongs to
stale uint32 // Signals that the layer became stale (state progressed)
stale atomic.Bool // Signals that the layer became stale (state progressed)

// destructSet is a very special helper marker. If an account is marked as
// deleted, then it's recorded in this set. However it's allowed that an account
@ -267,7 +267,7 @@ func (dl *diffLayer) Parent() snapshot {
// Stale return whether this layer has become stale (was flattened across) or if
// it's still live.
func (dl *diffLayer) Stale() bool {
return atomic.LoadUint32(&dl.stale) != 0
return dl.stale.Load()
}

// Account directly retrieves the account associated with a particular hash in
@ -449,7 +449,7 @@ func (dl *diffLayer) flatten() snapshot {

// Before actually writing all our data to the parent, first ensure that the
// parent hasn't been 'corrupted' by someone else already flattening into it
if atomic.SwapUint32(&parent.stale, 1) != 0 {
if parent.stale.Swap(true) {
panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
}
// Overwrite all the updated accounts blindly, merge the sorted list
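atomic.Bool.Swap returns the previous value, which is what lets flatten detect a double-flatten in one step. A self-contained sketch:

	package main

	import (
		"errors"
		"sync/atomic"
	)

	// markStale flips the flag exactly once; a second caller sees the old
	// true value and can report the conflict instead of proceeding.
	func markStale(stale *atomic.Bool) error {
		if stale.Swap(true) {
			return errors.New("parent diff layer is stale")
		}
		return nil
	}

	func main() {
		var stale atomic.Bool
		_ = markStale(&stale) // first caller wins
		if err := markStale(&stale); err != nil {
			println(err.Error()) // second caller detects staleness
		}
	}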
@ -230,7 +230,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
if origin == nil && !diskMore {
stackTr := trie.NewStackTrie(nil)
for i, key := range keys {
stackTr.TryUpdate(key, vals[i])
stackTr.Update(key, vals[i])
}
if gotRoot := stackTr.Hash(); gotRoot != root {
return &proofResult{
@ -161,7 +161,7 @@ func newHelper() *testHelper {

func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
val, _ := rlp.EncodeToBytes(acc)
t.accTrie.Update([]byte(acckey), val)
t.accTrie.MustUpdate([]byte(acckey), val)
}

func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
@ -186,7 +186,7 @@ func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string
id := trie.StorageTrieID(stateRoot, owner, common.Hash{})
stTrie, _ := trie.NewStateTrie(id, t.triedb)
for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i]))
stTrie.MustUpdate([]byte(k), []byte(vals[i]))
}
if !commit {
return stTrie.Hash().Bytes()
@ -491,7 +491,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
)
acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e

// Identical in the snap
key := hashData([]byte("acc-1"))
@ -562,7 +562,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
)
acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e

// Identical in the snap
key := hashData([]byte("acc-1"))
@ -613,8 +613,8 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
{
acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val)
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val)

rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val)
rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val)
@ -650,7 +650,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
{
acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)

junk := make([]byte, 100)
copy(junk, []byte{0xde, 0xad})
@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
@ -272,7 +271,7 @@ func (t *Tree) Disable() {
case *diffLayer:
// If the layer is a simple diff, simply mark as stale
layer.lock.Lock()
atomic.StoreUint32(&layer.stale, 1)
layer.stale.Store(true)
layer.lock.Unlock()

default:
@ -726,7 +725,7 @@ func (t *Tree) Rebuild(root common.Hash) {
case *diffLayer:
// If the layer is a simple diff, simply mark as stale
layer.lock.Lock()
atomic.StoreUint32(&layer.stale, 1)
layer.stale.Store(true)
layer.lock.Unlock()

default:
@ -118,7 +118,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
t.Fatalf("failed to merge diff layer onto disk: %v", err)
}
// Since the base layer was modified, ensure that data retrieval on the external reference fail
// Since the base layer was modified, ensure that data retrievals on the external reference fail
if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
}
@ -201,7 +201,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
s.db.setError(err)
return common.Hash{}
}
enc, err = tr.TryGet(key.Bytes())
enc, err = tr.GetStorage(s.address, key.Bytes())
if metrics.EnabledExpensive {
s.db.StorageReads += time.Since(start)
}
@ -253,7 +253,7 @@ func (s *stateObject) finalise(prefetch bool) {
}
}
if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
s.db.prefetcher.prefetch(s.addrHash, s.data.Root, slotsToPrefetch)
s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch)
}
if len(s.dirtyStorage) > 0 {
s.dirtyStorage = make(Storage)
@ -294,7 +294,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {

var v []byte
if (value == common.Hash{}) {
if err := tr.TryDelete(key[:]); err != nil {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
return nil, err
}
@ -302,7 +302,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
} else {
// Encoding []byte cannot fail, ok to ignore the error.
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
if err := tr.TryUpdate(key[:], v); err != nil {
if err := tr.UpdateStorage(s.address, key[:], v); err != nil {
s.db.setError(err)
return nil, err
}
@ -521,7 +521,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
}
// Encode the account and update the account trie
addr := obj.Address()
if err := s.trie.TryUpdateAccount(addr, &obj.data); err != nil {
if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}

@ -542,7 +542,7 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
}
// Delete the account from the trie
addr := obj.Address()
if err := s.trie.TryDeleteAccount(addr); err != nil {
if err := s.trie.DeleteAccount(addr); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
}
}
@ -596,7 +596,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if data == nil {
start := time.Now()
var err error
data, err = s.trie.TryGetAccount(addr)
data, err = s.trie.GetAccount(addr)
if metrics.EnabledExpensive {
s.AccountReads += time.Since(start)
}
@ -880,7 +880,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
}
if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
s.prefetcher.prefetch(common.Hash{}, s.originalRoot, addressesToPrefetch)
s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
@ -213,14 +213,14 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
for i, node := range nodeElements {
if bypath {
if len(node.syncPath) == 1 {
data, _, err := srcTrie.TryGetNode(node.syncPath[0])
data, _, err := srcTrie.GetNode(node.syncPath[0])
if err != nil {
t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[0], err)
}
nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
} else {
var acc types.StateAccount
if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
if err := rlp.DecodeBytes(srcTrie.MustGet(node.syncPath[0]), &acc); err != nil {
t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
}
id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
@ -228,7 +228,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if err != nil {
t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err)
}
data, _, err := stTrie.TryGetNode(node.syncPath[1])
data, _, err := stTrie.GetNode(node.syncPath[1])
if err != nil {
t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[1], err)
}
@ -141,7 +141,7 @@ func (p *triePrefetcher) copy() *triePrefetcher {
}

// prefetch schedules a batch of trie items to prefetch.
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) {
func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) {
// If the prefetcher is an inactive one, bail out
if p.fetches != nil {
return
@ -150,7 +150,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]
id := p.trieID(owner, root)
fetcher := p.fetchers[id]
if fetcher == nil {
fetcher = newSubfetcher(p.db, p.root, owner, root)
fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
p.fetchers[id] = fetcher
}
fetcher.schedule(keys)
@ -209,6 +209,7 @@ type subfetcher struct {
state common.Hash // Root hash of the state to prefetch
owner common.Hash // Owner of the trie, usually account hash
root common.Hash // Root hash of the trie to prefetch
addr common.Address // Address of the account that the trie belongs to
trie Trie // Trie being populated with nodes

tasks [][]byte // Items queued up for retrieval
@ -226,12 +227,13 @@ type subfetcher struct {

// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher {
func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
sf := &subfetcher{
db: db,
state: state,
owner: owner,
root: root,
addr: addr,
wake: make(chan struct{}, 1),
stop: make(chan struct{}),
term: make(chan struct{}),
@ -336,7 +338,11 @@ func (sf *subfetcher) loop() {
if _, ok := sf.seen[string(task)]; ok {
sf.dups++
} else {
sf.trie.TryGet(task)
if len(task) == common.AddressLength {
sf.trie.GetAccount(common.BytesToAddress(task))
} else {
sf.trie.GetStorage(sf.addr, task)
}
sf.seen[string(task)] = struct{}{}
}
}
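The loop change routes tasks by key shape. A hedged sketch of that dispatch (the function name is hypothetical; Trie is the state interface from earlier in this diff):

	// dispatch treats 20-byte tasks as account addresses and anything
	// else as a storage slot key under the subfetcher's account.
	func dispatch(tr Trie, addr common.Address, task []byte) {
		if len(task) == common.AddressLength {
			tr.GetAccount(common.BytesToAddress(task)) // warms account nodes
		} else {
			tr.GetStorage(addr, task) // warms storage nodes under addr
		}
	}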
@ -47,19 +47,19 @@ func TestCopyAndClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
time.Sleep(1 * time.Second)
a := prefetcher.trie(common.Hash{}, db.originalRoot)
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
b := prefetcher.trie(common.Hash{}, db.originalRoot)
cpy := prefetcher.copy()
cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
c := cpy.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
cpy2 := cpy.copy()
cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
d := cpy2.trie(common.Hash{}, db.originalRoot)
cpy.close()
cpy2.close()
@ -72,7 +72,7 @@ func TestUseAfterClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
a := prefetcher.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
b := prefetcher.trie(common.Hash{}, db.originalRoot)
@ -88,7 +88,7 @@ func TestCopyClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
cpy := prefetcher.copy()
a := prefetcher.trie(common.Hash{}, db.originalRoot)
b := cpy.trie(common.Hash{}, db.originalRoot)
@ -47,7 +47,7 @@ func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine conse
// Prefetch processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb, but any changes are discarded. The
// only goal is to pre-cache transaction signatures and state trie nodes.
func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) {
func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool) {
var (
header = block.Header()
gaspool = new(GasPool).AddGas(block.GasLimit())
@ -59,7 +59,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
byzantium := p.config.IsByzantium(block.Number())
for i, tx := range block.Transactions() {
// If block precaching was interrupted, abort
if interrupt != nil && atomic.LoadUint32(interrupt) == 1 {
if interrupt != nil && interrupt.Load() {
return
}
// Convert the transaction into an executable message and pre-cache its sender
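The interrupt flag drops from a *uint32 polled with LoadUint32 == 1 to a *atomic.Bool. A self-contained sketch of the cancellation pattern:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// run aborts between items once the caller sets the interrupt flag.
	func run(jobs []string, interrupt *atomic.Bool) {
		for _, job := range jobs {
			if interrupt != nil && interrupt.Load() {
				return // precaching was interrupted, abort
			}
			fmt.Println("processing", job)
		}
	}

	func main() {
		interrupt := new(atomic.Bool)
		interrupt.Store(true) // e.g. set from another goroutine on a new head
		run([]string{"tx1", "tx2"}, interrupt)
	}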
@ -136,7 +136,7 @@ type Message struct {
Data []byte
AccessList types.AccessList

// When SkipAccountCheckss is true, the message nonce is not checked against the
// When SkipAccountChecks is true, the message nonce is not checked against the
// account nonce in state. It also disables checking that the sender is an EOA.
// This field will be set to true for operations like RPC eth_call.
SkipAccountChecks bool
@ -332,10 +332,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
return nil, err
}

if st.evm.Config.Debug {
st.evm.Config.Tracer.CaptureTxStart(st.initialGas)
if tracer := st.evm.Config.Tracer; tracer != nil {
tracer.CaptureTxStart(st.initialGas)
defer func() {
st.evm.Config.Tracer.CaptureTxEnd(st.gasRemaining)
tracer.CaptureTxEnd(st.gasRemaining)
}()
}

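The tracer change replaces the Config.Debug flag with a nil check, and captures the tracer in a local so the deferred hook uses the same value. A sketch with a stand-in interface (txTracer is not the geth type):

	// txTracer mimics the two lifecycle hooks used in the hunk above.
	type txTracer interface {
		CaptureTxStart(gasLimit uint64)
		CaptureTxEnd(restGas uint64)
	}

	// execWithTracing fires the hooks only when a tracer is configured.
	func execWithTracing(tracer txTracer, initialGas uint64, exec func() uint64) (rest uint64) {
		if tracer != nil {
			tracer.CaptureTxStart(initialGas)
			defer func() { tracer.CaptureTxEnd(rest) }() // rest is the named result
		}
		rest = exec()
		return rest
	}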
@ -270,10 +270,10 @@ func newList(strict bool) *list {
}
}

// Overlaps returns whether the transaction specified has the same nonce as one
// already contained within the list.
func (l *list) Overlaps(tx *types.Transaction) bool {
return l.txs.Get(tx.Nonce()) != nil
// Contains returns whether the list contains a transaction
// with the provided nonce.
func (l *list) Contains(nonce uint64) bool {
return l.txs.Get(nonce) != nil
}

// Add tries to insert a new transaction into the list, returning whether the
@ -23,6 +23,7 @@ import (
"math/big"
"sort"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/common"
@ -250,14 +251,14 @@ type TxPool struct {
signer types.Signer
mu sync.RWMutex

istanbul bool // Fork indicator whether we are in the istanbul stage.
eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions.
eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions.
shanghai bool // Fork indicator whether we are in the Shanghai stage.
istanbul atomic.Bool // Fork indicator whether we are in the istanbul stage.
eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions.
eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions.
shanghai atomic.Bool // Fork indicator whether we are in the Shanghai stage.

currentState *state.StateDB // Current state in the blockchain head
pendingNonces *noncer // Pending state tracking virtual nonces
currentMaxGas uint64 // Current gas limit for transaction caps
currentMaxGas atomic.Uint64 // Current gas limit for transaction caps

locals *accountSet // Set of local transaction to exempt from eviction rules
journal *journal // Journal of local transaction to back up to disk
@ -592,15 +593,17 @@ func (pool *TxPool) local() map[common.Address]types.Transactions {
return txs
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
// validateTxBasics checks whether a transaction is valid according to the consensus
// rules, but does not check state-dependent validation such as sufficient balance.
// This check is meant as an early check which only needs to be performed once,
// and does not require the pool mutex to be held.
func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error {
// Accept only legacy transactions until EIP-2718/2930 activates.
if !pool.eip2718 && tx.Type() != types.LegacyTxType {
if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType {
return core.ErrTxTypeNotSupported
}
// Reject dynamic fee transactions until EIP-1559 activates.
if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType {
return core.ErrTxTypeNotSupported
}
// Reject transactions over defined size to prevent DOS attacks
@ -608,7 +611,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrOversizedData
}
// Check whether the init code size has been exceeded.
if pool.shanghai && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
if pool.shanghai.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
}
// Transactions can't be negative. This may never happen using RLP decoded
@ -617,7 +620,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrNegativeValue
}
// Ensure the transaction doesn't exceed the current block limit gas.
if pool.currentMaxGas < tx.Gas() {
if pool.currentMaxGas.Load() < tx.Gas() {
return ErrGasLimit
}
// Sanity check for extremely large numbers
@ -632,14 +635,29 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return core.ErrTipAboveFeeCap
}
// Make sure the transaction is signed properly.
from, err := types.Sender(pool.signer, tx)
if err != nil {
if _, err := types.Sender(pool.signer, tx); err != nil {
return ErrInvalidSender
}
// Drop non-local transactions under our own minimal accepted gas price or tip
if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
return ErrUnderpriced
}
// Ensure the transaction has more gas than the basic tx fee.
intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul.Load(), pool.shanghai.Load())
if err != nil {
return err
}
if tx.Gas() < intrGas {
return core.ErrIntrinsicGas
}
return nil
}

// validateTx checks whether a transaction is valid according to the consensus
// rules and adheres to some heuristic limits of the local node (price and size).
func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
// Signature has been checked already, this cannot error.
from, _ := types.Sender(pool.signer, tx)
// Ensure the transaction adheres to nonce ordering
if pool.currentState.GetNonce(from) > tx.Nonce() {
return core.ErrNonceTooLow
@ -664,15 +682,6 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrOverdraft
}
}

// Ensure the transaction has more gas than the basic tx fee.
intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai)
if err != nil {
return err
}
if tx.Gas() < intrGas {
return core.ErrIntrinsicGas
}
return nil
}

@ -736,11 +745,11 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}

// If the new transaction is a future transaction it should never churn pending transactions
if !isLocal && pool.isFuture(from, tx) {
if !isLocal && pool.isGapped(from, tx) {
var replacesPending bool
for _, dropTx := range drop {
dropSender, _ := types.Sender(pool.signer, dropTx)
if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) {
if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
replacesPending = true
break
}
@ -765,7 +774,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}

// Try to replace an existing transaction in the pending pool
if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
// Nonce already pending, check if required price bump is met
inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
@ -808,18 +817,26 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
return replaced, nil
}

// isFuture reports whether the given transaction is immediately executable.
func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool {
list := pool.pending[from]
if list == nil {
return pool.pendingNonces.get(from) != tx.Nonce()
// isGapped reports whether the given transaction is immediately executable.
func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool {
// Short circuit if transaction matches pending nonce and can be promoted
// to pending list as an executable transaction.
next := pool.pendingNonces.get(from)
if tx.Nonce() == next {
return false
}
// Sender has pending transactions.
if old := list.txs.Get(tx.Nonce()); old != nil {
return false // It replaces a pending transaction.
// The transaction has a nonce gap with pending list, it's only considered
// as executable if transactions in queue can fill up the nonce gap.
queue, ok := pool.queue[from]
if !ok {
return true
}
// Not replacing, check if parent nonce exists in pending.
return list.txs.Get(tx.Nonce()-1) == nil
for nonce := next; nonce < tx.Nonce(); nonce++ {
if !queue.Contains(nonce) {
return true // txs in queue can't fill up the nonce gap
}
}
return false
}

// enqueueTx inserts a new transaction into the non-executable transaction queue.
@ -969,12 +986,12 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
knownTxMeter.Mark(1)
continue
}
// Exclude transactions with invalid signatures as soon as
// possible and cache senders in transactions before
// obtaining lock
_, err := types.Sender(pool.signer, tx)
if err != nil {
errs[i] = ErrInvalidSender
// Exclude transactions with basic errors, e.g invalid signatures and
// insufficient intrinsic gas as soon as possible and cache senders
// in transactions before obtaining lock

if err := pool.validateTxBasics(tx, local); err != nil {
errs[i] = err
invalidTxMeter.Mark(1)
continue
}
@ -1364,7 +1381,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
}
pool.currentState = statedb
pool.pendingNonces = newNoncer(statedb)
pool.currentMaxGas = newHead.GasLimit
pool.currentMaxGas.Store(newHead.GasLimit)

// Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject))
@ -1373,10 +1390,10 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {

// Update all fork indicator by next pending block number.
next := new(big.Int).Add(newHead.Number, big.NewInt(1))
pool.istanbul = pool.chainconfig.IsIstanbul(next)
pool.eip2718 = pool.chainconfig.IsBerlin(next)
pool.eip1559 = pool.chainconfig.IsLondon(next)
pool.shanghai = pool.chainconfig.IsShanghai(uint64(time.Now().Unix()))
pool.istanbul.Store(pool.chainconfig.IsIstanbul(next))
pool.eip2718.Store(pool.chainconfig.IsBerlin(next))
pool.eip1559.Store(pool.chainconfig.IsLondon(next))
pool.shanghai.Store(pool.chainconfig.IsShanghai(uint64(time.Now().Unix())))
}

// promoteExecutables moves transactions that have become processable from the
@ -1400,7 +1417,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
}
log.Trace("Removed old queued transactions", "count", len(forwards))
// Drop all transactions that are too costly (low balance or out of gas)
drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
for _, tx := range drops {
hash := tx.Hash()
pool.all.Remove(hash)
@ -1597,7 +1614,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Removed old pending transaction", "hash", hash)
}
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash)
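The isGapped rewrite is the behavioral core of this file's changes: a transaction above the pending nonce only counts as executable when queued transactions fill every missing nonce. A standalone restatement:

	package main

	import "fmt"

	func isGapped(next, txNonce uint64, queued map[uint64]bool) bool {
		if txNonce == next {
			return false // matches the pending nonce, promotable
		}
		for n := next; n < txNonce; n++ {
			if !queued[n] {
				return true // queue cannot fill up the nonce gap
			}
		}
		return false
	}

	func main() {
		queued := map[uint64]bool{5: true, 6: true}
		fmt.Println(isGapped(5, 5, queued)) // false: next pending nonce
		fmt.Println(isGapped(5, 7, queued)) // false: 5 and 6 bridge the gap
		fmt.Println(isGapped(5, 9, queued)) // true: 7 and 8 are missing
	}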
@ -42,7 +42,7 @@ func count(t *testing.T, pool *TxPool) (pending int, queued int) {
return pending, queued
}

func fillPool(t *testing.T, pool *TxPool) {
func fillPool(t testing.TB, pool *TxPool) {
t.Helper()
// Create a number of test accounts, fund them and make transactions
executableTxs := types.Transactions{}
@ -189,7 +189,7 @@ func TestTransactionZAttack(t *testing.T) {
key, _ := crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
for j := 0; j < int(pool.config.GlobalSlots); j++ {
overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 60000000000, 21000, big.NewInt(500), key))
overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key))
}
}
pool.AddRemotesSync(overDraftTxs)
@ -210,3 +210,27 @@ func TestTransactionZAttack(t *testing.T) {
newIvPending, ivPending, pool.config.GlobalSlots, newQueued)
}
}

func BenchmarkFutureAttack(b *testing.B) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
pool := NewTxPool(config, eip1559Config, blockchain)
defer pool.Stop()
fillPool(b, pool)

key, _ := crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
futureTxs := types.Transactions{}

for n := 0; n < b.N; n++ {
futureTxs = append(futureTxs, pricedTransaction(1000+uint64(n), 100000, big.NewInt(500), key))
}
b.ResetTimer()
for i := 0; i < 5; i++ {
pool.AddRemotesSync(futureTxs)
}
}
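Widening fillPool from *testing.T to testing.TB is what lets the new benchmark reuse it; the general pattern, sketched with a hypothetical helper:

	import "testing"

	// setupAccounts runs from both tests and benchmarks because it only
	// uses the testing.TB subset shared by *testing.T and *testing.B.
	func setupAccounts(tb testing.TB, n int) []int {
		tb.Helper()
		accounts := make([]int, n)
		for i := range accounts {
			accounts[i] = i
		}
		return accounts
	}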
@ -293,28 +293,29 @@ func TestInvalidTransactions(t *testing.T) {
|
||||
tx := transaction(0, 100, key)
|
||||
from, _ := deriveSender(tx)
|
||||
|
||||
// Intrinsic gas too low
|
||||
testAddBalance(pool, from, big.NewInt(1))
|
||||
if err := pool.AddRemote(tx); !errors.Is(err, core.ErrInsufficientFunds) {
|
||||
t.Error("expected", core.ErrInsufficientFunds)
|
||||
if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
|
||||
t.Errorf("want %v have %v", want, err)
|
||||
}
|
||||
|
||||
balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice()))
|
||||
testAddBalance(pool, from, balance)
|
||||
if err := pool.AddRemote(tx); !errors.Is(err, core.ErrIntrinsicGas) {
|
||||
t.Error("expected", core.ErrIntrinsicGas, "got", err)
|
||||
// Insufficient funds
|
||||
tx = transaction(0, 100000, key)
|
||||
if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
|
||||
t.Errorf("want %v have %v", want, err)
|
||||
}
|
||||
|
||||
testSetNonce(pool, from, 1)
|
||||
testAddBalance(pool, from, big.NewInt(0xffffffffffffff))
|
||||
tx = transaction(0, 100000, key)
|
||||
if err := pool.AddRemote(tx); !errors.Is(err, core.ErrNonceTooLow) {
|
||||
t.Error("expected", core.ErrNonceTooLow)
|
||||
if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
|
||||
t.Errorf("want %v have %v", want, err)
|
||||
}
|
||||
|
||||
tx = transaction(1, 100000, key)
|
||||
pool.gasPrice = big.NewInt(1000)
|
||||
if err := pool.AddRemote(tx); err != ErrUnderpriced {
|
||||
t.Error("expected", ErrUnderpriced, "got", err)
|
||||
if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) {
|
||||
t.Errorf("want %v have %v", want, err)
|
||||
}
|
||||
if err := pool.AddLocal(tx); err != nil {
|
||||
t.Error("expected", nil, "got", err)
|
||||
@ -1217,22 +1218,22 @@ func TestAllowedTxSize(t *testing.T) {
|
||||
// All those fields are summed up to at most 213 bytes.
|
||||
baseSize := uint64(213)
|
||||
dataSize := txMaxSize - baseSize
|
||||
|
||||
maxGas := pool.currentMaxGas.Load()
|
||||
// Try adding a transaction with maximal allowed size
|
||||
tx := pricedDataTransaction(0, pool.currentMaxGas, big.NewInt(1), key, dataSize)
|
||||
tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize)
|
||||
if err := pool.addRemoteSync(tx); err != nil {
|
||||
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
|
||||
}
|
||||
// Try adding a transaction with random allowed size
|
||||
if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
|
||||
if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
|
||||
t.Fatalf("failed to add transaction of random allowed size: %v", err)
|
||||
}
|
||||
// Try adding a transaction of minimal not allowed size
|
||||
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, txMaxSize)); err == nil {
|
||||
if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil {
|
||||
t.Fatalf("expected rejection on slightly oversize transaction")
|
||||
}
|
||||
// Try adding a transaction of random not allowed size
|
||||
if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
|
||||
if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
|
||||
t.Fatalf("expected rejection on oversize transaction")
|
||||
}
|
||||
// Run some sanity checks on the pool internals
|
||||
|
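Note: the hunk above switches TestAllowedTxSize to reading pool.currentMaxGas once via Load() and reusing the snapshot. A minimal sketch of that pattern with Go 1.19 typed atomics; the gasLimiter type and field are illustrative stand-ins, not the pool's real API.

package main

import (
	"fmt"
	"sync/atomic"
)

// gasLimiter is a hypothetical stand-in for a pool that publishes its
// current gas cap through a typed atomic instead of a plain uint64.
type gasLimiter struct {
	currentMaxGas atomic.Uint64
}

func main() {
	var pool gasLimiter
	pool.currentMaxGas.Store(30_000_000)

	// Load once and reuse the snapshot, as the updated test does, rather
	// than re-reading the atomic in every expression.
	maxGas := pool.currentMaxGas.Load()
	for nonce := uint64(0); nonce < 3; nonce++ {
		fmt.Printf("tx %d capped at %d gas\n", nonce, maxGas)
	}
}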
@ -17,6 +17,8 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
@ -39,7 +41,7 @@ type Prefetcher interface {
|
||||
// Prefetch processes the state changes according to the Ethereum rules by running
|
||||
// the transaction messages using the statedb, but any changes are discarded. The
|
||||
// only goal is to pre-cache transaction signatures and state trie nodes.
|
||||
Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32)
|
||||
Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool)
|
||||
}
|
||||
|
||||
// Processor is an interface for processing blocks using a given initial state.
|
||||
|
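Note: Prefetch now takes an *atomic.Bool interrupt instead of an *uint32. A small, self-contained sketch of how a caller and worker cooperate under the new signature; the prefetch function here is a toy, not the real prefetcher.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// prefetch is a toy worker shaped like the new Prefetch signature: it polls
// an *atomic.Bool to decide when to abandon speculative work.
func prefetch(interrupt *atomic.Bool) int {
	processed := 0
	for i := 0; i < 1_000_000; i++ {
		if interrupt.Load() {
			break // caller asked us to stop; discard remaining work
		}
		processed++
	}
	return processed
}

func main() {
	var interrupt atomic.Bool
	done := make(chan int)
	go func() { done <- prefetch(&interrupt) }()

	time.Sleep(time.Millisecond)
	interrupt.Store(true) // replaces atomic.StoreUint32(&flag, 1)
	fmt.Println("processed:", <-done)
}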
@ -85,6 +85,9 @@ type Header struct {
|
||||
// WithdrawalsHash was added by EIP-4895 and is ignored in legacy headers.
|
||||
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
|
||||
|
||||
// ExcessDataGas was added by EIP-4844 and is ignored in legacy headers.
|
||||
ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
|
||||
|
||||
/*
|
||||
TODO (MariusVanDerWijden) Add this field once needed
|
||||
// Random was added during the merge and contains the BeaconState randomness
|
||||
|
@ -232,9 +232,10 @@ func (h *testHasher) Reset() {
|
||||
h.hasher.Reset()
|
||||
}
|
||||
|
||||
func (h *testHasher) Update(key, val []byte) {
|
||||
func (h *testHasher) Update(key, val []byte) error {
|
||||
h.hasher.Write(key)
|
||||
h.hasher.Write(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *testHasher) Hash() common.Hash {
|
||||
|
@ -62,7 +62,7 @@ func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) {
|
||||
// This is internal, do not use.
|
||||
type TrieHasher interface {
|
||||
Reset()
|
||||
Update([]byte, []byte)
|
||||
Update([]byte, []byte) error
|
||||
Hash() common.Hash
|
||||
}
|
||||
|
||||
@ -83,7 +83,7 @@ func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
|
||||
return common.CopyBytes(buf.Bytes())
|
||||
}
|
||||
|
||||
// DeriveSha creates the tree hashes of transactions and receipts in a block header.
|
||||
// DeriveSha creates the tree hashes of transactions, receipts, and withdrawals in a block header.
|
||||
func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash {
|
||||
hasher.Reset()
|
||||
|
||||
@ -93,6 +93,9 @@ func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash {
|
||||
// StackTrie requires values to be inserted in increasing hash order, which is not the
|
||||
// order that `list` provides hashes in. This insertion sequence ensures that the
|
||||
// order is correct.
|
||||
//
|
||||
// The error returned by the hasher is ignored, because the hasher will produce
|
||||
// an incorrect hash if any error occurs.
|
||||
var indexBuf []byte
|
||||
for i := 1; i < list.Len() && i <= 0x7f; i++ {
|
||||
indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i))
|
||||
|
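Note: Update on TrieHasher now returns an error so fallible implementations can surface failures, while DeriveSha deliberately drops it (a failure already shows up as a wrong root). A self-contained sketch of an implementation against a local copy of the interface; concatHasher and the 32-byte hash alias are illustrative only.

package main

import (
	"crypto/sha256"
	"fmt"
)

type hash [32]byte

// trieHasher mirrors the updated TrieHasher interface: Update now returns
// an error instead of being assumed infallible.
type trieHasher interface {
	Reset()
	Update([]byte, []byte) error
	Hash() hash
}

// concatHasher is a toy implementation that hashes the concatenation of
// every key/value pair fed to it.
type concatHasher struct{ data []byte }

func (h *concatHasher) Reset() { h.data = h.data[:0] }

func (h *concatHasher) Update(key, val []byte) error {
	h.data = append(h.data, key...)
	h.data = append(h.data, val...)
	return nil // infallible here, but the signature now permits failure
}

func (h *concatHasher) Hash() hash { return sha256.Sum256(h.data) }

func main() {
	var th trieHasher = new(concatHasher)
	th.Reset()
	// Mirroring DeriveSha: the error may be dropped when any failure would
	// already surface as an incorrect root downstream.
	_ = th.Update([]byte{0x80}, []byte("value-0"))
	fmt.Printf("root: %x\n", th.Hash())
}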
@ -219,9 +219,10 @@ func (d *hashToHumanReadable) Reset() {
|
||||
d.data = make([]byte, 0)
|
||||
}
|
||||
|
||||
func (d *hashToHumanReadable) Update(i []byte, i2 []byte) {
|
||||
func (d *hashToHumanReadable) Update(i []byte, i2 []byte) error {
|
||||
l := fmt.Sprintf("%x %x\n", i, i2)
|
||||
d.data = append(d.data, []byte(l)...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *hashToHumanReadable) Hash() common.Hash {
|
||||
|
@ -63,7 +63,7 @@ func (bits *bitvec) codeSegment(pos uint64) bool {
|
||||
// codeBitmap collects data locations in code.
|
||||
func codeBitmap(code []byte) bitvec {
|
||||
// The bitmap is 4 bytes longer than necessary, in case the code
|
||||
// ends with a PUSH32, the algorithm will push zeroes onto the
|
||||
// ends with a PUSH32, the algorithm will set bits on the
|
||||
// bitvector outside the bounds of the actual code.
|
||||
bits := make(bitvec, len(code)/8+1+4)
|
||||
return codeBitmapInternal(code, bits)
|
||||
|
@ -114,8 +114,7 @@ type EVM struct {
|
||||
// used throughout the execution of the tx.
|
||||
interpreter *EVMInterpreter
|
||||
// abort is used to abort the EVM calling operations
|
||||
// NOTE: must be set atomically
|
||||
abort int32
|
||||
abort atomic.Bool
|
||||
// callGasTemp holds the gas available for the current call. This is needed because the
|
||||
// available gas is calculated in gasCall* according to the 63/64 rule and later
|
||||
// applied in opCall*.
|
||||
@ -147,12 +146,12 @@ func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) {
|
||||
// Cancel cancels any running EVM operation. This may be called concurrently and
|
||||
// it's safe to be called multiple times.
|
||||
func (evm *EVM) Cancel() {
|
||||
atomic.StoreInt32(&evm.abort, 1)
|
||||
evm.abort.Store(true)
|
||||
}
|
||||
|
||||
// Cancelled returns true if Cancel has been called
|
||||
func (evm *EVM) Cancelled() bool {
|
||||
return atomic.LoadInt32(&evm.abort) == 1
|
||||
return evm.abort.Load()
|
||||
}
|
||||
|
||||
// Interpreter returns the current interpreter
|
||||
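Note: the abort flag becomes an atomic.Bool, so Cancel/Cancelled lose their hand-rolled int32 encoding. A sketch of the cancellation pattern as a caller sees it; the engine type below stands in for the EVM and is not the real type.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// engine models the relevant slice of the EVM: one atomic.Bool abort flag
// that Cancel sets and the interpreter loop polls.
type engine struct{ abort atomic.Bool }

func (e *engine) Cancel()         { e.abort.Store(true) } // idempotent and concurrency-safe
func (e *engine) Cancelled() bool { return e.abort.Load() }

func (e *engine) run() int {
	steps := 0
	for !e.Cancelled() { // mirrors the abort checks in opJump/opJumpi
		steps++
		time.Sleep(10 * time.Microsecond)
	}
	return steps
}

func main() {
	e := new(engine)
	go func() {
		time.Sleep(time.Millisecond)
		e.Cancel() // e.g. an RPC timeout firing on another goroutine
	}()
	fmt.Println("executed steps:", e.run())
}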
@ -183,11 +182,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
||||
}
|
||||
snapshot := evm.StateDB.Snapshot()
|
||||
p, isPrecompile := evm.precompile(addr)
|
||||
debug := evm.Config.Tracer != nil
|
||||
|
||||
if !evm.StateDB.Exist(addr) {
|
||||
if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
|
||||
// Calling a non existing account, don't do anything, but ping the tracer
|
||||
if evm.Config.Debug {
|
||||
if debug {
|
||||
if evm.depth == 0 {
|
||||
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
|
||||
evm.Config.Tracer.CaptureEnd(ret, 0, nil)
|
||||
@ -203,7 +203,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
||||
evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value)
|
||||
|
||||
// Capture the tracer start/end events in debug mode
|
||||
if evm.Config.Debug {
|
||||
if debug {
|
||||
if evm.depth == 0 {
|
||||
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
|
||||
defer func(startGas uint64) { // Lazy evaluation of the parameters
|
||||
@ -273,7 +273,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
|
||||
var snapshot = evm.StateDB.Snapshot()
|
||||
|
||||
// Invoke tracer hooks that signal entering/exiting a call frame
|
||||
if evm.Config.Debug {
|
||||
if evm.Config.Tracer != nil {
|
||||
evm.Config.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value)
|
||||
defer func(startGas uint64) {
|
||||
evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
|
||||
@ -314,7 +314,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
|
||||
var snapshot = evm.StateDB.Snapshot()
|
||||
|
||||
// Invoke tracer hooks that signal entering/exiting a call frame
|
||||
if evm.Config.Debug {
|
||||
if evm.Config.Tracer != nil {
|
||||
// NOTE: caller must, at all times, be a contract. It should never happen
|
||||
// that caller is something other than a Contract.
|
||||
parent := caller.(*Contract)
|
||||
@ -368,7 +368,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
|
||||
evm.StateDB.AddBalance(addr, big0)
|
||||
|
||||
// Invoke tracer hooks that signal entering/exiting a call frame
|
||||
if evm.Config.Debug {
|
||||
if evm.Config.Tracer != nil {
|
||||
evm.Config.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil)
|
||||
defer func(startGas uint64) {
|
||||
evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
|
||||
@ -451,7 +451,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
|
||||
contract := NewContract(caller, AccountRef(address), value, gas)
|
||||
contract.SetCodeOptionalHash(&address, codeAndHash)
|
||||
|
||||
if evm.Config.Debug {
|
||||
if evm.Config.Tracer != nil {
|
||||
if evm.depth == 0 {
|
||||
evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
|
||||
} else {
|
||||
@ -494,7 +494,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
|
||||
}
|
||||
}
|
||||
|
||||
if evm.Config.Debug {
|
||||
if evm.Config.Tracer != nil {
|
||||
if evm.depth == 0 {
|
||||
evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err)
|
||||
} else {
|
||||
|
@ -17,8 +17,6 @@
|
||||
package vm
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -531,7 +529,7 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
|
||||
}
|
||||
|
||||
func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
if atomic.LoadInt32(&interpreter.evm.abort) != 0 {
|
||||
if interpreter.evm.abort.Load() {
|
||||
return nil, errStopToken
|
||||
}
|
||||
pos := scope.Stack.pop()
|
||||
@ -543,7 +541,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
|
||||
}
|
||||
|
||||
func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
if atomic.LoadInt32(&interpreter.evm.abort) != 0 {
|
||||
if interpreter.evm.abort.Load() {
|
||||
return nil, errStopToken
|
||||
}
|
||||
pos, cond := scope.Stack.pop(), scope.Stack.pop()
|
||||
@ -824,9 +822,9 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
|
||||
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
|
||||
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
|
||||
interpreter.evm.StateDB.Suicide(scope.Contract.Address())
|
||||
if interpreter.evm.Config.Debug {
|
||||
interpreter.evm.Config.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
|
||||
interpreter.evm.Config.Tracer.CaptureExit([]byte{}, 0, nil)
|
||||
if tracer := interpreter.evm.Config.Tracer; tracer != nil {
|
||||
tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
|
||||
tracer.CaptureExit([]byte{}, 0, nil)
|
||||
}
|
||||
return nil, errStopToken
|
||||
}
|
||||
|
@ -25,7 +25,6 @@ import (
|
||||
|
||||
// Config are the configuration options for the Interpreter
|
||||
type Config struct {
|
||||
Debug bool // Enables debugging
|
||||
Tracer EVMLogger // Opcode logger
|
||||
NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
|
||||
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
|
||||
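Note: with the Debug flag gone from vm.Config, a non-nil Tracer is the sole signal that tracing is wanted, which removes the Debug:true/Tracer:nil foot-gun. A minimal sketch of the convention using stand-in types; evmLogger and config are not the real vm types.

package main

import "fmt"

// evmLogger and config are stand-ins for vm.EVMLogger and vm.Config; the
// point is the convention, not the real types.
type evmLogger interface{ CaptureState(pc uint64) }

type config struct {
	Tracer evmLogger // nil means "no tracing"; there is no separate Debug flag
}

type printLogger struct{}

func (printLogger) CaptureState(pc uint64) { fmt.Println("pc =", pc) }

func step(cfg config, pc uint64) {
	// Before: if cfg.Debug { ... }. After: tracer presence is the signal.
	if cfg.Tracer != nil {
		cfg.Tracer.CaptureState(pc)
	}
}

func main() {
	step(config{}, 1)                      // silent: no tracer attached
	step(config{Tracer: printLogger{}}, 2) // traced
}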
@ -143,6 +142,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
||||
gasCopy uint64 // for EVMLogger to log gas remaining before execution
|
||||
logged bool // deferred EVMLogger should ignore already logged steps
|
||||
res []byte // result of the opcode execution function
|
||||
debug = in.evm.Config.Tracer != nil
|
||||
)
|
||||
// Don't move this deferred function, it's placed before the capturestate-deferred method,
|
||||
// so that it gets executed _after_: the capturestate needs the stacks before
|
||||
@ -152,7 +152,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
||||
}()
|
||||
contract.Input = input
|
||||
|
||||
if in.evm.Config.Debug {
|
||||
if debug {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if !logged {
|
||||
@ -168,7 +168,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
||||
// the execution of one of the operations or until the done flag is set by the
|
||||
// parent context.
|
||||
for {
|
||||
if in.evm.Config.Debug {
|
||||
if debug {
|
||||
// Capture pre-execution values for tracing.
|
||||
logged, pcCopy, gasCopy = false, pc, contract.Gas
|
||||
}
|
||||
@ -213,14 +213,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
||||
return nil, ErrOutOfGas
|
||||
}
|
||||
// Do tracing before memory expansion
|
||||
if in.evm.Config.Debug {
|
||||
if debug {
|
||||
in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
|
||||
logged = true
|
||||
}
|
||||
if memorySize > 0 {
|
||||
mem.Resize(memorySize)
|
||||
}
|
||||
} else if in.evm.Config.Debug {
|
||||
} else if debug {
|
||||
in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
|
||||
logged = true
|
||||
}
|
||||
|
74
core/vm/jump_table_export.go
Normal file
@ -0,0 +1,74 @@
|
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package vm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// LookupInstructionSet returns the instructionset for the fork configured by
|
||||
// the rules.
|
||||
func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
|
||||
switch {
|
||||
case rules.IsPrague:
|
||||
return newShanghaiInstructionSet(), errors.New("prague-fork not defined yet")
|
||||
case rules.IsCancun:
|
||||
return newShanghaiInstructionSet(), errors.New("cancun-fork not defined yet")
|
||||
case rules.IsShanghai:
|
||||
return newShanghaiInstructionSet(), nil
|
||||
case rules.IsMerge:
|
||||
return newMergeInstructionSet(), nil
|
||||
case rules.IsLondon:
|
||||
return newLondonInstructionSet(), nil
|
||||
case rules.IsBerlin:
|
||||
return newBerlinInstructionSet(), nil
|
||||
case rules.IsIstanbul:
|
||||
return newIstanbulInstructionSet(), nil
|
||||
case rules.IsConstantinople:
|
||||
return newConstantinopleInstructionSet(), nil
|
||||
case rules.IsByzantium:
|
||||
return newByzantiumInstructionSet(), nil
|
||||
case rules.IsEIP158:
|
||||
return newSpuriousDragonInstructionSet(), nil
|
||||
case rules.IsEIP150:
|
||||
return newTangerineWhistleInstructionSet(), nil
|
||||
case rules.IsHomestead:
|
||||
return newHomesteadInstructionSet(), nil
|
||||
}
|
||||
return newFrontierInstructionSet(), nil
|
||||
}
|
||||
|
||||
// Stack returns the minimum and maximum stack requirements.
|
||||
func (op *operation) Stack() (int, int) {
|
||||
return op.minStack, op.maxStack
|
||||
}
|
||||
|
||||
// HasCost returns true if the opcode has a cost. Opcodes which do _not_ have
|
||||
// a cost assigned are one of two things:
|
||||
// - undefined, a.k.a invalid opcodes,
|
||||
// - the STOP opcode.
|
||||
// This method can thus be used to check if an opcode is "Invalid (or STOP)".
|
||||
func (op *operation) HasCost() bool {
|
||||
// Ideally, we'd check this:
|
||||
// return op.execute == opUndefined
|
||||
// However, Go does not allow that. So we'll just check some other
|
||||
// 'indicators' that this is an invalid op. Alas, STOP is impossible to
|
||||
// filter out
|
||||
return op.dynamicGas != nil || op.constantGas != 0
|
||||
}
|
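Note: assuming this tree is importable as github.com/ethereum/go-ethereum, callers can look up a fork's jump table and inspect opcodes through the new exported helpers. Mind the partial-failure contract: undefined forks (Cancun and Prague at this point) return a usable Shanghai table together with a non-nil error.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Shanghai is fully specified: expect a usable table and a nil error.
	jt, err := vm.LookupInstructionSet(params.Rules{IsShanghai: true})
	fmt.Println("shanghai err:", err)

	// Count opcodes with an assigned cost via the exported helper. HasCost
	// is false for both undefined opcodes and STOP, so STOP is excluded.
	defined := 0
	for op := 0; op < 256; op++ {
		if jt[op] != nil && jt[op].HasCost() {
			defined++
		}
	}
	fmt.Println("opcodes with a cost:", defined)

	// Cancun is not defined yet: a Shanghai table comes back alongside a
	// non-nil error, so callers choose how hard to fail.
	_, err = vm.LookupInstructionSet(params.Rules{IsCancun: true})
	fmt.Println("cancun err:", err)
}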
@ -204,6 +204,12 @@ const (
|
||||
LOG4
|
||||
)
|
||||
|
||||
// 0xb0 range.
|
||||
const (
|
||||
TLOAD OpCode = 0xb3
|
||||
TSTORE OpCode = 0xb4
|
||||
)
|
||||
|
||||
// 0xf0 range - closures.
|
||||
const (
|
||||
CREATE OpCode = 0xf0
|
||||
@ -219,12 +225,6 @@ const (
|
||||
SELFDESTRUCT OpCode = 0xff
|
||||
)
|
||||
|
||||
// 0xb0 range.
|
||||
const (
|
||||
TLOAD OpCode = 0xb3
|
||||
TSTORE OpCode = 0xb4
|
||||
)
|
||||
|
||||
// Since the opcodes aren't all in order we can't use a regular slice.
|
||||
var opCodeToString = map[OpCode]string{
|
||||
// 0x0 range - arithmetic ops.
|
||||
@ -291,8 +291,6 @@ var opCodeToString = map[OpCode]string{
|
||||
|
||||
// 0x50 range - 'storage' and execution.
|
||||
POP: "POP",
|
||||
//DUP: "DUP",
|
||||
//SWAP: "SWAP",
|
||||
MLOAD: "MLOAD",
|
||||
MSTORE: "MSTORE",
|
||||
MSTORE8: "MSTORE8",
|
||||
@ -306,7 +304,7 @@ var opCodeToString = map[OpCode]string{
|
||||
JUMPDEST: "JUMPDEST",
|
||||
PUSH0: "PUSH0",
|
||||
|
||||
// 0x60 range - push.
|
||||
// 0x60 range - pushes.
|
||||
PUSH1: "PUSH1",
|
||||
PUSH2: "PUSH2",
|
||||
PUSH3: "PUSH3",
|
||||
@ -340,6 +338,7 @@ var opCodeToString = map[OpCode]string{
|
||||
PUSH31: "PUSH31",
|
||||
PUSH32: "PUSH32",
|
||||
|
||||
// 0x80 - dups.
|
||||
DUP1: "DUP1",
|
||||
DUP2: "DUP2",
|
||||
DUP3: "DUP3",
|
||||
@ -357,6 +356,7 @@ var opCodeToString = map[OpCode]string{
|
||||
DUP15: "DUP15",
|
||||
DUP16: "DUP16",
|
||||
|
||||
// 0x90 - swaps.
|
||||
SWAP1: "SWAP1",
|
||||
SWAP2: "SWAP2",
|
||||
SWAP3: "SWAP3",
|
||||
@ -373,6 +373,8 @@ var opCodeToString = map[OpCode]string{
|
||||
SWAP14: "SWAP14",
|
||||
SWAP15: "SWAP15",
|
||||
SWAP16: "SWAP16",
|
||||
|
||||
// 0xa0 range - logging ops.
|
||||
LOG0: "LOG0",
|
||||
LOG1: "LOG1",
|
||||
LOG2: "LOG2",
|
||||
@ -383,7 +385,7 @@ var opCodeToString = map[OpCode]string{
|
||||
TLOAD: "TLOAD",
|
||||
TSTORE: "TSTORE",
|
||||
|
||||
// 0xf0 range.
|
||||
// 0xf0 range - closures.
|
||||
CREATE: "CREATE",
|
||||
CALL: "CALL",
|
||||
RETURN: "RETURN",
|
||||
@ -473,8 +475,6 @@ var stringToOp = map[string]OpCode{
|
||||
"GAS": GAS,
|
||||
"JUMPDEST": JUMPDEST,
|
||||
"PUSH0": PUSH0,
|
||||
"TLOAD": TLOAD,
|
||||
"TSTORE": TSTORE,
|
||||
"PUSH1": PUSH1,
|
||||
"PUSH2": PUSH2,
|
||||
"PUSH3": PUSH3,
|
||||
@ -544,6 +544,8 @@ var stringToOp = map[string]OpCode{
|
||||
"LOG2": LOG2,
|
||||
"LOG3": LOG3,
|
||||
"LOG4": LOG4,
|
||||
"TLOAD": TLOAD,
|
||||
"TSTORE": TSTORE,
|
||||
"CREATE": CREATE,
|
||||
"CREATE2": CREATE2,
|
||||
"CALL": CALL,
|
||||
|
@ -57,7 +57,6 @@ func setDefaults(cfg *Config) {
|
||||
DAOForkBlock: new(big.Int),
|
||||
DAOForkSupport: false,
|
||||
EIP150Block: new(big.Int),
|
||||
EIP150Hash: common.Hash{},
|
||||
EIP155Block: new(big.Int),
|
||||
EIP158Block: new(big.Int),
|
||||
ByzantiumBlock: new(big.Int),
|
||||
|
@ -335,7 +335,6 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
|
||||
b.Fatal(err)
|
||||
}
|
||||
cfg.EVMConfig = vm.Config{
|
||||
Debug: true,
|
||||
Tracer: tracer,
|
||||
}
|
||||
}
|
||||
@ -511,7 +510,6 @@ func TestEip2929Cases(t *testing.T) {
|
||||
code, ops)
|
||||
Execute(code, nil, &Config{
|
||||
EVMConfig: vm.Config{
|
||||
Debug: true,
|
||||
Tracer: logger.NewMarkdownLogger(nil, os.Stdout),
|
||||
ExtraEips: []int{2929},
|
||||
},
|
||||
@ -665,7 +663,6 @@ func TestColdAccountAccessCost(t *testing.T) {
|
||||
tracer := logger.NewStructLogger(nil)
|
||||
Execute(tc.code, nil, &Config{
|
||||
EVMConfig: vm.Config{
|
||||
Debug: true,
|
||||
Tracer: tracer,
|
||||
},
|
||||
})
|
||||
@ -837,7 +834,6 @@ func TestRuntimeJSTracer(t *testing.T) {
|
||||
GasLimit: 1000000,
|
||||
State: statedb,
|
||||
EVMConfig: vm.Config{
|
||||
Debug: true,
|
||||
Tracer: tracer,
|
||||
}})
|
||||
if err != nil {
|
||||
@ -872,7 +868,6 @@ func TestJSTracerCreateTx(t *testing.T) {
|
||||
_, _, _, err = Create(code, &Config{
|
||||
State: statedb,
|
||||
EVMConfig: vm.Config{
|
||||
Debug: true,
|
||||
Tracer: tracer,
|
||||
}})
|
||||
if err != nil {
|
||||
|
@ -327,7 +327,7 @@ func (b *EthAPIBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error)
|
||||
return b.gpo.SuggestTipCap(ctx)
|
||||
}
|
||||
|
||||
func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
|
||||
func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
|
||||
return b.gpo.FeeHistory(ctx, blockCount, lastBlock, rewardPercentiles)
|
||||
}
|
||||
|
||||
|
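Note: blockCount is now uint64 through the whole FeeHistory call chain, so a negative count is unrepresentable rather than merely rejected at runtime. A caller-side sketch via ethclient; the endpoint and counts are illustrative and assume a reachable JSON-RPC node.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Illustrative endpoint; point this at a node you actually run.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// blockCount is uint64 end to end after this change.
	hist, err := client.FeeHistory(context.Background(), 4, nil, []float64{25, 75})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("oldest block:", hist.OldestBlock, "gas ratios:", hist.GasUsedRatio)
}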
@ -20,7 +20,6 @@ package catalyst
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -64,11 +63,6 @@ const (
|
||||
// attached before starting to issue warnings.
|
||||
beaconUpdateStartupTimeout = 30 * time.Second
|
||||
|
||||
// beaconUpdateExchangeTimeout is the max time allowed for a beacon client to
|
||||
// do a transition config exchange before it's considered offline and the user
|
||||
// is warned.
|
||||
beaconUpdateExchangeTimeout = 2 * time.Minute
|
||||
|
||||
// beaconUpdateConsensusTimeout is the max time allowed for a beacon client
|
||||
// to send a consensus update before it's considered offline and the user is
|
||||
// warned.
|
||||
@ -667,14 +661,13 @@ func (api *ConsensusAPI) heartbeat() {
|
||||
// attached, so no need to print scary warnings to the user.
|
||||
time.Sleep(beaconUpdateStartupTimeout)
|
||||
|
||||
var (
|
||||
offlineLogged time.Time
|
||||
ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
|
||||
)
|
||||
// If the network is not yet merged/merging, don't bother continuing.
|
||||
if ttd == nil {
|
||||
if api.eth.BlockChain().Config().TerminalTotalDifficulty == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var offlineLogged time.Time
|
||||
|
||||
for {
|
||||
// Sleep a bit and retrieve the last known consensus updates
|
||||
time.Sleep(5 * time.Second)
|
||||
@ -698,20 +691,14 @@ func (api *ConsensusAPI) heartbeat() {
|
||||
offlineLogged = time.Time{}
|
||||
continue
|
||||
}
|
||||
if time.Since(lastTransitionUpdate) > beaconUpdateExchangeTimeout {
|
||||
|
||||
if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
|
||||
if lastForkchoiceUpdate.IsZero() && lastNewPayloadUpdate.IsZero() {
|
||||
if lastTransitionUpdate.IsZero() {
|
||||
log.Warn("Post-merge network, but no beacon client seen. Please launch one to follow the chain!")
|
||||
} else {
|
||||
log.Warn("Previously seen beacon client is offline. Please ensure it is operational to follow the chain!")
|
||||
}
|
||||
offlineLogged = time.Now()
|
||||
}
|
||||
continue
|
||||
}
|
||||
if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
|
||||
if lastForkchoiceUpdate.IsZero() && lastNewPayloadUpdate.IsZero() {
|
||||
log.Warn("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!")
|
||||
}
|
||||
} else {
|
||||
log.Warn("Beacon client online, but no consensus updates received in a while. Please fix your beacon client to follow the chain!")
|
||||
}
|
||||
@ -719,62 +706,6 @@ func (api *ConsensusAPI) heartbeat() {
|
||||
}
|
||||
continue
|
||||
}
|
||||
if time.Since(lastTransitionUpdate) <= beaconUpdateExchangeTimeout {
|
||||
offlineLogged = time.Time{}
|
||||
continue
|
||||
}
|
||||
if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
|
||||
// Retrieve the last few blocks and make a rough estimate as
|
||||
// to when the merge transition should happen
|
||||
var (
|
||||
chain = api.eth.BlockChain()
|
||||
head = chain.CurrentHeader()
|
||||
htd = chain.GetTd(head.Hash(), head.Number.Uint64())
|
||||
)
|
||||
if htd.Cmp(ttd) >= 0 {
|
||||
if lastTransitionUpdate.IsZero() {
|
||||
log.Warn("Merge already reached, but no beacon client seen. Please launch one to follow the chain!")
|
||||
} else {
|
||||
log.Warn("Merge already reached, but previously seen beacon client is offline. Please ensure it is operational to follow the chain!")
|
||||
}
|
||||
offlineLogged = time.Now()
|
||||
continue
|
||||
}
|
||||
var eta time.Duration
|
||||
if head.Number.Uint64() > 0 {
|
||||
// Accumulate the last 64 difficulties to estimate the growth
|
||||
var (
|
||||
deltaDiff uint64
|
||||
deltaTime uint64
|
||||
current = head
|
||||
)
|
||||
for i := 0; i < 64; i++ {
|
||||
parent := chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
|
||||
if parent == nil {
|
||||
break
|
||||
}
|
||||
deltaDiff += current.Difficulty.Uint64()
|
||||
deltaTime += current.Time - parent.Time
|
||||
current = parent
|
||||
}
|
||||
// Estimate an ETA based on the block times and the difficulty growth
|
||||
if deltaTime > 0 {
|
||||
growth := deltaDiff / deltaTime
|
||||
left := new(big.Int).Sub(ttd, htd)
|
||||
eta = time.Duration(new(big.Int).Div(left, new(big.Int).SetUint64(growth+1)).Uint64()) * time.Second
|
||||
}
|
||||
}
|
||||
message := "Merge is configured, but previously seen beacon client is offline. Please ensure it is operational before the transition arrives!"
|
||||
if lastTransitionUpdate.IsZero() {
|
||||
message = "Merge is configured, but no beacon client seen. Please ensure you have one available before the transition arrives!"
|
||||
}
|
||||
if eta < time.Second {
|
||||
log.Warn(message)
|
||||
} else {
|
||||
log.Warn(message, "eta", common.PrettyAge(time.Now().Add(-eta))) // weird hack, but duration formatting doesn't handle days
|
||||
}
|
||||
offlineLogged = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -879,15 +879,10 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
|
||||
genesis, preMergeBlocks := generateMergeChain(100, false)
|
||||
n, ethservice := startEthService(t, genesis, preMergeBlocks)
|
||||
defer n.Close()
|
||||
|
||||
ethservice.BlockChain().Config().TerminalTotalDifficulty = preMergeBlocks[0].Difficulty() //.Sub(genesis.Config.TerminalTotalDifficulty, preMergeBlocks[len(preMergeBlocks)-1].Difficulty())
|
||||
|
||||
var (
|
||||
api = NewConsensusAPI(ethservice)
|
||||
parent = preMergeBlocks[len(preMergeBlocks)-1]
|
||||
)
|
||||
api := NewConsensusAPI(ethservice)
|
||||
|
||||
// Test parent already post TTD in FCU
|
||||
parent := preMergeBlocks[len(preMergeBlocks)-2]
|
||||
fcState := engine.ForkchoiceStateV1{
|
||||
HeadBlockHash: parent.Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
@ -913,6 +908,28 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
|
||||
t.Fatalf("error preparing payload, err=%v", err)
|
||||
}
|
||||
data := *payload.Resolve().ExecutionPayload
|
||||
// We need to recompute the blockhash, since the miner computes a (correct) blockhash that is wrong for this test
|
||||
txs, _ := decodeTransactions(data.Transactions)
|
||||
header := &types.Header{
|
||||
ParentHash: data.ParentHash,
|
||||
UncleHash: types.EmptyUncleHash,
|
||||
Coinbase: data.FeeRecipient,
|
||||
Root: data.StateRoot,
|
||||
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
|
||||
ReceiptHash: data.ReceiptsRoot,
|
||||
Bloom: types.BytesToBloom(data.LogsBloom),
|
||||
Difficulty: common.Big0,
|
||||
Number: new(big.Int).SetUint64(data.Number),
|
||||
GasLimit: data.GasLimit,
|
||||
GasUsed: data.GasUsed,
|
||||
Time: data.Timestamp,
|
||||
BaseFee: data.BaseFeePerGas,
|
||||
Extra: data.ExtraData,
|
||||
MixDigest: data.Random,
|
||||
}
|
||||
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
|
||||
data.BlockHash = block.Hash()
|
||||
// Send the new payload
|
||||
resp2, err := api.NewPayloadV1(data)
|
||||
if err != nil {
|
||||
t.Fatalf("error sending NewPayload, err=%v", err)
|
||||
@ -1240,9 +1257,10 @@ func TestNilWithdrawals(t *testing.T) {
|
||||
|
||||
func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
|
||||
genesis, blocks := generateMergeChain(10, true)
|
||||
n, ethservice := startEthService(t, genesis, blocks)
|
||||
// enable shanghai on the last block
|
||||
ethservice.BlockChain().Config().ShanghaiTime = &blocks[len(blocks)-1].Header().Time
|
||||
time := blocks[len(blocks)-1].Header().Time + 1
|
||||
genesis.Config.ShanghaiTime = &time
|
||||
n, ethservice := startEthService(t, genesis, blocks)
|
||||
|
||||
var (
|
||||
parent = ethservice.BlockChain().CurrentBlock()
|
||||
|
@ -19,7 +19,6 @@ package downloader
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -371,7 +370,7 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
|
||||
continue
|
||||
}
|
||||
// If the pivot block is committed, signal header sync termination
|
||||
if atomic.LoadInt32(&d.committed) == 1 {
|
||||
if d.committed.Load() {
|
||||
select {
|
||||
case d.headerProcCh <- nil:
|
||||
return nil
|
||||
|
@ -98,7 +98,7 @@ type headerTask struct {
|
||||
}
|
||||
|
||||
type Downloader struct {
|
||||
mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
|
||||
mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
|
||||
mux *event.TypeMux // Event multiplexer to announce sync operation events
|
||||
|
||||
checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync)
|
||||
@ -122,9 +122,9 @@ type Downloader struct {
|
||||
|
||||
// Status
|
||||
synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
|
||||
synchronising int32
|
||||
notified int32
|
||||
committed int32
|
||||
synchronising atomic.Bool
|
||||
notified atomic.Bool
|
||||
committed atomic.Bool
|
||||
ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
|
||||
|
||||
// Channels
|
||||
@ -292,7 +292,7 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
|
||||
|
||||
// Synchronising returns whether the downloader is currently retrieving blocks.
|
||||
func (d *Downloader) Synchronising() bool {
|
||||
return atomic.LoadInt32(&d.synchronising) > 0
|
||||
return d.synchronising.Load()
|
||||
}
|
||||
|
||||
// RegisterPeer injects a new download peer into the set of block source to be
|
||||
@ -392,13 +392,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
|
||||
return d.synchroniseMock(id, hash)
|
||||
}
|
||||
// Make sure only one goroutine is ever allowed past this point at once
|
||||
if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
|
||||
if !d.synchronising.CompareAndSwap(false, true) {
|
||||
return errBusy
|
||||
}
|
||||
defer atomic.StoreInt32(&d.synchronising, 0)
|
||||
defer d.synchronising.Store(false)
|
||||
|
||||
// Post a user notification of the sync (only once per session)
|
||||
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
|
||||
if d.notified.CompareAndSwap(false, true) {
|
||||
log.Info("Block synchronisation started")
|
||||
}
|
||||
if mode == SnapSync {
|
||||
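Note: the synchronising gate keeps its compare-and-swap semantics, just spelled with atomic.Bool. A self-contained sketch of the single-flight guard; syncer and errBusy are local stand-ins for the downloader's fields.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var errBusy = errors.New("busy")

// syncer guards its main loop with an atomic.Bool, the same shape as the
// downloader's synchronising field after the migration.
type syncer struct{ synchronising atomic.Bool }

func (s *syncer) synchronise() error {
	// Only one goroutine may pass; the rest bounce off with errBusy.
	if !s.synchronising.CompareAndSwap(false, true) {
		return errBusy
	}
	defer s.synchronising.Store(false)
	time.Sleep(time.Millisecond) // stand-in for the real sync work
	return nil
}

func main() {
	var (
		s    syncer
		wg   sync.WaitGroup
		busy atomic.Int32
	)
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if errors.Is(s.synchronise(), errBusy) {
				busy.Add(1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("rejected as busy:", busy.Load()) // typically 7 of 8
}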
@ -435,7 +435,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
|
||||
defer d.Cancel() // No matter what, we can't leave the cancel channel open
|
||||
|
||||
// Atomically set the requested sync mode
|
||||
atomic.StoreUint32(&d.mode, uint32(mode))
|
||||
d.mode.Store(uint32(mode))
|
||||
|
||||
// Retrieve the origin peer and initiate the downloading process
|
||||
var p *peerConnection
|
||||
@ -452,7 +452,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
|
||||
}
|
||||
|
||||
func (d *Downloader) getMode() SyncMode {
|
||||
return SyncMode(atomic.LoadUint32(&d.mode))
|
||||
return SyncMode(d.mode.Load())
|
||||
}
|
||||
|
||||
// syncWithPeer starts a block synchronization based on the hash chain from the
|
||||
@ -562,9 +562,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
|
||||
rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
|
||||
}
|
||||
}
|
||||
d.committed = 1
|
||||
d.committed.Store(true)
|
||||
if mode == SnapSync && pivot.Number.Uint64() != 0 {
|
||||
d.committed = 0
|
||||
d.committed.Store(false)
|
||||
}
|
||||
if mode == SnapSync {
|
||||
// Set the ancient data limitation. If we are running snap sync, all block
|
||||
@ -1128,7 +1128,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
|
||||
// If no more headers are inbound, notify the content fetchers and return
|
||||
if len(headers) == 0 {
|
||||
// Don't abort header fetches while the pivot is downloading
|
||||
if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
|
||||
if !d.committed.Load() && pivot <= from {
|
||||
p.log.Debug("No headers, waiting for pivot commit")
|
||||
select {
|
||||
case <-time.After(fsHeaderContCheck):
|
||||
@ -1669,7 +1669,7 @@ func (d *Downloader) processSnapSyncContent() error {
|
||||
results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
|
||||
}
|
||||
// Split around the pivot block and process the two sides via snap/full sync
|
||||
if atomic.LoadInt32(&d.committed) == 0 {
|
||||
if !d.committed.Load() {
|
||||
latest := results[len(results)-1].Header
|
||||
// If the height is above the pivot block by 2 sets, it means the pivot
|
||||
// became stale in the network and was garbage collected, move to a
|
||||
@ -1794,7 +1794,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error {
|
||||
if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
|
||||
return err
|
||||
}
|
||||
atomic.StoreInt32(&d.committed, 1)
|
||||
d.committed.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -476,9 +476,10 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
|
||||
tester.newPeer("peer", protocol, testChainBase.blocks[1:])
|
||||
|
||||
// Wrap the importer to allow stepping
|
||||
blocked, proceed := uint32(0), make(chan struct{})
|
||||
var blocked atomic.Uint32
|
||||
proceed := make(chan struct{})
|
||||
tester.downloader.chainInsertHook = func(results []*fetchResult) {
|
||||
atomic.StoreUint32(&blocked, uint32(len(results)))
|
||||
blocked.Store(uint32(len(results)))
|
||||
<-proceed
|
||||
}
|
||||
// Start a synchronisation concurrently
|
||||
@ -505,7 +506,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
|
||||
tester.downloader.queue.resultCache.lock.Lock()
|
||||
{
|
||||
cached = tester.downloader.queue.resultCache.countCompleted()
|
||||
frozen = int(atomic.LoadUint32(&blocked))
|
||||
frozen = int(blocked.Load())
|
||||
retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
|
||||
}
|
||||
tester.downloader.queue.resultCache.lock.Unlock()
|
||||
@ -528,8 +529,8 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
|
||||
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
|
||||
}
|
||||
// Permit the blocked blocks to import
|
||||
if atomic.LoadUint32(&blocked) > 0 {
|
||||
atomic.StoreUint32(&blocked, uint32(0))
|
||||
if blocked.Load() > 0 {
|
||||
blocked.Store(uint32(0))
|
||||
proceed <- struct{}{}
|
||||
}
|
||||
}
|
||||
@ -786,12 +787,12 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
|
||||
tester.newPeer("peer", protocol, chain.blocks[1:])
|
||||
|
||||
// Instrument the downloader to signal body requests
|
||||
bodiesHave, receiptsHave := int32(0), int32(0)
|
||||
var bodiesHave, receiptsHave atomic.Int32
|
||||
tester.downloader.bodyFetchHook = func(headers []*types.Header) {
|
||||
atomic.AddInt32(&bodiesHave, int32(len(headers)))
|
||||
bodiesHave.Add(int32(len(headers)))
|
||||
}
|
||||
tester.downloader.receiptFetchHook = func(headers []*types.Header) {
|
||||
atomic.AddInt32(&receiptsHave, int32(len(headers)))
|
||||
receiptsHave.Add(int32(len(headers)))
|
||||
}
|
||||
// Synchronise with the peer and make sure all blocks were retrieved
|
||||
if err := tester.sync("peer", nil, mode); err != nil {
|
||||
@ -811,11 +812,11 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
|
||||
receiptsNeeded++
|
||||
}
|
||||
}
|
||||
if int(bodiesHave) != bodiesNeeded {
|
||||
t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
|
||||
if int(bodiesHave.Load()) != bodiesNeeded {
|
||||
t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
|
||||
}
|
||||
if int(receiptsHave) != receiptsNeeded {
|
||||
t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
|
||||
if int(receiptsHave.Load()) != receiptsNeeded {
|
||||
t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -61,7 +61,7 @@ type fetchRequest struct {
|
||||
// fetchResult is a struct collecting partial results from data fetchers until
|
||||
// all outstanding pieces complete and the result as a whole can be processed.
|
||||
type fetchResult struct {
|
||||
pending int32 // Flag telling what deliveries are outstanding
|
||||
pending atomic.Int32 // Flag telling what deliveries are outstanding
|
||||
|
||||
Header *types.Header
|
||||
Uncles []*types.Header
|
||||
@ -75,38 +75,38 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
|
||||
Header: header,
|
||||
}
|
||||
if !header.EmptyBody() {
|
||||
item.pending |= (1 << bodyType)
|
||||
item.pending.Store(item.pending.Load() | (1 << bodyType))
|
||||
} else if header.WithdrawalsHash != nil {
|
||||
item.Withdrawals = make(types.Withdrawals, 0)
|
||||
}
|
||||
if fastSync && !header.EmptyReceipts() {
|
||||
item.pending |= (1 << receiptType)
|
||||
item.pending.Store(item.pending.Load() | (1 << receiptType))
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
// SetBodyDone flags the body as finished.
|
||||
func (f *fetchResult) SetBodyDone() {
|
||||
if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
|
||||
atomic.AddInt32(&f.pending, -1)
|
||||
if v := f.pending.Load(); (v & (1 << bodyType)) != 0 {
|
||||
f.pending.Add(-1)
|
||||
}
|
||||
}
|
||||
|
||||
// AllDone checks if item is done.
|
||||
func (f *fetchResult) AllDone() bool {
|
||||
return atomic.LoadInt32(&f.pending) == 0
|
||||
return f.pending.Load() == 0
|
||||
}
|
||||
|
||||
// SetReceiptsDone flags the receipts as finished.
|
||||
func (f *fetchResult) SetReceiptsDone() {
|
||||
if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
|
||||
atomic.AddInt32(&f.pending, -2)
|
||||
if v := f.pending.Load(); (v & (1 << receiptType)) != 0 {
|
||||
f.pending.Add(-2)
|
||||
}
|
||||
}
|
||||
|
||||
// Done checks if the given type is done already
|
||||
func (f *fetchResult) Done(kind uint) bool {
|
||||
v := atomic.LoadInt32(&f.pending)
|
||||
v := f.pending.Load()
|
||||
return v&(1<<kind) == 0
|
||||
}
|
||||
|
||||
|
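Note: fetchResult tracks outstanding deliveries as bits in an atomic.Int32: bit 0 (worth 1) for bodies, bit 1 (worth 2) for receipts, which is why SetBodyDone adds -1 and SetReceiptsDone adds -2. A self-contained sketch of the same flag scheme; as upstream, the load-then-add pair relies on each bit having a single clearing writer.

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	bodyType    = 0 // bit 0 (value 1): body delivery outstanding
	receiptType = 1 // bit 1 (value 2): receipt delivery outstanding
)

// result mirrors fetchResult's migrated field: the outstanding-delivery
// bitmask lives in an atomic.Int32.
type result struct{ pending atomic.Int32 }

func (r *result) setBodyDone() {
	if v := r.pending.Load(); v&(1<<bodyType) != 0 {
		r.pending.Add(-1) // clears bit 0
	}
}

func (r *result) setReceiptsDone() {
	if v := r.pending.Load(); v&(1<<receiptType) != 0 {
		r.pending.Add(-2) // clears bit 1
	}
}

func (r *result) allDone() bool { return r.pending.Load() == 0 }

func main() {
	var r result
	r.pending.Store(1<<bodyType | 1<<receiptType)

	r.setBodyDone()
	fmt.Println("all done after body:", r.allDone()) // false: receipts pending
	r.setReceiptsDone()
	fmt.Println("all done after receipts:", r.allDone()) // true
}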
@ -33,7 +33,7 @@ type resultStore struct {
|
||||
// Internal index of first non-completed entry, updated atomically when needed.
|
||||
// If all items are complete, this will equal length(items), so
|
||||
// *important*: it is not safe to use for indexing without checking against length
|
||||
indexIncomplete int32 // atomic access
|
||||
indexIncomplete atomic.Int32
|
||||
|
||||
// throttleThreshold is the limit up to which we _want_ to fill the
|
||||
// results. If blocks are large, we want to limit the results to less
|
||||
@ -146,7 +146,7 @@ func (r *resultStore) HasCompletedItems() bool {
|
||||
func (r *resultStore) countCompleted() int {
|
||||
// We iterate from the already known complete point, and see
|
||||
// if any more have completed since the last count
|
||||
index := atomic.LoadInt32(&r.indexIncomplete)
|
||||
index := r.indexIncomplete.Load()
|
||||
for ; ; index++ {
|
||||
if index >= int32(len(r.items)) {
|
||||
break
|
||||
@ -156,7 +156,7 @@ func (r *resultStore) countCompleted() int {
|
||||
break
|
||||
}
|
||||
}
|
||||
atomic.StoreInt32(&r.indexIncomplete, index)
|
||||
r.indexIncomplete.Store(index)
|
||||
return int(index)
|
||||
}
|
||||
|
||||
@ -179,7 +179,7 @@ func (r *resultStore) GetCompleted(limit int) []*fetchResult {
|
||||
}
|
||||
// Advance the expected block number of the first cache entry
|
||||
r.resultOffset += uint64(limit)
|
||||
atomic.AddInt32(&r.indexIncomplete, int32(-limit))
|
||||
r.indexIncomplete.Add(int32(-limit))
|
||||
|
||||
return results
|
||||
}
|
||||
|
@ -82,8 +82,8 @@ type skeletonTestPeer struct {
|
||||
|
||||
serve func(origin uint64) []*types.Header // Hook to allow custom responses
|
||||
|
||||
served uint64 // Number of headers served by this peer
|
||||
dropped uint64 // Flag whether the peer was dropped (stop responding)
|
||||
served atomic.Uint64 // Number of headers served by this peer
|
||||
dropped atomic.Uint64 // Flag whether the peer was dropped (stop responding)
|
||||
}
|
||||
|
||||
// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
|
||||
@ -113,7 +113,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
|
||||
// Since skeleton test peers are in-memory mocks, dropping them does not make
|
||||
// them inaccessible. As such, check a local `dropped` field to see if the
|
||||
// peer has been dropped and should not respond any more.
|
||||
if atomic.LoadUint64(&p.dropped) != 0 {
|
||||
if p.dropped.Load() != 0 {
|
||||
return nil, errors.New("peer already dropped")
|
||||
}
|
||||
// Skeleton sync retrieves batches of headers going backward without gaps.
|
||||
@ -161,7 +161,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
|
||||
}
|
||||
}
|
||||
}
|
||||
atomic.AddUint64(&p.served, uint64(len(headers)))
|
||||
p.served.Add(uint64(len(headers)))
|
||||
|
||||
hashes := make([]common.Hash, len(headers))
|
||||
for i, header := range headers {
|
||||
@ -182,7 +182,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
|
||||
sink <- res
|
||||
if err := <-res.Done; err != nil {
|
||||
log.Warn("Skeleton test peer response rejected", "err", err)
|
||||
atomic.AddUint64(&p.dropped, 1)
|
||||
p.dropped.Add(1)
|
||||
}
|
||||
}()
|
||||
return req, nil
|
||||
@ -817,7 +817,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
|
||||
dropped := make(map[string]int)
|
||||
drop := func(peer string) {
|
||||
if p := peerset.Peer(peer); p != nil {
|
||||
atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
|
||||
p.peer.(*skeletonTestPeer).dropped.Add(1)
|
||||
}
|
||||
peerset.Unregister(peer)
|
||||
dropped[peer]++
|
||||
@ -895,14 +895,14 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
|
||||
if !tt.unpredictable {
|
||||
var served uint64
|
||||
for _, peer := range tt.peers {
|
||||
served += atomic.LoadUint64(&peer.served)
|
||||
served += peer.served.Load()
|
||||
}
|
||||
if served != tt.midserve {
|
||||
t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
|
||||
}
|
||||
var drops uint64
|
||||
for _, peer := range tt.peers {
|
||||
drops += atomic.LoadUint64(&peer.dropped)
|
||||
drops += peer.dropped.Load()
|
||||
}
|
||||
if drops != tt.middrop {
|
||||
t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
|
||||
@ -950,20 +950,20 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
|
||||
if !tt.unpredictable {
|
||||
served := uint64(0)
|
||||
for _, peer := range tt.peers {
|
||||
served += atomic.LoadUint64(&peer.served)
|
||||
served += peer.served.Load()
|
||||
}
|
||||
if tt.newPeer != nil {
|
||||
served += atomic.LoadUint64(&tt.newPeer.served)
|
||||
served += tt.newPeer.served.Load()
|
||||
}
|
||||
if served != tt.endserve {
|
||||
t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
|
||||
}
|
||||
drops := uint64(0)
|
||||
for _, peer := range tt.peers {
|
||||
drops += atomic.LoadUint64(&peer.dropped)
|
||||
drops += peer.dropped.Load()
|
||||
}
|
||||
if tt.newPeer != nil {
|
||||
drops += atomic.LoadUint64(&tt.newPeer.dropped)
|
||||
drops += tt.newPeer.dropped.Load()
|
||||
}
|
||||
if drops != tt.enddrop {
|
||||
t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
|
||||
|
@ -142,7 +142,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
|
||||
// also returned if requested and available.
|
||||
// Note: an error is only returned if retrieving the head header has failed. If there are no
|
||||
// retrievable blocks in the specified range then zero block count is returned with no error.
|
||||
func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNumber, blocks int) (*types.Block, []*types.Receipt, uint64, int, error) {
|
||||
func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNumber, blocks uint64) (*types.Block, []*types.Receipt, uint64, uint64, error) {
|
||||
var (
|
||||
headBlock *types.Header
|
||||
pendingBlock *types.Block
|
||||
@ -200,8 +200,8 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
|
||||
return nil, nil, 0, 0, nil
|
||||
}
|
||||
// Ensure not trying to retrieve before genesis.
|
||||
if int(reqEnd+1) < blocks {
|
||||
blocks = int(reqEnd + 1)
|
||||
if uint64(reqEnd+1) < blocks {
|
||||
blocks = uint64(reqEnd + 1)
|
||||
}
|
||||
return pendingBlock, pendingReceipts, uint64(reqEnd), blocks, nil
|
||||
}
|
||||
@ -220,7 +220,7 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
|
||||
//
|
||||
// Note: baseFee includes the next block after the newest of the returned range, because this
|
||||
// value can be derived from the newest block.
|
||||
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
|
||||
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
|
||||
if blocks < 1 {
|
||||
return common.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks
|
||||
}
|
||||
@ -249,7 +249,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
|
||||
if err != nil || blocks == 0 {
|
||||
return common.Big0, nil, nil, nil, err
|
||||
}
|
||||
oldestBlock := lastBlock + 1 - uint64(blocks)
|
||||
oldestBlock := lastBlock + 1 - blocks
|
||||
|
||||
var (
|
||||
next = oldestBlock
|
||||
@ -259,7 +259,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
|
||||
for i, p := range rewardPercentiles {
|
||||
binary.LittleEndian.PutUint64(percentileKey[i*8:(i+1)*8], math.Float64bits(p))
|
||||
}
|
||||
for i := 0; i < maxBlockFetchers && i < blocks; i++ {
|
||||
for i := 0; i < maxBlockFetchers && i < int(blocks); i++ {
|
||||
go func() {
|
||||
for {
|
||||
// Retrieve the next block number to fetch with this goroutine
|
||||
@ -314,7 +314,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
|
||||
if fees.err != nil {
|
||||
return common.Big0, nil, nil, nil, fees.err
|
||||
}
|
||||
i := int(fees.blockNumber - oldestBlock)
|
||||
i := fees.blockNumber - oldestBlock
|
||||
if fees.results.baseFee != nil {
|
||||
reward[i], baseFee[i], baseFee[i+1], gasUsedRatio[i] = fees.results.reward, fees.results.baseFee, fees.results.nextBaseFee, fees.results.gasUsedRatio
|
||||
} else {
|
||||
|
@ -28,8 +28,8 @@ import (
|
||||
func TestFeeHistory(t *testing.T) {
|
||||
var cases = []struct {
|
||||
pending bool
|
||||
maxHeader, maxBlock int
|
||||
count int
|
||||
maxHeader, maxBlock uint64
|
||||
count uint64
|
||||
last rpc.BlockNumber
|
||||
percent []float64
|
||||
expFirst uint64
|
||||
|
@ -42,8 +42,8 @@ var (
|
||||
type Config struct {
|
||||
Blocks int
|
||||
Percentile int
|
||||
MaxHeaderHistory int
|
||||
MaxBlockHistory int
|
||||
MaxHeaderHistory uint64
|
||||
MaxBlockHistory uint64
|
||||
Default *big.Int `toml:",omitempty"`
|
||||
MaxPrice *big.Int `toml:",omitempty"`
|
||||
IgnorePrice *big.Int `toml:",omitempty"`
|
||||
@ -71,7 +71,7 @@ type Oracle struct {
|
||||
fetchLock sync.Mutex
|
||||
|
||||
checkBlocks, percentile int
|
||||
maxHeaderHistory, maxBlockHistory int
|
||||
maxHeaderHistory, maxBlockHistory uint64
|
||||
|
||||
historyCache *lru.Cache[cacheKey, processedFees]
|
||||
}
|
||||
|
@ -418,7 +418,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
acc, err := accTrie.TryGetAccountByHash(account)
|
||||
acc, err := accTrie.GetAccountByHash(account)
|
||||
if err != nil || acc == nil {
|
||||
return nil, nil
|
||||
}
|
||||
@ -510,7 +510,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
|
||||
|
||||
case 1:
|
||||
// If we're only retrieving an account trie node, fetch it directly
|
||||
blob, resolved, err := accTrie.TryGetNode(pathset[0])
|
||||
blob, resolved, err := accTrie.GetNode(pathset[0])
|
||||
loads += resolved // always account database reads, even for failures
|
||||
if err != nil {
|
||||
break
|
||||
@ -524,7 +524,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
|
||||
if snap == nil {
|
||||
// We don't have the requested state snapshotted yet (or it is stale),
|
||||
// but can look up the account via the trie instead.
|
||||
account, err := accTrie.TryGetAccountByHash(common.BytesToHash(pathset[0]))
|
||||
account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0]))
|
||||
loads += 8 // We don't know the exact cost of lookup, this is an estimate
|
||||
if err != nil || account == nil {
|
||||
break
|
||||
@ -545,7 +545,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
|
||||
break
|
||||
}
|
||||
for _, path := range pathset[1:] {
|
||||
blob, resolved, err := stTrie.TryGetNode(path)
|
||||
blob, resolved, err := stTrie.GetNode(path)
|
||||
loads += resolved // always account database reads, even for failures
|
||||
if err != nil {
|
||||
break
|
||||
|
@ -216,7 +216,7 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash,
|
||||
for _, pathset := range paths {
|
||||
switch len(pathset) {
|
||||
case 1:
|
||||
blob, _, err := t.accountTrie.TryGetNode(pathset[0])
|
||||
blob, _, err := t.accountTrie.GetNode(pathset[0])
|
||||
if err != nil {
|
||||
t.logger.Info("Error handling req", "error", err)
|
||||
break
|
||||
@ -225,7 +225,7 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash,
|
||||
default:
|
||||
account := t.storageTries[(common.BytesToHash(pathset[0]))]
|
||||
for _, path := range pathset[1:] {
|
||||
blob, _, err := account.TryGetNode(path)
|
||||
blob, _, err := account.GetNode(path)
|
||||
if err != nil {
|
||||
t.logger.Info("Error handling req", "error", err)
|
||||
break
|
||||
@ -1381,7 +1381,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
|
||||
})
|
||||
key := key32(i)
|
||||
elem := &kv{key, value}
|
||||
accTrie.Update(elem.k, elem.v)
|
||||
accTrie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
}
|
||||
sort.Sort(entries)
|
||||
@ -1431,7 +1431,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
|
||||
CodeHash: getCodeHash(uint64(i)),
|
||||
})
|
||||
elem := &kv{boundaries[i].Bytes(), value}
|
||||
accTrie.Update(elem.k, elem.v)
|
||||
accTrie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
}
|
||||
// Fill other accounts if required
|
||||
@ -1443,7 +1443,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
|
||||
CodeHash: getCodeHash(i),
|
||||
})
|
||||
elem := &kv{key32(i), value}
|
||||
accTrie.Update(elem.k, elem.v)
|
||||
accTrie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
}
|
||||
sort.Sort(entries)
|
||||
@ -1487,7 +1487,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
|
||||
CodeHash: codehash,
|
||||
})
|
||||
elem := &kv{key, value}
|
||||
accTrie.Update(elem.k, elem.v)
|
||||
accTrie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
|
||||
storageRoots[common.BytesToHash(key)] = stRoot
|
||||
@ -1551,7 +1551,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
|
||||
CodeHash: codehash,
|
||||
})
|
||||
elem := &kv{key, value}
|
||||
accTrie.Update(elem.k, elem.v)
|
||||
accTrie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
|
||||
// we reuse the same one for all accounts
|
||||
@ -1599,7 +1599,7 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
|
||||
key := crypto.Keccak256Hash(slotKey[:])
|
||||
|
||||
elem := &kv{key[:], rlpSlotValue}
|
||||
trie.Update(elem.k, elem.v)
|
||||
trie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
}
|
||||
sort.Sort(entries)
|
||||
@ -1638,7 +1638,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
|
||||
val := []byte{0xde, 0xad, 0xbe, 0xef}
|
||||
|
||||
elem := &kv{key[:], val}
|
||||
trie.Update(elem.k, elem.v)
|
||||
trie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
}
|
||||
// Fill other slots if required
|
||||
@ -1650,7 +1650,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
|
||||
rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
|
||||
|
||||
elem := &kv{key[:], rlpSlotValue}
|
||||
trie.Update(elem.k, elem.v)
|
||||
trie.MustUpdate(elem.k, elem.v)
|
||||
entries = append(entries, elem)
|
||||
}
|
||||
sort.Sort(entries)
|
||||
|
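Note: the snap test helpers move from the removed Try* methods to the Must* variants, which panic internally instead of returning an error, which is fine for fixtures where an error could only be a test bug. A short sketch against this tree's trie package, assuming it is importable as github.com/ethereum/go-ethereum.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// MustUpdate panics on internal failure instead of returning an error,
	// keeping test fixtures free of unchecked error plumbing.
	tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
	tr.MustUpdate([]byte("key-1"), []byte("value-1"))
	tr.MustUpdate([]byte("key-2"), []byte("value-2"))
	fmt.Printf("root: %x\n", tr.Hash())
}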
@@ -805,7 +805,6 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
 			// Swap out the noop logger to the standard tracer
 			writer = bufio.NewWriter(dump)
 			vmConf = vm.Config{
-				Debug:                   true,
 				Tracer:                  logger.NewJSONLogger(&logConfig, writer),
 				EnablePreimageRecording: true,
 			}
@@ -972,7 +971,7 @@ func (api *API) traceTx(ctx context.Context, message *core.Message, txctx *Conte
 		}
 		// end PluGeth injection
 	}
-	vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true})
+	vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer, NoBaseFee: true})

 	// Define a meaningful timeout of a single transaction trace
 	if config.Timeout != nil {
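Both tracer hunks above (and the matching ones in the test files further down) drop Debug: true from vm.Config: after this change a non-nil Tracer on its own enables tracing. A minimal sketch of constructing a tracing EVM under the new convention; the newTracingEVM helper and the chain-config choice are illustrative assumptions:

    import (
    	"github.com/ethereum/go-ethereum/core/vm"
    	"github.com/ethereum/go-ethereum/eth/tracers"
    	"github.com/ethereum/go-ethereum/params"
    )

    // newTracingEVM wires a tracer into an EVM. With the Debug field gone,
    // setting Tracer is all that is required.
    func newTracingEVM(blockCtx vm.BlockContext, txCtx vm.TxContext, statedb vm.StateDB, tracer tracers.Tracer) *vm.EVM {
    	return vm.NewEVM(blockCtx, txCtx, statedb, params.MainnetChainConfig, vm.Config{Tracer: tracer})
    }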
@@ -835,8 +835,8 @@ func TestTraceChain(t *testing.T) {
 	signer := types.HomesteadSigner{}

 	var (
-		ref   uint32 // total refs has made
-		rel   uint32 // total rels has made
+		ref   atomic.Uint32 // total refs has made
+		rel   atomic.Uint32 // total rels has made
 		nonce uint64
 	)
 	backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
@@ -849,8 +849,8 @@ func TestTraceChain(t *testing.T) {
 			nonce += 1
 		}
 	})
-	backend.refHook = func() { atomic.AddUint32(&ref, 1) }
-	backend.relHook = func() { atomic.AddUint32(&rel, 1) }
+	backend.refHook = func() { ref.Add(1) }
+	backend.relHook = func() { rel.Add(1) }
 	api := NewAPI(backend)

 	single := `{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}`
@@ -863,7 +863,8 @@ func TestTraceChain(t *testing.T) {
 		{10, 20, nil}, // the middle chain range, blocks [11, 20]
 	}
 	for _, c := range cases {
-		ref, rel = 0, 0 // clean up the counters
+		ref.Store(0)
+		rel.Store(0)

 		from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start))
 		to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end))
@@ -888,8 +889,9 @@ func TestTraceChain(t *testing.T) {
 		if next != c.end+1 {
 			t.Error("Missing tracing block")
 		}
-		if ref != rel {
-			t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref, rel)
+
+		if nref, nrel := ref.Load(), rel.Load(); nref != nrel {
+			t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel)
 		}
 	}
 }
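The three TestTraceChain hunks above migrate the ref/rel counters from bare uint32 values driven through atomic.AddUint32 to Go 1.19's typed sync/atomic values, which own their storage and make unsynchronized reads (like the old `if ref != rel`) impossible to write by accident. A standalone sketch of the same migration:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    func main() {
    	var refs atomic.Uint32 // replaces: var refs uint32

    	var wg sync.WaitGroup
    	for i := 0; i < 8; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			refs.Add(1) // replaces atomic.AddUint32(&refs, 1)
    		}()
    	}
    	wg.Wait()
    	fmt.Println(refs.Load()) // replaces atomic.LoadUint32(&refs)
    }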
@@ -31,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/tracers"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
@@ -144,7 +143,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed to create call tracer: %v", err)
 			}
-			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
 			msg, err := core.TransactionToMessage(tx, signer, nil)
 			if err != nil {
 				t.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -247,7 +246,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
 	if err != nil {
 		b.Fatalf("failed to create call tracer: %v", err)
 	}
-	evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+	evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
 	snap := statedb.Snapshot()
 	st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
 	if _, err = st.TransitionDb(); err != nil {
@@ -260,30 +259,15 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
 	}
 }

-// TestZeroValueToNotExitCall tests the calltracer(s) on the following:
-// Tx to A, A calls B with zero value. B does not already exist.
-// Expected: that enter/exit is invoked and the inner call is shown in the result
-func TestZeroValueToNotExitCall(t *testing.T) {
-	var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
-	privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef")
-	if err != nil {
-		t.Fatalf("err %v", err)
-	}
-	signer := types.NewEIP155Signer(big.NewInt(1))
-	tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{
-		GasPrice: big.NewInt(0),
-		Gas:      50000,
-		To:       &to,
-	})
-	if err != nil {
-		t.Fatalf("err %v", err)
-	}
-	origin, _ := signer.Sender(tx)
-	txContext := vm.TxContext{
+func TestInternals(t *testing.T) {
+	var (
+		to        = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+		origin    = common.HexToAddress("0x00000000000000000000000000000000feed")
+		txContext = vm.TxContext{
 		Origin:   origin,
 		GasPrice: big.NewInt(1),
 	}
-	context := vm.BlockContext{
+		context = vm.BlockContext{
 		CanTransfer: core.CanTransfer,
 		Transfer:    core.Transfer,
 		Coinbase:    common.Address{},
@@ -292,43 +276,104 @@ func TestZeroValueToNotExitCall(t *testing.T) {
 		Difficulty: big.NewInt(0x30000),
 		GasLimit:   uint64(6000000),
 	}
-	var code = []byte{
-		byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero
-		byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS
-		byte(vm.CALL),
-	}
-	var alloc = core.GenesisAlloc{
-		to: core.GenesisAccount{
-			Nonce: 1,
-			Code:  code,
-		},
-		origin: core.GenesisAccount{
-			Nonce:   0,
-			Balance: big.NewInt(500000000000000),
-		},
-	}
-	_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
-	// Create the tracer, the EVM environment and run it
-	tracer, err := tracers.DefaultDirectory.New("callTracer", nil, nil)
+	)
+	mkTracer := func(name string, cfg json.RawMessage) tracers.Tracer {
+		tr, err := tracers.DefaultDirectory.New(name, nil, cfg)
 		if err != nil {
 			t.Fatalf("failed to create call tracer: %v", err)
 		}
-	evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
-	msg, err := core.TransactionToMessage(tx, signer, nil)
+		return tr
+	}
+
+	for _, tc := range []struct {
+		name   string
+		code   []byte
+		tracer tracers.Tracer
+		want   string
+	}{
+		{
+			// TestZeroValueToNotExitCall tests the calltracer(s) on the following:
+			// Tx to A, A calls B with zero value. B does not already exist.
+			// Expected: that enter/exit is invoked and the inner call is shown in the result
+			name: "ZeroValueToNotExitCall",
+			code: []byte{
+				byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero
+				byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS
+				byte(vm.CALL),
+			},
+			tracer: mkTracer("callTracer", nil),
+			want:   `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`,
+		},
+		{
+			name:   "Stack depletion in LOG0",
+			code:   []byte{byte(vm.LOG3)},
+			tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)),
+			want:   `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0xc350","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`,
+		},
+		{
+			name: "Mem expansion in LOG0",
+			code: []byte{
+				byte(vm.PUSH1), 0x1,
+				byte(vm.PUSH1), 0x0,
+				byte(vm.MSTORE),
+				byte(vm.PUSH1), 0xff,
+				byte(vm.PUSH1), 0x0,
+				byte(vm.LOG0),
+			},
+			tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)),
+			want:   `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`,
+		},
+		{
+			// Leads to OOM on the prestate tracer
+			name: "Prestate-tracer - mem expansion in CREATE2",
+			code: []byte{
+				byte(vm.PUSH1), 0x1,
+				byte(vm.PUSH1), 0x0,
+				byte(vm.MSTORE),
+				byte(vm.PUSH1), 0x1,
+				byte(vm.PUSH5), 0xff, 0xff, 0xff, 0xff, 0xff,
+				byte(vm.PUSH1), 0x1,
+				byte(vm.PUSH1), 0x0,
+				byte(vm.CREATE2),
+				byte(vm.PUSH1), 0xff,
+				byte(vm.PUSH1), 0x0,
+				byte(vm.LOG0),
+			},
+			tracer: mkTracer("prestateTracer", json.RawMessage(`{ "withLog": true }`)),
+			want:   `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52640350"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`,
+		},
+	} {
+		_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
+			core.GenesisAlloc{
+				to: core.GenesisAccount{
+					Code: tc.code,
+				},
+				origin: core.GenesisAccount{
+					Balance: big.NewInt(500000000000000),
+				},
+			}, false)
+		evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer})
+		msg := &core.Message{
+			To:                &to,
+			From:              origin,
+			Value:             big.NewInt(0),
+			GasLimit:          50000,
+			GasPrice:          big.NewInt(0),
+			GasFeeCap:         big.NewInt(0),
+			GasTipCap:         big.NewInt(0),
+			SkipAccountChecks: false,
+		}
+		st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
+		if _, err := st.TransitionDb(); err != nil {
+			t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err)
+		}
+		// Retrieve the trace result and compare against the expected
+		res, err := tc.tracer.GetResult()
 		if err != nil {
-			t.Fatalf("failed to prepare transaction for tracing: %v", err)
+			t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err)
 		}
+		if string(res) != tc.want {
+			t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want)
+		}
-	st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
-	if _, err = st.TransitionDb(); err != nil {
-		t.Fatalf("failed to execute transaction: %v", err)
-	}
-	// Retrieve the trace result and compare against the etalon
-	res, err := tracer.GetResult()
-	if err != nil {
-		t.Fatalf("failed to retrieve trace result: %v", err)
-	}
-	wantStr := `{"from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","gas":"0x7148","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`
-	if string(res) != wantStr {
-		t.Fatalf("trace mismatch\n have: %v\n want: %v\n", string(res), wantStr)
-	}
 }
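Besides table-driving the cases, the rewrite above drops transaction signing from the test entirely: rather than signing a types.LegacyTx with a throwaway key and converting it via core.TransactionToMessage, it builds a core.Message literal and feeds it straight to the state transition. A minimal sketch of that invocation path, lifted from the new test body (the runMessage wrapper is hypothetical):

    import (
    	"math/big"

    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/core"
    	"github.com/ethereum/go-ethereum/core/vm"
    )

    // runMessage executes an unsigned message against a prepared EVM; no
    // private key or signer is involved.
    func runMessage(evm *vm.EVM, from, to common.Address) error {
    	msg := &core.Message{
    		To:        &to,
    		From:      from,
    		Value:     big.NewInt(0),
    		GasLimit:  50000,
    		GasPrice:  big.NewInt(0),
    		GasFeeCap: big.NewInt(0),
    		GasTipCap: big.NewInt(0),
    	}
    	st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
    	_, err := st.TransitionDb()
    	return err
    }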
@@ -107,7 +107,7 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
 	if err != nil {
 		return fmt.Errorf("failed to create call tracer: %v", err)
 	}
-	evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+	evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})

 	msg, err := core.TransactionToMessage(tx, signer, nil)
 	if err != nil {
@@ -124,8 +124,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
 	if err != nil {
 		return fmt.Errorf("failed to retrieve trace result: %v", err)
 	}
-	ret := new([]flatCallTrace)
-	if err := json.Unmarshal(res, ret); err != nil {
+	ret := make([]flatCallTrace, 0)
+	if err := json.Unmarshal(res, &ret); err != nil {
 		return fmt.Errorf("failed to unmarshal trace result: %v", err)
 	}
 	if !jsonEqualFlat(ret, test.Result) {
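The unmarshal change above is a tidy-up rather than a bug fix: new([]flatCallTrace) already yields a pointer that json.Unmarshal accepts, but every downstream use then carries a *[]flatCallTrace. Declaring the slice with make and taking its address only at the decode site keeps later code (jsonEqualFlat, indexing, len) on the value, and make([]T, 0) re-encodes as [] rather than null. A self-contained sketch of both shapes:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type trace struct {
    	Type string `json:"type"`
    }

    func main() {
    	res := []byte(`[{"type":"call"},{"type":"create"}]`)

    	// Before: pointer-to-slice; decodes fine but must be dereferenced later.
    	old := new([]trace)
    	_ = json.Unmarshal(res, old)

    	// After: plain slice, address taken only at the decode site.
    	ret := make([]trace, 0)
    	if err := json.Unmarshal(res, &ret); err != nil {
    		panic(err)
    	}
    	fmt.Println(len(*old), len(ret)) // 2 2
    }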
@@ -114,7 +114,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed to create call tracer: %v", err)
 			}
-			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
 			msg, err := core.TransactionToMessage(tx, signer, nil)
 			if err != nil {
 				t.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -47,7 +47,7 @@
"input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073fffffffffffffffffffff
fffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f",
   "result": {
     "from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
-    "gas": "0x5e106",
+    "gas": "0x897be",
     "gasUsed": "0x897be",
"input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c
857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
"output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029",
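The testdata hunks from here on all move in the same direction: the call tracer's top-level "gas" now reports the transaction's full gas limit rather than the gas left after deducting the intrinsic cost. In the hunk above, the new 0x897be equals the gas limit encoded in the raw transaction (the ...830897be... run in the input), while the old 0x5e106 was that limit minus the intrinsic charge for the creation data. The later hunks fit the same arithmetic; for example, 0x3d090 (250,000, the limit) replaces 0x37b38 (228,152), the difference 0x5558 (21,848) being the 21,000 base cost plus the calldata cost.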
@@ -399,7 +399,7 @@
     }
   ],
   "from": "0x70c9217d814985faef62b124420f8dfbddd96433",
-  "gas": "0x37b38",
+  "gas": "0x3d090",
   "gasUsed": "0x1810b",
   "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000",
   "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b",
@@ -87,7 +87,7 @@
     }
   ],
   "from": "0xa529806c67cc6486d4d62024471772f47f6fd672",
-  "gas": "0x2d6e28",
+  "gas": "0x2dc6c0",
   "gasUsed": "0xbd55",
   "input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e",
   "to": "0x269296dddce321a6bcbaa2f0181127593d732cba",
@@ -67,7 +67,7 @@
   ],
   "error": "invalid jump destination",
   "from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8",
-  "gas": "0x435c8",
+  "gas": "0x493e0",
   "gasUsed": "0x493e0",
   "input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8",
   "to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a",
@@ -54,7 +54,7 @@
   "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
   "to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
   "value": "0x0",
-  "gas": "0x1a466",
+  "gas": "0x1f97e",
   "gasUsed": "0x72de",
   "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000"
 }
@@ -50,7 +50,7 @@
"input": "0x02f9029d82053980849502f90085010c388d00832dc6c08080b90241608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033c001a07566181071cabaf58b70fc41557eb813bfc7a24f5c58554e7fed0bf7c031f169a0420af50b5fe791a4d839e181a676db5250b415dfb35cb85d544db7a1475ae2cc",
   "result": {
     "from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1",
-    "gas": "0x2cd774",
+    "gas": "0x2dc6c0",
     "gasUsed": "0x25590",
"input": "0x608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033",
"output": "0x08c379a000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000012546869732063616c6c6564206661696c65640000000000000000000000000000",
@@ -71,7 +71,7 @@
   ],
   "error": "execution reverted",
   "from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826",
-  "gas": "0x78d9e",
+  "gas": "0x7dfa6",
   "gasUsed": "0x7c1c8",
   "input": "0x",
   "to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76",
@@ -50,7 +50,7 @@
   "result": {
     "error": "out of gas",
     "from": "0x94194bc2aaf494501d7880b61274a169f6502a54",
-    "gas": "0x7045",
+    "gas": "0xca1d",
     "gasUsed": "0xca1d",
     "input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000",
     "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62",
Some files were not shown because too many files have changed in this diff.