diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 780f2a800..faf922df0 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -10,7 +10,7 @@ core/ @karalabe @holiman @rjl493456442
eth/ @karalabe @holiman @rjl493456442
eth/catalyst/ @gballet
eth/tracers/ @s1na
-graphql/ @gballet @s1na
+graphql/ @s1na
les/ @zsfelfoldi @rjl493456442
light/ @zsfelfoldi @rjl493456442
node/ @fjl
diff --git a/.gitignore b/.gitignore
index 1ee8b8302..e24e1d167 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,3 +47,4 @@ profile.cov
/dashboard/assets/package-lock.json
**/yarn-error.log
+logs/
\ No newline at end of file
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index df3f52a40..b03f431f7 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -34,6 +34,11 @@ import (
const basefeeWiggleMultiplier = 2
+var (
+ errNoEventSignature = errors.New("no event signature")
+ errEventSignatureMismatch = errors.New("event signature mismatch")
+)
+
// SignerFn is a signer function callback when a contract requires a method to
// sign the transaction before submission.
type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
@@ -488,8 +493,12 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
+ // Anonymous events are not supported.
+ if len(log.Topics) == 0 {
+ return errNoEventSignature
+ }
if log.Topics[0] != c.abi.Events[event].ID {
- return fmt.Errorf("event signature mismatch")
+ return errEventSignatureMismatch
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -507,8 +516,12 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
+ // Anonymous events are not supported.
+ if len(log.Topics) == 0 {
+ return errNoEventSignature
+ }
if log.Topics[0] != c.abi.Events[event].ID {
- return fmt.Errorf("event signature mismatch")
+ return errEventSignatureMismatch
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
index 2307b9874..ca0128148 100644
--- a/accounts/abi/bind/base_test.go
+++ b/accounts/abi/bind/base_test.go
@@ -186,6 +186,23 @@ func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
unpackAndCheck(t, bc, expectedReceivedMap, mockLog)
}
+func TestUnpackAnonymousLogIntoMap(t *testing.T) {
+ mockLog := newMockLog(nil, common.HexToHash("0x0"))
+
+ abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]`
+ parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
+ bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
+
+ var received map[string]interface{}
+ err := bc.UnpackLogIntoMap(received, "received", mockLog)
+ if err == nil {
+ t.Error("unpacking anonymous event is not supported")
+ }
+ if err.Error() != "no event signature" {
+ t.Errorf("expected error 'no event signature', got '%s'", err)
+ }
+}
+
func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
if err != nil {
diff --git a/build/checksums.txt b/build/checksums.txt
index e349d3eba..9b038582a 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -1,19 +1,19 @@
# This file contains sha256 checksums of optional build dependencies.
-4d0e2850d197b4ddad3bdb0196300179d095bb3aefd4dfbc3b36702c3728f8ab go1.20.2.src.tar.gz
-c93b8ced9517d07e1cd4c362c6e2d5242cb139e29b417a328fbf19aded08764c go1.20.2.darwin-amd64.tar.gz
-7343c87f19e79c0063532e82e1c4d6f42175a32d99f7a4d15e658e88bf97f885 go1.20.2.darwin-arm64.tar.gz
-14f9be2004e042b3a64d0facb0c020756a9084a5c7333e33b0752b393b6016ea go1.20.2.freebsd-386.tar.gz
-b41b67b4f1b56797a7cecf6ee7f47fcf4f93960b2788a3683c07dd009d30b2a4 go1.20.2.freebsd-amd64.tar.gz
-ee240ed33ae57504c41f04c12236aeaa17fbeb6ea9fcd096cd9dc7a89d10d4db go1.20.2.linux-386.tar.gz
-4eaea32f59cde4dc635fbc42161031d13e1c780b87097f4b4234cfce671f1768 go1.20.2.linux-amd64.tar.gz
-78d632915bb75e9a6356a47a42625fd1a785c83a64a643fedd8f61e31b1b3bef go1.20.2.linux-arm64.tar.gz
-d79d56bafd6b52b8d8cbe3f8e967caaac5383a23d7a4fa9ac0e89778cd16a076 go1.20.2.linux-armv6l.tar.gz
-850564ddb760cb703db63bf20182dc4407abd2ff090a95fa66d6634d172fd095 go1.20.2.linux-ppc64le.tar.gz
-8da24c5c4205fe8115f594237e5db7bcb1d23df67bc1fa9a999954b1976896e8 go1.20.2.linux-s390x.tar.gz
-31838b291117495bbb93683603e98d5118bfabd2eb318b4d07540bfd524bab86 go1.20.2.windows-386.zip
-fe439f0e438f7555a7f5f7194ddb6f4a07b0de1fa414385d19f2aeb26d9f43db go1.20.2.windows-amd64.zip
-ac5010c8b8b22849228a8dea698d58b9c7be2195d30c6d778cce0f709858fa64 go1.20.2.windows-arm64.zip
+e447b498cde50215c4f7619e5124b0fc4e25fb5d16ea47271c47f278e7aa763a go1.20.3.src.tar.gz
+c1e1161d6d859deb576e6cfabeb40e3d042ceb1c6f444f617c3c9d76269c3565 go1.20.3.darwin-amd64.tar.gz
+86b0ed0f2b2df50fa8036eea875d1cf2d76cefdacf247c44639a1464b7e36b95 go1.20.3.darwin-arm64.tar.gz
+340e80abd047c597fdc0f50a6cc59617f06c297d62f7fc77f4a0164e2da6f7aa go1.20.3.freebsd-386.tar.gz
+2169fcd8b6c94c5fbe07c0b470ccfb6001d343f6548ad49f3d9ab78e3b5753c7 go1.20.3.freebsd-amd64.tar.gz
+e12384311403f1389d14cc1c1295bfb4e0dd5ab919403b80da429f671a223507 go1.20.3.linux-386.tar.gz
+979694c2c25c735755bf26f4f45e19e64e4811d661dd07b8c010f7a8e18adfca go1.20.3.linux-amd64.tar.gz
+eb186529f13f901e7a2c4438a05c2cd90d74706aaa0a888469b2a4a617b6ee54 go1.20.3.linux-arm64.tar.gz
+b421e90469a83671641f81b6e20df6500f033e9523e89cbe7b7223704dd1035c go1.20.3.linux-armv6l.tar.gz
+943c89aa1624ea544a022b31e3d6e16a037200e436370bdd5fd67f3fa60be282 go1.20.3.linux-ppc64le.tar.gz
+126cf823a5634ef2544b866db107b9d351d3ea70d9e240b0bdcfb46f4dcae54b go1.20.3.linux-s390x.tar.gz
+37e9146e1f9d681cfcaa6fee6c7b890c44c64bc50228c9588f3c4231346d33bd go1.20.3.windows-386.zip
+143a2837821c7dbacf7744cbb1a8421c1f48307c6fdfaeffc5f8c2f69e1b7932 go1.20.3.windows-amd64.zip
+158cb159e00bc979f473e0f5b5a561613129c5e51067967b72b8e072e5a4db81 go1.20.3.windows-arm64.zip
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
diff --git a/build/ci.go b/build/ci.go
index 49926621b..eab011752 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -139,7 +139,7 @@ var (
// This is the version of Go that will be downloaded by
//
// go run ci.go install -dlgo
- dlgoVersion = "1.20.2"
+ dlgoVersion = "1.20.3"
// This is the version of Go that will be used to bootstrap the PPA builder.
//
@@ -465,10 +465,6 @@ func maybeSkipArchive(env build.Environment) {
log.Printf("skipping archive creation because this is a PR build")
os.Exit(0)
}
- if env.IsCronJob {
- log.Printf("skipping archive creation because this is a cron job")
- os.Exit(0)
- }
if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
log.Printf("skipping archive creation because branch %q, tag %q is not on the inclusion list", env.Branch, env.Tag)
os.Exit(0)
diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go
index 8c9755ac1..1b964164d 100644
--- a/cmd/devp2p/crawl.go
+++ b/cmd/devp2p/crawl.go
@@ -141,7 +141,7 @@ loop:
"added", atomic.LoadUint64(&added),
"updated", atomic.LoadUint64(&updated),
"removed", atomic.LoadUint64(&removed),
- "ignored(recent)", atomic.LoadUint64(&removed),
+ "ignored(recent)", atomic.LoadUint64(&recent),
"ignored(incompatible)", atomic.LoadUint64(&skipped))
}
}
diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go
index bfe92257e..798fdc733 100644
--- a/cmd/devp2p/dns_cloudflare.go
+++ b/cmd/devp2p/dns_cloudflare.go
@@ -144,7 +144,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
} else if old.Content != val {
// Entry already exists, only change its content.
- log.Debug(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
+ log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
updated++
old.Content = val
err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go
index 81734eb2a..400ab1b1c 100644
--- a/cmd/devp2p/dns_route53.go
+++ b/cmd/devp2p/dns_route53.go
@@ -221,7 +221,13 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
}
records = lrecords
- var changes []types.Change
+ var (
+ changes []types.Change
+ inserts int
+ upserts int
+ skips int
+ )
+
for path, newValue := range records {
prevRecords, exists := existing[path]
prevValue := strings.Join(prevRecords.values, "")
@@ -237,20 +243,30 @@ func (c *route53Client) computeChanges(name string, records map[string]string, e
if !exists {
// Entry is unknown, push a new one
- log.Info(fmt.Sprintf("Creating %s = %s", path, newValue))
+ log.Debug(fmt.Sprintf("Creating %s = %s", path, newValue))
changes = append(changes, newTXTChange("CREATE", path, ttl, newValue))
+ inserts++
} else if prevValue != newValue || prevRecords.ttl != ttl {
// Entry already exists, only change its content.
log.Info(fmt.Sprintf("Updating %s from %s to %s", path, prevValue, newValue))
changes = append(changes, newTXTChange("UPSERT", path, ttl, newValue))
+ upserts++
} else {
log.Debug(fmt.Sprintf("Skipping %s = %s", path, newValue))
+ skips++
}
}
// Iterate over the old records and delete anything stale.
- changes = append(changes, makeDeletionChanges(existing, records)...)
+ deletions := makeDeletionChanges(existing, records)
+ changes = append(changes, deletions...)
+ log.Info("Computed DNS changes",
+ "changes", len(changes),
+ "inserts", inserts,
+ "skips", skips,
+ "deleted", len(deletions),
+ "upserts", upserts)
// Ensure changes are in the correct order.
sortChanges(changes)
return changes
@@ -263,7 +279,7 @@ func makeDeletionChanges(records map[string]recordSet, keep map[string]string) [
if _, ok := keep[path]; ok {
continue
}
- log.Info(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
+ log.Debug(fmt.Sprintf("Deleting %s = %s", path, strings.Join(set.values, "")))
changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
}
return changes
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 5f796c1d6..156bce7ed 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -174,7 +174,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return nil, nil, err
}
vmConfig.Tracer = tracer
- vmConfig.Debug = (tracer != nil)
statedb.SetTxContext(tx.Hash(), txIndex)
var (
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index cb7466d86..0b87edd11 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -180,7 +180,6 @@ func Transition(ctx *cli.Context) error {
vmConfig := vm.Config{
Tracer: tracer,
- Debug: (tracer != nil),
}
// Construct the chainconfig
var chainConfig *params.ChainConfig
@@ -250,9 +249,9 @@ func Transition(ctx *cli.Context) error {
if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
if prestate.Env.BaseFee != nil {
// Already set, base fee has precedent over parent base fee.
- } else if prestate.Env.ParentBaseFee != nil {
+ } else if prestate.Env.ParentBaseFee != nil && prestate.Env.Number != 0 {
parent := &types.Header{
- Number: new(big.Int).SetUint64(prestate.Env.Number),
+ Number: new(big.Int).SetUint64(prestate.Env.Number - 1),
BaseFee: prestate.Env.ParentBaseFee,
GasUsed: prestate.Env.ParentGasUsed,
GasLimit: prestate.Env.ParentGasLimit,
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 3a010da9f..18ec3330c 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -40,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)
@@ -125,6 +126,7 @@ func runCmd(ctx *cli.Context) error {
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
+ preimages = ctx.Bool(DumpFlag.Name)
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@@ -139,10 +141,12 @@ func runCmd(ctx *cli.Context) error {
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
- statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
+ sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})
+ statedb, _ = state.New(genesis.Root(), sdb, nil)
chainConfig = gen.Config
} else {
- statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ sdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})
+ statedb, _ = state.New(common.Hash{}, sdb, nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
@@ -214,7 +218,6 @@ func runCmd(ctx *cli.Context) error {
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
Tracer: tracer,
- Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
},
}
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 5eba25c72..e9229eaec 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -92,7 +92,6 @@ func stateTestCmd(ctx *cli.Context) error {
// Iterate over all the tests, run them and aggregate the results
cfg := vm.Config{
Tracer: tracer,
- Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name),
}
results := make([]StatetestResult, 0, len(tests))
for key, test := range tests {
diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go
index 5158b7606..a36da7d55 100644
--- a/cmd/geth/accountcmd.go
+++ b/cmd/geth/accountcmd.go
@@ -301,7 +301,11 @@ func accountUpdate(ctx *cli.Context) error {
utils.Fatalf("No accounts specified to update")
}
stack, _ := makeConfigNode(ctx)
- ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+ if len(backends) == 0 {
+ utils.Fatalf("Keystore is not available")
+ }
+ ks := backends[0].(*keystore.KeyStore)
for _, addr := range ctx.Args().Slice() {
account, oldPassword := unlockAccount(ks, addr, 0, nil)
@@ -326,7 +330,11 @@ func importWallet(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
passphrase := utils.GetPassPhraseWithList("", false, 0, utils.MakePasswordList(ctx))
- ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+ if len(backends) == 0 {
+ utils.Fatalf("Keystore is not available")
+ }
+ ks := backends[0].(*keystore.KeyStore)
acct, err := ks.ImportPreSaleKey(keyJSON, passphrase)
if err != nil {
utils.Fatalf("%v", err)
@@ -347,7 +355,11 @@ func accountImport(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
passphrase := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
- ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+ if len(backends) == 0 {
+ utils.Fatalf("Keystore is not available")
+ }
+ ks := backends[0].(*keystore.KeyStore)
acct, err := ks.ImportECDSA(key, passphrase)
if err != nil {
utils.Fatalf("Could not create the account: %v", err)
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index f3a0c3d91..b750c55b2 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -480,7 +480,12 @@ func unlockAccounts(ctx *cli.Context, stack *node.Node) {
if !stack.Config().InsecureUnlockAllowed && stack.Config().ExtRPCEnabled() {
utils.Fatalf("Account unlock with HTTP access is forbidden!")
}
- ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ backends := stack.AccountManager().Backends(keystore.KeyStoreType)
+ if len(backends) == 0 {
+ log.Warn("Failed to unlock accounts, keystore is not available")
+ return
+ }
+ ks := backends[0].(*keystore.KeyStore)
passwords := utils.MakePasswordList(ctx)
for i, account := range unlocks {
unlockAccount(ks, account, i, passwords)
diff --git a/common/path.go b/common/path.go
index 69820cfe5..c1e382fd2 100644
--- a/common/path.go
+++ b/common/path.go
@@ -17,19 +17,10 @@
package common
import (
- "fmt"
"os"
"path/filepath"
- "runtime"
)
-// MakeName creates a node name that follows the ethereum convention
-// for such names. It adds the operation system name and Go runtime version
-// the name.
-func MakeName(name, version string) string {
- return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version())
-}
-
// FileExist checks if a file exists at filePath.
func FileExist(filePath string) bool {
_, err := os.Stat(filePath)
diff --git a/common/types.go b/common/types.go
index 218ca0be4..0b68a19dd 100644
--- a/common/types.go
+++ b/common/types.go
@@ -400,7 +400,7 @@ func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error {
}
// MarshalJSON marshals the original value
-func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) {
+func (ma MixedcaseAddress) MarshalJSON() ([]byte, error) {
if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") {
return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:]))
}
diff --git a/common/types_test.go b/common/types_test.go
index 94492278d..88c642522 100644
--- a/common/types_test.go
+++ b/common/types_test.go
@@ -154,6 +154,31 @@ func BenchmarkAddressHex(b *testing.B) {
}
}
+// Test checks if the customized json marshaller of MixedcaseAddress object
+// is invoked correctly. In golang the struct pointer will inherit the
+// non-pointer receiver methods, the reverse is not true. In the case of
+// MixedcaseAddress, it must define the MarshalJSON method in the object
+// but not the pointer level, so that this customized marshalled can be used
+// for both MixedcaseAddress object and pointer.
+func TestMixedcaseAddressMarshal(t *testing.T) {
+ var (
+ output string
+ input = "0xae967917c465db8578ca9024c205720b1a3651A9"
+ )
+ addr, err := NewMixedcaseAddressFromString(input)
+ if err != nil {
+ t.Fatal(err)
+ }
+ blob, err := json.Marshal(*addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ json.Unmarshal(blob, &output)
+ if output != input {
+ t.Fatal("Failed to marshal/unmarshal MixedcaseAddress object")
+ }
+}
+
func TestMixedcaseAccount_Address(t *testing.T) {
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
// Note: 0X{checksum_addr} is not valid according to spec above
@@ -177,7 +202,7 @@ func TestMixedcaseAccount_Address(t *testing.T) {
}
}
- //These should throw exceptions:
+ // These should throw exceptions:
var r2 []MixedcaseAddress
for _, r := range []string{
`["0x11111111111111111111122222222222233333"]`, // Too short
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index b4da9b553..1129ac06c 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -263,11 +263,19 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
// Verify existence / non-existence of withdrawalsHash.
shanghai := chain.Config().IsShanghai(header.Time)
if shanghai && header.WithdrawalsHash == nil {
- return fmt.Errorf("missing withdrawalsHash")
+ return errors.New("missing withdrawalsHash")
}
if !shanghai && header.WithdrawalsHash != nil {
return fmt.Errorf("invalid withdrawalsHash: have %x, expected nil", header.WithdrawalsHash)
}
+ // Verify the existence / non-existence of excessDataGas
+ cancun := chain.Config().IsCancun(header.Time)
+ if cancun && header.ExcessDataGas == nil {
+ return errors.New("missing excessDataGas")
+ }
+ if !cancun && header.ExcessDataGas != nil {
+ return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas)
+ }
return nil
}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 6c20803b2..2345a5ca0 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -301,9 +301,8 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
if chain.Config().IsShanghai(header.Time) {
return fmt.Errorf("clique does not support shanghai fork")
}
- // If all checks passed, validate any special fields for hard forks
- if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
- return err
+ if chain.Config().IsCancun(header.Time) {
+ return fmt.Errorf("clique does not support cancun fork")
}
// All basic checks passed, verify cascading fields
return c.verifyCascadingFields(chain, header, parents)
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index d53918382..a2a6081f5 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -163,7 +163,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
rows := int(size) / hashBytes
// Start a monitoring goroutine to report progress on low end devices
- var progress uint32
+ var progress atomic.Uint32
done := make(chan struct{})
defer close(done)
@@ -174,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
case <-done:
return
case <-time.After(3 * time.Second):
- logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
+ logger.Info("Generating ethash verification cache", "percentage", progress.Load()*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
}
}
}()
@@ -185,7 +185,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
keccak512(cache, seed)
for offset := uint64(hashBytes); offset < size; offset += hashBytes {
keccak512(cache[offset:], cache[offset-hashBytes:offset])
- atomic.AddUint32(&progress, 1)
+ progress.Add(1)
}
// Use a low-round version of randmemohash
temp := make([]byte, hashBytes)
@@ -200,7 +200,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
keccak512(cache[dstOff:], temp)
- atomic.AddUint32(&progress, 1)
+ progress.Add(1)
}
}
// Swap the byte order on big endian systems and return
@@ -299,7 +299,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
var pend sync.WaitGroup
pend.Add(threads)
- var progress uint64
+ var progress atomic.Uint64
for i := 0; i < threads; i++ {
go func(id int) {
defer pend.Done()
@@ -323,7 +323,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
}
copy(dataset[index*hashBytes:], item)
- if status := atomic.AddUint64(&progress, 1); status%percent == 0 {
+ if status := progress.Add(1); status%percent == 0 {
logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
}
}
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index c3c06c541..e3e0f28b3 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -313,6 +313,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if chain.Config().IsShanghai(header.Time) {
return fmt.Errorf("ethash does not support shanghai fork")
}
+ if chain.Config().IsCancun(header.Time) {
+ return fmt.Errorf("ethash does not support cancun fork")
+ }
// Verify the engine specific seal securing the block
if seal {
if err := ethash.verifySeal(chain, header, false); err != nil {
@@ -323,9 +326,6 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
return err
}
- if err := misc.VerifyForkHashes(chain.Config(), header, uncle); err != nil {
- return err
- }
return nil
}
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 6cb312482..462f10956 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -308,12 +308,12 @@ func (c *cache) finalizer() {
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
- epoch uint64 // Epoch for which this cache is relevant
- dump *os.File // File descriptor of the memory mapped cache
- mmap mmap.MMap // Memory map itself to unmap before releasing
- dataset []uint32 // The actual cache data content
- once sync.Once // Ensures the cache is generated only once
- done uint32 // Atomic flag to determine generation status
+ epoch uint64 // Epoch for which this cache is relevant
+ dump *os.File // File descriptor of the memory mapped cache
+ mmap mmap.MMap // Memory map itself to unmap before releasing
+ dataset []uint32 // The actual cache data content
+ once sync.Once // Ensures the cache is generated only once
+ done atomic.Bool // Atomic flag to determine generation status
}
// newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -326,7 +326,7 @@ func newDataset(epoch uint64) *dataset {
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
d.once.Do(func() {
// Mark the dataset generated after we're done. This is needed for remote
- defer atomic.StoreUint32(&d.done, 1)
+ defer d.done.Store(true)
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
@@ -390,7 +390,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
- return atomic.LoadUint32(&d.done) == 1
+ return d.done.Load()
}
// finalizer closes any file handlers and memory maps open.
diff --git a/consensus/misc/eip1559_test.go b/consensus/misc/eip1559_test.go
index 23cd9023d..1a9f96bc4 100644
--- a/consensus/misc/eip1559_test.go
+++ b/consensus/misc/eip1559_test.go
@@ -34,7 +34,6 @@ func copyConfig(original *params.ChainConfig) *params.ChainConfig {
DAOForkBlock: original.DAOForkBlock,
DAOForkSupport: original.DAOForkSupport,
EIP150Block: original.EIP150Block,
- EIP150Hash: original.EIP150Hash,
EIP155Block: original.EIP155Block,
EIP158Block: original.EIP158Block,
ByzantiumBlock: original.ByzantiumBlock,
diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go
new file mode 100644
index 000000000..66ca9bd26
--- /dev/null
+++ b/consensus/misc/eip4844.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/params"
+)
+
+var (
+ minDataGasPrice = big.NewInt(params.BlobTxMinDataGasprice)
+ dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction)
+)
+
+// CalcBlobFee calculates the blobfee from the header's excess data gas field.
+func CalcBlobFee(excessDataGas *big.Int) *big.Int {
+ // If this block does not yet have EIP-4844 enabled, return the starting fee
+ if excessDataGas == nil {
+ return big.NewInt(params.BlobTxMinDataGasprice)
+ }
+ return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction)
+}
+
+// fakeExponential approximates factor * e ** (numerator / denominator) using
+// Taylor expansion.
+func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
+ var (
+ output = new(big.Int)
+ accum = new(big.Int).Mul(factor, denominator)
+ )
+ for i := 1; accum.Sign() > 0; i++ {
+ output.Add(output, accum)
+
+ accum.Mul(accum, numerator)
+ accum.Div(accum, denominator)
+ accum.Div(accum, big.NewInt(int64(i)))
+ }
+ return output.Div(output, denominator)
+}
diff --git a/consensus/misc/eip4844_test.go b/consensus/misc/eip4844_test.go
new file mode 100644
index 000000000..5838cab8e
--- /dev/null
+++ b/consensus/misc/eip4844_test.go
@@ -0,0 +1,85 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/params"
+)
+
+func TestCalcBlobFee(t *testing.T) {
+ tests := []struct {
+ excessDataGas int64
+ blobfee int64
+ }{
+ {0, 1},
+ {1542706, 1},
+ {1542707, 2},
+ {10 * 1024 * 1024, 111},
+ }
+ have := CalcBlobFee(nil)
+ if have.Int64() != params.BlobTxMinDataGasprice {
+ t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice)
+ }
+ for i, tt := range tests {
+ have := CalcBlobFee(big.NewInt(tt.excessDataGas))
+ if have.Int64() != tt.blobfee {
+ t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
+ }
+ }
+}
+
+func TestFakeExponential(t *testing.T) {
+ tests := []struct {
+ factor int64
+ numerator int64
+ denominator int64
+ want int64
+ }{
+ // When numerator == 0 the return value should always equal the value of factor
+ {1, 0, 1, 1},
+ {38493, 0, 1000, 38493},
+ {0, 1234, 2345, 0}, // should be 0
+ {1, 2, 1, 6}, // approximate 7.389
+ {1, 4, 2, 6},
+ {1, 3, 1, 16}, // approximate 20.09
+ {1, 6, 2, 18},
+ {1, 4, 1, 49}, // approximate 54.60
+ {1, 8, 2, 50},
+ {10, 8, 2, 542}, // approximate 540.598
+ {11, 8, 2, 596}, // approximate 600.58
+ {1, 5, 1, 136}, // approximate 148.4
+ {1, 5, 2, 11}, // approximate 12.18
+ {2, 5, 2, 23}, // approximate 24.36
+ {1, 50000000, 2225652, 5709098764},
+ }
+ for i, tt := range tests {
+ f, n, d := big.NewInt(tt.factor), big.NewInt(tt.numerator), big.NewInt(tt.denominator)
+ original := fmt.Sprintf("%d %d %d", f, n, d)
+ have := fakeExponential(f, n, d)
+ if have.Int64() != tt.want {
+ t.Errorf("test %d: fake exponential mismatch: have %v want %v", i, have, tt.want)
+ }
+ later := fmt.Sprintf("%d %d %d", f, n, d)
+ if original != later {
+ t.Errorf("test %d: fake exponential modified arguments: have\n%v\nwant\n%v", i, later, original)
+ }
+ }
+}
diff --git a/consensus/misc/forks.go b/consensus/misc/forks.go
deleted file mode 100644
index a6f3303ea..000000000
--- a/consensus/misc/forks.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package misc
-
-import (
- "fmt"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/params"
-)
-
-// VerifyForkHashes verifies that blocks conforming to network hard-forks do have
-// the correct hashes, to avoid clients going off on different chains. This is an
-// optional feature.
-func VerifyForkHashes(config *params.ChainConfig, header *types.Header, uncle bool) error {
- // We don't care about uncles
- if uncle {
- return nil
- }
- // If the homestead reprice hash is set, validate it
- if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
- if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
- return fmt.Errorf("homestead gas reprice fork: have %#x, want %#x", header.Hash(), config.EIP150Hash)
- }
- }
- // All ok, return
- return nil
-}
diff --git a/core/blockchain.go b/core/blockchain.go
index b31f915bc..aa723598b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -174,7 +174,7 @@ type BlockChain struct {
triegc *prque.Prque[int64, common.Hash] // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
lastWrite uint64 // Last block when the state was flushed
- flushInterval int64 // Time interval (processing time) after which to flush a state
+ flushInterval atomic.Int64 // Time interval (processing time) after which to flush a state
triedb *trie.Database // The database handler for maintaining trie nodes.
stateCache state.Database // State database to reuse between imports (contains state cache)
@@ -215,8 +215,8 @@ type BlockChain struct {
wg sync.WaitGroup //
quit chan struct{} // shutdown signal, closed in Stop.
- running int32 // 0 if chain is running, 1 when stopped
- procInterrupt int32 // interrupt signaler for block processing
+ stopping atomic.Bool // false if chain is running, true when stopped
+ procInterrupt atomic.Bool // interrupt signaler for block processing
engine consensus.Engine
validator Validator // Block and state validator interface
@@ -233,7 +233,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
if cacheConfig == nil {
cacheConfig = defaultCacheConfig
}
-
// Open trie database with provided config
triedb := trie.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
@@ -260,7 +259,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
cacheConfig: cacheConfig,
db: db,
triedb: triedb,
- flushInterval: int64(cacheConfig.TrieTimeLimit),
triegc: prque.New[int64, common.Hash](nil),
quit: make(chan struct{}),
chainmu: syncx.NewClosableMutex(),
@@ -273,6 +271,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
engine: engine,
vmConfig: vmConfig,
}
+ bc.flushInterval.Store(int64(cacheConfig.TrieTimeLimit))
bc.forker = NewForkChoice(bc, shouldPreserve)
bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
bc.validator = NewBlockValidator(chainConfig, bc, engine)
@@ -909,14 +908,14 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
headBlockGauge.Update(int64(block.NumberU64()))
}
-// stop stops the blockchain service. If any imports are currently in progress
+// stopWithoutSaving stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt. This method stops all running
// goroutines, but does not do all the post-stop work of persisting data.
// OBS! It is generally recommended to use the Stop method!
// This method has been exposed to allow tests to stop the blockchain while simulating
// a crash.
func (bc *BlockChain) stopWithoutSaving() {
- if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) {
+ if !bc.stopping.CompareAndSwap(false, true) {
return
}
@@ -998,12 +997,12 @@ func (bc *BlockChain) Stop() {
// errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
// calling this method.
func (bc *BlockChain) StopInsert() {
- atomic.StoreInt32(&bc.procInterrupt, 1)
+ bc.procInterrupt.Store(true)
}
// insertStopped returns true after StopInsert has been called.
func (bc *BlockChain) insertStopped() bool {
- return atomic.LoadInt32(&bc.procInterrupt) == 1
+ return bc.procInterrupt.Load()
}
func (bc *BlockChain) procFutureBlocks() {
@@ -1382,7 +1381,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
// Find the next state trie we need to commit
chosen := current - TriesInMemory
- flushInterval := time.Duration(atomic.LoadInt64(&bc.flushInterval))
+ flushInterval := time.Duration(bc.flushInterval.Load())
// If we exceeded time allowance, flush an entire trie to disk
// begin PluGeth code injection
@@ -1752,68 +1751,69 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
- var followupInterrupt uint32
+ var followupInterrupt atomic.Bool
if !bc.cacheConfig.TrieCleanNoPrefetch {
if followup, err := it.peek(); followup != nil && err == nil {
throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
- go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
+ go func(start time.Time, followup *types.Block, throwaway *state.StateDB) {
bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
blockPrefetchExecuteTimer.Update(time.Since(start))
- if atomic.LoadUint32(interrupt) == 1 {
+ if followupInterrupt.Load() {
blockPrefetchInterruptMeter.Mark(1)
}
- }(time.Now(), followup, throwaway, &followupInterrupt)
+ }(time.Now(), followup, throwaway)
}
}
// Process block using the parent state as reference point
- substart := time.Now()
+ pstart := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
- atomic.StoreUint32(&followupInterrupt, 1)
+ followupInterrupt.Store(true)
return it.index, err
}
+ ptime := time.Since(pstart)
- // Update the metrics touched during block processing
- accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
- storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them
- accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
- storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
- snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
- snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
- triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
- trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
- trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
-
- blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
-
- // Validate the state using the default validator
- substart = time.Now()
+ vstart := time.Now()
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
- atomic.StoreUint32(&followupInterrupt, 1)
+ followupInterrupt.Store(true)
return it.index, err
}
- proctime := time.Since(start)
+ vtime := time.Since(vstart)
+ proctime := time.Since(start) // processing + validation
- // Update the metrics touched during block validation
- accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
- storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
- blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
+ // Update the metrics touched during block processing and validation
+ accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
+ storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
+ snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
+ snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing)
+ accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
+ storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
+ accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
+ storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation)
+ triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing
+ trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
+ trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
+ trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
+ blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing
+ blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
// Write the block to the chain and get the status.
- substart = time.Now()
- var status WriteStatus
+ var (
+ wstart = time.Now()
+ status WriteStatus
+ )
if !setHead {
// Don't set the head, only insert the block
err = bc.writeBlockWithState(block, receipts, statedb)
} else {
status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
}
- atomic.StoreUint32(&followupInterrupt, 1)
+ followupInterrupt.Store(true)
if err != nil {
return it.index, err
}
@@ -1821,9 +1821,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
- triedbCommitTimer.Update(statedb.TrieDBCommits) // Triedb commits are complete, we can mark them
+ triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them
- blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
+ blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
blockInsertTimer.UpdateSince(start)
// Report the import stats before returning the various results
@@ -2524,5 +2524,5 @@ func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Pro
// The interval is in terms of block processing time, not wall clock.
// It is thread-safe and can be called repeatedly without side effects.
func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
- atomic.StoreInt64(&bc.flushInterval, int64(interval))
+ bc.flushInterval.Store(int64(interval))
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 0f9ef6a19..01ad157cc 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -3024,7 +3024,6 @@ func TestDeleteRecreateSlots(t *testing.T) {
})
// Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
- Debug: true,
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
if err != nil {
@@ -3102,7 +3101,6 @@ func TestDeleteRecreateAccount(t *testing.T) {
})
// Import the canonical chain
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
- Debug: true,
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
if err != nil {
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index 0d2f6f950..d8f932041 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -83,7 +83,7 @@ type Matcher struct {
retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
deliveries chan *Retrieval // Retriever processes waiting for task response deliveries
- running uint32 // Atomic flag whether a session is live or not
+ running atomic.Bool // Atomic flag whether a session is live or not
}
// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
@@ -146,10 +146,10 @@ func (m *Matcher) addScheduler(idx uint) {
// channel is closed.
func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
// Make sure we're not creating concurrent sessions
- if atomic.SwapUint32(&m.running, 1) == 1 {
+ if m.running.Swap(true) {
return nil, errors.New("matcher already running")
}
- defer atomic.StoreUint32(&m.running, 0)
+ defer m.running.Store(false)
// Initiate a new matching round
session := &MatcherSession{
diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go
index 93d4632b8..36764c3f1 100644
--- a/core/bloombits/matcher_test.go
+++ b/core/bloombits/matcher_test.go
@@ -160,7 +160,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
}
}
// Track the number of retrieval requests made
- var requested uint32
+ var requested atomic.Uint32
// Start the matching session for the filter and the retriever goroutines
quit := make(chan struct{})
@@ -208,15 +208,15 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
session.Close()
close(quit)
- if retrievals != 0 && requested != retrievals {
- t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested, retrievals)
+ if retrievals != 0 && requested.Load() != retrievals {
+ t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals)
}
- return requested
+ return requested.Load()
}
// startRetrievers starts a batch of goroutines listening for section requests
// and serving them.
-func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) {
+func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) {
requests := make(chan chan *Retrieval)
for i := 0; i < 10; i++ {
@@ -238,7 +238,7 @@ func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *ui
for i, section := range task.Sections {
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
task.Bitsets[i] = generateBitset(task.Bit, section)
- atomic.AddUint32(retrievals, 1)
+ retrievals.Add(1)
}
}
request <- task
diff --git a/core/bloombits/scheduler_test.go b/core/bloombits/scheduler_test.go
index 49e113c11..dcaaa9152 100644
--- a/core/bloombits/scheduler_test.go
+++ b/core/bloombits/scheduler_test.go
@@ -45,13 +45,13 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
fetch := make(chan *request, 16)
defer close(fetch)
- var delivered uint32
+ var delivered atomic.Uint32
for i := 0; i < fetchers; i++ {
go func() {
defer fetchPend.Done()
for req := range fetch {
- atomic.AddUint32(&delivered, 1)
+ delivered.Add(1)
f.deliver([]uint64{
req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
@@ -97,7 +97,7 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
}
pend.Wait()
- if have := atomic.LoadUint32(&delivered); int(have) != requests {
+ if have := delivered.Load(); int(have) != requests {
t.Errorf("request count mismatch: have %v, want %v", have, requests)
}
}
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index 95901a0ea..23ab23ef0 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -75,7 +75,7 @@ type ChainIndexer struct {
backend ChainIndexerBackend // Background processor generating the index data content
children []*ChainIndexer // Child indexers to cascade chain updates to
- active uint32 // Flag whether the event loop was started
+ active atomic.Bool // Flag whether the event loop was started
update chan struct{} // Notification channel that headers should be processed
quit chan chan error // Quit channel to tear down running goroutines
ctx context.Context
@@ -166,7 +166,7 @@ func (c *ChainIndexer) Close() error {
errs = append(errs, err)
}
// If needed, tear down the secondary event loop
- if atomic.LoadUint32(&c.active) != 0 {
+ if c.active.Load() {
c.quit <- errc
if err := <-errc; err != nil {
errs = append(errs, err)
@@ -196,7 +196,7 @@ func (c *ChainIndexer) Close() error {
// queue.
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) {
// Mark the chain indexer as active, requiring an additional teardown
- atomic.StoreUint32(&c.active, 1)
+ c.active.Store(true)
defer sub.Unsubscribe()
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 4734e986e..c5447b16d 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -45,9 +45,10 @@ func (h *testHasher) Reset() {
h.hasher.Reset()
}
-func (h *testHasher) Update(key, val []byte) {
+func (h *testHasher) Update(key, val []byte) error {
h.hasher.Write(key)
h.hasher.Write(val)
+ return nil
}
func (h *testHasher) Hash() common.Hash {
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index 167afc388..22dbda4a2 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -43,10 +43,7 @@ const (
// The background thread will keep moving ancient chain segments from key-value
// database to flat files for saving space on live database.
type chainFreezer struct {
- // WARNING: The `threshold` field is accessed atomically. On 32 bit platforms, only
- // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
- // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+ threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
*Freezer
quit chan struct{}
@@ -60,12 +57,13 @@ func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFre
if err != nil {
return nil, err
}
- return &chainFreezer{
- Freezer: freezer,
- threshold: params.FullImmutabilityThreshold,
- quit: make(chan struct{}),
- trigger: make(chan chan struct{}),
- }, nil
+ cf := chainFreezer{
+ Freezer: freezer,
+ quit: make(chan struct{}),
+ trigger: make(chan chan struct{}),
+ }
+ cf.threshold.Store(params.FullImmutabilityThreshold)
+ return &cf, nil
}
// Close closes the chain freezer instance and terminates the background thread.
@@ -124,8 +122,8 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
continue
}
number := ReadHeaderNumber(nfdb, hash)
- threshold := atomic.LoadUint64(&f.threshold)
- frozen := atomic.LoadUint64(&f.frozen)
+ threshold := f.threshold.Load()
+ frozen := f.frozen.Load()
switch {
case number == nil:
log.Error("Current full block number unavailable", "hash", hash)
@@ -186,7 +184,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
// Wipe out side chains also and track dangling side chains
var dangling []common.Hash
- frozen = atomic.LoadUint64(&f.frozen) // Needs reload after during freezeRange
+ frozen = f.frozen.Load() // Needs reload after during freezeRange
for number := first; number < frozen; number++ {
// Always keep the genesis block in active database
if number != 0 {
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
index 102943516..56bb15b71 100644
--- a/core/rawdb/chain_iterator.go
+++ b/core/rawdb/chain_iterator.go
@@ -132,11 +132,12 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
}
}
// process runs in parallel
- nThreadsAlive := int32(threads)
+ var nThreadsAlive atomic.Int32
+ nThreadsAlive.Store(int32(threads))
process := func() {
defer func() {
// Last processor closes the result channel
- if atomic.AddInt32(&nThreadsAlive, -1) == 0 {
+ if nThreadsAlive.Add(-1) == 0 {
close(hashesCh)
}
}()
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 6c545032f..b8cc36a81 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -24,7 +24,6 @@ import (
"path"
"path/filepath"
"strings"
- "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -72,9 +71,9 @@ func (frdb *freezerdb) Freeze(threshold uint64) error {
}
// Set the freezer threshold to a temporary value
defer func(old uint64) {
- atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, old)
- }(atomic.LoadUint64(&frdb.AncientStore.(*chainFreezer).threshold))
- atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, threshold)
+ frdb.AncientStore.(*chainFreezer).threshold.Store(old)
+ }(frdb.AncientStore.(*chainFreezer).threshold.Load())
+ frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
// Trigger a freeze cycle and block until it's done
trigger := make(chan struct{}, 1)
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index d7b82a135..f5c943d32 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -62,11 +62,8 @@ const freezerTableSize = 2 * 1000 * 1000 * 1000
// reserving it for go-ethereum. This would also reduce the memory requirements
// of Geth, and thus also GC overhead.
type Freezer struct {
- // WARNING: The `frozen` and `tail` fields are accessed atomically. On 32 bit platforms, only
- // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
- // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- frozen uint64 // Number of blocks already frozen
- tail uint64 // Number of the first stored item in the freezer
+ frozen atomic.Uint64 // Number of blocks already frozen
+ tail atomic.Uint64 // Number of the first stored item in the freezer
// This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations.
@@ -212,12 +209,12 @@ func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]
// Ancients returns the length of the frozen items.
func (f *Freezer) Ancients() (uint64, error) {
- return atomic.LoadUint64(&f.frozen), nil
+ return f.frozen.Load(), nil
}
// Tail returns the number of first stored item in the freezer.
func (f *Freezer) Tail() (uint64, error) {
- return atomic.LoadUint64(&f.tail), nil
+ return f.tail.Load(), nil
}
// AncientSize returns the ancient size of the specified category.
@@ -251,7 +248,7 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
defer f.writeLock.Unlock()
// Roll back all tables to the starting position in case of error.
- prevItem := atomic.LoadUint64(&f.frozen)
+ prevItem := f.frozen.Load()
defer func() {
if err != nil {
// The write operation has failed. Go back to the previous item position.
@@ -287,7 +284,7 @@ func (f *Freezer) TruncateHead(items uint64) error {
f.writeLock.Lock()
defer f.writeLock.Unlock()
- if atomic.LoadUint64(&f.frozen) <= items {
+ if f.frozen.Load() <= items {
return nil
}
for _, table := range f.tables {
@@ -295,7 +292,7 @@ func (f *Freezer) TruncateHead(items uint64) error {
return err
}
}
- atomic.StoreUint64(&f.frozen, items)
+ f.frozen.Store(items)
return nil
}
@@ -307,7 +304,7 @@ func (f *Freezer) TruncateTail(tail uint64) error {
f.writeLock.Lock()
defer f.writeLock.Unlock()
- if atomic.LoadUint64(&f.tail) >= tail {
+ if f.tail.Load() >= tail {
return nil
}
for _, table := range f.tables {
@@ -315,7 +312,7 @@ func (f *Freezer) TruncateTail(tail uint64) error {
return err
}
}
- atomic.StoreUint64(&f.tail, tail)
+ f.tail.Store(tail)
return nil
}
@@ -346,22 +343,22 @@ func (f *Freezer) validate() error {
)
// Hack to get boundary of any table
for kind, table := range f.tables {
- head = atomic.LoadUint64(&table.items)
- tail = atomic.LoadUint64(&table.itemHidden)
+ head = table.items.Load()
+ tail = table.itemHidden.Load()
name = kind
break
}
// Now check every table against those boundaries.
for kind, table := range f.tables {
- if head != atomic.LoadUint64(&table.items) {
- return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, atomic.LoadUint64(&table.items), head)
+ if head != table.items.Load() {
+ return fmt.Errorf("freezer tables %s and %s have differing head: %d != %d", kind, name, table.items.Load(), head)
}
- if tail != atomic.LoadUint64(&table.itemHidden) {
- return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, atomic.LoadUint64(&table.itemHidden), tail)
+ if tail != table.itemHidden.Load() {
+ return fmt.Errorf("freezer tables %s and %s have differing tail: %d != %d", kind, name, table.itemHidden.Load(), tail)
}
}
- atomic.StoreUint64(&f.frozen, head)
- atomic.StoreUint64(&f.tail, tail)
+ f.frozen.Store(head)
+ f.tail.Store(tail)
return nil
}
@@ -372,11 +369,11 @@ func (f *Freezer) repair() error {
tail = uint64(0)
)
for _, table := range f.tables {
- items := atomic.LoadUint64(&table.items)
+ items := table.items.Load()
if head > items {
head = items
}
- hidden := atomic.LoadUint64(&table.itemHidden)
+ hidden := table.itemHidden.Load()
if hidden > tail {
tail = hidden
}
@@ -389,8 +386,8 @@ func (f *Freezer) repair() error {
return err
}
}
- atomic.StoreUint64(&f.frozen, head)
- atomic.StoreUint64(&f.tail, tail)
+ f.frozen.Store(head)
+ f.tail.Store(tail)
return nil
}
@@ -416,7 +413,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
// and that error will be returned.
forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
var (
- items = atomic.LoadUint64(&t.items)
+ items = t.items.Load()
batchSize = uint64(1024)
maxBytes = uint64(1024 * 1024)
)
@@ -439,7 +436,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
}
// TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
// process assumes no deletion at tail and needs to be modified to account for that.
- if table.itemOffset > 0 || table.itemHidden > 0 {
+ if table.itemOffset.Load() > 0 || table.itemHidden.Load() > 0 {
return fmt.Errorf("migration not supported for tail-deleted freezers")
}
ancientsPath := filepath.Dir(table.index.Name())
@@ -455,7 +452,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
out []byte
start = time.Now()
logged = time.Now()
- offset = newTable.items
+ offset = newTable.items.Load()
)
if offset > 0 {
log.Info("found previous migration attempt", "migrated", offset)
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index 99d226e9a..c5dac62e2 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -18,7 +18,6 @@ package rawdb
import (
"fmt"
- "sync/atomic"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/rlp"
@@ -113,7 +112,7 @@ func (t *freezerTable) newBatch() *freezerTableBatch {
func (batch *freezerTableBatch) reset() {
batch.dataBuffer = batch.dataBuffer[:0]
batch.indexBuffer = batch.indexBuffer[:0]
- batch.curItem = atomic.LoadUint64(&batch.t.items)
+ batch.curItem = batch.t.items.Load()
batch.totalBytes = 0
}
@@ -207,7 +206,7 @@ func (batch *freezerTableBatch) commit() error {
// Update headBytes of table.
batch.t.headBytes += dataSize
- atomic.StoreUint64(&batch.t.items, batch.curItem)
+ batch.t.items.Store(batch.curItem)
// Update metrics.
batch.t.sizeGauge.Inc(dataSize + indexSize)
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index b111797d5..928b37d70 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -88,18 +88,15 @@ func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uin
// It consists of a data file (snappy encoded arbitrary data blobs) and an indexEntry
// file (uncompressed 64 bit indices into the data file).
type freezerTable struct {
- // WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
- // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
- // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- items uint64 // Number of items stored in the table (including items removed from tail)
- itemOffset uint64 // Number of items removed from the table
+ items atomic.Uint64 // Number of items stored in the table (including items removed from tail)
+ itemOffset atomic.Uint64 // Number of items removed from the table
// itemHidden is the number of items marked as deleted. Tail deletion is
// only supported at file level which means the actual deletion will be
// delayed until the entire data file is marked as deleted. Before that
// these items will be hidden to prevent being visited again. The value
// should never be lower than itemOffset.
- itemHidden uint64
+ itemHidden atomic.Uint64
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
readonly bool
@@ -241,14 +238,14 @@ func (t *freezerTable) repair() error {
// which is not enough in theory but enough in practice.
// TODO: use uint64 to represent total removed items.
t.tailId = firstIndex.filenum
- t.itemOffset = uint64(firstIndex.offset)
+ t.itemOffset.Store(uint64(firstIndex.offset))
// Load metadata from the file
- meta, err := loadMetadata(t.meta, t.itemOffset)
+ meta, err := loadMetadata(t.meta, t.itemOffset.Load())
if err != nil {
return err
}
- t.itemHidden = meta.VirtualTail
+ t.itemHidden.Store(meta.VirtualTail)
// Read the last index, use the default value in case the freezer is empty
if offsetsSize == indexEntrySize {
@@ -331,7 +328,7 @@ func (t *freezerTable) repair() error {
}
}
// Update the item and byte counters and return
- t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
+ t.items.Store(t.itemOffset.Load() + uint64(offsetsSize/indexEntrySize-1)) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum
@@ -346,9 +343,9 @@ func (t *freezerTable) repair() error {
return err
}
if verbose {
- t.logger.Info("Chain freezer table opened", "items", t.items, "size", t.headBytes)
+ t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "size", t.headBytes)
} else {
- t.logger.Debug("Chain freezer table opened", "items", t.items, "size", common.StorageSize(t.headBytes))
+ t.logger.Debug("Chain freezer table opened", "items", t.items.Load(), "size", common.StorageSize(t.headBytes))
}
return nil
}
@@ -382,11 +379,11 @@ func (t *freezerTable) truncateHead(items uint64) error {
defer t.lock.Unlock()
// Ensure the given truncate target falls in the correct range
- existing := atomic.LoadUint64(&t.items)
+ existing := t.items.Load()
if existing <= items {
return nil
}
- if items < atomic.LoadUint64(&t.itemHidden) {
+ if items < t.itemHidden.Load() {
return errors.New("truncation below tail")
}
// We need to truncate, save the old size for metrics tracking
@@ -403,7 +400,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
// Truncate the index file first, the tail position is also considered
// when calculating the new freezer table length.
- length := items - atomic.LoadUint64(&t.itemOffset)
+ length := items - t.itemOffset.Load()
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
@@ -438,7 +435,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
}
// All data files truncated, set internal counters and return
t.headBytes = int64(expected.offset)
- atomic.StoreUint64(&t.items, items)
+ t.items.Store(items)
// Retrieve the new size and update the total size counter
newSize, err := t.sizeNolock()
@@ -455,10 +452,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
defer t.lock.Unlock()
// Ensure the given truncate target falls in the correct range
- if atomic.LoadUint64(&t.itemHidden) >= items {
+ if t.itemHidden.Load() >= items {
return nil
}
- if atomic.LoadUint64(&t.items) < items {
+ if t.items.Load() < items {
return errors.New("truncation above head")
}
// Load the new tail index by the given new tail position
@@ -466,10 +463,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
newTailId uint32
buffer = make([]byte, indexEntrySize)
)
- if atomic.LoadUint64(&t.items) == items {
+ if t.items.Load() == items {
newTailId = t.headId
} else {
- offset := items - atomic.LoadUint64(&t.itemOffset)
+ offset := items - t.itemOffset.Load()
if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
return err
}
@@ -478,7 +475,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
newTailId = newTail.filenum
}
// Update the virtual tail marker and hidden these entries in table.
- atomic.StoreUint64(&t.itemHidden, items)
+ t.itemHidden.Store(items)
if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
return err
}
@@ -501,7 +498,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
// Count how many items can be deleted from the file.
var (
newDeleted = items
- deleted = atomic.LoadUint64(&t.itemOffset)
+ deleted = t.itemOffset.Load()
)
for current := items - 1; current >= deleted; current -= 1 {
if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
@@ -541,7 +538,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
}
// Release any files before the current tail
t.tailId = newTailId
- atomic.StoreUint64(&t.itemOffset, newDeleted)
+ t.itemOffset.Store(newDeleted)
t.releaseFilesBefore(t.tailId, true)
// Retrieve the new size and update the total size counter
@@ -654,7 +651,7 @@ func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
// it will return error.
func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
// Apply the table-offset
- from = from - t.itemOffset
+ from = from - t.itemOffset.Load()
// For reading N items, we need N+1 indices.
buffer := make([]byte, (count+1)*indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@@ -744,8 +741,8 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return nil, nil, errClosed
}
var (
- items = atomic.LoadUint64(&t.items) // the total items(head + 1)
- hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
+ items = t.items.Load() // the total items(head + 1)
+ hidden = t.itemHidden.Load() // the number of hidden items
)
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
@@ -826,13 +823,16 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
break
}
}
+
+ // Update metrics.
+ t.readMeter.Mark(int64(totalSize))
return output[:outputSize], sizes, nil
}
// has returns an indicator whether the specified number data is still accessible
// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
- return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
+ return t.items.Load() > number && t.itemHidden.Load() <= number
}
// size returns the total data size in the freezer table.
@@ -922,7 +922,7 @@ func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
return
}
fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", meta.Version,
- atomic.LoadUint64(&t.items), atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
+ t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load())
buf := make([]byte, indexEntrySize)
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 6181d4d72..5c4cc40ed 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -24,7 +24,6 @@ import (
"os"
"path/filepath"
"reflect"
- "sync/atomic"
"testing"
"testing/quick"
@@ -191,7 +190,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
writeChunks(t, f, 255, 15)
// The last item should be there
- if _, err = f.Retrieve(f.items - 1); err != nil {
+ if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
t.Fatal(err)
}
f.Close()
@@ -317,7 +316,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
writeChunks(t, f, 9, 15)
// The last item should be there
- if _, err = f.Retrieve(f.items - 1); err != nil {
+ if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
f.Close()
t.Fatal(err)
}
@@ -350,8 +349,8 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
t.Fatal(err)
}
defer f.Close()
- if f.items != 7 {
- t.Fatalf("expected %d items, got %d", 7, f.items)
+ if f.items.Load() != 7 {
+ t.Fatalf("expected %d items, got %d", 7, f.items.Load())
}
if err := assertFileSize(fileToCrop, 15); err != nil {
t.Fatal(err)
@@ -374,7 +373,7 @@ func TestFreezerTruncate(t *testing.T) {
writeChunks(t, f, 30, 15)
// The last item should be there
- if _, err = f.Retrieve(f.items - 1); err != nil {
+ if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
t.Fatal(err)
}
f.Close()
@@ -388,8 +387,8 @@ func TestFreezerTruncate(t *testing.T) {
}
defer f.Close()
f.truncateHead(10) // 150 bytes
- if f.items != 10 {
- t.Fatalf("expected %d items, got %d", 10, f.items)
+ if f.items.Load() != 10 {
+ t.Fatalf("expected %d items, got %d", 10, f.items.Load())
}
// 45, 45, 45, 15 -- bytes should be 15
if f.headBytes != 15 {
@@ -444,9 +443,9 @@ func TestFreezerRepairFirstFile(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if f.items != 1 {
+ if f.items.Load() != 1 {
f.Close()
- t.Fatalf("expected %d items, got %d", 0, f.items)
+ t.Fatalf("expected %d items, got %d", 0, f.items.Load())
}
// Write 40 bytes
@@ -483,7 +482,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
writeChunks(t, f, 30, 15)
// The last item should be there
- if _, err = f.Retrieve(f.items - 1); err != nil {
+ if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
t.Fatal(err)
}
f.Close()
@@ -495,9 +494,9 @@ func TestFreezerReadAndTruncate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if f.items != 30 {
+ if f.items.Load() != 30 {
f.Close()
- t.Fatalf("expected %d items, got %d", 0, f.items)
+ t.Fatalf("expected %d items, got %d", 0, f.items.Load())
}
for y := byte(0); y < 30; y++ {
f.Retrieve(uint64(y))
@@ -1210,13 +1209,13 @@ func runRandTest(rt randTest) bool {
rt[i].err = fmt.Errorf("failed to reload table %v", err)
}
case opCheckAll:
- tail := atomic.LoadUint64(&f.itemHidden)
- head := atomic.LoadUint64(&f.items)
+ tail := f.itemHidden.Load()
+ head := f.items.Load()
if tail == head {
continue
}
- got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
+ got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
if err != nil {
rt[i].err = err
} else {
@@ -1238,7 +1237,7 @@ func runRandTest(rt randTest) bool {
if len(step.items) == 0 {
continue
}
- tail := atomic.LoadUint64(&f.itemHidden)
+ tail := f.itemHidden.Load()
for i := 0; i < len(step.items); i++ {
blobs = append(blobs, values[step.items[i]-tail])
}
@@ -1254,7 +1253,7 @@ func runRandTest(rt randTest) bool {
case opTruncateHead:
f.truncateHead(step.target)
- length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
+ length := f.items.Load() - f.itemHidden.Load()
values = values[:length]
case opTruncateHeadAll:
@@ -1262,10 +1261,10 @@ func runRandTest(rt randTest) bool {
values = nil
case opTruncateTail:
- prev := atomic.LoadUint64(&f.itemHidden)
+ prev := f.itemHidden.Load()
f.truncateTail(step.target)
- truncated := atomic.LoadUint64(&f.itemHidden) - prev
+ truncated := f.itemHidden.Load() - prev
values = values[truncated:]
case opTruncateTailAll:
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index 5998ffe4d..c38c9b1a8 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -273,10 +273,10 @@ func TestFreezerReadonlyValidate(t *testing.T) {
bBatch := f.tables["b"].newBatch()
require.NoError(t, bBatch.AppendRaw(0, item))
require.NoError(t, bBatch.commit())
- if f.tables["a"].items != 3 {
+ if f.tables["a"].items.Load() != 3 {
t.Fatalf("unexpected number of items in table")
}
- if f.tables["b"].items != 1 {
+ if f.tables["b"].items.Load() != 1 {
t.Fatalf("unexpected number of items in table")
}
require.NoError(t, f.Close())
diff --git a/core/state/database.go b/core/state/database.go
index d3c36c10a..82f620b46 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -68,36 +68,36 @@ type Trie interface {
// TODO(fjl): remove this when StateTrie is removed
GetKey([]byte) []byte
- // TryGet returns the value for key stored in the trie. The value bytes must
- // not be modified by the caller. If a node was not found in the database, a
- // trie.MissingNodeError is returned.
- TryGet(key []byte) ([]byte, error)
+ // GetStorage returns the value for key stored in the trie. The value bytes
+ // must not be modified by the caller. If a node was not found in the database,
+ // a trie.MissingNodeError is returned.
+ GetStorage(addr common.Address, key []byte) ([]byte, error)
- // TryGetAccount abstracts an account read from the trie. It retrieves the
+ // GetAccount abstracts an account read from the trie. It retrieves the
// account blob from the trie with provided account address and decodes it
// with associated decoding algorithm. If the specified account is not in
// the trie, nil will be returned. If the trie is corrupted(e.g. some nodes
// are missing or the account blob is incorrect for decoding), an error will
// be returned.
- TryGetAccount(address common.Address) (*types.StateAccount, error)
+ GetAccount(address common.Address) (*types.StateAccount, error)
- // TryUpdate associates key with value in the trie. If value has length zero, any
- // existing value is deleted from the trie. The value bytes must not be modified
+ // UpdateStorage associates key with value in the trie. If value has length zero,
+ // any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
- TryUpdate(key, value []byte) error
+ UpdateStorage(addr common.Address, key, value []byte) error
- // TryUpdateAccount abstracts an account write to the trie. It encodes the
+ // UpdateAccount abstracts an account write to the trie. It encodes the
// provided account object with associated algorithm and then updates it
// in the trie with provided address.
- TryUpdateAccount(address common.Address, account *types.StateAccount) error
+ UpdateAccount(address common.Address, account *types.StateAccount) error
- // TryDelete removes any existing value for key from the trie. If a node was not
- // found in the database, a trie.MissingNodeError is returned.
- TryDelete(key []byte) error
+ // DeleteStorage removes any existing value for key from the trie. If a node
+ // was not found in the database, a trie.MissingNodeError is returned.
+ DeleteStorage(addr common.Address, key []byte) error
- // TryDeleteAccount abstracts an account deletion from the trie.
- TryDeleteAccount(address common.Address) error
+ // DeleteAccount abstracts an account deletion from the trie.
+ DeleteAccount(address common.Address) error
// Hash returns the root hash of the trie. It does not write to the database and
// can be used even if the trie doesn't have one.
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index ed7cb963a..0e583e626 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -371,7 +371,7 @@ func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash
}
t := trie.NewStackTrieWithOwner(nodeWriter, owner)
for leaf := range in {
- t.TryUpdate(leaf.key[:], leaf.value)
+ t.Update(leaf.key[:], leaf.value)
}
var root common.Hash
if db == nil {
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index f916a020e..4701acccd 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -103,7 +103,7 @@ type diffLayer struct {
memory uint64 // Approximate guess as to how much memory we use
root common.Hash // Root hash to which this snapshot diff belongs to
- stale uint32 // Signals that the layer became stale (state progressed)
+ stale atomic.Bool // Signals that the layer became stale (state progressed)
// destructSet is a very special helper marker. If an account is marked as
// deleted, then it's recorded in this set. However it's allowed that an account
@@ -267,7 +267,7 @@ func (dl *diffLayer) Parent() snapshot {
// Stale return whether this layer has become stale (was flattened across) or if
// it's still live.
func (dl *diffLayer) Stale() bool {
- return atomic.LoadUint32(&dl.stale) != 0
+ return dl.stale.Load()
}
// Account directly retrieves the account associated with a particular hash in
@@ -449,7 +449,7 @@ func (dl *diffLayer) flatten() snapshot {
// Before actually writing all our data to the parent, first ensure that the
// parent hasn't been 'corrupted' by someone else already flattening into it
- if atomic.SwapUint32(&parent.stale, 1) != 0 {
+ if parent.stale.Swap(true) {
panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
}
// Overwrite all the updated accounts blindly, merge the sorted list
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index d46705d31..68c2f574b 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -230,7 +230,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
if origin == nil && !diskMore {
stackTr := trie.NewStackTrie(nil)
for i, key := range keys {
- stackTr.TryUpdate(key, vals[i])
+ stackTr.Update(key, vals[i])
}
if gotRoot := stackTr.Hash(); gotRoot != root {
return &proofResult{
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 1bac4fd56..546132e7d 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -161,7 +161,7 @@ func newHelper() *testHelper {
func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
val, _ := rlp.EncodeToBytes(acc)
- t.accTrie.Update([]byte(acckey), val)
+ t.accTrie.MustUpdate([]byte(acckey), val)
}
func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
@@ -186,7 +186,7 @@ func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string
id := trie.StorageTrieID(stateRoot, owner, common.Hash{})
stTrie, _ := trie.NewStateTrie(id, t.triedb)
for i, k := range keys {
- stTrie.Update([]byte(k), []byte(vals[i]))
+ stTrie.MustUpdate([]byte(k), []byte(vals[i]))
}
if !commit {
return stTrie.Hash().Bytes()
@@ -491,7 +491,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
)
acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
// Identical in the snap
key := hashData([]byte("acc-1"))
@@ -562,7 +562,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
)
acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
// Identical in the snap
key := hashData([]byte("acc-1"))
@@ -613,8 +613,8 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
{
acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
- helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val)
+ helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
+ helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val)
rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val)
rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val)
@@ -650,7 +650,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
{
acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+ helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val)
junk := make([]byte, 100)
copy(junk, []byte{0xde, 0xad})
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 0f3fa2c7a..2e57a059d 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"sync"
- "sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -272,7 +271,7 @@ func (t *Tree) Disable() {
case *diffLayer:
// If the layer is a simple diff, simply mark as stale
layer.lock.Lock()
- atomic.StoreUint32(&layer.stale, 1)
+ layer.stale.Store(true)
layer.lock.Unlock()
default:
@@ -726,7 +725,7 @@ func (t *Tree) Rebuild(root common.Hash) {
case *diffLayer:
// If the layer is a simple diff, simply mark as stale
layer.lock.Lock()
- atomic.StoreUint32(&layer.stale, 1)
+ layer.stale.Store(true)
layer.lock.Unlock()
default:
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 6893f6001..249da10aa 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -118,7 +118,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil {
t.Fatalf("failed to merge diff layer onto disk: %v", err)
}
- // Since the base layer was modified, ensure that data retrieval on the external reference fail
+ // Since the base layer was modified, ensure that data retrievals on the external reference fail
if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale {
t.Errorf("stale reference returned account: %#x (err: %v)", acc, err)
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 7e34cba44..95fe6f52f 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -201,7 +201,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
s.db.setError(err)
return common.Hash{}
}
- enc, err = tr.TryGet(key.Bytes())
+ enc, err = tr.GetStorage(s.address, key.Bytes())
if metrics.EnabledExpensive {
s.db.StorageReads += time.Since(start)
}
@@ -253,7 +253,7 @@ func (s *stateObject) finalise(prefetch bool) {
}
}
if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash {
- s.db.prefetcher.prefetch(s.addrHash, s.data.Root, slotsToPrefetch)
+ s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch)
}
if len(s.dirtyStorage) > 0 {
s.dirtyStorage = make(Storage)
@@ -294,7 +294,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
var v []byte
if (value == common.Hash{}) {
- if err := tr.TryDelete(key[:]); err != nil {
+ if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
return nil, err
}
@@ -302,7 +302,7 @@ func (s *stateObject) updateTrie(db Database) (Trie, error) {
} else {
// Encoding []byte cannot fail, ok to ignore the error.
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
- if err := tr.TryUpdate(key[:], v); err != nil {
+ if err := tr.UpdateStorage(s.address, key[:], v); err != nil {
s.db.setError(err)
return nil, err
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 3aa6f2d3b..2953317e6 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -521,7 +521,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
}
// Encode the account and update the account trie
addr := obj.Address()
- if err := s.trie.TryUpdateAccount(addr, &obj.data); err != nil {
+ if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
@@ -542,7 +542,7 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
}
// Delete the account from the trie
addr := obj.Address()
- if err := s.trie.TryDeleteAccount(addr); err != nil {
+ if err := s.trie.DeleteAccount(addr); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
}
}
@@ -596,7 +596,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if data == nil {
start := time.Now()
var err error
- data, err = s.trie.TryGetAccount(addr)
+ data, err = s.trie.GetAccount(addr)
if metrics.EnabledExpensive {
s.AccountReads += time.Since(start)
}
@@ -880,7 +880,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
}
if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- s.prefetcher.prefetch(common.Hash{}, s.originalRoot, addressesToPrefetch)
+ s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch)
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index aff91268a..090d55e47 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -213,14 +213,14 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
for i, node := range nodeElements {
if bypath {
if len(node.syncPath) == 1 {
- data, _, err := srcTrie.TryGetNode(node.syncPath[0])
+ data, _, err := srcTrie.GetNode(node.syncPath[0])
if err != nil {
t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[0], err)
}
nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
} else {
var acc types.StateAccount
- if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
+ if err := rlp.DecodeBytes(srcTrie.MustGet(node.syncPath[0]), &acc); err != nil {
t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
}
id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
@@ -228,7 +228,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if err != nil {
t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
}
- data, _, err := stTrie.TryGetNode(node.syncPath[1])
+ data, _, err := stTrie.GetNode(node.syncPath[1])
if err != nil {
t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[1], err)
}
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index f142c86bb..844f72fc1 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -141,7 +141,7 @@ func (p *triePrefetcher) copy() *triePrefetcher {
}
// prefetch schedules a batch of trie items to prefetch.
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) {
+func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) {
// If the prefetcher is an inactive one, bail out
if p.fetches != nil {
return
@@ -150,7 +150,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]
id := p.trieID(owner, root)
fetcher := p.fetchers[id]
if fetcher == nil {
- fetcher = newSubfetcher(p.db, p.root, owner, root)
+ fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
p.fetchers[id] = fetcher
}
fetcher.schedule(keys)
@@ -205,11 +205,12 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
// main prefetcher is paused and either all requested items are processed or if
// the trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
- db Database // Database to load trie nodes through
- state common.Hash // Root hash of the state to prefetch
- owner common.Hash // Owner of the trie, usually account hash
- root common.Hash // Root hash of the trie to prefetch
- trie Trie // Trie being populated with nodes
+ db Database // Database to load trie nodes through
+ state common.Hash // Root hash of the state to prefetch
+ owner common.Hash // Owner of the trie, usually account hash
+ root common.Hash // Root hash of the trie to prefetch
+ addr common.Address // Address of the account that the trie belongs to
+ trie Trie // Trie being populated with nodes
tasks [][]byte // Items queued up for retrieval
lock sync.Mutex // Lock protecting the task queue
@@ -226,12 +227,13 @@ type subfetcher struct {
// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
-func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher {
+func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash, addr common.Address) *subfetcher {
sf := &subfetcher{
db: db,
state: state,
owner: owner,
root: root,
+ addr: addr,
wake: make(chan struct{}, 1),
stop: make(chan struct{}),
term: make(chan struct{}),
@@ -336,7 +338,11 @@ func (sf *subfetcher) loop() {
if _, ok := sf.seen[string(task)]; ok {
sf.dups++
} else {
- sf.trie.TryGet(task)
+ if len(task) == common.AddressLength {
+ sf.trie.GetAccount(common.BytesToAddress(task))
+ } else {
+ sf.trie.GetStorage(sf.addr, task)
+ }
sf.seen[string(task)] = struct{}{}
}
}
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index cb0b67d7e..501bb7084 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -47,19 +47,19 @@ func TestCopyAndClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
- prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
- prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
time.Sleep(1 * time.Second)
a := prefetcher.trie(common.Hash{}, db.originalRoot)
- prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
b := prefetcher.trie(common.Hash{}, db.originalRoot)
cpy := prefetcher.copy()
- cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
- cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
+ cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
c := cpy.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
cpy2 := cpy.copy()
- cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
d := cpy2.trie(common.Hash{}, db.originalRoot)
cpy.close()
cpy2.close()
@@ -72,7 +72,7 @@ func TestUseAfterClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
- prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
a := prefetcher.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
b := prefetcher.trie(common.Hash{}, db.originalRoot)
@@ -88,7 +88,7 @@ func TestCopyClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
- prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()})
cpy := prefetcher.copy()
a := prefetcher.trie(common.Hash{}, db.originalRoot)
b := cpy.trie(common.Hash{}, db.originalRoot)
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index c258eee4f..721f4056b 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -47,7 +47,7 @@ func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine conse
// Prefetch processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb, but any changes are discarded. The
// only goal is to pre-cache transaction signatures and state trie nodes.
-func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) {
+func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool) {
var (
header = block.Header()
gaspool = new(GasPool).AddGas(block.GasLimit())
@@ -59,7 +59,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
byzantium := p.config.IsByzantium(block.Number())
for i, tx := range block.Transactions() {
// If block precaching was interrupted, abort
- if interrupt != nil && atomic.LoadUint32(interrupt) == 1 {
+ if interrupt != nil && interrupt.Load() {
return
}
// Convert the transaction into an executable message and pre-cache its sender
diff --git a/core/state_transition.go b/core/state_transition.go
index 1859ff4a3..df30ee223 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -136,7 +136,7 @@ type Message struct {
Data []byte
AccessList types.AccessList
- // When SkipAccountCheckss is true, the message nonce is not checked against the
+ // When SkipAccountChecks is true, the message nonce is not checked against the
// account nonce in state. It also disables checking that the sender is an EOA.
// This field will be set to true for operations like RPC eth_call.
SkipAccountChecks bool
@@ -332,10 +332,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
return nil, err
}
- if st.evm.Config.Debug {
- st.evm.Config.Tracer.CaptureTxStart(st.initialGas)
+ if tracer := st.evm.Config.Tracer; tracer != nil {
+ tracer.CaptureTxStart(st.initialGas)
defer func() {
- st.evm.Config.Tracer.CaptureTxEnd(st.gasRemaining)
+ tracer.CaptureTxEnd(st.gasRemaining)
}()
}
diff --git a/core/txpool/list.go b/core/txpool/list.go
index 639d69bcb..fae7c2fca 100644
--- a/core/txpool/list.go
+++ b/core/txpool/list.go
@@ -270,10 +270,10 @@ func newList(strict bool) *list {
}
}
-// Overlaps returns whether the transaction specified has the same nonce as one
-// already contained within the list.
-func (l *list) Overlaps(tx *types.Transaction) bool {
- return l.txs.Get(tx.Nonce()) != nil
+// Contains returns whether the list contains a transaction
+// with the provided nonce.
+func (l *list) Contains(nonce uint64) bool {
+ return l.txs.Get(nonce) != nil
}
// Add tries to insert a new transaction into the list, returning whether the
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index ac4486c6c..9eb19b009 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -23,6 +23,7 @@ import (
"math/big"
"sort"
"sync"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -250,14 +251,14 @@ type TxPool struct {
signer types.Signer
mu sync.RWMutex
- istanbul bool // Fork indicator whether we are in the istanbul stage.
- eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions.
- eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions.
- shanghai bool // Fork indicator whether we are in the Shanghai stage.
+ istanbul atomic.Bool // Fork indicator whether we are in the istanbul stage.
+ eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions.
+ eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions.
+ shanghai atomic.Bool // Fork indicator whether we are in the Shanghai stage.
currentState *state.StateDB // Current state in the blockchain head
pendingNonces *noncer // Pending state tracking virtual nonces
- currentMaxGas uint64 // Current gas limit for transaction caps
+ currentMaxGas atomic.Uint64 // Current gas limit for transaction caps
locals *accountSet // Set of local transaction to exempt from eviction rules
journal *journal // Journal of local transaction to back up to disk
@@ -592,15 +593,17 @@ func (pool *TxPool) local() map[common.Address]types.Transactions {
return txs
}
-// validateTx checks whether a transaction is valid according to the consensus
-// rules and adheres to some heuristic limits of the local node (price and size).
-func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
+// validateTxBasics checks whether a transaction is valid according to the consensus
+// rules, but does not check state-dependent validation such as sufficient balance.
+// This check is meant as an early check which only needs to be performed once,
+// and does not require the pool mutex to be held.
+func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error {
// Accept only legacy transactions until EIP-2718/2930 activates.
- if !pool.eip2718 && tx.Type() != types.LegacyTxType {
+ if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType {
return core.ErrTxTypeNotSupported
}
// Reject dynamic fee transactions until EIP-1559 activates.
- if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType {
+ if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType {
return core.ErrTxTypeNotSupported
}
// Reject transactions over defined size to prevent DOS attacks
@@ -608,7 +611,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrOversizedData
}
// Check whether the init code size has been exceeded.
- if pool.shanghai && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
+ if pool.shanghai.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
return fmt.Errorf("%w: code size %v limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
}
// Transactions can't be negative. This may never happen using RLP decoded
@@ -617,7 +620,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrNegativeValue
}
// Ensure the transaction doesn't exceed the current block limit gas.
- if pool.currentMaxGas < tx.Gas() {
+ if pool.currentMaxGas.Load() < tx.Gas() {
return ErrGasLimit
}
// Sanity check for extremely large numbers
@@ -632,14 +635,29 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return core.ErrTipAboveFeeCap
}
// Make sure the transaction is signed properly.
- from, err := types.Sender(pool.signer, tx)
- if err != nil {
+ if _, err := types.Sender(pool.signer, tx); err != nil {
return ErrInvalidSender
}
// Drop non-local transactions under our own minimal accepted gas price or tip
if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
return ErrUnderpriced
}
+ // Ensure the transaction has more gas than the basic tx fee.
+ intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul.Load(), pool.shanghai.Load())
+ if err != nil {
+ return err
+ }
+ if tx.Gas() < intrGas {
+ return core.ErrIntrinsicGas
+ }
+ return nil
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
+ // Signature has been checked already, this cannot error.
+ from, _ := types.Sender(pool.signer, tx)
// Ensure the transaction adheres to nonce ordering
if pool.currentState.GetNonce(from) > tx.Nonce() {
return core.ErrNonceTooLow
@@ -664,15 +682,6 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
return ErrOverdraft
}
}
-
- // Ensure the transaction has more gas than the basic tx fee.
- intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai)
- if err != nil {
- return err
- }
- if tx.Gas() < intrGas {
- return core.ErrIntrinsicGas
- }
return nil
}
@@ -736,11 +745,11 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}
// If the new transaction is a future transaction it should never churn pending transactions
- if !isLocal && pool.isFuture(from, tx) {
+ if !isLocal && pool.isGapped(from, tx) {
var replacesPending bool
for _, dropTx := range drop {
dropSender, _ := types.Sender(pool.signer, dropTx)
- if list := pool.pending[dropSender]; list != nil && list.Overlaps(dropTx) {
+ if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
replacesPending = true
break
}
@@ -765,7 +774,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}
// Try to replace an existing transaction in the pending pool
- if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
+ if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
// Nonce already pending, check if required price bump is met
inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
@@ -808,18 +817,26 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
return replaced, nil
}
-// isFuture reports whether the given transaction is immediately executable.
-func (pool *TxPool) isFuture(from common.Address, tx *types.Transaction) bool {
- list := pool.pending[from]
- if list == nil {
- return pool.pendingNonces.get(from) != tx.Nonce()
+// isGapped reports whether the given transaction has a nonce gap which cannot be filled by already-queued transactions.
+func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool {
+ // Short circuit if transaction matches pending nonce and can be promoted
+ // to pending list as an executable transaction.
+ next := pool.pendingNonces.get(from)
+ if tx.Nonce() == next {
+ return false
}
- // Sender has pending transactions.
- if old := list.txs.Get(tx.Nonce()); old != nil {
- return false // It replaces a pending transaction.
+ // The transaction has a nonce gap with pending list, it's only considered
+ // as executable if transactions in queue can fill up the nonce gap.
+ queue, ok := pool.queue[from]
+ if !ok {
+ return true
}
- // Not replacing, check if parent nonce exists in pending.
- return list.txs.Get(tx.Nonce()-1) == nil
+ for nonce := next; nonce < tx.Nonce(); nonce++ {
+ if !queue.Contains(nonce) {
+ return true // txs in queue can't fill up the nonce gap
+ }
+ }
+ return false
}
// enqueueTx inserts a new transaction into the non-executable transaction queue.
@@ -969,12 +986,12 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
knownTxMeter.Mark(1)
continue
}
- // Exclude transactions with invalid signatures as soon as
- // possible and cache senders in transactions before
- // obtaining lock
- _, err := types.Sender(pool.signer, tx)
- if err != nil {
- errs[i] = ErrInvalidSender
+ // Exclude transactions with basic errors, e.g. invalid signatures and
+ // insufficient intrinsic gas as soon as possible and cache senders
+ // in transactions before obtaining lock
+
+ if err := pool.validateTxBasics(tx, local); err != nil {
+ errs[i] = err
invalidTxMeter.Mark(1)
continue
}
@@ -1364,7 +1381,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
}
pool.currentState = statedb
pool.pendingNonces = newNoncer(statedb)
- pool.currentMaxGas = newHead.GasLimit
+ pool.currentMaxGas.Store(newHead.GasLimit)
// Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject))
@@ -1373,10 +1390,10 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
// Update all fork indicator by next pending block number.
next := new(big.Int).Add(newHead.Number, big.NewInt(1))
- pool.istanbul = pool.chainconfig.IsIstanbul(next)
- pool.eip2718 = pool.chainconfig.IsBerlin(next)
- pool.eip1559 = pool.chainconfig.IsLondon(next)
- pool.shanghai = pool.chainconfig.IsShanghai(uint64(time.Now().Unix()))
+ pool.istanbul.Store(pool.chainconfig.IsIstanbul(next))
+ pool.eip2718.Store(pool.chainconfig.IsBerlin(next))
+ pool.eip1559.Store(pool.chainconfig.IsLondon(next))
+ pool.shanghai.Store(pool.chainconfig.IsShanghai(uint64(time.Now().Unix())))
}
// promoteExecutables moves transactions that have become processable from the
@@ -1400,7 +1417,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
}
log.Trace("Removed old queued transactions", "count", len(forwards))
// Drop all transactions that are too costly (low balance or out of gas)
- drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
+ drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
for _, tx := range drops {
hash := tx.Hash()
pool.all.Remove(hash)
@@ -1597,7 +1614,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Removed old pending transaction", "hash", hash)
}
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
- drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
+ drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
for _, tx := range drops {
hash := tx.Hash()
log.Trace("Removed unpayable pending transaction", "hash", hash)
diff --git a/core/txpool/txpool2_test.go b/core/txpool/txpool2_test.go
index 6d84975d8..7e2a9eb90 100644
--- a/core/txpool/txpool2_test.go
+++ b/core/txpool/txpool2_test.go
@@ -42,7 +42,7 @@ func count(t *testing.T, pool *TxPool) (pending int, queued int) {
return pending, queued
}
-func fillPool(t *testing.T, pool *TxPool) {
+func fillPool(t testing.TB, pool *TxPool) {
t.Helper()
// Create a number of test accounts, fund them and make transactions
executableTxs := types.Transactions{}
@@ -189,7 +189,7 @@ func TestTransactionZAttack(t *testing.T) {
key, _ := crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
for j := 0; j < int(pool.config.GlobalSlots); j++ {
- overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 60000000000, 21000, big.NewInt(500), key))
+ overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key))
}
}
pool.AddRemotesSync(overDraftTxs)
@@ -210,3 +210,27 @@ func TestTransactionZAttack(t *testing.T) {
newIvPending, ivPending, pool.config.GlobalSlots, newQueued)
}
}
+
+func BenchmarkFutureAttack(b *testing.B) {
+ // Create the pool to test the limit enforcement with
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
+ config := testTxPoolConfig
+ config.GlobalQueue = 100
+ config.GlobalSlots = 100
+ pool := NewTxPool(config, eip1559Config, blockchain)
+ defer pool.Stop()
+ fillPool(b, pool)
+
+ key, _ := crypto.GenerateKey()
+ pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000))
+ futureTxs := types.Transactions{}
+
+ for n := 0; n < b.N; n++ {
+ futureTxs = append(futureTxs, pricedTransaction(1000+uint64(n), 100000, big.NewInt(500), key))
+ }
+ b.ResetTimer()
+ for i := 0; i < 5; i++ {
+ pool.AddRemotesSync(futureTxs)
+ }
+}
diff --git a/core/txpool/txpool_test.go b/core/txpool/txpool_test.go
index 7771c5f7c..a4889fa62 100644
--- a/core/txpool/txpool_test.go
+++ b/core/txpool/txpool_test.go
@@ -293,28 +293,29 @@ func TestInvalidTransactions(t *testing.T) {
tx := transaction(0, 100, key)
from, _ := deriveSender(tx)
+ // Intrinsic gas too low
testAddBalance(pool, from, big.NewInt(1))
- if err := pool.AddRemote(tx); !errors.Is(err, core.ErrInsufficientFunds) {
- t.Error("expected", core.ErrInsufficientFunds)
+ if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
}
- balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice()))
- testAddBalance(pool, from, balance)
- if err := pool.AddRemote(tx); !errors.Is(err, core.ErrIntrinsicGas) {
- t.Error("expected", core.ErrIntrinsicGas, "got", err)
+ // Insufficient funds
+ tx = transaction(0, 100000, key)
+ if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
}
testSetNonce(pool, from, 1)
testAddBalance(pool, from, big.NewInt(0xffffffffffffff))
tx = transaction(0, 100000, key)
- if err := pool.AddRemote(tx); !errors.Is(err, core.ErrNonceTooLow) {
- t.Error("expected", core.ErrNonceTooLow)
+ if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
}
tx = transaction(1, 100000, key)
pool.gasPrice = big.NewInt(1000)
- if err := pool.AddRemote(tx); err != ErrUnderpriced {
- t.Error("expected", ErrUnderpriced, "got", err)
+ if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
}
if err := pool.AddLocal(tx); err != nil {
t.Error("expected", nil, "got", err)
@@ -1217,22 +1218,22 @@ func TestAllowedTxSize(t *testing.T) {
// All those fields are summed up to at most 213 bytes.
baseSize := uint64(213)
dataSize := txMaxSize - baseSize
-
+ maxGas := pool.currentMaxGas.Load()
// Try adding a transaction with maximal allowed size
- tx := pricedDataTransaction(0, pool.currentMaxGas, big.NewInt(1), key, dataSize)
+ tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize)
if err := pool.addRemoteSync(tx); err != nil {
t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
}
// Try adding a transaction with random allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
t.Fatalf("failed to add transaction of random allowed size: %v", err)
}
// Try adding a transaction of minimal not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, txMaxSize)); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil {
t.Fatalf("expected rejection on slightly oversize transaction")
}
// Try adding a transaction of random not allowed size
- if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
+ if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
t.Fatalf("expected rejection on oversize transaction")
}
// Run some sanity checks on the pool internals
diff --git a/core/types.go b/core/types.go
index 4c5b74a49..36eb0d1de 100644
--- a/core/types.go
+++ b/core/types.go
@@ -17,6 +17,8 @@
package core
import (
+ "sync/atomic"
+
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -39,7 +41,7 @@ type Prefetcher interface {
// Prefetch processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb, but any changes are discarded. The
// only goal is to pre-cache transaction signatures and state trie nodes.
- Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32)
+ Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool)
}
// Processor is an interface for processing blocks using a given initial state.
diff --git a/core/types/block.go b/core/types/block.go
index e2c71abeb..a1a14f5b1 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -85,6 +85,9 @@ type Header struct {
// WithdrawalsHash was added by EIP-4895 and is ignored in legacy headers.
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+ // ExcessDataGas was added by EIP-4844 and is ignored in legacy headers.
+ ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"`
+
/*
TODO (MariusVanDerWijden) Add this field once needed
// Random was added during the merge and contains the BeaconState randomness
diff --git a/core/types/block_test.go b/core/types/block_test.go
index 49197c923..966015eb0 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -232,9 +232,10 @@ func (h *testHasher) Reset() {
h.hasher.Reset()
}
-func (h *testHasher) Update(key, val []byte) {
+func (h *testHasher) Update(key, val []byte) error {
h.hasher.Write(key)
h.hasher.Write(val)
+ return nil
}
func (h *testHasher) Hash() common.Hash {
diff --git a/core/types/hashing.go b/core/types/hashing.go
index 3df75432a..fbdeaf0d0 100644
--- a/core/types/hashing.go
+++ b/core/types/hashing.go
@@ -62,7 +62,7 @@ func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) {
// This is internal, do not use.
type TrieHasher interface {
Reset()
- Update([]byte, []byte)
+ Update([]byte, []byte) error
Hash() common.Hash
}
@@ -83,7 +83,7 @@ func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
return common.CopyBytes(buf.Bytes())
}
-// DeriveSha creates the tree hashes of transactions and receipts in a block header.
+// DeriveSha creates the tree hashes of transactions, receipts, and withdrawals in a block header.
func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash {
hasher.Reset()
@@ -93,6 +93,9 @@ func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash {
// StackTrie requires values to be inserted in increasing hash order, which is not the
// order that `list` provides hashes in. This insertion sequence ensures that the
// order is correct.
+ //
+ // The error returned by hasher is omitted because hasher will produce an incorrect
+ // hash in case any error occurs.
var indexBuf []byte
for i := 1; i < list.Len() && i <= 0x7f; i++ {
indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i))
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index 294a3977d..c5b9f690d 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -219,9 +219,10 @@ func (d *hashToHumanReadable) Reset() {
d.data = make([]byte, 0)
}
-func (d *hashToHumanReadable) Update(i []byte, i2 []byte) {
+func (d *hashToHumanReadable) Update(i []byte, i2 []byte) error {
l := fmt.Sprintf("%x %x\n", i, i2)
d.data = append(d.data, []byte(l)...)
+ return nil
}
func (d *hashToHumanReadable) Hash() common.Hash {
diff --git a/core/vm/analysis.go b/core/vm/analysis.go
index 4aa8cfe70..38af9084a 100644
--- a/core/vm/analysis.go
+++ b/core/vm/analysis.go
@@ -63,7 +63,7 @@ func (bits *bitvec) codeSegment(pos uint64) bool {
// codeBitmap collects data locations in code.
func codeBitmap(code []byte) bitvec {
// The bitmap is 4 bytes longer than necessary, in case the code
- // ends with a PUSH32, the algorithm will push zeroes onto the
+ // ends with a PUSH32, the algorithm will set bits on the
// bitvector outside the bounds of the actual code.
bits := make(bitvec, len(code)/8+1+4)
return codeBitmapInternal(code, bits)
diff --git a/core/vm/evm.go b/core/vm/evm.go
index d78ea0792..01017572d 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -114,8 +114,7 @@ type EVM struct {
// used throughout the execution of the tx.
interpreter *EVMInterpreter
// abort is used to abort the EVM calling operations
- // NOTE: must be set atomically
- abort int32
+ abort atomic.Bool
// callGasTemp holds the gas available for the current call. This is needed because the
// available gas is calculated in gasCall* according to the 63/64 rule and later
// applied in opCall*.
@@ -147,12 +146,12 @@ func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) {
// Cancel cancels any running EVM operation. This may be called concurrently and
// it's safe to be called multiple times.
func (evm *EVM) Cancel() {
- atomic.StoreInt32(&evm.abort, 1)
+ evm.abort.Store(true)
}
// Cancelled returns true if Cancel has been called
func (evm *EVM) Cancelled() bool {
- return atomic.LoadInt32(&evm.abort) == 1
+ return evm.abort.Load()
}
// Interpreter returns the current interpreter
@@ -183,11 +182,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
}
snapshot := evm.StateDB.Snapshot()
p, isPrecompile := evm.precompile(addr)
+ debug := evm.Config.Tracer != nil
if !evm.StateDB.Exist(addr) {
if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
// Calling a non existing account, don't do anything, but ping the tracer
- if evm.Config.Debug {
+ if debug {
if evm.depth == 0 {
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
evm.Config.Tracer.CaptureEnd(ret, 0, nil)
@@ -203,7 +203,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value)
// Capture the tracer start/end events in debug mode
- if evm.Config.Debug {
+ if debug {
if evm.depth == 0 {
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
defer func(startGas uint64) { // Lazy evaluation of the parameters
@@ -273,7 +273,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
var snapshot = evm.StateDB.Snapshot()
// Invoke tracer hooks that signal entering/exiting a call frame
- if evm.Config.Debug {
+ if evm.Config.Tracer != nil {
evm.Config.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value)
defer func(startGas uint64) {
evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
@@ -314,7 +314,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
var snapshot = evm.StateDB.Snapshot()
// Invoke tracer hooks that signal entering/exiting a call frame
- if evm.Config.Debug {
+ if evm.Config.Tracer != nil {
// NOTE: caller must, at all times be a contract. It should never happen
// that caller is something other than a Contract.
parent := caller.(*Contract)
@@ -368,7 +368,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
evm.StateDB.AddBalance(addr, big0)
// Invoke tracer hooks that signal entering/exiting a call frame
- if evm.Config.Debug {
+ if evm.Config.Tracer != nil {
evm.Config.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil)
defer func(startGas uint64) {
evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
@@ -451,7 +451,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
contract := NewContract(caller, AccountRef(address), value, gas)
contract.SetCodeOptionalHash(&address, codeAndHash)
- if evm.Config.Debug {
+ if evm.Config.Tracer != nil {
if evm.depth == 0 {
evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
} else {
@@ -494,7 +494,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
}
}
- if evm.Config.Debug {
+ if evm.Config.Tracer != nil {
if evm.depth == 0 {
evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err)
} else {
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 77b6e02bf..505aef412 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -17,8 +17,6 @@
package vm
import (
- "sync/atomic"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -531,7 +529,7 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
}
func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if atomic.LoadInt32(&interpreter.evm.abort) != 0 {
+ if interpreter.evm.abort.Load() {
return nil, errStopToken
}
pos := scope.Stack.pop()
@@ -543,7 +541,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
}
func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if atomic.LoadInt32(&interpreter.evm.abort) != 0 {
+ if interpreter.evm.abort.Load() {
return nil, errStopToken
}
pos, cond := scope.Stack.pop(), scope.Stack.pop()
@@ -824,9 +822,9 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
interpreter.evm.StateDB.Suicide(scope.Contract.Address())
- if interpreter.evm.Config.Debug {
- interpreter.evm.Config.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
- interpreter.evm.Config.Tracer.CaptureExit([]byte{}, 0, nil)
+ if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
+ tracer.CaptureExit([]byte{}, 0, nil)
}
return nil, errStopToken
}
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 0ab520b90..5b2082bc9 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -25,7 +25,6 @@ import (
// Config are the configuration options for the Interpreter
type Config struct {
- Debug bool // Enables debugging
Tracer EVMLogger // Opcode logger
NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
@@ -143,6 +142,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
gasCopy uint64 // for EVMLogger to log gas remaining before execution
logged bool // deferred EVMLogger should ignore already logged steps
res []byte // result of the opcode execution function
+ debug = in.evm.Config.Tracer != nil
)
// Don't move this deferred function, it's placed before the capturestate-deferred method,
// so that it get's executed _after_: the capturestate needs the stacks before
@@ -152,7 +152,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
}()
contract.Input = input
- if in.evm.Config.Debug {
+ if debug {
defer func() {
if err != nil {
if !logged {
@@ -168,7 +168,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// the execution of one of the operations or until the done flag is set by the
// parent context.
for {
- if in.evm.Config.Debug {
+ if debug {
// Capture pre-execution values for tracing.
logged, pcCopy, gasCopy = false, pc, contract.Gas
}
@@ -213,14 +213,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
return nil, ErrOutOfGas
}
// Do tracing before memory expansion
- if in.evm.Config.Debug {
+ if debug {
in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
logged = true
}
if memorySize > 0 {
mem.Resize(memorySize)
}
- } else if in.evm.Config.Debug {
+ } else if debug {
in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
logged = true
}
diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go
new file mode 100644
index 000000000..0d61b00ed
--- /dev/null
+++ b/core/vm/jump_table_export.go
@@ -0,0 +1,74 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package vm
+
+import (
+ "errors"
+
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// LookupInstructionSet returns the instructionset for the fork configured by
+// the rules.
+func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
+ switch {
+ case rules.IsPrague:
+ return newShanghaiInstructionSet(), errors.New("prague-fork not defined yet")
+ case rules.IsCancun:
+ return newShanghaiInstructionSet(), errors.New("cancun-fork not defined yet")
+ case rules.IsShanghai:
+ return newShanghaiInstructionSet(), nil
+ case rules.IsMerge:
+ return newMergeInstructionSet(), nil
+ case rules.IsLondon:
+ return newLondonInstructionSet(), nil
+ case rules.IsBerlin:
+ return newBerlinInstructionSet(), nil
+ case rules.IsIstanbul:
+ return newIstanbulInstructionSet(), nil
+ case rules.IsConstantinople:
+ return newConstantinopleInstructionSet(), nil
+ case rules.IsByzantium:
+ return newByzantiumInstructionSet(), nil
+ case rules.IsEIP158:
+ return newSpuriousDragonInstructionSet(), nil
+ case rules.IsEIP150:
+ return newTangerineWhistleInstructionSet(), nil
+ case rules.IsHomestead:
+ return newHomesteadInstructionSet(), nil
+ }
+ return newFrontierInstructionSet(), nil
+}
+
+// Stack returns the minimum and maximum stack requirements.
+func (op *operation) Stack() (int, int) {
+ return op.minStack, op.maxStack
+}
+
+// HasCost returns true if the opcode has a cost. Opcodes which do _not_ have
+// a cost assigned are one of two things:
+// - undefined, a.k.a invalid opcodes,
+// - the STOP opcode.
+// This method can thus be used to check if an opcode is "Invalid (or STOP)".
+func (op *operation) HasCost() bool {
+ // Ideally, we'd check this:
+ // return op.execute == opUndefined
+ // However, go-lang does not allow that. So we'll just check some other
+ // 'indicators' that this is an invalid op. Alas, STOP is impossible to
+ // filter out
+ return op.dynamicGas != nil || op.constantGas != 0
+}
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index 9f199eb8f..910491c60 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -204,6 +204,12 @@ const (
LOG4
)
+// 0xb0 range.
+const (
+ TLOAD OpCode = 0xb3
+ TSTORE OpCode = 0xb4
+)
+
// 0xf0 range - closures.
const (
CREATE OpCode = 0xf0
@@ -219,12 +225,6 @@ const (
SELFDESTRUCT OpCode = 0xff
)
-// 0xb0 range.
-const (
- TLOAD OpCode = 0xb3
- TSTORE OpCode = 0xb4
-)
-
// Since the opcodes aren't all in order we can't use a regular slice.
var opCodeToString = map[OpCode]string{
// 0x0 range - arithmetic ops.
@@ -290,9 +290,7 @@ var opCodeToString = map[OpCode]string{
BASEFEE: "BASEFEE",
// 0x50 range - 'storage' and execution.
- POP: "POP",
- //DUP: "DUP",
- //SWAP: "SWAP",
+ POP: "POP",
MLOAD: "MLOAD",
MSTORE: "MSTORE",
MSTORE8: "MSTORE8",
@@ -306,7 +304,7 @@ var opCodeToString = map[OpCode]string{
JUMPDEST: "JUMPDEST",
PUSH0: "PUSH0",
- // 0x60 range - push.
+ // 0x60 range - pushes.
PUSH1: "PUSH1",
PUSH2: "PUSH2",
PUSH3: "PUSH3",
@@ -340,6 +338,7 @@ var opCodeToString = map[OpCode]string{
PUSH31: "PUSH31",
PUSH32: "PUSH32",
+ // 0x80 - dups.
DUP1: "DUP1",
DUP2: "DUP2",
DUP3: "DUP3",
@@ -357,6 +356,7 @@ var opCodeToString = map[OpCode]string{
DUP15: "DUP15",
DUP16: "DUP16",
+ // 0x90 - swaps.
SWAP1: "SWAP1",
SWAP2: "SWAP2",
SWAP3: "SWAP3",
@@ -373,17 +373,19 @@ var opCodeToString = map[OpCode]string{
SWAP14: "SWAP14",
SWAP15: "SWAP15",
SWAP16: "SWAP16",
- LOG0: "LOG0",
- LOG1: "LOG1",
- LOG2: "LOG2",
- LOG3: "LOG3",
- LOG4: "LOG4",
+
+ // 0xa0 range - logging ops.
+ LOG0: "LOG0",
+ LOG1: "LOG1",
+ LOG2: "LOG2",
+ LOG3: "LOG3",
+ LOG4: "LOG4",
// 0xb0 range.
TLOAD: "TLOAD",
TSTORE: "TSTORE",
- // 0xf0 range.
+ // 0xf0 range - closures.
CREATE: "CREATE",
CALL: "CALL",
RETURN: "RETURN",
@@ -473,8 +475,6 @@ var stringToOp = map[string]OpCode{
"GAS": GAS,
"JUMPDEST": JUMPDEST,
"PUSH0": PUSH0,
- "TLOAD": TLOAD,
- "TSTORE": TSTORE,
"PUSH1": PUSH1,
"PUSH2": PUSH2,
"PUSH3": PUSH3,
@@ -544,6 +544,8 @@ var stringToOp = map[string]OpCode{
"LOG2": LOG2,
"LOG3": LOG3,
"LOG4": LOG4,
+ "TLOAD": TLOAD,
+ "TSTORE": TSTORE,
"CREATE": CREATE,
"CREATE2": CREATE2,
"CALL": CALL,
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 56ff5eeab..005ef0c75 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -57,7 +57,6 @@ func setDefaults(cfg *Config) {
DAOForkBlock: new(big.Int),
DAOForkSupport: false,
EIP150Block: new(big.Int),
- EIP150Hash: common.Hash{},
EIP155Block: new(big.Int),
EIP158Block: new(big.Int),
ByzantiumBlock: new(big.Int),
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 607259106..62953a436 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -335,7 +335,6 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
b.Fatal(err)
}
cfg.EVMConfig = vm.Config{
- Debug: true,
Tracer: tracer,
}
}
@@ -511,7 +510,6 @@ func TestEip2929Cases(t *testing.T) {
code, ops)
Execute(code, nil, &Config{
EVMConfig: vm.Config{
- Debug: true,
Tracer: logger.NewMarkdownLogger(nil, os.Stdout),
ExtraEips: []int{2929},
},
@@ -665,7 +663,6 @@ func TestColdAccountAccessCost(t *testing.T) {
tracer := logger.NewStructLogger(nil)
Execute(tc.code, nil, &Config{
EVMConfig: vm.Config{
- Debug: true,
Tracer: tracer,
},
})
@@ -837,7 +834,6 @@ func TestRuntimeJSTracer(t *testing.T) {
GasLimit: 1000000,
State: statedb,
EVMConfig: vm.Config{
- Debug: true,
Tracer: tracer,
}})
if err != nil {
@@ -872,7 +868,6 @@ func TestJSTracerCreateTx(t *testing.T) {
_, _, _, err = Create(code, &Config{
State: statedb,
EVMConfig: vm.Config{
- Debug: true,
Tracer: tracer,
}})
if err != nil {
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 643f6369d..ac160b073 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -327,7 +327,7 @@ func (b *EthAPIBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error)
return b.gpo.SuggestTipCap(ctx)
}
-func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
+func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
return b.gpo.FeeHistory(ctx, blockCount, lastBlock, rewardPercentiles)
}
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 9077f20bf..1fe984247 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -20,7 +20,6 @@ package catalyst
import (
"errors"
"fmt"
- "math/big"
"sync"
"time"
@@ -64,11 +63,6 @@ const (
// attached before starting to issue warnings.
beaconUpdateStartupTimeout = 30 * time.Second
- // beaconUpdateExchangeTimeout is the max time allowed for a beacon client to
- // do a transition config exchange before it's considered offline and the user
- // is warned.
- beaconUpdateExchangeTimeout = 2 * time.Minute
-
// beaconUpdateConsensusTimeout is the max time allowed for a beacon client
// to send a consensus update before it's considered offline and the user is
// warned.
@@ -667,14 +661,13 @@ func (api *ConsensusAPI) heartbeat() {
// attached, so no need to print scary warnings to the user.
time.Sleep(beaconUpdateStartupTimeout)
- var (
- offlineLogged time.Time
- ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
- )
// If the network is not yet merged/merging, don't bother continuing.
- if ttd == nil {
+ if api.eth.BlockChain().Config().TerminalTotalDifficulty == nil {
return
}
+
+ var offlineLogged time.Time
+
for {
// Sleep a bit and retrieve the last known consensus updates
time.Sleep(5 * time.Second)
@@ -698,20 +691,14 @@ func (api *ConsensusAPI) heartbeat() {
offlineLogged = time.Time{}
continue
}
- if time.Since(lastTransitionUpdate) > beaconUpdateExchangeTimeout {
- if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
+
+ if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
+ if lastForkchoiceUpdate.IsZero() && lastNewPayloadUpdate.IsZero() {
if lastTransitionUpdate.IsZero() {
log.Warn("Post-merge network, but no beacon client seen. Please launch one to follow the chain!")
} else {
- log.Warn("Previously seen beacon client is offline. Please ensure it is operational to follow the chain!")
+ log.Warn("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!")
}
- offlineLogged = time.Now()
- }
- continue
- }
- if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
- if lastForkchoiceUpdate.IsZero() && lastNewPayloadUpdate.IsZero() {
- log.Warn("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!")
} else {
log.Warn("Beacon client online, but no consensus updates received in a while. Please fix your beacon client to follow the chain!")
}
@@ -719,62 +706,6 @@ func (api *ConsensusAPI) heartbeat() {
}
continue
}
- if time.Since(lastTransitionUpdate) <= beaconUpdateExchangeTimeout {
- offlineLogged = time.Time{}
- continue
- }
- if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
- // Retrieve the last few blocks and make a rough estimate as
- // to when the merge transition should happen
- var (
- chain = api.eth.BlockChain()
- head = chain.CurrentHeader()
- htd = chain.GetTd(head.Hash(), head.Number.Uint64())
- )
- if htd.Cmp(ttd) >= 0 {
- if lastTransitionUpdate.IsZero() {
- log.Warn("Merge already reached, but no beacon client seen. Please launch one to follow the chain!")
- } else {
- log.Warn("Merge already reached, but previously seen beacon client is offline. Please ensure it is operational to follow the chain!")
- }
- offlineLogged = time.Now()
- continue
- }
- var eta time.Duration
- if head.Number.Uint64() > 0 {
- // Accumulate the last 64 difficulties to estimate the growth
- var (
- deltaDiff uint64
- deltaTime uint64
- current = head
- )
- for i := 0; i < 64; i++ {
- parent := chain.GetHeader(current.ParentHash, current.Number.Uint64()-1)
- if parent == nil {
- break
- }
- deltaDiff += current.Difficulty.Uint64()
- deltaTime += current.Time - parent.Time
- current = parent
- }
- // Estimate an ETA based on the block times and the difficulty growth
- if deltaTime > 0 {
- growth := deltaDiff / deltaTime
- left := new(big.Int).Sub(ttd, htd)
- eta = time.Duration(new(big.Int).Div(left, new(big.Int).SetUint64(growth+1)).Uint64()) * time.Second
- }
- }
- message := "Merge is configured, but previously seen beacon client is offline. Please ensure it is operational before the transition arrives!"
- if lastTransitionUpdate.IsZero() {
- message = "Merge is configured, but no beacon client seen. Please ensure you have one available before the transition arrives!"
- }
- if eta < time.Second {
- log.Warn(message)
- } else {
- log.Warn(message, "eta", common.PrettyAge(time.Now().Add(-eta))) // weird hack, but duration formatted doesn't handle days
- }
- offlineLogged = time.Now()
- }
}
}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index fb6e6935e..f38122200 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -879,15 +879,10 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
genesis, preMergeBlocks := generateMergeChain(100, false)
n, ethservice := startEthService(t, genesis, preMergeBlocks)
defer n.Close()
-
- ethservice.BlockChain().Config().TerminalTotalDifficulty = preMergeBlocks[0].Difficulty() //.Sub(genesis.Config.TerminalTotalDifficulty, preMergeBlocks[len(preMergeBlocks)-1].Difficulty())
-
- var (
- api = NewConsensusAPI(ethservice)
- parent = preMergeBlocks[len(preMergeBlocks)-1]
- )
+ api := NewConsensusAPI(ethservice)
// Test parent already post TTD in FCU
+ parent := preMergeBlocks[len(preMergeBlocks)-2]
fcState := engine.ForkchoiceStateV1{
HeadBlockHash: parent.Hash(),
SafeBlockHash: common.Hash{},
@@ -913,6 +908,28 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
t.Fatalf("error preparing payload, err=%v", err)
}
data := *payload.Resolve().ExecutionPayload
+ // We need to recompute the blockhash, since the miner computes a wrong (correct) blockhash
+ txs, _ := decodeTransactions(data.Transactions)
+ header := &types.Header{
+ ParentHash: data.ParentHash,
+ UncleHash: types.EmptyUncleHash,
+ Coinbase: data.FeeRecipient,
+ Root: data.StateRoot,
+ TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
+ ReceiptHash: data.ReceiptsRoot,
+ Bloom: types.BytesToBloom(data.LogsBloom),
+ Difficulty: common.Big0,
+ Number: new(big.Int).SetUint64(data.Number),
+ GasLimit: data.GasLimit,
+ GasUsed: data.GasUsed,
+ Time: data.Timestamp,
+ BaseFee: data.BaseFeePerGas,
+ Extra: data.ExtraData,
+ MixDigest: data.Random,
+ }
+ block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
+ data.BlockHash = block.Hash()
+ // Send the new payload
resp2, err := api.NewPayloadV1(data)
if err != nil {
t.Fatalf("error sending NewPayload, err=%v", err)
@@ -1240,9 +1257,10 @@ func TestNilWithdrawals(t *testing.T) {
func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
genesis, blocks := generateMergeChain(10, true)
- n, ethservice := startEthService(t, genesis, blocks)
// enable shanghai on the last block
- ethservice.BlockChain().Config().ShanghaiTime = &blocks[len(blocks)-1].Header().Time
+ time := blocks[len(blocks)-1].Header().Time + 1
+ genesis.Config.ShanghaiTime = &time
+ n, ethservice := startEthService(t, genesis, blocks)
var (
parent = ethservice.BlockChain().CurrentBlock()
diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go
index ff985e6b0..df8af68bc 100644
--- a/eth/downloader/beaconsync.go
+++ b/eth/downloader/beaconsync.go
@@ -19,7 +19,6 @@ package downloader
import (
"fmt"
"sync"
- "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -371,7 +370,7 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
continue
}
// If the pivot block is committed, signal header sync termination
- if atomic.LoadInt32(&d.committed) == 1 {
+ if d.committed.Load() {
select {
case d.headerProcCh <- nil:
return nil
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index fb9de7991..a3d8a2106 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -98,7 +98,7 @@ type headerTask struct {
}
type Downloader struct {
- mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
+ mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
mux *event.TypeMux // Event multiplexer to announce sync operation events
checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync)
@@ -122,9 +122,9 @@ type Downloader struct {
// Status
synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
- synchronising int32
- notified int32
- committed int32
+ synchronising atomic.Bool
+ notified atomic.Bool
+ committed atomic.Bool
ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
// Channels
@@ -292,7 +292,7 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
- return atomic.LoadInt32(&d.synchronising) > 0
+ return d.synchronising.Load()
}
// RegisterPeer injects a new download peer into the set of block source to be
@@ -392,13 +392,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
return d.synchroniseMock(id, hash)
}
// Make sure only one goroutine is ever allowed past this point at once
- if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
+ if !d.synchronising.CompareAndSwap(false, true) {
return errBusy
}
- defer atomic.StoreInt32(&d.synchronising, 0)
+ defer d.synchronising.Store(false)
// Post a user notification of the sync (only once per session)
- if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
+ if d.notified.CompareAndSwap(false, true) {
log.Info("Block synchronisation started")
}
if mode == SnapSync {
@@ -435,7 +435,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
defer d.Cancel() // No matter what, we can't leave the cancel channel open
// Atomically set the requested sync mode
- atomic.StoreUint32(&d.mode, uint32(mode))
+ d.mode.Store(uint32(mode))
// Retrieve the origin peer and initiate the downloading process
var p *peerConnection
@@ -452,7 +452,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
}
func (d *Downloader) getMode() SyncMode {
- return SyncMode(atomic.LoadUint32(&d.mode))
+ return SyncMode(d.mode.Load())
}
// syncWithPeer starts a block synchronization based on the hash chain from the
@@ -562,9 +562,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
}
}
- d.committed = 1
+ d.committed.Store(true)
if mode == SnapSync && pivot.Number.Uint64() != 0 {
- d.committed = 0
+ d.committed.Store(false)
}
if mode == SnapSync {
// Set the ancient data limitation. If we are running snap sync, all block
@@ -1128,7 +1128,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
// If no more headers are inbound, notify the content fetchers and return
if len(headers) == 0 {
// Don't abort header fetches while the pivot is downloading
- if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
+ if !d.committed.Load() && pivot <= from {
p.log.Debug("No headers, waiting for pivot commit")
select {
case <-time.After(fsHeaderContCheck):
@@ -1669,7 +1669,7 @@ func (d *Downloader) processSnapSyncContent() error {
results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
}
// Split around the pivot block and process the two sides via snap/full sync
- if atomic.LoadInt32(&d.committed) == 0 {
+ if !d.committed.Load() {
latest := results[len(results)-1].Header
// If the height is above the pivot block by 2 sets, it means the pivot
// become stale in the network and it was garbage collected, move to a
@@ -1794,7 +1794,7 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error {
if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
return err
}
- atomic.StoreInt32(&d.committed, 1)
+ d.committed.Store(true)
return nil
}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index a884c1e95..37f7a7670 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -476,9 +476,10 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
tester.newPeer("peer", protocol, testChainBase.blocks[1:])
// Wrap the importer to allow stepping
- blocked, proceed := uint32(0), make(chan struct{})
+ var blocked atomic.Uint32
+ proceed := make(chan struct{})
tester.downloader.chainInsertHook = func(results []*fetchResult) {
- atomic.StoreUint32(&blocked, uint32(len(results)))
+ blocked.Store(uint32(len(results)))
<-proceed
}
// Start a synchronisation concurrently
@@ -505,7 +506,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
tester.downloader.queue.resultCache.lock.Lock()
{
cached = tester.downloader.queue.resultCache.countCompleted()
- frozen = int(atomic.LoadUint32(&blocked))
+ frozen = int(blocked.Load())
retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
}
tester.downloader.queue.resultCache.lock.Unlock()
@@ -528,8 +529,8 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
}
// Permit the blocked blocks to import
- if atomic.LoadUint32(&blocked) > 0 {
- atomic.StoreUint32(&blocked, uint32(0))
+ if blocked.Load() > 0 {
+ blocked.Store(uint32(0))
proceed <- struct{}{}
}
}
@@ -786,12 +787,12 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
tester.newPeer("peer", protocol, chain.blocks[1:])
// Instrument the downloader to signal body requests
- bodiesHave, receiptsHave := int32(0), int32(0)
+ var bodiesHave, receiptsHave atomic.Int32
tester.downloader.bodyFetchHook = func(headers []*types.Header) {
- atomic.AddInt32(&bodiesHave, int32(len(headers)))
+ bodiesHave.Add(int32(len(headers)))
}
tester.downloader.receiptFetchHook = func(headers []*types.Header) {
- atomic.AddInt32(&receiptsHave, int32(len(headers)))
+ receiptsHave.Add(int32(len(headers)))
}
// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("peer", nil, mode); err != nil {
@@ -811,11 +812,11 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
receiptsNeeded++
}
}
- if int(bodiesHave) != bodiesNeeded {
- t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
+ if int(bodiesHave.Load()) != bodiesNeeded {
+ t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded)
}
- if int(receiptsHave) != receiptsNeeded {
- t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
+ if int(receiptsHave.Load()) != receiptsNeeded {
+ t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded)
}
}
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 5af5068c9..e9907297a 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -61,7 +61,7 @@ type fetchRequest struct {
// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
- pending int32 // Flag telling what deliveries are outstanding
+ pending atomic.Int32 // Flag telling what deliveries are outstanding
Header *types.Header
Uncles []*types.Header
@@ -75,38 +75,38 @@ func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
Header: header,
}
if !header.EmptyBody() {
- item.pending |= (1 << bodyType)
+ item.pending.Store(item.pending.Load() | (1 << bodyType))
} else if header.WithdrawalsHash != nil {
item.Withdrawals = make(types.Withdrawals, 0)
}
if fastSync && !header.EmptyReceipts() {
- item.pending |= (1 << receiptType)
+ item.pending.Store(item.pending.Load() | (1 << receiptType))
}
return item
}
// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
- if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
- atomic.AddInt32(&f.pending, -1)
+ if v := f.pending.Load(); (v & (1 << bodyType)) != 0 {
+ f.pending.Add(-1)
}
}
// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
- return atomic.LoadInt32(&f.pending) == 0
+ return f.pending.Load() == 0
}
// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
- if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
- atomic.AddInt32(&f.pending, -2)
+ if v := f.pending.Load(); (v & (1 << receiptType)) != 0 {
+ f.pending.Add(-2)
}
}
// Done checks if the given type is done already
func (f *fetchResult) Done(kind uint) bool {
- v := atomic.LoadInt32(&f.pending)
+ v := f.pending.Load()
return v&(1<<kind) == 0
}
diff --git a/eth/downloader/resultstore.go b/eth/downloader/resultstore.go
--- a/eth/downloader/resultstore.go
+++ b/eth/downloader/resultstore.go
@@ -146,7 +146,7 @@ func (r *resultStore) countCompleted() int {
- index := atomic.LoadInt32(&r.indexIncomplete)
+ index := r.indexIncomplete.Load()
for ; ; index++ {
if index >= int32(len(r.items)) {
break
@@ -156,7 +156,7 @@ func (r *resultStore) countCompleted() int {
break
}
}
- atomic.StoreInt32(&r.indexIncomplete, index)
+ r.indexIncomplete.Store(index)
return int(index)
}
@@ -179,7 +179,7 @@ func (r *resultStore) GetCompleted(limit int) []*fetchResult {
}
// Advance the expected block number of the first cache entry
r.resultOffset += uint64(limit)
- atomic.AddInt32(&r.indexIncomplete, int32(-limit))
+ r.indexIncomplete.Add(int32(-limit))
return results
}
diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go
index b19494a7b..6a76d78ac 100644
--- a/eth/downloader/skeleton_test.go
+++ b/eth/downloader/skeleton_test.go
@@ -82,8 +82,8 @@ type skeletonTestPeer struct {
serve func(origin uint64) []*types.Header // Hook to allow custom responses
- served uint64 // Number of headers served by this peer
- dropped uint64 // Flag whether the peer was dropped (stop responding)
+ served atomic.Uint64 // Number of headers served by this peer
+ dropped atomic.Uint64 // Flag whether the peer was dropped (stop responding)
}
// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
@@ -113,7 +113,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
// Since skeleton test peer are in-memory mocks, dropping the does not make
// them inaccessible. As such, check a local `dropped` field to see if the
// peer has been dropped and should not respond any more.
- if atomic.LoadUint64(&p.dropped) != 0 {
+ if p.dropped.Load() != 0 {
return nil, errors.New("peer already dropped")
}
// Skeleton sync retrieves batches of headers going backward without gaps.
@@ -161,7 +161,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
}
}
}
- atomic.AddUint64(&p.served, uint64(len(headers)))
+ p.served.Add(uint64(len(headers)))
hashes := make([]common.Hash, len(headers))
for i, header := range headers {
@@ -182,7 +182,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
sink <- res
if err := <-res.Done; err != nil {
log.Warn("Skeleton test peer response rejected", "err", err)
- atomic.AddUint64(&p.dropped, 1)
+ p.dropped.Add(1)
}
}()
return req, nil
@@ -817,7 +817,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
dropped := make(map[string]int)
drop := func(peer string) {
if p := peerset.Peer(peer); p != nil {
- atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
+ p.peer.(*skeletonTestPeer).dropped.Add(1)
}
peerset.Unregister(peer)
dropped[peer]++
@@ -895,14 +895,14 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
if !tt.unpredictable {
var served uint64
for _, peer := range tt.peers {
- served += atomic.LoadUint64(&peer.served)
+ served += peer.served.Load()
}
if served != tt.midserve {
t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
}
var drops uint64
for _, peer := range tt.peers {
- drops += atomic.LoadUint64(&peer.dropped)
+ drops += peer.dropped.Load()
}
if drops != tt.middrop {
t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
@@ -950,20 +950,20 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
if !tt.unpredictable {
served := uint64(0)
for _, peer := range tt.peers {
- served += atomic.LoadUint64(&peer.served)
+ served += peer.served.Load()
}
if tt.newPeer != nil {
- served += atomic.LoadUint64(&tt.newPeer.served)
+ served += tt.newPeer.served.Load()
}
if served != tt.endserve {
t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
}
drops := uint64(0)
for _, peer := range tt.peers {
- drops += atomic.LoadUint64(&peer.dropped)
+ drops += peer.dropped.Load()
}
if tt.newPeer != nil {
- drops += atomic.LoadUint64(&tt.newPeer.dropped)
+ drops += tt.newPeer.dropped.Load()
}
if drops != tt.enddrop {
t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index 47cc31999..82edf9a7f 100644
--- a/eth/gasprice/feehistory.go
+++ b/eth/gasprice/feehistory.go
@@ -142,7 +142,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
// also returned if requested and available.
// Note: an error is only returned if retrieving the head header has failed. If there are no
// retrievable blocks in the specified range then zero block count is returned with no error.
-func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNumber, blocks int) (*types.Block, []*types.Receipt, uint64, int, error) {
+func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNumber, blocks uint64) (*types.Block, []*types.Receipt, uint64, uint64, error) {
var (
headBlock *types.Header
pendingBlock *types.Block
@@ -200,8 +200,8 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
return nil, nil, 0, 0, nil
}
// Ensure not trying to retrieve before genesis.
- if int(reqEnd+1) < blocks {
- blocks = int(reqEnd + 1)
+ if uint64(reqEnd+1) < blocks {
+ blocks = uint64(reqEnd + 1)
}
return pendingBlock, pendingReceipts, uint64(reqEnd), blocks, nil
}
@@ -220,7 +220,7 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
//
// Note: baseFee includes the next block after the newest of the returned range, because this
// value can be derived from the newest block.
-func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
+func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
if blocks < 1 {
return common.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks
}
@@ -249,7 +249,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
if err != nil || blocks == 0 {
return common.Big0, nil, nil, nil, err
}
- oldestBlock := lastBlock + 1 - uint64(blocks)
+ oldestBlock := lastBlock + 1 - blocks
var (
next = oldestBlock
@@ -259,7 +259,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
for i, p := range rewardPercentiles {
binary.LittleEndian.PutUint64(percentileKey[i*8:(i+1)*8], math.Float64bits(p))
}
- for i := 0; i < maxBlockFetchers && i < blocks; i++ {
+ for i := 0; i < maxBlockFetchers && i < int(blocks); i++ {
go func() {
for {
// Retrieve the next block number to fetch with this goroutine
@@ -314,7 +314,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
if fees.err != nil {
return common.Big0, nil, nil, nil, fees.err
}
- i := int(fees.blockNumber - oldestBlock)
+ i := fees.blockNumber - oldestBlock
if fees.results.baseFee != nil {
reward[i], baseFee[i], baseFee[i+1], gasUsedRatio[i] = fees.results.reward, fees.results.baseFee, fees.results.nextBaseFee, fees.results.gasUsedRatio
} else {
diff --git a/eth/gasprice/feehistory_test.go b/eth/gasprice/feehistory_test.go
index b54874d68..1bcfb287a 100644
--- a/eth/gasprice/feehistory_test.go
+++ b/eth/gasprice/feehistory_test.go
@@ -28,8 +28,8 @@ import (
func TestFeeHistory(t *testing.T) {
var cases = []struct {
pending bool
- maxHeader, maxBlock int
- count int
+ maxHeader, maxBlock uint64
+ count uint64
last rpc.BlockNumber
percent []float64
expFirst uint64
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index 604ad5e10..8e98a3409 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -42,8 +42,8 @@ var (
type Config struct {
Blocks int
Percentile int
- MaxHeaderHistory int
- MaxBlockHistory int
+ MaxHeaderHistory uint64
+ MaxBlockHistory uint64
Default *big.Int `toml:",omitempty"`
MaxPrice *big.Int `toml:",omitempty"`
IgnorePrice *big.Int `toml:",omitempty"`
@@ -71,7 +71,7 @@ type Oracle struct {
fetchLock sync.Mutex
checkBlocks, percentile int
- maxHeaderHistory, maxBlockHistory int
+ maxHeaderHistory, maxBlockHistory uint64
historyCache *lru.Cache[cacheKey, processedFees]
}
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index d7c940044..55781ac54 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -418,7 +418,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
if err != nil {
return nil, nil
}
- acc, err := accTrie.TryGetAccountByHash(account)
+ acc, err := accTrie.GetAccountByHash(account)
if err != nil || acc == nil {
return nil, nil
}
@@ -510,7 +510,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
case 1:
// If we're only retrieving an account trie node, fetch it directly
- blob, resolved, err := accTrie.TryGetNode(pathset[0])
+ blob, resolved, err := accTrie.GetNode(pathset[0])
loads += resolved // always account database reads, even for failures
if err != nil {
break
@@ -524,7 +524,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
if snap == nil {
// We don't have the requested state snapshotted yet (or it is stale),
// but can look up the account via the trie instead.
- account, err := accTrie.TryGetAccountByHash(common.BytesToHash(pathset[0]))
+ account, err := accTrie.GetAccountByHash(common.BytesToHash(pathset[0]))
loads += 8 // We don't know the exact cost of lookup, this is an estimate
if err != nil || account == nil {
break
@@ -545,7 +545,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
break
}
for _, path := range pathset[1:] {
- blob, resolved, err := stTrie.TryGetNode(path)
+ blob, resolved, err := stTrie.GetNode(path)
loads += resolved // always account database reads, even for failures
if err != nil {
break
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 0a6117972..6a3b482d5 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -216,7 +216,7 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash,
for _, pathset := range paths {
switch len(pathset) {
case 1:
- blob, _, err := t.accountTrie.TryGetNode(pathset[0])
+ blob, _, err := t.accountTrie.GetNode(pathset[0])
if err != nil {
t.logger.Info("Error handling req", "error", err)
break
@@ -225,7 +225,7 @@ func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash,
default:
account := t.storageTries[(common.BytesToHash(pathset[0]))]
for _, path := range pathset[1:] {
- blob, _, err := account.TryGetNode(path)
+ blob, _, err := account.GetNode(path)
if err != nil {
t.logger.Info("Error handling req", "error", err)
break
@@ -1381,7 +1381,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
})
key := key32(i)
elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
+ accTrie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
@@ -1431,7 +1431,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
CodeHash: getCodeHash(uint64(i)),
})
elem := &kv{boundaries[i].Bytes(), value}
- accTrie.Update(elem.k, elem.v)
+ accTrie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other accounts if required
@@ -1443,7 +1443,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
CodeHash: getCodeHash(i),
})
elem := &kv{key32(i), value}
- accTrie.Update(elem.k, elem.v)
+ accTrie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
@@ -1487,7 +1487,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
CodeHash: codehash,
})
elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
+ accTrie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
storageRoots[common.BytesToHash(key)] = stRoot
@@ -1551,7 +1551,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
CodeHash: codehash,
})
elem := &kv{key, value}
- accTrie.Update(elem.k, elem.v)
+ accTrie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
// we reuse the same one for all accounts
@@ -1599,7 +1599,7 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
key := crypto.Keccak256Hash(slotKey[:])
elem := &kv{key[:], rlpSlotValue}
- trie.Update(elem.k, elem.v)
+ trie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
@@ -1638,7 +1638,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
val := []byte{0xde, 0xad, 0xbe, 0xef}
elem := &kv{key[:], val}
- trie.Update(elem.k, elem.v)
+ trie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other slots if required
@@ -1650,7 +1650,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))
elem := &kv{key[:], rlpSlotValue}
- trie.Update(elem.k, elem.v)
+ trie.MustUpdate(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 54c32449a..b0c1de365 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -805,7 +805,6 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
// Swap out the noop logger to the standard tracer
writer = bufio.NewWriter(dump)
vmConf = vm.Config{
- Debug: true,
Tracer: logger.NewJSONLogger(&logConfig, writer),
EnablePreimageRecording: true,
}
@@ -972,7 +971,7 @@ func (api *API) traceTx(ctx context.Context, message *core.Message, txctx *Conte
}
// end PluGeth injection
}
- vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true})
+ vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer, NoBaseFee: true})
// Define a meaningful timeout of a single transaction trace
if config.Timeout != nil {
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index b1eaf60b1..634302103 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -835,8 +835,8 @@ func TestTraceChain(t *testing.T) {
signer := types.HomesteadSigner{}
var (
- ref uint32 // total refs has made
- rel uint32 // total rels has made
+ ref atomic.Uint32 // total refs has made
+ rel atomic.Uint32 // total rels has made
nonce uint64
)
backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
@@ -849,8 +849,8 @@ func TestTraceChain(t *testing.T) {
nonce += 1
}
})
- backend.refHook = func() { atomic.AddUint32(&ref, 1) }
- backend.relHook = func() { atomic.AddUint32(&rel, 1) }
+ backend.refHook = func() { ref.Add(1) }
+ backend.relHook = func() { rel.Add(1) }
api := NewAPI(backend)
single := `{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}`
@@ -863,7 +863,8 @@ func TestTraceChain(t *testing.T) {
{10, 20, nil}, // the middle chain range, blocks [11, 20]
}
for _, c := range cases {
- ref, rel = 0, 0 // clean up the counters
+ ref.Store(0)
+ rel.Store(0)
from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start))
to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end))
@@ -888,8 +889,9 @@ func TestTraceChain(t *testing.T) {
if next != c.end+1 {
t.Error("Missing tracing block")
}
- if ref != rel {
- t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref, rel)
+
+ if nref, nrel := ref.Load(), rel.Load(); nref != nrel {
+ t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel)
}
}
}
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 62182e3a8..e517d436a 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -144,7 +143,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
msg, err := core.TransactionToMessage(tx, signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -247,7 +246,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
snap := statedb.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
@@ -260,75 +259,121 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
}
}
-// TestZeroValueToNotExitCall tests the calltracer(s) on the following:
-// Tx to A, A calls B with zero value. B does not already exist.
-// Expected: that enter/exit is invoked and the inner call is shown in the result
-func TestZeroValueToNotExitCall(t *testing.T) {
- var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
- privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef")
- if err != nil {
- t.Fatalf("err %v", err)
+func TestInternals(t *testing.T) {
+ var (
+ to = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+ origin = common.HexToAddress("0x00000000000000000000000000000000feed")
+ txContext = vm.TxContext{
+ Origin: origin,
+ GasPrice: big.NewInt(1),
+ }
+ context = vm.BlockContext{
+ CanTransfer: core.CanTransfer,
+ Transfer: core.Transfer,
+ Coinbase: common.Address{},
+ BlockNumber: new(big.Int).SetUint64(8000000),
+ Time: 5,
+ Difficulty: big.NewInt(0x30000),
+ GasLimit: uint64(6000000),
+ }
+ )
+ mkTracer := func(name string, cfg json.RawMessage) tracers.Tracer {
+ tr, err := tracers.DefaultDirectory.New(name, nil, cfg)
+ if err != nil {
+ t.Fatalf("failed to create call tracer: %v", err)
+ }
+ return tr
}
- signer := types.NewEIP155Signer(big.NewInt(1))
- tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{
- GasPrice: big.NewInt(0),
- Gas: 50000,
- To: &to,
- })
- if err != nil {
- t.Fatalf("err %v", err)
- }
- origin, _ := signer.Sender(tx)
- txContext := vm.TxContext{
- Origin: origin,
- GasPrice: big.NewInt(1),
- }
- context := vm.BlockContext{
- CanTransfer: core.CanTransfer,
- Transfer: core.Transfer,
- Coinbase: common.Address{},
- BlockNumber: new(big.Int).SetUint64(8000000),
- Time: 5,
- Difficulty: big.NewInt(0x30000),
- GasLimit: uint64(6000000),
- }
- var code = []byte{
- byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero
- byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS
- byte(vm.CALL),
- }
- var alloc = core.GenesisAlloc{
- to: core.GenesisAccount{
- Nonce: 1,
- Code: code,
+
+ for _, tc := range []struct {
+ name string
+ code []byte
+ tracer tracers.Tracer
+ want string
+ }{
+ {
+ // TestZeroValueToNotExitCall tests the calltracer(s) on the following:
+ // Tx to A, A calls B with zero value. B does not already exist.
+ // Expected: that enter/exit is invoked and the inner call is shown in the result
+ name: "ZeroValueToNotExitCall",
+ code: []byte{
+ byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero
+ byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS
+ byte(vm.CALL),
+ },
+ tracer: mkTracer("callTracer", nil),
+ want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`,
},
- origin: core.GenesisAccount{
- Nonce: 0,
- Balance: big.NewInt(500000000000000),
+ {
+ name: "Stack depletion in LOG0",
+ code: []byte{byte(vm.LOG3)},
+ tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)),
+ want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0xc350","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`,
},
- }
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
- // Create the tracer, the EVM environment and run it
- tracer, err := tracers.DefaultDirectory.New("callTracer", nil, nil)
- if err != nil {
- t.Fatalf("failed to create call tracer: %v", err)
- }
- evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
- msg, err := core.TransactionToMessage(tx, signer, nil)
- if err != nil {
- t.Fatalf("failed to prepare transaction for tracing: %v", err)
- }
- st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
- if _, err = st.TransitionDb(); err != nil {
- t.Fatalf("failed to execute transaction: %v", err)
- }
- // Retrieve the trace result and compare against the etalon
- res, err := tracer.GetResult()
- if err != nil {
- t.Fatalf("failed to retrieve trace result: %v", err)
- }
- wantStr := `{"from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","gas":"0x7148","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0x6cbf","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`
- if string(res) != wantStr {
- t.Fatalf("trace mismatch\n have: %v\n want: %v\n", string(res), wantStr)
+ {
+ name: "Mem expansion in LOG0",
+ code: []byte{
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.MSTORE),
+ byte(vm.PUSH1), 0xff,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.LOG0),
+ },
+ tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)),
+ want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0xc350","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`,
+ },
+ {
+ // Leads to OOM on the prestate tracer
+ name: "Prestate-tracer - mem expansion in CREATE2",
+ code: []byte{
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.MSTORE),
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH5), 0xff, 0xff, 0xff, 0xff, 0xff,
+ byte(vm.PUSH1), 0x1,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.CREATE2),
+ byte(vm.PUSH1), 0xff,
+ byte(vm.PUSH1), 0x0,
+ byte(vm.LOG0),
+ },
+ tracer: mkTracer("prestateTracer", json.RawMessage(`{ "withLog": true }`)),
+ want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52640350"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`,
+ },
+ } {
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
+ core.GenesisAlloc{
+ to: core.GenesisAccount{
+ Code: tc.code,
+ },
+ origin: core.GenesisAccount{
+ Balance: big.NewInt(500000000000000),
+ },
+ }, false)
+ evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer})
+ msg := &core.Message{
+ To: &to,
+ From: origin,
+ Value: big.NewInt(0),
+ GasLimit: 50000,
+ GasPrice: big.NewInt(0),
+ GasFeeCap: big.NewInt(0),
+ GasTipCap: big.NewInt(0),
+ SkipAccountChecks: false,
+ }
+ st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit))
+ if _, err := st.TransitionDb(); err != nil {
+ t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err)
+ }
+ // Retrieve the trace result and compare against the expected
+ res, err := tc.tracer.GetResult()
+ if err != nil {
+ t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err)
+ }
+ if string(res) != tc.want {
+ t.Fatalf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want)
+ }
}
}
diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go
index 8cd5a42bc..16c01de25 100644
--- a/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -107,7 +107,7 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
if err != nil {
return fmt.Errorf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
msg, err := core.TransactionToMessage(tx, signer, nil)
if err != nil {
@@ -124,8 +124,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
if err != nil {
return fmt.Errorf("failed to retrieve trace result: %v", err)
}
- ret := new([]flatCallTrace)
- if err := json.Unmarshal(res, ret); err != nil {
+ ret := make([]flatCallTrace, 0)
+ if err := json.Unmarshal(res, &ret); err != nil {
return fmt.Errorf("failed to unmarshal trace result: %v", err)
}
if !jsonEqualFlat(ret, test.Result) {
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index f578e2f0f..5fc99dd12 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -114,7 +114,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
msg, err := core.TransactionToMessage(tx, signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/create.json b/eth/tracers/internal/tracetest/testdata/call_tracer/create.json
index 8557f8efd..df0b2872b 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/create.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/create.json
@@ -47,7 +47,7 @@
"input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102
ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e1
8e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f",
"result": {
"from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
- "gas": "0x5e106",
+ "gas": "0x897be",
"gasUsed": "0x897be",
"input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a8154
8160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
"output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373fffffffffffffffff
fffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json b/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json
index 174f23fc4..975616064 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json
@@ -399,7 +399,7 @@
}
],
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
- "gas": "0x37b38",
+ "gas": "0x3d090",
"gasUsed": "0x1810b",
"input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000",
"to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json
index 5fd946f73..6a2cda7dc 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json
@@ -87,7 +87,7 @@
}
],
"from": "0xa529806c67cc6486d4d62024471772f47f6fd672",
- "gas": "0x2d6e28",
+ "gas": "0x2dc6c0",
"gasUsed": "0xbd55",
"input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e",
"to": "0x269296dddce321a6bcbaa2f0181127593d732cba",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json
index 95c588926..bb16a4a43 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json
@@ -67,7 +67,7 @@
],
"error": "invalid jump destination",
"from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8",
- "gas": "0x435c8",
+ "gas": "0x493e0",
"gasUsed": "0x493e0",
"input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8",
"to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json
index 4d7305a15..9b45b52fe 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json
@@ -54,7 +54,7 @@
"from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
"to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
"value": "0x0",
- "gas": "0x1a466",
+ "gas": "0x1f97e",
"gasUsed": "0x72de",
"input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000"
}
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json
index b5355f65f..ad0627ccd 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json
@@ -50,7 +50,7 @@
"input": "0x02f9029d82053980849502f90085010c388d00832dc6c08080b90241608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033c001a07566181071cabaf58b70fc41557eb813bfc7a24f5c58554e7fed0bf7c031f169a0420af50b5fe791a4d839e181a676db5250b415dfb35cb85d544db7a1475ae2cc",
"result": {
"from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1",
- "gas": "0x2cd774",
+ "gas": "0x2dc6c0",
"gasUsed": "0x25590",
"input": "0x608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033",
"output": "0x08c379a000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000012546869732063616c6c6564206661696c65640000000000000000000000000000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json
index 2be2dee23..a023ed6d9 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json
@@ -71,7 +71,7 @@
],
"error": "execution reverted",
"from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826",
- "gas": "0x78d9e",
+ "gas": "0x7dfa6",
"gasUsed": "0x7c1c8",
"input": "0x",
"to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json b/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json
index 8022f53a9..333bdd038 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json
@@ -50,7 +50,7 @@
"result": {
"error": "out of gas",
"from": "0x94194bc2aaf494501d7880b61274a169f6502a54",
- "gas": "0x7045",
+ "gas": "0xca1d",
"gasUsed": "0xca1d",
"input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000",
"to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json
index aee894d11..3207a298a 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json
@@ -48,7 +48,7 @@
"result": {
"error": "execution reverted",
"from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9",
- "gas": "0x2d55e8",
+ "gas": "0x2dc6c0",
"gasUsed": "0x719b",
"input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000",
"to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json
index 8c8abd4d6..f02e5c686 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json
@@ -27,7 +27,7 @@
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
- "IstanbulBlock":1561651,
+ "IstanbulBlock": 1561651,
"chainId": 5,
"daoForkSupport": true,
"eip150Block": 0,
@@ -53,7 +53,7 @@
"result": {
"error": "execution reverted",
"from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
- "gas": "0x2d7308",
+ "gas": "0x2dc6c0",
"gasUsed": "0x5940",
"input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
"to": "0xf58833cf0c791881b494eb79d461e08a1f043f52",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json b/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json
index a89d4dc74..620df1d61 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json
@@ -64,7 +64,7 @@
}
],
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
- "gas": "0x10738",
+ "gas": "0x15f90",
"gasUsed": "0x6fcb",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json
index 0a6d66a5c..6c7d01de1 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json
@@ -69,7 +69,7 @@
}
],
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
- "gas": "0x10738",
+ "gas": "0x15f90",
"gasUsed": "0x9751",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"output": "0x0000000000000000000000000000000000000000000000000000000000000001",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json b/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json
index 5e25a01ce..affb4ab03 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json
@@ -61,7 +61,7 @@
},
"result": {
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
- "gas": "0x10738",
+ "gas": "0x15f90",
"gasUsed": "0x9751",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"output": "0x0000000000000000000000000000000000000000000000000000000000000001",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json
index 76fae3c39..499b449a6 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json
@@ -52,7 +52,7 @@
"result": {
"error": "invalid jump destination",
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
- "gas": "0x37b38",
+ "gas": "0x3d090",
"gasUsed": "0x3d090",
"input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000",
"to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json
index e54ede92b..617f52a14 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json
@@ -46,7 +46,7 @@
{
"action": {
"from": "0xf8bda96b67036ee48107f2a0695ea673479dda56",
- "gas": "0x22410c",
+ "gas": "0x231860",
"init": "0x5b620186a05a131560135760016020526000565b600080601f600039601f565b6000f3",
"value": "0x0"
},
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json
index be198885c..c796804a4 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf08",
+ "gas": "0x19f78",
"init": "0x60206000600060006013600462030d40f260025560005160005500"
},
"result": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json
index 94b864ff4..fb29e4966 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf08",
+ "gas": "0x1a758",
"init": "0x7f18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c600052601c6020527f73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f6040527feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549606052602060806080600060006001610bb7f260025560a060020a60805106600055600054321460015500"
},
"result": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json
index 506dc5ff6..3c1e370f9 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf08",
+ "gas": "0x1a034",
"init": "0x36600060003760406103e8366000600060095af26001556103e8516002556104085160035500"
},
"error": "out of gas",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json
index b83236690..11bc4eae0 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json
@@ -49,7 +49,7 @@
{
"action": {
"from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
- "gas": "0x5e106",
+ "gas": "0x897be",
"init": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548
160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
"value": "0x0"
},
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json
index 5931b4080..375a16361 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json
@@ -113,7 +113,7 @@
"action": {
"callType": "call",
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
- "gas": "0x37b38",
+ "gas": "0x3d090",
"input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000",
"to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json
index 3a03ffc0f..e5a37cbfd 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json
@@ -66,7 +66,7 @@
"action": {
"callType": "call",
"from": "0xa529806c67cc6486d4d62024471772f47f6fd672",
- "gas": "0x2d6e28",
+ "gas": "0x2dc6c0",
"input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e",
"to": "0x269296dddce321a6bcbaa2f0181127593d732cba",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json
index 800a6a428..177912420 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json
@@ -59,7 +59,7 @@
"action": {
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
- "gas": "0x10b68",
+ "gas": "0x15f90",
"input": "0x4e45375a47413941",
"to": "0x91765918420bcb5ad22ee0997abed04056705798",
"value": "0x8ac7230489e80000"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json
index 3b44a5e2c..d977dbe30 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf08",
+ "gas": "0x1a9c8",
"init": "0x601b565b6000555b005b630badf00d6003565b63c001f00d6003565b7319e7e376e7c213b7e7e7e46cc70a5dd086daff2a7f22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda600052601b603f537f16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f6040527f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e40060605260206080607f60006000600161fffff21560075760805114601257600956"
},
"result": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json
index d33375bfd..0f28c07a9 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json
@@ -83,7 +83,7 @@
"balance": "0x0",
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
- "gas": "0x119d28",
+ "gas": "0x124f80",
"input": "0x13f955e100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000019004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae704000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30304000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000",
"refundAddress": "0x0000000000000000000000000000000000000000",
"to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json
index 170948e15..6c4ce1806 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json
@@ -58,7 +58,7 @@
"action": {
"callType": "call",
"from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8",
- "gas": "0x435c8",
+ "gas": "0x493e0",
"input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8",
"to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json
index 328b74327..4de08f2cc 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json
@@ -54,7 +54,7 @@
"action": {
"callType": "call",
"from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
- "gas": "0x1a466",
+ "gas": "0x1f97e",
"input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000",
"to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json
index 6b5738101..70442fdb9 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json
@@ -80,7 +80,7 @@
"balance": "0x0",
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
- "gas": "0x119d28",
+ "gas": "0x124f80",
"input": "0x13f955e100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000019004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae704000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30304000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000",
"refundAddress": "0x0000000000000000000000000000000000000000",
"to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json
index b11b8e040..bc9470871 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json
@@ -61,7 +61,7 @@
"action": {
"callType": "call",
"from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826",
- "gas": "0x78d9e",
+ "gas": "0x7dfa6",
"input": "0x",
"to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76",
"value": "0xe92596fd6290000"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json
index 64425dbad..3fcc61fc8 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0x3951c",
+ "gas": "0x53e90",
"init": "0x60606040525b60405161015b806102a0833901809050604051809103906000f0600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b610247806100596000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b9392505050566060604052610148806100136000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e7400000000000000000000000000000000000000000081526020015060200183815260200182815260200192505050600
0604051808303816000866161da5a03f1915050505b505056"
},
"result": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json
index bbd9904d9..0eaa3f867 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf6c",
+ "gas": "0x19ed8",
"init": "0x6000600060006000f500"
},
"result": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json
index 19ae5fc5d..132b84df3 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf08",
+ "gas": "0x19ee4",
"init": "0x5a600055600060006000f0505a60015500"
},
"error": "out of gas",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json
index a62d4bb64..28e96684b 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json
@@ -61,7 +61,7 @@
"from": "0xa3b31cbd5168d3c99756660d4b7625d679e12573",
"to": "0x76554b33410b6d90b7dc889bfed0451ad195f27e",
"value": "0x0",
- "gas": "0x2e138",
+ "gas": "0x33450",
"input": "0x391521f4",
"callType": "call"
},
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json
index 792845538..c3191d61b 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json
@@ -99,7 +99,7 @@
{
"action": {
"from": "0x5409ed021d9299bf6814279a6a1411a7e866a631",
- "gas": "0x215c47",
+ "gas": "0x2c8c7f",
"init": "0x60806040523480156200001157600080fd5b5060405162002d2c38038062002d2c83398101806040526200003791908101906200051d565b6000805433600160a060020a031991821617825560018054909116600160a060020a0386161790558251849084908490849081906200007e906004906020870190620003d0565b50825162000094906005906020860190620003d0565b50620000b0836010640100000000620019476200036f82021704565b9150620000cd846010640100000000620019476200036f82021704565b60028054600160a060020a03948516600160a060020a031991821617909155600380549285169290911691909117905550600154604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130181207f6070410800000000000000000000000000000000000000000000000000000000825291909216945063607041089350620001739250906004016200068e565b602060405180830381600087803b1580156200018e57600080fd5b505af1158015620001a3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620001c99190810190620004f4565b9050600160a060020a038116151562000219576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200021090620006b0565b60405180910390fd5b6002546040517f095ea7b3000000000000000000000000000000000000000000000000000000008152600160a060020a039091169063095ea7b39062000268908490600019906004016200066f565b602060405180830381600087803b1580156200028357600080fd5b505af115801562000298573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620002be9190810190620005a1565b506003546040517f095ea7b3000000000000000000000000000000000000000000000000000000008152600160a060020a039091169063095ea7b3906200030e908490600019906004016200066f565b602060405180830381600087803b1580156200032957600080fd5b505af11580156200033e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620003649190810190620005a1565b50505050506200077a565b600081601401835110151515620003b4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000210906200069e565b506014818301810151910190600160a060020a03165b92915050565b82805460018160011615610100020316600
2900490600052602060002090601f016020900481019282601f106200041357805160ff191683800117855562000443565b8280016001018555821562000443579182015b828111156200044357825182559160200191906001019062000426565b506200045192915062000455565b5090565b6200047291905b808211156200045157600081556001016200045c565b90565b600062000483825162000711565b9392505050565b600062000483825162000742565b6000601f82018313620004aa57600080fd5b8151620004c1620004bb82620006e9565b620006c2565b91508082526020830160208301858383011115620004de57600080fd5b620004eb83828462000747565b50505092915050565b6000602082840312156200050757600080fd5b600062000515848462000475565b949350505050565b6000806000606084860312156200053357600080fd5b600062000541868662000475565b93505060208401516001604060020a038111156200055e57600080fd5b6200056c8682870162000498565b92505060408401516001604060020a038111156200058957600080fd5b620005978682870162000498565b9150509250925092565b600060208284031215620005b457600080fd5b60006200051584846200048a565b620005cd8162000711565b82525050565b620005cd816200071d565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601881527f554e524547495354455245445f41535345545f50524f58590000000000000000602082015260400190565b620005cd8162000472565b604081016200067f8285620005c2565b62000483602083018462000664565b60208101620003ca8284620005d3565b60208082528101620003ca81620005de565b60208082528101620003ca8162000634565b6040518181016001604060020a0381118282101715620006e157600080fd5b604052919050565b60006001604060020a038211156200070057600080fd5b506020601f91909101601f19160190565b600160a060020a031690565b7fffffffff000000000000000000000000000000000000000000000000000000001690565b151590565b60005b83811015620007645781810151838201526020016200074a565b8381111562000774576000848401525b50505050565b6125a2806200078a6000396000f30060806040526004361061006c5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166318978e8281146100c8578
063630f1e6c146100f25780638da5cb5b146101125780639395525c14610134578063f2fde38b14610147575b60025473ffffffffffffffffffffffffffffffffffffffff1633146100c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612388565b60405180910390fd5b005b6100db6100d6366004611df1565b610167565b6040516100e9929190612488565b60405180910390f35b3480156100fe57600080fd5b506100c661010d366004611eec565b6102f7565b34801561011e57600080fd5b50610127610388565b6040516100e99190612337565b6100db610142366004611d0b565b6103a4565b34801561015357600080fd5b506100c6610162366004611ce5565b61050a565b61016f6119fa565b6101776119fa565b6000806101826105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff610100600188161502019095169490940493840181900481028201810190925282815261025c939092909183018282801561022d5780601f106102025761010080835404028352916020019161022d565b820191906000526020600020905b81548152906001019060200180831161021057829003601f168201915b50505050508c600081518110151561024157fe5b6020908102909101015161014001519063ffffffff61069616565b156102875761026c8b8b8b6107c3565b935061028084600001518560600151610ac1565b90506102ae565b6102928b8b8b610b03565b9350836060015191506102a68883896107c3565b845190935090505b6102c2846020015184602001518888610d15565b6102e98b60008151811015156102d457fe5b90602001906020020151610140015182610f29565b505097509795505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610348576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b61038383838080601f01602080910402602001604051908101604052809392919081815260200183838082843750879450610f299350505050565b505050565b60005473ffffffffffffffffffffffffffffffffffffffff1681565b6103ac6119fa565b6103b46119fa565b60008060006103c16105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152610441939092909183018282801561022d5780601
f106102025761010080835404028352916020019161022d565b156104925761046a670de0b6b3a7640000610464670de0b6b3a76400008a611045565b3461108f565b92506104778b848c6110e7565b945061048b85600001518660600151610ac1565b90506104d6565b6104ad670d2f13f7789f0000670de0b6b3a76400003461108f565b92506104ba8b848c6110e7565b9450846060015191506104ce89838a6107c3565b855190945090505b6104ea856020015185602001518989610d15565b6104fc8b60008151811015156102d457fe5b505050965096945050505050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461055b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b73ffffffffffffffffffffffffffffffffffffffff8116156105b857600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83161790555b50565b600034116105f5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612398565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663d0e30db0346040518263ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004016000604051808303818588803b15801561067b57600080fd5b505af115801561068f573d6000803e3d6000fd5b5050505050565b6000815183511480156107ba5750816040518082805190602001908083835b602083106106f257805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016106b5565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0180199092169116179052604051919093018190038120885190955088945090928392508401908083835b6020831061078757805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161074a565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051809103902060001916145b90505b92915050565b6107cb6119fa565b60608060008060008060006107de6119fa565b8a15156107ea57610ab2565b6004805460408051602060026001851615610100027ffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561088e5780601f106108635761010080835404028352916020019161088e565b820191906000526020600020905b81548152906001019060200180831161087157829003601f168201915b505060058054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152969e509194509250840190508282801561093d5780601f106109125761010080835404028352916020019161093d565b820191906000526020600020905b81548152906001019060200180831161092057829003601f168201915b50505050509650600095508b519450600093505b838514610a7857878c8581518110151561096757fe5b6020908102909101015161014001528b5187908d908690811061098657fe5b60209081029091010151610160015261099f8b87610ac1565b9250610a068c858151811015156109b257fe5b9060200190602002015160a00151610a008e878151811015156109d157fe5b90602001906020020151608001518f888151811015156109ed57fe5b9060200190602002015160e00151610ac1565b8561128b565b9150610a418c85815181101515610a1957fe5b90602001906020020151838c87815181101515610a3257fe5b906020019060200201516112e6565b9050610a4d898261135e565b610a5f89600001518a60600151610ac1565b95508a8610610a6d57610a78565b600190930192610951565b8a861015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b50505050505050509392505050565b600082821115610afd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123b8565b50900390565b610b0b6119fa565b606080600080600080610b1c6119fa565b60008b6000815181101515610b2d57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929b5092909190830182828015610be55780601f10610bba57610100808354040283529160200191610be5565b820191906000526020600020905b815481529060010190602001808311610bc857829003601f168201915b505050505096508b519550600094505b84861
4610cdb57878c86815181101515610c0b57fe5b6020908102909101015161014001528b5187908d9087908110610c2a57fe5b6020908102909101015161016001528851610c46908c90610ac1565b9350610c898c86815181101515610c5957fe5b9060200190602002015160a001518d87815181101515610c7557fe5b90602001906020020151608001518661128b565b9250610cb58c86815181101515610c9c57fe5b90602001906020020151848c88815181101515610a3257fe5b9150610cc1898361135e565b5087518a8110610cd057610cdb565b600190940193610bf5565b8a811015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b600080808066b1a2bc2ec50000861115610d5b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612448565b610d658888611045565b935034841115610da1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123a8565b610dab3485610ac1565b9250610dc086670de0b6b3a76400008a61108f565b915082821115610dfc576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612428565b6000831115610f1f576002546040517f2e1a7d4d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690632e1a7d4d90610e5b9086906004016124a4565b600060405180830381600087803b158015610e7557600080fd5b505af1158015610e89573d6000803e3d6000fd5b505050506000821115610edb5760405173ffffffffffffffffffffffffffffffffffffffff86169083156108fc029084906000818181858888f19350505050158015610ed9573d6000803e3d6000fd5b505b610ee58383610ac1565b90506000811115610f1f57604051339082156108fc029083906000818181858888f19350505050158015610f1d573d6000803e3d6000fd5b505b5050505050505050565b6000610f3b838263ffffffff6113c016565b604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130190209091507fffffffff0000000000000000000000000000000000000000000000000000000080831691161415610fab57610fa6838361142d565b610383565b604080517f455243373231546f6b656e28616464726573732c75696e7432353629000000008152905190819003601c0190207ffff
fffff000000000000000000000000000000000000000000000000000000008281169116141561101357610fa6838361161b565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123f8565b600082820183811015611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b8091505b5092915050565b60008083116110ca576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d78584611703565b8461175e565b90505b9392505050565b6110ef6119fa565b60608060008060006110ff6119fa565b89600081518110151561110e57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929950929091908301828280156111c65780601f1061119b576101008083540402835291602001916111c6565b820191906000526020600020905b8154815290600101906020018083116111a957829003601f168201915b5050505050945089519350600092505b82841461127e57858a848151811015156111ec57fe5b602090810290910101516101400152895185908b908590811061120b57fe5b90602001906020020151610160018190525061122b898860200151610ac1565b91506112578a8481518110151561123e57fe5b90602001906020020151838a86815181101515610a3257fe5b9050611263878261135e565b602087015189116112735761127e565b6001909201916111d6565b5050505050509392505050565b60008083116112c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d76112d68685611703565b6112e1866001610ac1565b611045565b6112ee6119fa565b606060006112fd868686611775565b600154815191935073ffffffffffffffffffffffffffffffffffffffff1691506080908390602082016000855af1801561135457825184526020830151602085015260408301516040850152606083015160608501525b5050509392505050565b8151815161136c9190611045565b8252602080830151908201516113829190611045565b60208301526040808301519082015161139b9190611045565b6040830152606080830151908201516113b49190611045565b60609092019190915250565b600081600401835
110151515611402576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612468565b5001602001517fffffffff000000000000000000000000000000000000000000000000000000001690565b60008061144184601063ffffffff61194716565b604080517f7472616e7366657228616464726573732c75696e7432353629000000000000008152905190819003601901812091935073ffffffffffffffffffffffffffffffffffffffff8416919061149f903390879060240161236d565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931783525181519192909182919080838360005b8381101561154357818101518382015260200161152b565b50505050905090810190601f1680156115705780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1925050508015156115bf576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b3d156115dc575060003d602014156115dc5760206000803e506000515b801515611615576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b50505050565b60008060018314611658576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612478565b61166984601063ffffffff61194716565b915061167c84602463ffffffff6119a816565b6040517f23b872dd00000000000000000000000000000000000000000000000000000000815290915073ffffffffffffffffffffffffffffffffffffffff8316906323b872dd906116d590309033908690600401612345565b600060405180830381600087803b1580156116ef57600080fd5b505af1158015610f1f573d6000803e3d6000fd5b6000808315156117165760009150611088565b5082820282848281151561172657fe5b0414611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b600080828481151561176c57fe5b04949350505050565b604080517fb4be83d5000000000000000000000000000000000000000000000000000000006020808301919091526060602483018181528751608
485019081528884015160a48601529488015160c48501529087015160e4840152608087015161010484015260a087015161012484015260c087015161014484015260e08701516101648401526101008701516101848401526101208701516101a4840152610140870180516101c485019081526101608901516101e4860152610180905251805161020485018190529394919384936044870192849261022489019291820191601f82010460005b8181101561187c57835185526020948501949093019260010161185e565b50505050818103610160808401919091528a0151805180835260209283019291820191601f82010460005b818110156118c55783518552602094850194909301926001016118a7565b50505089845250848103602093840190815288518083529093918201918981019190601f82010460005b8181101561190d5783518552602094850194909301926001016118ef565b5050507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08883030188525060405250505050509392505050565b600081601401835110151515611989576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612458565b50016014015173ffffffffffffffffffffffffffffffffffffffff1690565b60006107ba83836000816020018351101515156119f1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123c8565b50016020015190565b608060405190810160405280600081526020016000815260200160008152602001600081525090565b60006107ba8235612540565b6000601f82018313611a4057600080fd5b8135611a53611a4e826124d9565b6124b2565b81815260209384019390925082018360005b83811015611a915781358601611a7b8882611b41565b8452506020928301929190910190600101611a65565b5050505092915050565b6000601f82018313611aac57600080fd5b8135611aba611a4e826124d9565b81815260209384019390925082018360005b83811015611a915781358601611ae28882611b90565b8452506020928301929190910190600101611acc565b600080601f83018413611b0a57600080fd5b50813567ffffffffffffffff811115611b2257600080fd5b602083019150836001820283011115611b3a57600080fd5b9250929050565b6000601f82018313611b5257600080fd5b8135611b60611a4e826124fa565b91508082526020830160208301858383011115611b7c57600080fd5b611b8783828461255c565b50505092915050565b600061018082840
31215611ba357600080fd5b611bae6101806124b2565b90506000611bbc8484611a23565b8252506020611bcd84848301611a23565b6020830152506040611be184828501611a23565b6040830152506060611bf584828501611a23565b6060830152506080611c0984828501611cd9565b60808301525060a0611c1d84828501611cd9565b60a08301525060c0611c3184828501611cd9565b60c08301525060e0611c4584828501611cd9565b60e083015250610100611c5a84828501611cd9565b61010083015250610120611c7084828501611cd9565b6101208301525061014082013567ffffffffffffffff811115611c9257600080fd5b611c9e84828501611b41565b6101408301525061016082013567ffffffffffffffff811115611cc057600080fd5b611ccc84828501611b41565b6101608301525092915050565b60006107ba8235612559565b600060208284031215611cf757600080fd5b6000611d038484611a23565b949350505050565b60008060008060008060c08789031215611d2457600080fd5b863567ffffffffffffffff811115611d3b57600080fd5b611d4789828a01611a9b565b965050602087013567ffffffffffffffff811115611d6457600080fd5b611d7089828a01611a2f565b955050604087013567ffffffffffffffff811115611d8d57600080fd5b611d9989828a01611a9b565b945050606087013567ffffffffffffffff811115611db657600080fd5b611dc289828a01611a2f565b9350506080611dd389828a01611cd9565b92505060a0611de489828a01611a23565b9150509295509295509295565b600080600080600080600060e0888a031215611e0c57600080fd5b873567ffffffffffffffff811115611e2357600080fd5b611e2f8a828b01611a9b565b9750506020611e408a828b01611cd9565b965050604088013567ffffffffffffffff811115611e5d57600080fd5b611e698a828b01611a2f565b955050606088013567ffffffffffffffff811115611e8657600080fd5b611e928a828b01611a9b565b945050608088013567ffffffffffffffff811115611eaf57600080fd5b611ebb8a828b01611a2f565b93505060a0611ecc8a828b01611cd9565b92505060c0611edd8a828b01611a23565b91505092959891949750929550565b600080600060408486031215611f0157600080fd5b833567ffffffffffffffff811115611f1857600080fd5b611f2486828701611af8565b93509350506020611f3786828701611cd9565b9150509250925092565b611f4a81612540565b82525050565b602381527f44454641554c545f46554e4354494f4e5f574554485f434f4e54524143545f4f60208201527f4e4c59000
0000000000000000000000000000000000000000000000000000000604082015260600190565b601181527f494e56414c49445f4d53475f56414c5545000000000000000000000000000000602082015260400190565b600d81527f4f564552534f4c445f5745544800000000000000000000000000000000000000602082015260400190565b601181527f55494e543235365f554e444552464c4f57000000000000000000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f33325f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601081527f4449564953494f4e5f42595f5a45524f00000000000000000000000000000000602082015260400190565b601081527f55494e543235365f4f564552464c4f5700000000000000000000000000000000602082015260400190565b601781527f554e535550504f525445445f41535345545f50524f5859000000000000000000602082015260400190565b600f81527f5452414e534645525f4641494c45440000000000000000000000000000000000602082015260400190565b601481527f434f4d504c4554455f46494c4c5f4641494c4544000000000000000000000000602082015260400190565b601a81527f494e53554646494349454e545f4554485f52454d41494e494e47000000000000602082015260400190565b601381527f4f4e4c595f434f4e54524143545f4f574e455200000000000000000000000000602082015260400190565b601881527f4645455f50455243454e544147455f544f4f5f4c415247450000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b602581527f475245415445525f4f525f455155414c5f544f5f345f4c454e4754485f52455160208201527f5549524544000000000000000000000000000000000000000000000000000000604082015260600190565b600e81527f494e56414c49445f414d4f554e54000000000000000000000000000000000000602082015260400190565b805160808301906122f9848261232e565b50602082015161230c602085018261232e565b50604082015161231f604085018261232e565b50606082015161161560608501825b611f4a81612559565b602081016107bd8284611f41565b606081016123538286611f41565b6123606020830185611f41565b611d03604083018
461232e565b6040810161237b8285611f41565b6110e0602083018461232e565b602080825281016107bd81611f50565b602080825281016107bd81611fa6565b602080825281016107bd81611fd6565b602080825281016107bd81612006565b602080825281016107bd81612036565b602080825281016107bd8161208c565b602080825281016107bd816120bc565b602080825281016107bd816120ec565b602080825281016107bd8161211c565b602080825281016107bd8161214c565b602080825281016107bd8161217c565b602080825281016107bd816121ac565b602080825281016107bd816121dc565b602080825281016107bd8161220c565b602080825281016107bd81612262565b602080825281016107bd816122b8565b610100810161249782856122e8565b6110e060808301846122e8565b602081016107bd828461232e565b60405181810167ffffffffffffffff811182821017156124d157600080fd5b604052919050565b600067ffffffffffffffff8211156124f057600080fd5b5060209081020190565b600067ffffffffffffffff82111561251157600080fd5b506020601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160190565b73ffffffffffffffffffffffffffffffffffffffff1690565b90565b828183375060009101525600a265627a7a72305820d9f418f11e0f91f06f6f9d22924be0add925495eeb76a6388b5417adb505eeb36c6578706572696d656e74616cf5003700000000000000000000000048bacb9266a570d521063ef5dd96e61686dbe788000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000871dd7c2b4b25e1aa18728e9d5f2af4c4e431f5c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024f47261b00000000000000000000000000b1ba0af832d7c05fd64161e0db78e85978e808200000000000000000000000000000000000000000000000000000000",
"value": "0x0"
},
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json
index 26ae2f060..bd6059fae 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json
@@ -52,7 +52,7 @@
"action": {
"callType": "call",
"from": "0x94194bc2aaf494501d7880b61274a169f6502a54",
- "gas": "0x7045",
+ "gas": "0xca1d",
"input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000",
"to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json
index 0216c318b..8888d3e68 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json
@@ -55,7 +55,7 @@
"action": {
"callType": "call",
"from": "0x94194bc2aaf494501d7880b61274a169f6502a54",
- "gas": "0x7045",
+ "gas": "0xca1d",
"input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000",
"to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json
index f58d20cd2..62baf333b 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json
@@ -68,7 +68,7 @@
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"to": "0x531f76bad925f6a925474996c7d738c1008045f6",
"value": "0xde0b6b3a7640000",
- "gas": "0x3b920",
+ "gas": "0x40b28",
"input": "0x",
"callType": "call"
},
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json
index 897aebb0e..b0346d860 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json
@@ -50,7 +50,7 @@
"action": {
"callType": "call",
"from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9",
- "gas": "0x2d55e8",
+ "gas": "0x2dc6c0",
"input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000",
"to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json
index 62dbaf20d..6759b05e5 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json
@@ -55,7 +55,7 @@
"action": {
"callType": "call",
"from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
- "gas": "0x2d7308",
+ "gas": "0x2dc6c0",
"input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
"to": "0xf58833cf0c791881b494eb79d461e08a1f043f52",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json
index cd34d0b6d..74fd87cc6 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xcf08",
+ "gas": "0x19ecc",
"init": "0x605a600053600160006001f0ff00"
},
"result": {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json
index 6d084410a..a7244e974 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json
@@ -61,7 +61,7 @@
"action": {
"callType": "call",
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
- "gas": "0x10738",
+ "gas": "0x15f90",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json
index d530fe908..96060d554 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json
@@ -54,7 +54,7 @@
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
- "gas": "0xd550",
+ "gas": "0x1aab0",
"init": "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5547f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000037f055"
},
"error": "out of gas",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json
index 9291149bd..45ffbe2db 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json
@@ -65,7 +65,7 @@
"balance": "0x0",
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
- "gas": "0x4053e",
+ "gas": "0x48196",
"input": "0x200b1e64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001b9af799918107e9a339eba0584b8b60b35aae6f087c74f6bfc00c9301849b204d094ed65e09c76c2597f5516f9440aad2921e50dde096e7caaa65a536d4d9265e00000000000000000000000000000000000000000000000000000000000000504269747669657720697320616e20616d617a696e6720776562736974652e20596f752073686f756c6420646566696e6974656c792061646420796f75722070726f6475637420746f2069742e20e282bf00000000000000000000000000000000",
"refundAddress": "0x0000000000000000000000000000000000000000",
"to": "0x8521f13dd5e4bc3dab3cf0f01a195a5af899e851",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json
index bd9e057c0..16d43767d 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json
@@ -55,7 +55,7 @@
"action": {
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
- "gas": "0x445708",
+ "gas": "0x44aa20",
"input": "0x41c0e1b5",
"to": "0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json
index b119bed52..a001178a4 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json
@@ -54,7 +54,7 @@
"action": {
"callType": "call",
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
- "gas": "0x37b38",
+ "gas": "0x3d090",
"input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000",
"to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b",
"value": "0x0"
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json
index 8557f8efd..df0b2872b 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json
@@ -47,7 +47,7 @@
"input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102
ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e1
8e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f",
"result": {
"from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
- "gas": "0x5e106",
+ "gas": "0x897be",
"gasUsed": "0x897be",
"input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a8154
8160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
"output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373fffffffffffffffff
fffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json
index ef28a930b..80fc0b0ad 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json
@@ -404,7 +404,7 @@
}
],
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
- "gas": "0x37b38",
+ "gas": "0x3d090",
"gasUsed": "0x1810b",
"input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000",
"output": "0x",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json
index c4c1390fa..2cd28bacc 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json
@@ -86,7 +86,7 @@
}
],
"from": "0xa529806c67cc6486d4d62024471772f47f6fd672",
- "gas": "0x2d6e28",
+ "gas": "0x2dc6c0",
"gasUsed": "0xbd55",
"input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e",
"output": "0x",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json
index 0b60e34d0..07fda21d4 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json
@@ -67,7 +67,7 @@
],
"error": "invalid jump destination",
"from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8",
- "gas": "0x435c8",
+ "gas": "0x493e0",
"gasUsed": "0x493e0",
"input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8",
"to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json
index c1ed766ef..16e413623 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json
@@ -54,7 +54,7 @@
"from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
"to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
"value": "0x0",
- "gas": "0x1a466",
+ "gas": "0x1f97e",
"gasUsed": "0x72de",
"input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000",
"output": "0x",
@@ -64,7 +64,7 @@
"from": "0x6c06b16512b332e6cd8293a2974872674716ce18",
"to": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
"value": "0x14d1120d7b160000",
- "error":"internal failure",
+ "error": "internal failure",
"input": "0x"
}
]
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json
index 2be2dee23..a023ed6d9 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json
@@ -71,7 +71,7 @@
],
"error": "execution reverted",
"from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826",
- "gas": "0x78d9e",
+ "gas": "0x7dfa6",
"gasUsed": "0x7c1c8",
"input": "0x",
"to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json
index 8022f53a9..333bdd038 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json
@@ -50,7 +50,7 @@
"result": {
"error": "out of gas",
"from": "0x94194bc2aaf494501d7880b61274a169f6502a54",
- "gas": "0x7045",
+ "gas": "0xca1d",
"gasUsed": "0xca1d",
"input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000",
"to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json
index aee894d11..3207a298a 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json
@@ -48,7 +48,7 @@
"result": {
"error": "execution reverted",
"from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9",
- "gas": "0x2d55e8",
+ "gas": "0x2dc6c0",
"gasUsed": "0x719b",
"input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000",
"to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json
index 4f7fee97d..5c7e5629e 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json
@@ -53,7 +53,7 @@
"result": {
"error": "execution reverted",
"from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
- "gas": "0x2d7308",
+ "gas": "0x2dc6c0",
"gasUsed": "0x5940",
"input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1",
"to": "0xf58833cf0c791881b494eb79d461e08a1f043f52",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json
index 55b63dbdb..11b23a990 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json
@@ -62,7 +62,7 @@
}
],
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
- "gas": "0x10738",
+ "gas": "0x15f90",
"gasUsed": "0x6fcb",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"output": "0x",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json
index c9192a19f..37723f17d 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json
@@ -67,7 +67,7 @@
}
],
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
- "gas": "0x10738",
+ "gas": "0x15f90",
"gasUsed": "0x9751",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"output": "0x0000000000000000000000000000000000000000000000000000000000000001",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json
index 76fae3c39..499b449a6 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json
@@ -52,7 +52,7 @@
"result": {
"error": "invalid jump destination",
"from": "0x70c9217d814985faef62b124420f8dfbddd96433",
- "gas": "0x37b38",
+ "gas": "0x3d090",
"gasUsed": "0x3d090",
"input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000",
"to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json
index b18c80e58..9264f1e2f 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json
@@ -77,7 +77,7 @@
},
"result": {
"from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5",
- "gas": "0x2dced",
+ "gas": "0x33085",
"gasUsed": "0x1a9e5",
"to": "0x200edd17f30485a8735878661960cd7a9a95733f",
"input": "0xba51a6df0000000000000000000000000000000000000000000000000000000000000000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json
index 2c8213802..f63dbd47d 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json
@@ -134,7 +134,7 @@
},
"result": {
"from": "0x3de712784baf97260455ae25fb74f574ec9c1add",
- "gas": "0x7e2c0",
+ "gas": "0x84398",
"gasUsed": "0x27ec3",
"to": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5",
"input": "0xbbd4f854e9efd3ab89acad6a3edf9828c3b00ed1c4a74e974d05d32d3b2fb15aa16fc3770000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000de0b6b3a7640000000000000000000000000000000000000000000000000000080d29fa5cccfadac",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json
index 649a5b1b5..5e5d95386 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json
@@ -299,7 +299,7 @@
},
"result": {
"from": "0xbe3ae5cb97c253dda67181c6e34e43f5c275e08b",
- "gas": "0x3514c8",
+ "gas": "0x3567e0",
"gasUsed": "0x26e1ef",
"to": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e",
"input": "0xbe9a6555",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json
index 858931558..1ffffd240 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json
@@ -167,7 +167,7 @@
},
"result": {
"from": "0x3fcb0342353c541e210013aaddc2e740b9a33d08",
- "gas": "0x2b0868",
+ "gas": "0x2dc6c0",
"gasUsed": "0x2570bf",
"to": "0x350e0ffc780a6a75b44cc52e1ff9092870668945",
"input": "0xe021fadb000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000006e00000000000000000000000000000000000000000000000000000000000000d4000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000
02fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fd000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000002fd0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003900000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000036000000000000000000000000000000000000000000000000000000000000003a000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000035000000000000
0000000000000000000000000000000000000000000000000035000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003b00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000003c00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000033000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000003d00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000003e00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000003700000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000039000000000000000000000000000000000000000000000000000000000000003900000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000036000000000000000000000000000000000000000000000000000000000000003a000000000000000000000000000000000000000000000000000000000000003a00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000035000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003400000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000
00000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000003c00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000033000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000003d00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000003800000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000032fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebebffffffffffffffffffffffffffffffffffffffffffffffffffffffffff888888ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaeaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababaffffffffffffffffffffffffffffffffffffffffffffffffffffffffff636363fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaeaffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9cfffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4d4e53ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e08000000000000000
0000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e08000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json
index 09aa7af46..116606b3c 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json
@@ -102,7 +102,7 @@
},
"result": {
"from": "0x6412becf35cc7e2a9e7e47966e443f295e1e4f4a",
- "gas": "0x2bb38",
+ "gas": "0x30d40",
"gasUsed": "0x249eb",
"to": "0x50739060a2c32dc076e507ae1a893aab28ecfe68",
"input": "0x",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json
index 1a03f0e7f..30f177706 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json
@@ -63,7 +63,7 @@
},
"result": {
"from": "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb",
- "gas": "0x1f36d",
+ "gas": "0x24d45",
"gasUsed": "0xc6a5",
"to": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd",
"input": "0xa9059cbb000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb0000000000000000000000000000000000000000000000000000000000989680",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json
index 4e0aec529..30346d07f 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json
@@ -137,7 +137,7 @@
},
"result": {
"from": "0xe6002189a74b43e6868b20c1311bc108e38aac57",
- "gas": "0xa59c8",
+ "gas": "0xaae60",
"gasUsed": "0xaae60",
"to": "0x630a0cd35d5bd57e61410fda76fea850225cda18",
"input": "0xe1fa763800000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json
index 8df52db23..eb2514427 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json
@@ -75,7 +75,7 @@
},
"result": {
"from": "0x01115b41bd2731353dd3e6abf44818fdc035aaf1",
- "gas": "0x28e28",
+ "gas": "0x30d40",
"gasUsed": "0x288c9",
"to": "0xcf1476387d780169410d4e936d75a206fda2a68c",
"input": "0xb61d27f6000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c18941300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000008861393035396362623030303030303030303030303030303030303030303030303930643363313831326465323636396266383037626437373538636562316533343937616337653430303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031633662663532363334303030000000000000000000000000000000000000000000000000",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json
index c805296ad..e73081107 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json
@@ -78,7 +78,7 @@
},
"result": {
"from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5",
- "gas": "0x2dced",
+ "gas": "0x33085",
"gasUsed": "0x1a9e5",
"to": "0x200edd17f30485a8735878661960cd7a9a95733f",
"input": "0xba51a6df0000000000000000000000000000000000000000000000000000000000000000",
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index 8e52f5b21..2a2789e93 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -32,10 +32,6 @@ import (
jsassets "github.com/ethereum/go-ethereum/eth/tracers/js/internal/tracers"
)
-const (
- memoryPadLimit = 1024 * 1024
-)
-
var assetTracers = make(map[string]string)
// init retrieves the JavaScript transaction tracers included in go-ethereum.
@@ -239,7 +235,7 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr
t.ctx["from"] = t.vm.ToValue(from.Bytes())
t.ctx["to"] = t.vm.ToValue(to.Bytes())
t.ctx["input"] = t.vm.ToValue(input)
- t.ctx["gas"] = t.vm.ToValue(gas)
+ t.ctx["gas"] = t.vm.ToValue(t.gasLimit)
t.ctx["gasPrice"] = t.vm.ToValue(env.TxContext.GasPrice)
valueBig, err := t.toBig(t.vm, value.String())
if err != nil {
@@ -571,14 +567,10 @@ func (mo *memoryObj) slice(begin, end int64) ([]byte, error) {
if end < begin || begin < 0 {
return nil, fmt.Errorf("tracer accessed out of bound memory: offset %d, end %d", begin, end)
}
- mlen := mo.memory.Len()
- if end-int64(mlen) > memoryPadLimit {
- return nil, fmt.Errorf("tracer reached limit for padding memory slice: end %d, memorySize %d", end, mlen)
+ slice, err := tracers.GetMemoryCopyPadded(mo.memory, begin, end-begin)
+ if err != nil {
+ return nil, err
}
- slice := make([]byte, end-begin)
- end = min(end, int64(mo.memory.Len()))
- ptr := mo.memory.GetPtr(begin, end-begin)
- copy(slice[:], ptr[:])
return slice, nil
}
@@ -959,10 +951,3 @@ func (l *steplog) setupObject() *goja.Object {
o.Set("contract", l.contract.setupObject())
return o
}
-
-func min(a, b int64) int64 {
- if a < b {
- return a
- }
- return b
-}
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index 524d17474..bf6427faf 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -62,7 +62,7 @@ func testCtx() *vmContext {
func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig, contractCode []byte) (json.RawMessage, error) {
var (
- env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer})
+ env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Tracer: tracer})
gasLimit uint64 = 31000
startGas uint64 = 10000
value = big.NewInt(0)
@@ -150,12 +150,12 @@ func TestTracer(t *testing.T) {
}, {
code: "{res: [], step: function(log) { if (log.op.toString() === 'STOP') { this.res.push(log.memory.slice(5, 1025 * 1024)) } }, fault: function() {}, result: function() { return this.res }}",
want: "",
- fail: "tracer reached limit for padding memory slice: end 1049600, memorySize 32 at step (:1:83(20)) in server-side tracer function 'step'",
+ fail: "reached limit for padding memory slice: 1049568 at step (:1:83(20)) in server-side tracer function 'step'",
contract: []byte{byte(vm.PUSH1), byte(0xff), byte(vm.PUSH1), byte(0x00), byte(vm.MSTORE8), byte(vm.STOP)},
},
} {
if have, err := execTracer(tt.code, tt.contract); tt.want != string(have) || tt.fail != err {
- t.Errorf("testcase %d: expected return value to be '%s' got '%s', error to be '%s' got '%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code)
+ t.Errorf("testcase %d: expected return value to be \n'%s'\n\tgot\n'%s'\nerror to be\n'%s'\n\tgot\n'%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code)
}
}
}
@@ -180,7 +180,7 @@ func TestHaltBetweenSteps(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer})
scope := &vm.ScopeContext{
Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0),
}
@@ -204,7 +204,7 @@ func TestNoStepExec(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
+ env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer})
tracer.CaptureStart(env, common.Address{}, common.Address{}, false, []byte{}, 1000, big.NewInt(0))
tracer.CaptureEnd(nil, 0, nil)
ret, err := tracer.GetResult()
@@ -229,7 +229,7 @@ func TestNoStepExec(t *testing.T) {
}
func TestIsPrecompile(t *testing.T) {
- chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), TerminalTotalDifficulty: nil, Ethash: new(params.EthashConfig), Clique: nil}
+ chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(300), LondonBlock: big.NewInt(0), TerminalTotalDifficulty: nil, Ethash: new(params.EthashConfig), Clique: nil}
chaincfg.ByzantiumBlock = big.NewInt(100)
chaincfg.IstanbulBlock = big.NewInt(200)
chaincfg.BerlinBlock = big.NewInt(300)
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
index 5e75318b9..c7f171c5b 100644
--- a/eth/tracers/logger/logger.go
+++ b/eth/tracers/logger/logger.go
@@ -116,8 +116,8 @@ type StructLogger struct {
gasLimit uint64
usedGas uint64
- interrupt uint32 // Atomic flag to signal execution interruption
- reason error // Textual reason for the interruption
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
}
// NewStructLogger returns a new logger
@@ -149,7 +149,7 @@ func (l *StructLogger) CaptureStart(env *vm.EVM, from common.Address, to common.
// CaptureState also tracks SLOAD/SSTORE ops to track storage change.
func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
// If tracing was interrupted, set the error and stop
- if atomic.LoadUint32(&l.interrupt) > 0 {
+ if l.interrupt.Load() {
return
}
// check if already accumulated the specified number of logs
@@ -258,7 +258,7 @@ func (l *StructLogger) GetResult() (json.RawMessage, error) {
// Stop terminates execution of the tracer at the first opportune moment.
func (l *StructLogger) Stop(err error) {
l.reason = err
- atomic.StoreUint32(&l.interrupt, 1)
+ l.interrupt.Store(true)
}
func (l *StructLogger) CaptureTxStart(gasLimit uint64) {
diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go
index 1bc7456d3..bde43e5ff 100644
--- a/eth/tracers/logger/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -55,7 +55,7 @@ func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash) {}
func TestStoreCapture(t *testing.T) {
var (
logger = NewStructLogger(nil)
- env = vm.NewEVM(vm.BlockContext{}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: logger})
+ env = vm.NewEVM(vm.BlockContext{}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: logger})
contract = vm.NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 100000)
)
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go
index 1b4649baa..5a2c4f911 100644
--- a/eth/tracers/native/4byte.go
+++ b/eth/tracers/native/4byte.go
@@ -48,7 +48,7 @@ func init() {
type fourByteTracer struct {
noopTracer
ids map[string]int // ids aggregates the 4byte ids found
- interrupt uint32 // Atomic flag to signal execution interruption
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
reason error // Textual reason for the interruption
activePrecompiles []common.Address // Updated on CaptureStart based on given rules
}
@@ -93,7 +93,7 @@ func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to commo
// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
// Skip if tracing was interrupted
- if atomic.LoadUint32(&t.interrupt) > 0 {
+ if t.interrupt.Load() {
return
}
if len(input) < 4 {
@@ -124,7 +124,7 @@ func (t *fourByteTracer) GetResult() (json.RawMessage, error) {
// Stop terminates execution of the tracer at the first opportune moment.
func (t *fourByteTracer) Stop(err error) {
t.reason = err
- atomic.StoreUint32(&t.interrupt, 1)
+ t.interrupt.Store(true)
}
func bytesToHex(s []byte) string {
diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go
index 02ee152a5..4ac03e512 100644
--- a/eth/tracers/native/call.go
+++ b/eth/tracers/native/call.go
@@ -102,8 +102,8 @@ type callTracer struct {
callstack []callFrame
config callTracerConfig
gasLimit uint64
- interrupt uint32 // Atomic flag to signal execution interruption
- reason error // Textual reason for the interruption
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
}
type callTracerConfig struct {
@@ -133,7 +133,7 @@ func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Ad
From: from,
To: &toCopy,
Input: common.CopyBytes(input),
- Gas: gas,
+ Gas: t.gasLimit,
Value: value,
}
if create {
@@ -148,6 +148,10 @@ func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {
// CaptureState implements the EVMLogger interface to trace a single step of VM execution.
func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+ // skip if the previous op caused an error
+ if err != nil {
+ return
+ }
// Only logs need to be captured via opcode processing
if !t.config.WithLog {
return
@@ -157,7 +161,7 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco
return
}
// Skip if tracing was interrupted
- if atomic.LoadUint32(&t.interrupt) > 0 {
+ if t.interrupt.Load() {
return
}
switch op {
@@ -176,7 +180,12 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco
topics[i] = common.Hash(topic.Bytes32())
}
- data := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64()))
+ data, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(mStart.Uint64()), int64(mSize.Uint64()))
+ if err != nil {
+ // mSize was unrealistically large
+ return
+ }
+
log := callLog{Address: scope.Contract.Address(), Topics: topics, Data: hexutil.Bytes(data)}
t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, log)
}
@@ -188,7 +197,7 @@ func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.
return
}
// Skip if tracing was interrupted
- if atomic.LoadUint32(&t.interrupt) > 0 {
+ if t.interrupt.Load() {
return
}
@@ -253,7 +262,7 @@ func (t *callTracer) GetResult() (json.RawMessage, error) {
// Stop terminates execution of the tracer at the first opportune moment.
func (t *callTracer) Stop(err error) {
t.reason = err
- atomic.StoreUint32(&t.interrupt, 1)
+ t.interrupt.Store(true)
}
// clearFailedLogs clears the logs of a callframe and all its children
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index 948d09ef7..b71d5d621 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -62,8 +62,8 @@ type prestateTracer struct {
to common.Address
gasLimit uint64 // Amount of gas bought for the whole tx
config prestateTracerConfig
- interrupt uint32 // Atomic flag to signal execution interruption
- reason error // Textual reason for the interruption
+ interrupt atomic.Bool // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
created map[common.Address]bool
deleted map[common.Address]bool
}
@@ -133,6 +133,13 @@ func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {
// CaptureState implements the EVMLogger interface to trace a single step of VM execution.
func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+ if err != nil {
+ return
+ }
+ // Skip if tracing was interrupted
+ if t.interrupt.Load() {
+ return
+ }
stack := scope.Stack
stackData := stack.Data()
stackLen := len(stackData)
@@ -256,7 +263,7 @@ func (t *prestateTracer) GetResult() (json.RawMessage, error) {
// Stop terminates execution of the tracer at the first opportune moment.
func (t *prestateTracer) Stop(err error) {
t.reason = err
- atomic.StoreUint32(&t.interrupt, 1)
+ t.interrupt.Store(true)
}
// lookupAccount fetches details of an account and adds it to the prestate
diff --git a/eth/tracers/tracers.go b/eth/tracers/tracers.go
index 856f52a10..023f73ef3 100644
--- a/eth/tracers/tracers.go
+++ b/eth/tracers/tracers.go
@@ -19,6 +19,7 @@ package tracers
import (
"encoding/json"
+ "fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -95,3 +96,27 @@ func (d *directory) IsJS(name string) bool {
// JS eval will execute JS code
return true
}
+
+const (
+ memoryPadLimit = 1024 * 1024
+)
+
+// GetMemoryCopyPadded returns offset + size as a new slice.
+// It zero-pads the slice if it extends beyond memory bounds.
+func GetMemoryCopyPadded(m *vm.Memory, offset, size int64) ([]byte, error) {
+ if offset < 0 || size < 0 {
+ return nil, fmt.Errorf("offset or size must not be negative")
+ }
+ if int(offset+size) < m.Len() { // slice fully inside memory
+ return m.GetCopy(offset, size), nil
+ }
+ paddingNeeded := int(offset+size) - m.Len()
+ if paddingNeeded > memoryPadLimit {
+ return nil, fmt.Errorf("reached limit for padding memory slice: %d", paddingNeeded)
+ }
+ cpy := make([]byte, size)
+ if overlap := int64(m.Len()) - offset; overlap > 0 {
+ copy(cpy, m.GetPtr(offset, overlap))
+ }
+ return cpy, nil
+}
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 7c5ec6565..759e3a4dd 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -87,7 +87,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
//EnableMemory: false,
//EnableReturnData: false,
})
- evm := vm.NewEVM(context, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Debug: true, Tracer: tracer})
+ evm := vm.NewEVM(context, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer})
msg, err := core.TransactionToMessage(tx, signer, nil)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -109,3 +109,41 @@ func BenchmarkTransactionTrace(b *testing.B) {
tracer.Reset()
}
}
+
+func TestMemCopying(t *testing.T) {
+ for i, tc := range []struct {
+ memsize int64
+ offset int64
+ size int64
+ wantErr string
+ wantSize int
+ }{
+ {0, 0, 100, "", 100}, // Should pad up to 100
+ {0, 100, 0, "", 0}, // No need to pad (0 size)
+ {100, 50, 100, "", 100}, // Should pad 100-150
+ {100, 50, 5, "", 5}, // Wanted range fully within memory
+ {100, -50, 0, "offset or size must not be negative", 0}, // Errror
+ {0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Errror
+ {10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Errror
+
+ } {
+ mem := vm.NewMemory()
+ mem.Resize(uint64(tc.memsize))
+ cpy, err := GetMemoryCopyPadded(mem, tc.offset, tc.size)
+ if want := tc.wantErr; want != "" {
+ if err == nil {
+ t.Fatalf("test %d: want '%v' have no error", i, want)
+ }
+ if have := err.Error(); want != have {
+ t.Fatalf("test %d: want '%v' have '%v'", i, want, have)
+ }
+ continue
+ }
+ if err != nil {
+ t.Fatalf("test %d: unexpected error: %v", i, err)
+ }
+ if want, have := tc.wantSize, len(cpy); have != want {
+ t.Fatalf("test %d: want %v have %v", i, want, have)
+ }
+ }
+}
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index c8353b25a..47beaf63c 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -320,7 +320,14 @@ func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, err
// SubscribeNewHead subscribes to notifications about the current blockchain head
// on the given channel.
func (ec *Client) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
- return ec.c.EthSubscribe(ctx, ch, "newHeads")
+ sub, err := ec.c.EthSubscribe(ctx, ch, "newHeads")
+ if err != nil {
+ // Defensively prefer returning nil interface explicitly on error-path, instead
+ // of letting default golang behavior wrap it with non-nil interface that stores
+ // nil concrete type value.
+ return nil, err
+ }
+ return sub, nil
}
// State Access
@@ -389,7 +396,14 @@ func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuer
if err != nil {
return nil, err
}
- return ec.c.EthSubscribe(ctx, ch, "logs", arg)
+ sub, err := ec.c.EthSubscribe(ctx, ch, "logs", arg)
+ if err != nil {
+ // Defensively prefer returning nil interface explicitly on error-path, instead
+ // of letting default golang behavior wrap it with non-nil interface that stores
+ // nil concrete type value.
+ return nil, err
+ }
+ return sub, nil
}
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 4e374c9e2..7c01b879b 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -75,14 +75,14 @@ type Database struct {
log log.Logger // Contextual logger tracking the database path
- activeComp int // Current number of active compactions
- compStartTime time.Time // The start time of the earliest currently-active compaction
- compTime int64 // Total time spent in compaction in ns
- level0Comp uint32 // Total number of level-zero compactions
- nonLevel0Comp uint32 // Total number of non level-zero compactions
- writeDelayStartTime time.Time // The start time of the latest write stall
- writeDelayCount int64 // Total number of write stall counts
- writeDelayTime int64 // Total time spent in write stalls
+ activeComp int // Current number of active compactions
+ compStartTime time.Time // The start time of the earliest currently-active compaction
+ compTime atomic.Int64 // Total time spent in compaction in ns
+ level0Comp atomic.Uint32 // Total number of level-zero compactions
+ nonLevel0Comp atomic.Uint32 // Total number of non level-zero compactions
+ writeDelayStartTime time.Time // The start time of the latest write stall
+ writeDelayCount atomic.Int64 // Total number of write stall counts
+ writeDelayTime atomic.Int64 // Total time spent in write stalls
}
func (d *Database) onCompactionBegin(info pebble.CompactionInfo) {
@@ -91,16 +91,16 @@ func (d *Database) onCompactionBegin(info pebble.CompactionInfo) {
}
l0 := info.Input[0]
if l0.Level == 0 {
- atomic.AddUint32(&d.level0Comp, 1)
+ d.level0Comp.Add(1)
} else {
- atomic.AddUint32(&d.nonLevel0Comp, 1)
+ d.nonLevel0Comp.Add(1)
}
d.activeComp++
}
func (d *Database) onCompactionEnd(info pebble.CompactionInfo) {
if d.activeComp == 1 {
- atomic.AddInt64(&d.compTime, int64(time.Since(d.compStartTime)))
+ d.compTime.Add(int64(time.Since(d.compStartTime)))
} else if d.activeComp == 0 {
panic("should not happen")
}
@@ -112,7 +112,7 @@ func (d *Database) onWriteStallBegin(b pebble.WriteStallBeginInfo) {
}
func (d *Database) onWriteStallEnd() {
- atomic.AddInt64(&d.writeDelayTime, int64(time.Since(d.writeDelayStartTime)))
+ d.writeDelayTime.Add(int64(time.Since(d.writeDelayStartTime)))
}
// New returns a wrapped pebble DB object. The namespace is the prefix that the
@@ -407,11 +407,11 @@ func (d *Database) meter(refresh time.Duration) {
nWrite int64
metrics = d.db.Metrics()
- compTime = atomic.LoadInt64(&d.compTime)
- writeDelayCount = atomic.LoadInt64(&d.writeDelayCount)
- writeDelayTime = atomic.LoadInt64(&d.writeDelayTime)
- nonLevel0CompCount = int64(atomic.LoadUint32(&d.nonLevel0Comp))
- level0CompCount = int64(atomic.LoadUint32(&d.level0Comp))
+ compTime = d.compTime.Load()
+ writeDelayCount = d.writeDelayCount.Load()
+ writeDelayTime = d.writeDelayTime.Load()
+ nonLevel0CompCount = int64(d.nonLevel0Comp.Load())
+ level0CompCount = int64(d.level0Comp.Load())
)
writeDelayTimes[i%2] = writeDelayTime
writeDelayCounts[i%2] = writeDelayCount
diff --git a/go.mod b/go.mod
index 6faf3a773..c00f61b75 100644
--- a/go.mod
+++ b/go.mod
@@ -30,18 +30,18 @@ require (
github.com/gofrs/flock v0.8.1
github.com/golang-jwt/jwt/v4 v4.3.0
github.com/golang/protobuf v1.5.2
- github.com/golang/snappy v0.0.4
+ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/holiman/bloomfilter/v2 v2.0.3
- github.com/holiman/uint256 v1.2.0
+ github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c
github.com/huin/goupnp v1.0.3
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
- github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
+ github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/julienschmidt/httprouter v1.3.0
@@ -67,7 +67,8 @@ require (
golang.org/x/sys v0.6.0
golang.org/x/text v0.8.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
- golang.org/x/tools v0.6.0
+ golang.org/x/tools v0.7.0
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
)
@@ -75,6 +76,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
+ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 // indirect
@@ -96,6 +98,7 @@ require (
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
@@ -117,7 +120,7 @@ require (
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
- golang.org/x/mod v0.8.0 // indirect
+ golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/protobuf v1.28.1 // indirect
diff --git a/go.sum b/go.sum
index 88e2762f6..d71146cd4 100644
--- a/go.sum
+++ b/go.sum
@@ -26,7 +26,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -251,6 +251,8 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -310,8 +312,8 @@ github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vA
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
-github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
-github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
+github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
@@ -773,8 +775,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -850,6 +852,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index 0c13cc80f..45a1f7a2a 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -24,6 +24,7 @@ import (
"math/big"
"sort"
"strconv"
+ "sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
@@ -184,32 +185,39 @@ func (at *AccessTuple) StorageKeys(ctx context.Context) []common.Hash {
// Transaction represents an Ethereum transaction.
// backend and hash are mandatory; all others will be fetched when required.
type Transaction struct {
- r *Resolver
- hash common.Hash
+ r *Resolver
+ hash common.Hash // Must be present after initialization
+ mu sync.Mutex
+ // mu protects following resources
tx *types.Transaction
block *Block
index uint64
}
// resolve returns the internal transaction object, fetching it if needed.
-func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, error) {
- if t.tx == nil {
- // Try to return an already finalized transaction
- tx, blockHash, _, index, err := t.r.backend.GetTransaction(ctx, t.hash)
- if err == nil && tx != nil {
- t.tx = tx
- blockNrOrHash := rpc.BlockNumberOrHashWithHash(blockHash, false)
- t.block = &Block{
- r: t.r,
- numberOrHash: &blockNrOrHash,
- }
- t.index = index
- return t.tx, nil
- }
- // No finalized transaction, try to retrieve it from the pool
- t.tx = t.r.backend.GetPoolTransaction(t.hash)
+// It also returns the block the tx belongs to, unless it is a pending tx.
+func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, *Block, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.tx != nil {
+ return t.tx, t.block, nil
}
- return t.tx, nil
+ // Try to return an already finalized transaction
+ tx, blockHash, _, index, err := t.r.backend.GetTransaction(ctx, t.hash)
+ if err == nil && tx != nil {
+ t.tx = tx
+ blockNrOrHash := rpc.BlockNumberOrHashWithHash(blockHash, false)
+ t.block = &Block{
+ r: t.r,
+ numberOrHash: &blockNrOrHash,
+ hash: blockHash,
+ }
+ t.index = index
+ return t.tx, t.block, nil
+ }
+ // No finalized transaction, try to retrieve it from the pool
+ t.tx = t.r.backend.GetPoolTransaction(t.hash)
+ return t.tx, nil, nil
}
func (t *Transaction) Hash(ctx context.Context) common.Hash {
@@ -217,7 +225,7 @@ func (t *Transaction) Hash(ctx context.Context) common.Hash {
}
func (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Bytes{}, err
}
@@ -225,7 +233,7 @@ func (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) {
}
func (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return 0, err
}
@@ -233,7 +241,7 @@ func (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) {
}
func (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, block, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Big{}, err
}
@@ -241,8 +249,8 @@ func (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) {
case types.AccessListTxType:
return hexutil.Big(*tx.GasPrice()), nil
case types.DynamicFeeTxType:
- if t.block != nil {
- if baseFee, _ := t.block.BaseFeePerGas(ctx); baseFee != nil {
+ if block != nil {
+ if baseFee, _ := block.BaseFeePerGas(ctx); baseFee != nil {
// price = min(tip, gasFeeCap - baseFee) + baseFee
return (hexutil.Big)(*math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee.ToInt()), tx.GasFeeCap())), nil
}
@@ -254,15 +262,15 @@ func (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) {
}
func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, block, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
// Pending tx
- if t.block == nil {
+ if block == nil {
return nil, nil
}
- header, err := t.block.resolveHeader(ctx)
+ header, err := block.resolveHeader(ctx)
if err != nil || header == nil {
return nil, err
}
@@ -273,7 +281,7 @@ func (t *Transaction) EffectiveGasPrice(ctx context.Context) (*hexutil.Big, erro
}
func (t *Transaction) MaxFeePerGas(ctx context.Context) (*hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
@@ -288,7 +296,7 @@ func (t *Transaction) MaxFeePerGas(ctx context.Context) (*hexutil.Big, error) {
}
func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
@@ -303,15 +311,15 @@ func (t *Transaction) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, e
}
func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, block, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
// Pending tx
- if t.block == nil {
+ if block == nil {
return nil, nil
}
- header, err := t.block.resolveHeader(ctx)
+ header, err := block.resolveHeader(ctx)
if err != nil || header == nil {
return nil, err
}
@@ -327,7 +335,7 @@ func (t *Transaction) EffectiveTip(ctx context.Context) (*hexutil.Big, error) {
}
func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Big{}, err
}
@@ -338,7 +346,7 @@ func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {
}
func (t *Transaction) Nonce(ctx context.Context) (hexutil.Uint64, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return 0, err
}
@@ -346,7 +354,7 @@ func (t *Transaction) Nonce(ctx context.Context) (hexutil.Uint64, error) {
}
func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
@@ -362,7 +370,7 @@ func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, e
}
func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
@@ -376,17 +384,20 @@ func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account,
}
func (t *Transaction) Block(ctx context.Context) (*Block, error) {
- if _, err := t.resolve(ctx); err != nil {
+ _, block, err := t.resolve(ctx)
+ if err != nil {
return nil, err
}
- return t.block, nil
+ return block, nil
}
func (t *Transaction) Index(ctx context.Context) (*int32, error) {
- if _, err := t.resolve(ctx); err != nil {
+ _, block, err := t.resolve(ctx)
+ if err != nil {
return nil, err
}
- if t.block == nil {
+ // Pending tx
+ if block == nil {
return nil, nil
}
index := int32(t.index)
@@ -395,13 +406,15 @@ func (t *Transaction) Index(ctx context.Context) (*int32, error) {
// getReceipt returns the receipt associated with this transaction, if any.
func (t *Transaction) getReceipt(ctx context.Context) (*types.Receipt, error) {
- if _, err := t.resolve(ctx); err != nil {
+ _, block, err := t.resolve(ctx)
+ if err != nil {
return nil, err
}
- if t.block == nil {
+ // Pending tx
+ if block == nil {
return nil, nil
}
- receipts, err := t.block.resolveReceipts(ctx)
+ receipts, err := block.resolveReceipts(ctx)
if err != nil {
return nil, err
}
@@ -451,28 +464,25 @@ func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs)
}
func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) {
- if _, err := t.resolve(ctx); err != nil {
+ _, block, err := t.resolve(ctx)
+ if err != nil {
return nil, err
}
- if t.block == nil {
+ // Pending tx
+ if block == nil {
return nil, nil
}
- if _, ok := t.block.numberOrHash.Hash(); !ok {
- header, err := t.r.backend.HeaderByNumberOrHash(ctx, *t.block.numberOrHash)
- if err != nil {
- return nil, err
- }
- hash := header.Hash()
- t.block.numberOrHash.BlockHash = &hash
+ h, err := block.Hash(ctx)
+ if err != nil {
+ return nil, err
}
- return t.getLogs(ctx)
+ return t.getLogs(ctx, h)
}
// getLogs returns log objects for the given tx.
// Assumes block hash is resolved.
-func (t *Transaction) getLogs(ctx context.Context) (*[]*Log, error) {
+func (t *Transaction) getLogs(ctx context.Context, hash common.Hash) (*[]*Log, error) {
var (
- hash, _ = t.block.numberOrHash.Hash()
filter = t.r.filterSystem.NewBlockFilter(hash, nil, nil)
logs, err = filter.Logs(ctx)
)
@@ -494,7 +504,7 @@ func (t *Transaction) getLogs(ctx context.Context) (*[]*Log, error) {
}
func (t *Transaction) Type(ctx context.Context) (*int32, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil {
return nil, err
}
@@ -503,7 +513,7 @@ func (t *Transaction) Type(ctx context.Context) (*int32, error) {
}
func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return nil, err
}
@@ -519,7 +529,7 @@ func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) {
}
func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Big{}, err
}
@@ -528,7 +538,7 @@ func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) {
}
func (t *Transaction) S(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Big{}, err
}
@@ -537,7 +547,7 @@ func (t *Transaction) S(ctx context.Context) (hexutil.Big, error) {
}
func (t *Transaction) V(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Big{}, err
}
@@ -546,7 +556,7 @@ func (t *Transaction) V(ctx context.Context) (hexutil.Big, error) {
}
func (t *Transaction) Raw(ctx context.Context) (hexutil.Bytes, error) {
- tx, err := t.resolve(ctx)
+ tx, _, err := t.resolve(ctx)
if err != nil || tx == nil {
return hexutil.Bytes{}, err
}
@@ -568,16 +578,20 @@ type BlockType int
// when required.
type Block struct {
r *Resolver
- numberOrHash *rpc.BlockNumberOrHash
- hash common.Hash
- header *types.Header
- block *types.Block
- receipts []*types.Receipt
+ numberOrHash *rpc.BlockNumberOrHash // Field resolvers assume numberOrHash is always present
+ mu sync.Mutex
+ // mu protects following resources
+ hash common.Hash // Must be resolved during initialization
+ header *types.Header
+ block *types.Block
+ receipts []*types.Receipt
}
// resolve returns the internal Block object representing this block, fetching
// it if necessary.
func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
if b.block != nil {
return b.block, nil
}
@@ -587,10 +601,10 @@ func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
}
var err error
b.block, err = b.r.backend.BlockByNumberOrHash(ctx, *b.numberOrHash)
- if b.block != nil && b.header == nil {
- b.header = b.block.Header()
- if hash, ok := b.numberOrHash.Hash(); ok {
- b.hash = hash
+ if b.block != nil {
+ b.hash = b.block.Hash()
+ if b.header == nil {
+ b.header = b.block.Header()
}
}
return b.block, err
@@ -600,39 +614,39 @@ func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
// if necessary. Call this function instead of `resolve` unless you need the
// additional data (transactions and uncles).
func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.header != nil {
+ return b.header, nil
+ }
if b.numberOrHash == nil && b.hash == (common.Hash{}) {
return nil, errBlockInvariant
}
var err error
- if b.header == nil {
- if b.hash != (common.Hash{}) {
- b.header, err = b.r.backend.HeaderByHash(ctx, b.hash)
- } else {
- b.header, err = b.r.backend.HeaderByNumberOrHash(ctx, *b.numberOrHash)
- }
+ b.header, err = b.r.backend.HeaderByNumberOrHash(ctx, *b.numberOrHash)
+ if err != nil {
+ return nil, err
}
- return b.header, err
+ if b.hash == (common.Hash{}) {
+ b.hash = b.header.Hash()
+ }
+ return b.header, nil
}
// resolveReceipts returns the list of receipts for this block, fetching them
// if necessary.
func (b *Block) resolveReceipts(ctx context.Context) ([]*types.Receipt, error) {
- if b.receipts == nil {
- hash := b.hash
- if hash == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- hash = header.Hash()
- }
- receipts, err := b.r.backend.GetReceipts(ctx, hash)
- if err != nil {
- return nil, err
- }
- b.receipts = receipts
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.receipts != nil {
+ return b.receipts, nil
}
- return b.receipts, nil
+ receipts, err := b.r.backend.GetReceipts(ctx, b.hash)
+ if err != nil {
+ return nil, err
+ }
+ b.receipts = receipts
+ return receipts, nil
}
func (b *Block) Number(ctx context.Context) (Long, error) {
@@ -645,13 +659,8 @@ func (b *Block) Number(ctx context.Context) (Long, error) {
}
func (b *Block) Hash(ctx context.Context) (common.Hash, error) {
- if b.hash == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- b.hash = header.Hash()
- }
+ b.mu.Lock()
+ defer b.mu.Unlock()
return b.hash, nil
}
@@ -705,11 +714,18 @@ func (b *Block) Parent(ctx context.Context) (*Block, error) {
if b.header == nil || b.header.Number.Uint64() < 1 {
return nil, nil
}
- num := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(b.header.Number.Uint64() - 1))
+ var (
+ num = rpc.BlockNumber(b.header.Number.Uint64() - 1)
+ hash = b.header.ParentHash
+ numOrHash = rpc.BlockNumberOrHash{
+ BlockNumber: &num,
+ BlockHash: &hash,
+ }
+ )
return &Block{
r: b.r,
- numberOrHash: &num,
- hash: b.header.ParentHash,
+ numberOrHash: &numOrHash,
+ hash: hash,
}, nil
}
@@ -798,6 +814,7 @@ func (b *Block) Ommers(ctx context.Context) (*[]*Block, error) {
r: b.r,
numberOrHash: &blockNumberOrHash,
header: uncle,
+ hash: uncle.Hash(),
})
}
return &ret, nil
@@ -820,17 +837,13 @@ func (b *Block) LogsBloom(ctx context.Context) (hexutil.Bytes, error) {
}
func (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) {
- h := b.hash
- if h == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return hexutil.Big{}, err
- }
- h = header.Hash()
+ hash, err := b.Hash(ctx)
+ if err != nil {
+ return hexutil.Big{}, err
}
- td := b.r.backend.GetTd(ctx, h)
+ td := b.r.backend.GetTd(ctx, hash)
if td == nil {
- return hexutil.Big{}, fmt.Errorf("total difficulty not found %x", b.hash)
+ return hexutil.Big{}, fmt.Errorf("total difficulty not found %x", hash)
}
return hexutil.Big(*td), nil
}
@@ -948,6 +961,7 @@ func (b *Block) OmmerAt(ctx context.Context, args struct{ Index int32 }) (*Block
r: b.r,
numberOrHash: &blockNumberOrHash,
header: uncle,
+ hash: uncle.Hash(),
}, nil
}
@@ -997,15 +1011,11 @@ func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteri
if args.Filter.Topics != nil {
topics = *args.Filter.Topics
}
- hash := b.hash
- if hash == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- hash = header.Hash()
- }
// Construct the range filter
+ hash, err := b.Hash(ctx)
+ if err != nil {
+ return nil, err
+ }
filter := b.r.filterSystem.NewBlockFilter(hash, addresses, topics)
// Run the filter and return all the logs
@@ -1015,12 +1025,6 @@ func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteri
func (b *Block) Account(ctx context.Context, args struct {
Address common.Address
}) (*Account, error) {
- if b.numberOrHash == nil {
- _, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- }
return &Account{
r: b.r,
address: args.Address,
@@ -1063,12 +1067,6 @@ func (c *CallResult) Status() Long {
func (b *Block) Call(ctx context.Context, args struct {
Data ethapi.TransactionArgs
}) (*CallResult, error) {
- if b.numberOrHash == nil {
- _, err := b.resolve(ctx)
- if err != nil {
- return nil, err
- }
- }
result, err := ethapi.DoCall(ctx, b.r.backend, args.Data, *b.numberOrHash, nil, b.r.backend.RPCEVMTimeout(), b.r.backend.RPCGasCap())
if err != nil {
return nil, err
@@ -1088,12 +1086,6 @@ func (b *Block) Call(ctx context.Context, args struct {
func (b *Block) EstimateGas(ctx context.Context, args struct {
Data ethapi.TransactionArgs
}) (Long, error) {
- if b.numberOrHash == nil {
- _, err := b.resolveHeader(ctx)
- if err != nil {
- return 0, err
- }
- }
gas, err := ethapi.DoEstimateGas(ctx, b.r.backend, args.Data, *b.numberOrHash, b.r.backend.RPCGasCap())
return Long(gas), err
}
@@ -1173,29 +1165,21 @@ func (r *Resolver) Block(ctx context.Context, args struct {
Number *Long
Hash *common.Hash
}) (*Block, error) {
- var block *Block
+ var numberOrHash rpc.BlockNumberOrHash
if args.Number != nil {
if *args.Number < 0 {
return nil, nil
}
number := rpc.BlockNumber(*args.Number)
- numberOrHash := rpc.BlockNumberOrHashWithNumber(number)
- block = &Block{
- r: r,
- numberOrHash: &numberOrHash,
- }
+ numberOrHash = rpc.BlockNumberOrHashWithNumber(number)
} else if args.Hash != nil {
- numberOrHash := rpc.BlockNumberOrHashWithHash(*args.Hash, false)
- block = &Block{
- r: r,
- numberOrHash: &numberOrHash,
- }
+ numberOrHash = rpc.BlockNumberOrHashWithHash(*args.Hash, false)
} else {
- numberOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
- block = &Block{
- r: r,
- numberOrHash: &numberOrHash,
- }
+ numberOrHash = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ }
+ block := &Block{
+ r: r,
+ numberOrHash: &numberOrHash,
}
// Resolve the header, return nil if it doesn't exist.
// Note we don't resolve block directly here since it will require an
@@ -1256,7 +1240,7 @@ func (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Has
hash: args.Hash,
}
// Resolve the transaction; if it doesn't exist, return nil.
- t, err := tx.resolve(ctx)
+ t, _, err := tx.resolve(ctx)
if err != nil {
return nil, err
} else if t == nil {
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 46acd1529..9354eac0f 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -156,6 +156,7 @@ func TestGraphQLBlockSerialization(t *testing.T) {
t.Fatalf("could not post: %v", err)
}
bodyBytes, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
if err != nil {
t.Fatalf("could not read from response body: %v", err)
}
@@ -239,6 +240,7 @@ func TestGraphQLBlockSerializationEIP2718(t *testing.T) {
t.Fatalf("could not post: %v", err)
}
bodyBytes, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
if err != nil {
t.Fatalf("could not read from response body: %v", err)
}
@@ -263,11 +265,12 @@ func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {
if err != nil {
t.Fatalf("could not post: %v", err)
}
+ resp.Body.Close()
// make sure the request is not handled successfully
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
}
-func TestGraphQLTransactionLogs(t *testing.T) {
+func TestGraphQLConcurrentResolvers(t *testing.T) {
var (
key, _ = crypto.GenerateKey()
addr = crypto.PubkeyToAddress(key.PublicKey)
@@ -292,8 +295,9 @@ func TestGraphQLTransactionLogs(t *testing.T) {
)
defer stack.Close()
- handler := newGQLService(t, stack, genesis, 1, func(i int, gen *core.BlockGen) {
- tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)})
+ var tx *types.Transaction
+ handler, chain := newGQLService(t, stack, genesis, 1, func(i int, gen *core.BlockGen) {
+ tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)})
gen.AddTx(tx)
tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Nonce: 1, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)})
gen.AddTx(tx)
@@ -304,18 +308,59 @@ func TestGraphQLTransactionLogs(t *testing.T) {
if err := stack.Start(); err != nil {
t.Fatalf("could not start node: %v", err)
}
- query := `{block { transactions { logs { account { address } } } } }`
- res := handler.Schema.Exec(context.Background(), query, "", map[string]interface{}{})
- if res.Errors != nil {
- t.Fatalf("graphql query failed: %v", res.Errors)
- }
- have, err := json.Marshal(res.Data)
- if err != nil {
- t.Fatalf("failed to encode graphql response: %s", err)
- }
- want := fmt.Sprintf(`{"block":{"transactions":[{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]}]}}`, dadStr, dadStr, dadStr, dadStr, dadStr, dadStr)
- if string(have) != want {
- t.Errorf("response unmatch. expected %s, got %s", want, have)
+
+ for i, tt := range []struct {
+ body string
+ want string
+ }{
+ // Multiple txes race to get/set the block hash.
+ {
+ body: "{block { transactions { logs { account { address } } } } }",
+ want: fmt.Sprintf(`{"block":{"transactions":[{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]}]}}`, dadStr, dadStr, dadStr, dadStr, dadStr, dadStr),
+ },
+ // Multiple fields of a tx race to resolve it. Happens in this case
+ // because resolving the tx body belonging to a log is delayed.
+ {
+ body: `{block { logs(filter: {}) { transaction { nonce value gasPrice }}}}`,
+ want: `{"block":{"logs":[{"transaction":{"nonce":"0x0","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x0","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x1","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x1","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x2","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x2","value":"0x0","gasPrice":"0x3b9aca00"}}]}}`,
+ },
+ // Multiple txes of a block race to set/retrieve receipts of a block.
+ {
+ body: "{block { transactions { status gasUsed } } }",
+ want: `{"block":{"transactions":[{"status":1,"gasUsed":21768},{"status":1,"gasUsed":21768},{"status":1,"gasUsed":21768}]}}`,
+ },
+ // Multiple fields of block race to resolve header and body.
+ {
+ body: "{ block { number hash gasLimit ommerCount transactionCount totalDifficulty } }",
+ want: fmt.Sprintf(`{"block":{"number":1,"hash":"%s","gasLimit":11500000,"ommerCount":0,"transactionCount":3,"totalDifficulty":"0x200000"}}`, chain[len(chain)-1].Hash()),
+ },
+ // Multiple fields of a block race to resolve the header and body.
+ {
+ body: fmt.Sprintf(`{ transaction(hash: "%s") { block { number hash gasLimit ommerCount transactionCount } } }`, tx.Hash()),
+ want: fmt.Sprintf(`{"transaction":{"block":{"number":1,"hash":"%s","gasLimit":11500000,"ommerCount":0,"transactionCount":3}}}`, chain[len(chain)-1].Hash()),
+ },
+ // Account fields race to resolve the state object.
+ {
+ body: fmt.Sprintf(`{ block { account(address: "%s") { balance transactionCount code } } }`, dadStr),
+ want: `{"block":{"account":{"balance":"0x0","transactionCount":"0x0","code":"0x60006000a060006000a060006000f3"}}}`,
+ },
+ // Test values for a non-existent account.
+ {
+ body: fmt.Sprintf(`{ block { account(address: "%s") { balance transactionCount code } } }`, "0x1111111111111111111111111111111111111111"),
+ want: `{"block":{"account":{"balance":"0x0","transactionCount":"0x0","code":"0x"}}}`,
+ },
+ } {
+ res := handler.Schema.Exec(context.Background(), tt.body, "", map[string]interface{}{})
+ if res.Errors != nil {
+ t.Fatalf("failed to execute query for testcase #%d: %v", i, res.Errors)
+ }
+ have, err := json.Marshal(res.Data)
+ if err != nil {
+ t.Fatalf("failed to encode graphql response for testcase #%d: %s", i, err)
+ }
+ if string(have) != tt.want {
+ t.Errorf("response unmatch for testcase #%d.\nExpected:\n%s\nGot:\n%s\n", i, tt.want, have)
+ }
}
}
@@ -333,7 +378,7 @@ func createNode(t *testing.T) *node.Node {
return stack
}
-func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) *handler {
+func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) (*handler, []*types.Block) {
ethConf := ðconfig.Config{
Genesis: gspec,
Ethash: ethash.Config{
@@ -364,5 +409,5 @@ func newGQLService(t *testing.T, stack *node.Node, gspec *core.Genesis, genBlock
if err != nil {
t.Fatalf("could not create graphql service: %v", err)
}
- return handler
+ return handler, chain
}
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index e014a85d7..d425a17db 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -20,8 +20,9 @@ import (
"fmt"
"io"
"net/http"
- _ "net/http/pprof" // nolint: gosec
+ _ "net/http/pprof"
"os"
+ "path/filepath"
"runtime"
"github.com/ethereum/go-ethereum/internal/flags"
@@ -32,6 +33,7 @@ import (
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
+ "gopkg.in/natefinch/lumberjack.v2"
)
var Memsize memsizeui.Handler
@@ -43,15 +45,28 @@ var (
Value: 3,
Category: flags.LoggingCategory,
}
+ logVmoduleFlag = &cli.StringFlag{
+ Name: "log.vmodule",
+ Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)",
+ Value: "",
+ Category: flags.LoggingCategory,
+ }
vmoduleFlag = &cli.StringFlag{
Name: "vmodule",
Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)",
Value: "",
+ Hidden: true,
Category: flags.LoggingCategory,
}
logjsonFlag = &cli.BoolFlag{
Name: "log.json",
Usage: "Format logs with JSON",
+ Hidden: true,
+ Category: flags.LoggingCategory,
+ }
+ logFormatFlag = &cli.StringFlag{
+ Name: "log.format",
+ Usage: "Log format to use (json|logfmt|terminal)",
Category: flags.LoggingCategory,
}
logFileFlag = &cli.StringFlag{
@@ -70,6 +85,34 @@ var (
Usage: "Prepends log messages with call-site location (file and line number)",
Category: flags.LoggingCategory,
}
+ logRotateFlag = &cli.BoolFlag{
+ Name: "log.rotate",
+ Usage: "Enables log file rotation",
+ }
+ logMaxSizeMBsFlag = &cli.IntFlag{
+ Name: "log.maxsize",
+ Usage: "Maximum size in MBs of a single log file",
+ Value: 100,
+ Category: flags.LoggingCategory,
+ }
+ logMaxBackupsFlag = &cli.IntFlag{
+ Name: "log.maxbackups",
+ Usage: "Maximum number of log files to retain",
+ Value: 10,
+ Category: flags.LoggingCategory,
+ }
+ logMaxAgeFlag = &cli.IntFlag{
+ Name: "log.maxage",
+ Usage: "Maximum number of days to retain a log file",
+ Value: 30,
+ Category: flags.LoggingCategory,
+ }
+ logCompressFlag = &cli.BoolFlag{
+ Name: "log.compress",
+ Usage: "Compress the log files",
+ Value: false,
+ Category: flags.LoggingCategory,
+ }
pprofFlag = &cli.BoolFlag{
Name: "pprof",
Usage: "Enable the pprof HTTP server",
@@ -113,11 +156,18 @@ var (
// Flags holds all command-line flags required for debugging.
var Flags = []cli.Flag{
verbosityFlag,
+ logVmoduleFlag,
vmoduleFlag,
- logjsonFlag,
- logFileFlag,
backtraceAtFlag,
debugFlag,
+ logjsonFlag,
+ logFormatFlag,
+ logFileFlag,
+ logRotateFlag,
+ logMaxSizeMBsFlag,
+ logMaxBackupsFlag,
+ logMaxAgeFlag,
+ logCompressFlag,
pprofFlag,
pprofAddrFlag,
pprofPortFlag,
@@ -141,35 +191,83 @@ func init() {
// Setup initializes profiling and logging based on the CLI flags.
// It should be called as early as possible in the program.
func Setup(ctx *cli.Context) error {
- logFile := ctx.String(logFileFlag.Name)
- useColor := logFile == "" && os.Getenv("TERM") != "dumb" && (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd()))
-
- var logfmt log.Format
- if ctx.Bool(logjsonFlag.Name) {
+ var (
+ logfmt log.Format
+ output = io.Writer(os.Stderr)
+ logFmtFlag = ctx.String(logFormatFlag.Name)
+ )
+ switch {
+ case ctx.Bool(logjsonFlag.Name):
+ // Retain backwards compatibility with `--log.json` flag if `--log.format` not set
+ defer log.Warn("The flag '--log.json' is deprecated, please use '--log.format=json' instead")
logfmt = log.JSONFormat()
- } else {
- logfmt = log.TerminalFormat(useColor)
- }
-
- if logFile != "" {
- var err error
- logOutputStream, err = log.FileHandler(logFile, logfmt)
- if err != nil {
- return err
- }
- } else {
- output := io.Writer(os.Stderr)
+ case logFmtFlag == "json":
+ logfmt = log.JSONFormat()
+ case logFmtFlag == "logfmt":
+ logfmt = log.LogfmtFormat()
+ case logFmtFlag == "", logFmtFlag == "terminal":
+ useColor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if useColor {
output = colorable.NewColorableStderr()
}
- logOutputStream = log.StreamHandler(output, logfmt)
+ logfmt = log.TerminalFormat(useColor)
+ default:
+ // Unknown log format specified
+ return fmt.Errorf("unknown log format: %v", ctx.String(logFormatFlag.Name))
}
- glogger.SetHandler(logOutputStream)
+ var (
+ stdHandler = log.StreamHandler(output, logfmt)
+ ostream = stdHandler
+ logFile = ctx.String(logFileFlag.Name)
+ rotation = ctx.Bool(logRotateFlag.Name)
+ )
+ if len(logFile) > 0 {
+ if err := validateLogLocation(filepath.Dir(logFile)); err != nil {
+ return fmt.Errorf("failed to initialize file logger: %v", err)
+ }
+ }
+ context := []interface{}{"rotate", rotation}
+ if len(logFmtFlag) > 0 {
+ context = append(context, "format", logFmtFlag)
+ } else {
+ context = append(context, "format", "terminal")
+ }
+ if rotation {
+ // Lumberjack uses <processname>-lumberjack.log in os.TempDir() if empty,
+ // so typically /tmp/geth-lumberjack.log on linux.
+ if len(logFile) > 0 {
+ context = append(context, "location", logFile)
+ } else {
+ context = append(context, "location", filepath.Join(os.TempDir(), "geth-lumberjack.log"))
+ }
+ ostream = log.MultiHandler(log.StreamHandler(&lumberjack.Logger{
+ Filename: logFile,
+ MaxSize: ctx.Int(logMaxSizeMBsFlag.Name),
+ MaxBackups: ctx.Int(logMaxBackupsFlag.Name),
+ MaxAge: ctx.Int(logMaxAgeFlag.Name),
+ Compress: ctx.Bool(logCompressFlag.Name),
+ }, logfmt), stdHandler)
+ } else if logFile != "" {
+ if logOutputStream, err := log.FileHandler(logFile, logfmt); err != nil {
+ return err
+ } else {
+ ostream = log.MultiHandler(logOutputStream, stdHandler)
+ context = append(context, "location", logFile)
+ }
+ }
+ glogger.SetHandler(ostream)
// logging
verbosity := ctx.Int(verbosityFlag.Name)
glogger.Verbosity(log.Lvl(verbosity))
- vmodule := ctx.String(vmoduleFlag.Name)
+ vmodule := ctx.String(logVmoduleFlag.Name)
+ if vmodule == "" {
+ // Retain backwards compatibility with `--vmodule` flag if `--log.vmodule` not set
+ vmodule = ctx.String(vmoduleFlag.Name)
+ if vmodule != "" {
+ defer log.Warn("The flag '--vmodule' is deprecated, please use '--log.vmodule' instead")
+ }
+ }
glogger.Vmodule(vmodule)
debug := ctx.Bool(debugFlag.Name)
@@ -215,6 +313,9 @@ func Setup(ctx *cli.Context) error {
// It cannot be imported because it will cause a cyclical dependency.
StartPProf(address, !ctx.IsSet("metrics.addr"))
}
+ if len(logFile) > 0 || rotation {
+ log.Info("Logging configured", context...)
+ }
return nil
}
@@ -242,3 +343,17 @@ func Exit() {
closer.Close()
}
}
+
+func validateLogLocation(path string) error {
+ if err := os.MkdirAll(path, os.ModePerm); err != nil {
+ return fmt.Errorf("error creating the directory: %w", err)
+ }
+ // Check if the path is writable by trying to create a temporary file
+ tmp := filepath.Join(path, "tmp")
+ if f, err := os.Create(tmp); err != nil {
+ return err
+ } else {
+ f.Close()
+ }
+ return os.Remove(tmp)
+}
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 0cf29e5fe..b7ad30a6e 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -89,7 +89,7 @@ type feeHistoryResult struct {
// FeeHistory returns the fee market history.
func (s *EthereumAPI) FeeHistory(ctx context.Context, blockCount math.HexOrDecimal64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) {
- oldest, reward, baseFee, gasUsed, err := s.b.FeeHistory(ctx, int(blockCount), lastBlock, rewardPercentiles)
+ oldest, reward, baseFee, gasUsed, err := s.b.FeeHistory(ctx, uint64(blockCount), lastBlock, rewardPercentiles)
if err != nil {
return nil, err
}
@@ -1473,7 +1473,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
// Apply the transaction with the access list tracer
tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles)
- config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true}
+ config := vm.Config{Tracer: tracer, NoBaseFee: true}
vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config)
if err != nil {
return nil, 0, nil, err
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 98887afc8..0249c8664 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -44,7 +44,7 @@ type Backend interface {
SyncProgress() ethereum.SyncProgress
SuggestGasTipCap(ctx context.Context) (*big.Int, error)
- FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error)
+ FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error)
ChainDb() ethdb.Database
AccountManager() *accounts.Manager
ExtRPCEnabled() bool
diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go
index 1b533861d..24c15b777 100644
--- a/internal/ethapi/transaction_args_test.go
+++ b/internal/ethapi/transaction_args_test.go
@@ -258,7 +258,7 @@ func (b *backendMock) ChainConfig() *params.ChainConfig { return b.config }
// Other methods needed to implement Backend interface.
func (b *backendMock) SyncProgress() ethereum.SyncProgress { return ethereum.SyncProgress{} }
-func (b *backendMock) FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
+func (b *backendMock) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
return nil, nil, nil, nil, nil
}
func (b *backendMock) ChainDb() ethdb.Database { return nil }
diff --git a/les/api_backend.go b/les/api_backend.go
index 4b0369845..2d1fccd9a 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -272,7 +272,7 @@ func (b *LesApiBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error)
return b.gpo.SuggestTipCap(ctx)
}
-func (b *LesApiBackend) FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
+func (b *LesApiBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
return b.gpo.FeeHistory(ctx, blockCount, lastBlock, rewardPercentiles)
}
diff --git a/les/server_handler.go b/les/server_handler.go
index 2ea496ac2..39c7ace1c 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -364,7 +364,7 @@ func getAccount(triedb *trie.Database, root, hash common.Hash) (types.StateAccou
if err != nil {
return types.StateAccount{}, err
}
- blob, err := trie.TryGet(hash[:])
+ blob, err := trie.Get(hash[:])
if err != nil {
return types.StateAccount{}, err
}
diff --git a/light/postprocess.go b/light/postprocess.go
index e800a1f0f..763ba2752 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -206,8 +206,7 @@ func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) e
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], num)
data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
- c.trie.Update(encNumber[:], data)
- return nil
+ return c.trie.Update(encNumber[:], data)
}
// Commit implements core.ChainIndexerBackend
@@ -450,10 +449,15 @@ func (b *BloomTrieIndexerBackend) Commit() error {
decompSize += uint64(len(decomp))
compSize += uint64(len(comp))
+
+ var terr error
if len(comp) > 0 {
- b.trie.Update(encKey[:], comp)
+ terr = b.trie.Update(encKey[:], comp)
} else {
- b.trie.Delete(encKey[:])
+ terr = b.trie.Delete(encKey[:])
+ }
+ if terr != nil {
+ return terr
}
}
root, nodes := b.trie.Commit(false)
diff --git a/light/trie.go b/light/trie.go
index 0ccab1588..38dd6b5c2 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -105,21 +105,21 @@ type odrTrie struct {
trie *trie.Trie
}
-func (t *odrTrie) TryGet(key []byte) ([]byte, error) {
+func (t *odrTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
key = crypto.Keccak256(key)
var res []byte
err := t.do(key, func() (err error) {
- res, err = t.trie.TryGet(key)
+ res, err = t.trie.Get(key)
return err
})
return res, err
}
-func (t *odrTrie) TryGetAccount(address common.Address) (*types.StateAccount, error) {
+func (t *odrTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
var res types.StateAccount
key := crypto.Keccak256(address.Bytes())
err := t.do(key, func() (err error) {
- value, err := t.trie.TryGet(key)
+ value, err := t.trie.Get(key)
if err != nil {
return err
}
@@ -131,36 +131,36 @@ func (t *odrTrie) TryGetAccount(address common.Address) (*types.StateAccount, er
return &res, err
}
-func (t *odrTrie) TryUpdateAccount(address common.Address, acc *types.StateAccount) error {
+func (t *odrTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error {
key := crypto.Keccak256(address.Bytes())
value, err := rlp.EncodeToBytes(acc)
if err != nil {
return fmt.Errorf("decoding error in account update: %w", err)
}
return t.do(key, func() error {
- return t.trie.TryUpdate(key, value)
+ return t.trie.Update(key, value)
})
}
-func (t *odrTrie) TryUpdate(key, value []byte) error {
+func (t *odrTrie) UpdateStorage(_ common.Address, key, value []byte) error {
key = crypto.Keccak256(key)
return t.do(key, func() error {
- return t.trie.TryUpdate(key, value)
+ return t.trie.Update(key, value)
})
}
-func (t *odrTrie) TryDelete(key []byte) error {
+func (t *odrTrie) DeleteStorage(_ common.Address, key []byte) error {
key = crypto.Keccak256(key)
return t.do(key, func() error {
- return t.trie.TryDelete(key)
+ return t.trie.Delete(key)
})
}
// TryDeleteAccount abstracts an account deletion from the trie.
-func (t *odrTrie) TryDeleteAccount(address common.Address) error {
+func (t *odrTrie) DeleteAccount(address common.Address) error {
key := crypto.Keccak256(address.Bytes())
return t.do(key, func() error {
- return t.trie.TryDelete(key)
+ return t.trie.Delete(key)
})
}
diff --git a/log/format.go b/log/format.go
index d7e2f820a..b10786efa 100644
--- a/log/format.go
+++ b/log/format.go
@@ -12,6 +12,8 @@ import (
"sync/atomic"
"time"
"unicode/utf8"
+
+ "github.com/holiman/uint256"
)
const (
@@ -339,12 +341,20 @@ func formatLogfmtValue(value interface{}, term bool) string {
return v.Format(timeFormat)
case *big.Int:
- // Big ints get consumed by the Stringer clause so we need to handle
+ // Big ints get consumed by the Stringer clause, so we need to handle
// them earlier on.
if v == nil {
return ""
}
return formatLogfmtBigInt(v)
+
+ case *uint256.Int:
+ // Uint256s get consumed by the Stringer clause, so we need to handle
+ // them earlier on.
+ if v == nil {
+ return ""
+ }
+ return formatLogfmtUint256(v)
}
if term {
if s, ok := value.(TerminalStringer); ok {
@@ -469,6 +479,36 @@ func formatLogfmtBigInt(n *big.Int) string {
return string(buf[i+1:])
}
+// formatLogfmtUint256 formats n with thousand separators.
+func formatLogfmtUint256(n *uint256.Int) string {
+ if n.IsUint64() {
+ return FormatLogfmtUint64(n.Uint64())
+ }
+ var (
+ text = n.Dec()
+ buf = make([]byte, len(text)+len(text)/3)
+ comma = 0
+ i = len(buf) - 1
+ )
+ for j := len(text) - 1; j >= 0; j, i = j-1, i-1 {
+ c := text[j]
+
+ switch {
+ case c == '-':
+ buf[i] = c
+ case comma == 3:
+ buf[i] = ','
+ i--
+ comma = 0
+ fallthrough
+ default:
+ buf[i] = c
+ comma++
+ }
+ }
+ return string(buf[i+1:])
+}
+
// escapeString checks if the provided string needs escaping/quoting, and
// calls strconv.Quote if needed
func escapeString(s string) string {
diff --git a/log/format_test.go b/log/format_test.go
index cfcfe8580..e08c1d1a4 100644
--- a/log/format_test.go
+++ b/log/format_test.go
@@ -7,6 +7,8 @@ import (
"math/rand"
"strings"
"testing"
+
+ "github.com/holiman/uint256"
)
func TestPrettyInt64(t *testing.T) {
@@ -80,6 +82,24 @@ func TestPrettyBigInt(t *testing.T) {
}
}
+func TestPrettyUint256(t *testing.T) {
+ tests := []struct {
+ int string
+ s string
+ }{
+ {"111222333444555678999", "111,222,333,444,555,678,999"},
+ {"11122233344455567899900", "11,122,233,344,455,567,899,900"},
+ }
+
+ for _, tt := range tests {
+ v := new(uint256.Int)
+ v.SetFromDecimal(tt.int)
+ if have := formatLogfmtUint256(v); have != tt.s {
+ t.Errorf("invalid output %s, want %s", have, tt.s)
+ }
+ }
+}
+
var sink string
func BenchmarkPrettyInt64Logfmt(b *testing.B) {
diff --git a/metrics/counter.go b/metrics/counter.go
index 2f78c90d5..55e1c5954 100644
--- a/metrics/counter.go
+++ b/metrics/counter.go
@@ -38,13 +38,13 @@ func NewCounter() Counter {
if !Enabled {
return NilCounter{}
}
- return &StandardCounter{0}
+ return &StandardCounter{}
}
// NewCounterForced constructs a new StandardCounter and returns it no matter if
// the global switch is enabled or not.
func NewCounterForced() Counter {
- return &StandardCounter{0}
+ return &StandardCounter{}
}
// NewRegisteredCounter constructs and registers a new StandardCounter.
@@ -115,27 +115,27 @@ func (NilCounter) Snapshot() Counter { return NilCounter{} }
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter struct {
- count int64
+ count atomic.Int64
}
// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
- atomic.StoreInt64(&c.count, 0)
+ c.count.Store(0)
}
// Count returns the current count.
func (c *StandardCounter) Count() int64 {
- return atomic.LoadInt64(&c.count)
+ return c.count.Load()
}
// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
- atomic.AddInt64(&c.count, -i)
+ c.count.Add(-i)
}
// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
- atomic.AddInt64(&c.count, i)
+ c.count.Add(i)
}
// Snapshot returns a read-only copy of the counter.
diff --git a/metrics/counter_float64.go b/metrics/counter_float64.go
new file mode 100644
index 000000000..d1197bb8e
--- /dev/null
+++ b/metrics/counter_float64.go
@@ -0,0 +1,155 @@
+package metrics
+
+import (
+ "math"
+ "sync/atomic"
+)
+
+// CounterFloat64 holds a float64 value that can be incremented and decremented.
+type CounterFloat64 interface {
+ Clear()
+ Count() float64
+ Dec(float64)
+ Inc(float64)
+ Snapshot() CounterFloat64
+}
+
+// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers
+// a new StandardCounterFloat64.
+func GetOrRegisterCounterFloat64(name string, r Registry) CounterFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounterFloat64).(CounterFloat64)
+}
+
+// GetOrRegisterCounterFloat64Forced returns an existing CounterFloat64 or constructs and registers a
+// new CounterFloat64 whether the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterCounterFloat64Forced(name string, r Registry) CounterFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounterFloat64Forced).(CounterFloat64)
+}
+
+// NewCounterFloat64 constructs a new StandardCounterFloat64.
+func NewCounterFloat64() CounterFloat64 {
+ if !Enabled {
+ return NilCounterFloat64{}
+ }
+ return &StandardCounterFloat64{}
+}
+
+// NewCounterFloat64Forced constructs a new StandardCounterFloat64 and returns it no matter if
+// the global switch is enabled or not.
+func NewCounterFloat64Forced() CounterFloat64 {
+ return &StandardCounterFloat64{}
+}
+
+// NewRegisteredCounterFloat64 constructs and registers a new StandardCounterFloat64.
+func NewRegisteredCounterFloat64(name string, r Registry) CounterFloat64 {
+ c := NewCounterFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewRegisteredCounterFloat64Forced constructs and registers a new StandardCounterFloat64
+// whether the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 {
+ c := NewCounterFloat64Forced()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// CounterFloat64Snapshot is a read-only copy of another CounterFloat64.
+type CounterFloat64Snapshot float64
+
+// Clear panics.
+func (CounterFloat64Snapshot) Clear() {
+ panic("Clear called on a CounterFloat64Snapshot")
+}
+
+// Count returns the value at the time the snapshot was taken.
+func (c CounterFloat64Snapshot) Count() float64 { return float64(c) }
+
+// Dec panics.
+func (CounterFloat64Snapshot) Dec(float64) {
+ panic("Dec called on a CounterFloat64Snapshot")
+}
+
+// Inc panics.
+func (CounterFloat64Snapshot) Inc(float64) {
+ panic("Inc called on a CounterFloat64Snapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterFloat64Snapshot) Snapshot() CounterFloat64 { return c }
+
+// NilCounterFloat64 is a no-op CounterFloat64.
+type NilCounterFloat64 struct{}
+
+// Clear is a no-op.
+func (NilCounterFloat64) Clear() {}
+
+// Count is a no-op.
+func (NilCounterFloat64) Count() float64 { return 0.0 }
+
+// Dec is a no-op.
+func (NilCounterFloat64) Dec(i float64) {}
+
+// Inc is a no-op.
+func (NilCounterFloat64) Inc(i float64) {}
+
+// Snapshot is a no-op.
+func (NilCounterFloat64) Snapshot() CounterFloat64 { return NilCounterFloat64{} }
+
+// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the
+// atomic to manage a single float64 value.
+type StandardCounterFloat64 struct {
+ floatBits atomic.Uint64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounterFloat64) Clear() {
+ c.floatBits.Store(0)
+}
+
+// Count returns the current value.
+func (c *StandardCounterFloat64) Count() float64 {
+ return math.Float64frombits(c.floatBits.Load())
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounterFloat64) Dec(v float64) {
+ atomicAddFloat(&c.floatBits, -v)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounterFloat64) Inc(v float64) {
+ atomicAddFloat(&c.floatBits, v)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounterFloat64) Snapshot() CounterFloat64 {
+ return CounterFloat64Snapshot(c.Count())
+}
+
+func atomicAddFloat(fbits *atomic.Uint64, v float64) {
+ for {
+ loadedBits := fbits.Load()
+ newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+ if fbits.CompareAndSwap(loadedBits, newBits) {
+ break
+ }
+ }
+}
diff --git a/metrics/counter_float_64_test.go b/metrics/counter_float_64_test.go
new file mode 100644
index 000000000..f17aca330
--- /dev/null
+++ b/metrics/counter_float_64_test.go
@@ -0,0 +1,99 @@
+package metrics
+
+import (
+ "sync"
+ "testing"
+)
+
+func BenchmarkCounterFloat64(b *testing.B) {
+ c := NewCounterFloat64()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ c.Inc(1.0)
+ }
+}
+
+func BenchmarkCounterFloat64Parallel(b *testing.B) {
+ c := NewCounterFloat64()
+ b.ResetTimer()
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ c.Inc(1.0)
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ if have, want := c.Count(), 10.0*float64(b.N); have != want {
+ b.Fatalf("have %f want %f", have, want)
+ }
+}
+
+func TestCounterFloat64Clear(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(1.0)
+ c.Clear()
+ if count := c.Count(); count != 0 {
+ t.Errorf("c.Count(): 0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Dec1(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Dec(1.0)
+ if count := c.Count(); count != -1.0 {
+ t.Errorf("c.Count(): -1.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Dec2(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Dec(2.0)
+ if count := c.Count(); count != -2.0 {
+ t.Errorf("c.Count(): -2.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Inc1(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(1.0)
+ if count := c.Count(); count != 1.0 {
+ t.Errorf("c.Count(): 1.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Inc2(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(2.0)
+ if count := c.Count(); count != 2.0 {
+ t.Errorf("c.Count(): 2.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Snapshot(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(1.0)
+ snapshot := c.Snapshot()
+ c.Inc(1.0)
+ if count := snapshot.Count(); count != 1.0 {
+ t.Errorf("c.Count(): 1.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Zero(t *testing.T) {
+ c := NewCounterFloat64()
+ if count := c.Count(); count != 0 {
+ t.Errorf("c.Count(): 0 != %v\n", count)
+ }
+}
+
+func TestGetOrRegisterCounterFloat64(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredCounterFloat64("foo", r).Inc(47.0)
+ if c := GetOrRegisterCounterFloat64("foo", r); c.Count() != 47.0 {
+ t.Fatal(c)
+ }
+}
diff --git a/metrics/ewma.go b/metrics/ewma.go
index 039286493..ed95cba19 100644
--- a/metrics/ewma.go
+++ b/metrics/ewma.go
@@ -75,7 +75,7 @@ func (NilEWMA) Update(n int64) {}
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
- uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ uncounted atomic.Int64
alpha float64
rate float64
init bool
@@ -97,8 +97,8 @@ func (a *StandardEWMA) Snapshot() EWMA {
// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
- count := atomic.LoadInt64(&a.uncounted)
- atomic.AddInt64(&a.uncounted, -count)
+ count := a.uncounted.Load()
+ a.uncounted.Add(-count)
instantRate := float64(count) / float64(5*time.Second)
a.mutex.Lock()
defer a.mutex.Unlock()
@@ -112,5 +112,5 @@ func (a *StandardEWMA) Tick() {
// Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) {
- atomic.AddInt64(&a.uncounted, n)
+ a.uncounted.Add(n)
}
diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go
index 3ebe8cc68..2b04eeab2 100644
--- a/metrics/exp/exp.go
+++ b/metrics/exp/exp.go
@@ -100,6 +100,11 @@ func (exp *exp) publishCounter(name string, metric metrics.Counter) {
v.Set(metric.Count())
}
+func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64) {
+ v := exp.getFloat(name)
+ v.Set(metric.Count())
+}
+
func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
v := exp.getInt(name)
v.Set(metric.Value())
@@ -167,6 +172,8 @@ func (exp *exp) syncToExpvar() {
switch i := i.(type) {
case metrics.Counter:
exp.publishCounter(name, i)
+ case metrics.CounterFloat64:
+ exp.publishCounterFloat64(name, i)
case metrics.Gauge:
exp.publishGauge(name, i)
case metrics.GaugeFloat64:
diff --git a/metrics/gauge.go b/metrics/gauge.go
index b6b2758b0..81137d7f7 100644
--- a/metrics/gauge.go
+++ b/metrics/gauge.go
@@ -25,7 +25,7 @@ func NewGauge() Gauge {
if !Enabled {
return NilGauge{}
}
- return &StandardGauge{0}
+ return &StandardGauge{}
}
// NewRegisteredGauge constructs and registers a new StandardGauge.
@@ -101,7 +101,7 @@ func (NilGauge) Value() int64 { return 0 }
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
type StandardGauge struct {
- value int64
+ value atomic.Int64
}
// Snapshot returns a read-only copy of the gauge.
@@ -111,22 +111,22 @@ func (g *StandardGauge) Snapshot() Gauge {
// Update updates the gauge's value.
func (g *StandardGauge) Update(v int64) {
- atomic.StoreInt64(&g.value, v)
+ g.value.Store(v)
}
// Value returns the gauge's current value.
func (g *StandardGauge) Value() int64 {
- return atomic.LoadInt64(&g.value)
+ return g.value.Load()
}
// Dec decrements the gauge's current value by the given amount.
func (g *StandardGauge) Dec(i int64) {
- atomic.AddInt64(&g.value, -i)
+ g.value.Add(-i)
}
// Inc increments the gauge's current value by the given amount.
func (g *StandardGauge) Inc(i int64) {
- atomic.AddInt64(&g.value, i)
+ g.value.Add(i)
}
// FunctionalGauge returns value from given function
diff --git a/metrics/gauge_float64.go b/metrics/gauge_float64.go
index 66819c957..237ff8036 100644
--- a/metrics/gauge_float64.go
+++ b/metrics/gauge_float64.go
@@ -1,6 +1,9 @@
package metrics
-import "sync"
+import (
+ "math"
+ "sync/atomic"
+)
// GaugeFloat64s hold a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
@@ -23,9 +26,7 @@ func NewGaugeFloat64() GaugeFloat64 {
if !Enabled {
return NilGaugeFloat64{}
}
- return &StandardGaugeFloat64{
- value: 0.0,
- }
+ return &StandardGaugeFloat64{}
}
// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
@@ -83,10 +84,9 @@ func (NilGaugeFloat64) Update(v float64) {}
func (NilGaugeFloat64) Value() float64 { return 0.0 }
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
-// sync.Mutex to manage a single float64 value.
+// atomic to manage a single float64 value.
type StandardGaugeFloat64 struct {
- mutex sync.Mutex
- value float64
+ floatBits atomic.Uint64
}
// Snapshot returns a read-only copy of the gauge.
@@ -96,16 +96,12 @@ func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
// Update updates the gauge's value.
func (g *StandardGaugeFloat64) Update(v float64) {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- g.value = v
+ g.floatBits.Store(math.Float64bits(v))
}
// Value returns the gauge's current value.
func (g *StandardGaugeFloat64) Value() float64 {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- return g.value
+ return math.Float64frombits(g.floatBits.Load())
}
// FunctionalGaugeFloat64 returns value from given function
diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go
index 7b854d232..647d09000 100644
--- a/metrics/gauge_float64_test.go
+++ b/metrics/gauge_float64_test.go
@@ -1,6 +1,9 @@
package metrics
-import "testing"
+import (
+ "sync"
+ "testing"
+)
func BenchmarkGaugeFloat64(b *testing.B) {
g := NewGaugeFloat64()
@@ -10,6 +13,24 @@ func BenchmarkGaugeFloat64(b *testing.B) {
}
}
+func BenchmarkGaugeFloat64Parallel(b *testing.B) {
+ c := NewGaugeFloat64()
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ c.Update(float64(i))
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ if have, want := c.Value(), float64(b.N-1); have != want {
+ b.Fatalf("have %f want %f", have, want)
+ }
+}
+
func TestGaugeFloat64(t *testing.T) {
g := NewGaugeFloat64()
g.Update(47.0)
diff --git a/metrics/graphite.go b/metrics/graphite.go
index 142eec86b..29f72b0c4 100644
--- a/metrics/graphite.go
+++ b/metrics/graphite.go
@@ -67,6 +67,8 @@ func graphite(c *GraphiteConfig) error {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ case CounterFloat64:
+ fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Count(), now)
case Gauge:
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
case GaugeFloat64:
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
index 748c692e1..5dfbbab3e 100644
--- a/metrics/influxdb/influxdb.go
+++ b/metrics/influxdb/influxdb.go
@@ -2,260 +2,112 @@ package influxdb
import (
"fmt"
- uurl "net/url"
- "time"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
- "github.com/influxdata/influxdb/client"
)
-type reporter struct {
- reg metrics.Registry
- interval time.Duration
-
- url uurl.URL
- database string
- username string
- password string
- namespace string
- tags map[string]string
-
- client *client.Client
-
- cache map[string]int64
-}
-
-// InfluxDB starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval.
-func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
- InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
-}
-
-// InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
-func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
- u, err := uurl.Parse(url)
- if err != nil {
- log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
- return
- }
-
- rep := &reporter{
- reg: r,
- interval: d,
- url: *u,
- database: database,
- username: username,
- password: password,
- namespace: namespace,
- tags: tags,
- cache: make(map[string]int64),
- }
- if err := rep.makeClient(); err != nil {
- log.Warn("Unable to make InfluxDB client", "err", err)
- return
- }
-
- rep.run()
-}
-
-// InfluxDBWithTagsOnce runs once an InfluxDB reporter and post the given metrics.Registry with the specified tags
-func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
- u, err := uurl.Parse(url)
- if err != nil {
- return fmt.Errorf("unable to parse InfluxDB. url: %s, err: %v", url, err)
- }
-
- rep := &reporter{
- reg: r,
- url: *u,
- database: database,
- username: username,
- password: password,
- namespace: namespace,
- tags: tags,
- cache: make(map[string]int64),
- }
- if err := rep.makeClient(); err != nil {
- return fmt.Errorf("unable to make InfluxDB client. err: %v", err)
- }
-
- if err := rep.send(); err != nil {
- return fmt.Errorf("unable to send to InfluxDB. err: %v", err)
- }
-
- return nil
-}
-
-func (r *reporter) makeClient() (err error) {
- r.client, err = client.NewClient(client.Config{
- URL: r.url,
- Username: r.username,
- Password: r.password,
- Timeout: 10 * time.Second,
- })
-
- return
-}
-
-func (r *reporter) run() {
- intervalTicker := time.NewTicker(r.interval)
- pingTicker := time.NewTicker(time.Second * 5)
-
- defer intervalTicker.Stop()
- defer pingTicker.Stop()
-
- for {
- select {
- case <-intervalTicker.C:
- if err := r.send(); err != nil {
- log.Warn("Unable to send to InfluxDB", "err", err)
- }
- case <-pingTicker.C:
- _, _, err := r.client.Ping()
- if err != nil {
- log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
-
- if err = r.makeClient(); err != nil {
- log.Warn("Unable to make InfluxDB client", "err", err)
- }
- }
+func readMeter(namespace, name string, i interface{}) (string, map[string]interface{}) {
+ switch metric := i.(type) {
+ case metrics.Counter:
+ measurement := fmt.Sprintf("%s%s.count", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Count(),
}
- }
-}
-
-func (r *reporter) send() error {
- var pts []client.Point
-
- r.reg.Each(func(name string, i interface{}) {
- now := time.Now()
- namespace := r.namespace
-
- switch metric := i.(type) {
- case metrics.Counter:
- count := metric.Count()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.count", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": count,
- },
- Time: now,
- })
- case metrics.Gauge:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": ms.Value(),
- },
- Time: now,
- })
- case metrics.GaugeFloat64:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": ms.Value(),
- },
- Time: now,
- })
- case metrics.Histogram:
- ms := metric.Snapshot()
- if ms.Count() > 0 {
- ps := ms.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- fields := map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p25": ps[0],
- "p50": ps[1],
- "p75": ps[2],
- "p95": ps[3],
- "p99": ps[4],
- "p999": ps[5],
- "p9999": ps[6],
- }
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
- Tags: r.tags,
- Fields: fields,
- Time: now,
- })
- }
- case metrics.Meter:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "mean": ms.RateMean(),
- },
- Time: now,
- })
- case metrics.Timer:
- ms := metric.Snapshot()
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "meanrate": ms.RateMean(),
- },
- Time: now,
- })
- case metrics.ResettingTimer:
- t := metric.Snapshot()
-
- if len(t.Values()) > 0 {
- ps := t.Percentiles([]float64{50, 95, 99})
- val := t.Values()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.span", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": len(val),
- "max": val[len(val)-1],
- "mean": t.Mean(),
- "min": val[0],
- "p50": ps[0],
- "p95": ps[1],
- "p99": ps[2],
- },
- Time: now,
- })
- }
+ return measurement, fields
+ case metrics.CounterFloat64:
+ measurement := fmt.Sprintf("%s%s.count", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Count(),
}
- })
+ return measurement, fields
+ case metrics.Gauge:
+ measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Snapshot().Value(),
+ }
+ return measurement, fields
+ case metrics.GaugeFloat64:
+ measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Snapshot().Value(),
+ }
+ return measurement, fields
+ case metrics.Histogram:
+ ms := metric.Snapshot()
+ if ms.Count() <= 0 {
+ break
+ }
+ ps := ms.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+ measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
+ fields := map[string]interface{}{
+ "count": ms.Count(),
+ "max": ms.Max(),
+ "mean": ms.Mean(),
+ "min": ms.Min(),
+ "stddev": ms.StdDev(),
+ "variance": ms.Variance(),
+ "p25": ps[0],
+ "p50": ps[1],
+ "p75": ps[2],
+ "p95": ps[3],
+ "p99": ps[4],
+ "p999": ps[5],
+ "p9999": ps[6],
+ }
+ return measurement, fields
+ case metrics.Meter:
+ ms := metric.Snapshot()
+ measurement := fmt.Sprintf("%s%s.meter", namespace, name)
+ fields := map[string]interface{}{
+ "count": ms.Count(),
+ "m1": ms.Rate1(),
+ "m5": ms.Rate5(),
+ "m15": ms.Rate15(),
+ "mean": ms.RateMean(),
+ }
+ return measurement, fields
+ case metrics.Timer:
+ ms := metric.Snapshot()
+ ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- bps := client.BatchPoints{
- Points: pts,
- Database: r.database,
+ measurement := fmt.Sprintf("%s%s.timer", namespace, name)
+ fields := map[string]interface{}{
+ "count": ms.Count(),
+ "max": ms.Max(),
+ "mean": ms.Mean(),
+ "min": ms.Min(),
+ "stddev": ms.StdDev(),
+ "variance": ms.Variance(),
+ "p50": ps[0],
+ "p75": ps[1],
+ "p95": ps[2],
+ "p99": ps[3],
+ "p999": ps[4],
+ "p9999": ps[5],
+ "m1": ms.Rate1(),
+ "m5": ms.Rate5(),
+ "m15": ms.Rate15(),
+ "meanrate": ms.RateMean(),
+ }
+ return measurement, fields
+ case metrics.ResettingTimer:
+ t := metric.Snapshot()
+ if len(t.Values()) == 0 {
+ break
+ }
+ ps := t.Percentiles([]float64{50, 95, 99})
+ val := t.Values()
+ measurement := fmt.Sprintf("%s%s.span", namespace, name)
+ fields := map[string]interface{}{
+ "count": len(val),
+ "max": val[len(val)-1],
+ "mean": t.Mean(),
+ "min": val[0],
+ "p50": ps[0],
+ "p95": ps[1],
+ "p99": ps[2],
+ }
+ return measurement, fields
}
-
- _, err := r.client.Write(bps)
- return err
+ return "", nil
}
diff --git a/metrics/influxdb/influxdbv1.go b/metrics/influxdb/influxdbv1.go
new file mode 100644
index 000000000..f65d30ef9
--- /dev/null
+++ b/metrics/influxdb/influxdbv1.go
@@ -0,0 +1,145 @@
+package influxdb
+
+import (
+ "fmt"
+ uurl "net/url"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ client "github.com/influxdata/influxdb1-client/v2"
+)
+
+type reporter struct {
+ reg metrics.Registry
+ interval time.Duration
+
+ url uurl.URL
+ database string
+ username string
+ password string
+ namespace string
+ tags map[string]string
+
+ client client.Client
+
+ cache map[string]int64
+}
+
+// InfluxDB starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval.
+func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
+ InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
+}
+
+// InfluxDBWithTags starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags
+func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
+ u, err := uurl.Parse(url)
+ if err != nil {
+ log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
+ return
+ }
+
+ rep := &reporter{
+ reg: r,
+ interval: d,
+ url: *u,
+ database: database,
+ username: username,
+ password: password,
+ namespace: namespace,
+ tags: tags,
+ cache: make(map[string]int64),
+ }
+ if err := rep.makeClient(); err != nil {
+ log.Warn("Unable to make InfluxDB client", "err", err)
+ return
+ }
+
+ rep.run()
+}
+
+// InfluxDBWithTagsOnce runs an InfluxDB reporter once and posts the given metrics.Registry with the specified tags
+func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
+ u, err := uurl.Parse(url)
+ if err != nil {
+ return fmt.Errorf("unable to parse InfluxDB. url: %s, err: %v", url, err)
+ }
+
+ rep := &reporter{
+ reg: r,
+ url: *u,
+ database: database,
+ username: username,
+ password: password,
+ namespace: namespace,
+ tags: tags,
+ cache: make(map[string]int64),
+ }
+ if err := rep.makeClient(); err != nil {
+ return fmt.Errorf("unable to make InfluxDB client. err: %v", err)
+ }
+
+ if err := rep.send(); err != nil {
+ return fmt.Errorf("unable to send to InfluxDB. err: %v", err)
+ }
+
+ return nil
+}
+
+func (r *reporter) makeClient() (err error) {
+ r.client, err = client.NewHTTPClient(client.HTTPConfig{
+ Addr: r.url.String(),
+ Username: r.username,
+ Password: r.password,
+ Timeout: 10 * time.Second,
+ })
+
+ return
+}
+
+func (r *reporter) run() {
+ intervalTicker := time.NewTicker(r.interval)
+ pingTicker := time.NewTicker(time.Second * 5)
+
+ defer intervalTicker.Stop()
+ defer pingTicker.Stop()
+
+ for {
+ select {
+ case <-intervalTicker.C:
+ if err := r.send(); err != nil {
+ log.Warn("Unable to send to InfluxDB", "err", err)
+ }
+ case <-pingTicker.C:
+ _, _, err := r.client.Ping(0)
+ if err != nil {
+ log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
+
+ if err = r.makeClient(); err != nil {
+ log.Warn("Unable to make InfluxDB client", "err", err)
+ }
+ }
+ }
+ }
+}
+
+func (r *reporter) send() error {
+ bps, err := client.NewBatchPoints(
+ client.BatchPointsConfig{
+ Database: r.database,
+ })
+ if err != nil {
+ return err
+ }
+ r.reg.Each(func(name string, i interface{}) {
+ now := time.Now()
+ measurement, fields := readMeter(r.namespace, name, i)
+ if fields == nil {
+ return
+ }
+ if p, err := client.NewPoint(measurement, r.tags, fields, now); err == nil {
+ bps.AddPoint(p)
+ }
+ })
+ return r.client.Write(bps)
+}
diff --git a/metrics/influxdb/influxdbv2.go b/metrics/influxdb/influxdbv2.go
index bfb762196..7984898f3 100644
--- a/metrics/influxdb/influxdbv2.go
+++ b/metrics/influxdb/influxdbv2.go
@@ -2,7 +2,6 @@ package influxdb
import (
"context"
- "fmt"
"time"
"github.com/ethereum/go-ethereum/log"
@@ -24,8 +23,6 @@ type v2Reporter struct {
client influxdb2.Client
write api.WriteAPI
-
- cache map[string]int64
}
// InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
@@ -39,7 +36,6 @@ func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, to
organization: organization,
namespace: namespace,
tags: tags,
- cache: make(map[string]int64),
}
rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
@@ -81,136 +77,13 @@ func (r *v2Reporter) run() {
func (r *v2Reporter) send() {
r.reg.Each(func(name string, i interface{}) {
now := time.Now()
- namespace := r.namespace
-
- switch metric := i.(type) {
- case metrics.Counter:
- v := metric.Count()
- l := r.cache[name]
-
- measurement := fmt.Sprintf("%s%s.count", namespace, name)
- fields := map[string]interface{}{
- "value": v - l,
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
-
- r.cache[name] = v
-
- case metrics.Gauge:
- ms := metric.Snapshot()
-
- measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
- fields := map[string]interface{}{
- "value": ms.Value(),
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
-
- case metrics.GaugeFloat64:
- ms := metric.Snapshot()
-
- measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
- fields := map[string]interface{}{
- "value": ms.Value(),
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
-
- case metrics.Histogram:
- ms := metric.Snapshot()
-
- if ms.Count() > 0 {
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
- fields := map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
- }
-
- case metrics.Meter:
- ms := metric.Snapshot()
-
- measurement := fmt.Sprintf("%s%s.meter", namespace, name)
- fields := map[string]interface{}{
- "count": ms.Count(),
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "mean": ms.RateMean(),
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
-
- case metrics.Timer:
- ms := metric.Snapshot()
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
-
- measurement := fmt.Sprintf("%s%s.timer", namespace, name)
- fields := map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "meanrate": ms.RateMean(),
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
-
- case metrics.ResettingTimer:
- t := metric.Snapshot()
-
- if len(t.Values()) > 0 {
- ps := t.Percentiles([]float64{50, 95, 99})
- val := t.Values()
-
- measurement := fmt.Sprintf("%s%s.span", namespace, name)
- fields := map[string]interface{}{
- "count": len(val),
- "max": val[len(val)-1],
- "mean": t.Mean(),
- "min": val[0],
- "p50": ps[0],
- "p95": ps[1],
- "p99": ps[2],
- }
-
- pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
- r.write.WritePoint(pt)
- }
+ measurement, fields := readMeter(r.namespace, name, i)
+ if fields == nil {
+ return
}
+ pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
+ r.write.WritePoint(pt)
})
-
// Force all unwritten data to be sent
r.write.Flush()
}
diff --git a/metrics/librato/client.go b/metrics/librato/client.go
index 729c2da9a..f1b9e1e91 100644
--- a/metrics/librato/client.go
+++ b/metrics/librato/client.go
@@ -87,9 +87,11 @@ func (c *LibratoClient) PostMetrics(batch Batch) (err error) {
req.Header.Set("Content-Type", "application/json")
req.SetBasicAuth(c.Email, c.Token)
- if resp, err = http.DefaultClient.Do(req); err != nil {
+ resp, err = http.DefaultClient.Do(req)
+ if err != nil {
return
}
+ defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var body []byte
diff --git a/metrics/librato/librato.go b/metrics/librato/librato.go
index b16493413..3d45f4c7b 100644
--- a/metrics/librato/librato.go
+++ b/metrics/librato/librato.go
@@ -107,6 +107,17 @@ func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot B
}
snapshot.Counters = append(snapshot.Counters, measurement)
}
+ case metrics.CounterFloat64:
+ if m.Count() > 0 {
+ measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
+ measurement[Value] = m.Count()
+ measurement[Attributes] = map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ }
+ snapshot.Counters = append(snapshot.Counters, measurement)
+ }
case metrics.Gauge:
measurement[Name] = name
measurement[Value] = float64(m.Value())
diff --git a/metrics/log.go b/metrics/log.go
index 0c8ea7c97..d1ce627a8 100644
--- a/metrics/log.go
+++ b/metrics/log.go
@@ -24,6 +24,9 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
case Counter:
l.Printf("counter %s\n", name)
l.Printf(" count: %9d\n", metric.Count())
+ case CounterFloat64:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %f\n", metric.Count())
case Gauge:
l.Printf("gauge %s\n", name)
l.Printf(" value: %9d\n", metric.Value())
diff --git a/metrics/meter.go b/metrics/meter.go
index 60ae919d0..e8564d6a5 100644
--- a/metrics/meter.go
+++ b/metrics/meter.go
@@ -101,11 +101,7 @@ func NewRegisteredMeterForced(name string, r Registry) Meter {
// MeterSnapshot is a read-only copy of another Meter.
type MeterSnapshot struct {
- // WARNING: The `temp` field is accessed atomically.
- // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is
- // guaranteed to be so aligned, so take advantage of that. For more information,
- // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- temp int64
+ temp atomic.Int64
count int64
rate1, rate5, rate15, rateMean float64
}
@@ -173,7 +169,7 @@ type StandardMeter struct {
snapshot *MeterSnapshot
a1, a5, a15 EWMA
startTime time.Time
- stopped uint32
+ stopped atomic.Bool
}
func newStandardMeter() *StandardMeter {
@@ -188,8 +184,8 @@ func newStandardMeter() *StandardMeter {
// Stop stops the meter, Mark() will be a no-op if you use it after being stopped.
func (m *StandardMeter) Stop() {
- stopped := atomic.SwapUint32(&m.stopped, 1)
- if stopped != 1 {
+ stopped := m.stopped.Swap(true)
+ if !stopped {
arbiter.Lock()
delete(arbiter.meters, m)
arbiter.Unlock()
@@ -207,7 +203,7 @@ func (m *StandardMeter) Count() int64 {
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
- atomic.AddInt64(&m.snapshot.temp, n)
+ m.snapshot.temp.Add(n)
}
// Rate1 returns the one-minute moving average rate of events per second.
@@ -241,7 +237,14 @@ func (m *StandardMeter) RateMean() float64 {
// Snapshot returns a read-only copy of the meter.
func (m *StandardMeter) Snapshot() Meter {
m.lock.RLock()
- snapshot := *m.snapshot
+ snapshot := MeterSnapshot{
+ count: m.snapshot.count,
+ rate1: m.snapshot.rate1,
+ rate5: m.snapshot.rate5,
+ rate15: m.snapshot.rate15,
+ rateMean: m.snapshot.rateMean,
+ }
+ snapshot.temp.Store(m.snapshot.temp.Load())
m.lock.RUnlock()
return &snapshot
}
@@ -257,7 +260,7 @@ func (m *StandardMeter) updateSnapshot() {
func (m *StandardMeter) updateMeter() {
// should only run with write lock held on m.lock
- n := atomic.SwapInt64(&m.snapshot.temp, 0)
+ n := m.snapshot.temp.Swap(0)
m.snapshot.count += n
m.a1.Update(n)
m.a5.Update(n)
diff --git a/metrics/metrics.go b/metrics/metrics.go
index ff7196b56..c206f1692 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -144,6 +144,9 @@ func CollectProcessMetrics(refresh time.Duration) {
cpuSysLoad = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry)
cpuSysWait = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry)
cpuProcLoad = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry)
+ cpuSysLoadTotal = GetOrRegisterCounterFloat64("system/cpu/sysload/total", DefaultRegistry)
+ cpuSysWaitTotal = GetOrRegisterCounterFloat64("system/cpu/syswait/total", DefaultRegistry)
+ cpuProcLoadTotal = GetOrRegisterCounterFloat64("system/cpu/procload/total", DefaultRegistry)
cpuThreads = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry)
cpuGoroutines = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry)
cpuSchedLatency = getOrRegisterRuntimeHistogram("system/cpu/schedlatency", secondsToNs, nil)
@@ -172,13 +175,17 @@ func CollectProcessMetrics(refresh time.Duration) {
secondsSinceLastCollect := collectTime.Sub(lastCollectTime).Seconds()
lastCollectTime = collectTime
if secondsSinceLastCollect > 0 {
- sysLoad := (cpustats[now].GlobalTime - cpustats[prev].GlobalTime) / secondsSinceLastCollect
- sysWait := (cpustats[now].GlobalWait - cpustats[prev].GlobalWait) / secondsSinceLastCollect
- procLoad := (cpustats[now].LocalTime - cpustats[prev].LocalTime) / secondsSinceLastCollect
+ sysLoad := cpustats[now].GlobalTime - cpustats[prev].GlobalTime
+ sysWait := cpustats[now].GlobalWait - cpustats[prev].GlobalWait
+ procLoad := cpustats[now].LocalTime - cpustats[prev].LocalTime
// Convert to integer percentage.
- cpuSysLoad.Update(int64(sysLoad * 100))
- cpuSysWait.Update(int64(sysWait * 100))
- cpuProcLoad.Update(int64(procLoad * 100))
+ cpuSysLoad.Update(int64(sysLoad / secondsSinceLastCollect * 100))
+ cpuSysWait.Update(int64(sysWait / secondsSinceLastCollect * 100))
+ cpuProcLoad.Update(int64(procLoad / secondsSinceLastCollect * 100))
+ // increment counters (ms)
+ cpuSysLoadTotal.Inc(sysLoad)
+ cpuSysWaitTotal.Inc(sysWait)
+ cpuProcLoadTotal.Inc(procLoad)
}
// Threads
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
index e3fde1ea6..534c44139 100644
--- a/metrics/metrics_test.go
+++ b/metrics/metrics_test.go
@@ -18,6 +18,7 @@ func TestReadRuntimeValues(t *testing.T) {
func BenchmarkMetrics(b *testing.B) {
r := NewRegistry()
c := NewRegisteredCounter("counter", r)
+ cf := NewRegisteredCounterFloat64("counterfloat64", r)
g := NewRegisteredGauge("gauge", r)
gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
@@ -71,6 +72,7 @@ func BenchmarkMetrics(b *testing.B) {
//log.Println("go", i)
for i := 0; i < b.N; i++ {
c.Inc(1)
+ cf.Inc(1.0)
g.Update(int64(i))
gf.Update(float64(i))
h.Update(int64(i))
diff --git a/metrics/opentsdb.go b/metrics/opentsdb.go
index 3fde55454..c9fd2e75d 100644
--- a/metrics/opentsdb.go
+++ b/metrics/opentsdb.go
@@ -71,6 +71,8 @@ func openTSDB(c *OpenTSDBConfig) error {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ case CounterFloat64:
+ fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
case Gauge:
fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
case GaugeFloat64:
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
index e8d5e4f5d..2bd9bf22c 100644
--- a/metrics/prometheus/collector.go
+++ b/metrics/prometheus/collector.go
@@ -50,6 +50,10 @@ func (c *collector) addCounter(name string, m metrics.Counter) {
c.writeGaugeCounter(name, m.Count())
}
+func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64) {
+ c.writeGaugeCounter(name, m.Count())
+}
+
func (c *collector) addGauge(name string, m metrics.Gauge) {
c.writeGaugeCounter(name, m.Value())
}
diff --git a/metrics/prometheus/collector_test.go b/metrics/prometheus/collector_test.go
index 43f2f804d..ff87c8e76 100644
--- a/metrics/prometheus/collector_test.go
+++ b/metrics/prometheus/collector_test.go
@@ -20,6 +20,10 @@ func TestCollector(t *testing.T) {
counter.Inc(12345)
c.addCounter("test/counter", counter)
+ counterfloat64 := metrics.NewCounterFloat64()
+ counterfloat64.Inc(54321.98)
+ c.addCounterFloat64("test/counter_float64", counterfloat64)
+
gauge := metrics.NewGauge()
gauge.Update(23456)
c.addGauge("test/gauge", gauge)
@@ -61,6 +65,9 @@ func TestCollector(t *testing.T) {
const expectedOutput = `# TYPE test_counter gauge
test_counter 12345
+# TYPE test_counter_float64 gauge
+test_counter_float64 54321.98
+
# TYPE test_gauge gauge
test_gauge 23456
diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go
index c8408d8ca..d966fa9a8 100644
--- a/metrics/prometheus/prometheus.go
+++ b/metrics/prometheus/prometheus.go
@@ -45,6 +45,8 @@ func Handler(reg metrics.Registry) http.Handler {
switch m := i.(type) {
case metrics.Counter:
c.addCounter(name, m.Snapshot())
+ case metrics.CounterFloat64:
+ c.addCounterFloat64(name, m.Snapshot())
case metrics.Gauge:
c.addGauge(name, m.Snapshot())
case metrics.GaugeFloat64:
diff --git a/metrics/registry.go b/metrics/registry.go
index c5435adf2..4c6224835 100644
--- a/metrics/registry.go
+++ b/metrics/registry.go
@@ -120,6 +120,8 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
switch metric := i.(type) {
case Counter:
values["count"] = metric.Count()
+ case CounterFloat64:
+ values["count"] = metric.Count()
case Gauge:
values["value"] = metric.Value()
case GaugeFloat64:
@@ -196,7 +198,7 @@ func (r *StandardRegistry) register(name string, i interface{}) error {
return DuplicateMetric(name)
}
switch i.(type) {
- case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
+ case Counter, CounterFloat64, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
r.metrics[name] = i
}
return nil
diff --git a/metrics/syslog.go b/metrics/syslog.go
index 551a2bd0f..f23b07e19 100644
--- a/metrics/syslog.go
+++ b/metrics/syslog.go
@@ -17,6 +17,8 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
switch metric := i.(type) {
case Counter:
w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ case CounterFloat64:
+ w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Count()))
case Gauge:
w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
case GaugeFloat64:
diff --git a/metrics/writer.go b/metrics/writer.go
index 88521a80d..256fbd14c 100644
--- a/metrics/writer.go
+++ b/metrics/writer.go
@@ -29,6 +29,9 @@ func WriteOnce(r Registry, w io.Writer) {
case Counter:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ case CounterFloat64:
+ fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %f\n", metric.Count())
case Gauge:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %9d\n", metric.Value())
diff --git a/miner/stress/1559/main.go b/miner/stress/1559/main.go
index c27875000..2e8b78d85 100644
--- a/miner/stress/1559/main.go
+++ b/miner/stress/1559/main.go
@@ -205,7 +205,6 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
genesis.GasLimit = 8_000_000
genesis.Config.ChainID = big.NewInt(18)
- genesis.Config.EIP150Hash = common.Hash{}
genesis.Alloc = core.GenesisAlloc{}
for _, faucet := range faucets {
diff --git a/miner/stress/beacon/main.go b/miner/stress/beacon/main.go
index 516862c9c..65318f1a0 100644
--- a/miner/stress/beacon/main.go
+++ b/miner/stress/beacon/main.go
@@ -127,7 +127,11 @@ func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode
// Inject the signer key and start sealing with it
stack.AccountManager().AddBackend(keystore.NewPlaintextKeyStore("beacon-stress"))
- store := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ ks := stack.AccountManager().Backends(keystore.KeyStoreType)
+ if len(ks) == 0 {
+ panic("Keystore is not available")
+ }
+ store := ks[0].(*keystore.KeyStore)
if _, err := store.NewAccount(""); err != nil {
panic(err)
}
diff --git a/miner/stress/clique/main.go b/miner/stress/clique/main.go
index 688c2b698..74962c6d5 100644
--- a/miner/stress/clique/main.go
+++ b/miner/stress/clique/main.go
@@ -153,7 +153,6 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
genesis.Config.ChainID = big.NewInt(18)
genesis.Config.Clique.Period = 1
- genesis.Config.EIP150Hash = common.Hash{}
genesis.Alloc = core.GenesisAlloc{}
for _, faucet := range faucets {
diff --git a/miner/stress/ethash/main.go b/miner/stress/ethash/main.go
index 6b6e7059d..6905bf01f 100644
--- a/miner/stress/ethash/main.go
+++ b/miner/stress/ethash/main.go
@@ -139,7 +139,6 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
genesis.GasLimit = 25000000
genesis.Config.ChainID = big.NewInt(18)
- genesis.Config.EIP150Hash = common.Hash{}
genesis.Alloc = core.GenesisAlloc{}
for _, faucet := range faucets {
diff --git a/miner/worker.go b/miner/worker.go
index 67a5842d2..c481239d4 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -164,7 +164,7 @@ const (
// newWorkReq represents a request for new sealing work submitting with relative interrupt notifier.
type newWorkReq struct {
- interrupt *int32
+ interrupt *atomic.Int32
noempty bool
timestamp int64
}
@@ -239,15 +239,15 @@ type worker struct {
snapshotState *state.StateDB
// atomic status counters
- running int32 // The indicator whether the consensus engine is running or not.
- newTxs int32 // New arrival transaction count since last sealing work submitting.
+ running atomic.Bool // The indicator whether the consensus engine is running or not.
+ newTxs atomic.Int32 // New arrival transaction count since last sealing work submitting.
// noempty is the flag used to control whether the feature of pre-seal empty
// block is enabled. The default value is false(pre-seal is enabled by default).
// But in some special scenario the consensus engine will seal blocks instantaneously,
// in this case this feature will add all empty blocks into canonical chain
// non-stop and no real transaction will be included.
- noempty uint32
+ noempty atomic.Bool
// newpayloadTimeout is the maximum timeout allowance for creating payload.
// The default value is 2 seconds but node operator can set it to arbitrary
@@ -372,12 +372,12 @@ func (w *worker) setRecommitInterval(interval time.Duration) {
// disablePreseal disables pre-sealing feature
func (w *worker) disablePreseal() {
- atomic.StoreUint32(&w.noempty, 1)
+ w.noempty.Store(true)
}
// enablePreseal enables pre-sealing feature
func (w *worker) enablePreseal() {
- atomic.StoreUint32(&w.noempty, 0)
+ w.noempty.Store(false)
}
// pending returns the pending state and corresponding block.
@@ -409,24 +409,24 @@ func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) {
// start sets the running status as 1 and triggers new work submitting.
func (w *worker) start() {
- atomic.StoreInt32(&w.running, 1)
+ w.running.Store(true)
w.startCh <- struct{}{}
}
// stop sets the running status as 0.
func (w *worker) stop() {
- atomic.StoreInt32(&w.running, 0)
+ w.running.Store(false)
}
// isRunning returns an indicator whether worker is running or not.
func (w *worker) isRunning() bool {
- return atomic.LoadInt32(&w.running) == 1
+ return w.running.Load()
}
// close terminates all background threads maintained by the worker.
// Note the worker does not support being closed multiple times.
func (w *worker) close() {
- atomic.StoreInt32(&w.running, 0)
+ w.running.Store(false)
close(w.exitCh)
w.wg.Wait()
}
@@ -457,7 +457,7 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t
func (w *worker) newWorkLoop(recommit time.Duration) {
defer w.wg.Done()
var (
- interrupt *int32
+ interrupt *atomic.Int32
minRecommit = recommit // minimal resubmit interval specified by user.
timestamp int64 // timestamp for each round of sealing.
)
@@ -469,16 +469,16 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
// commit aborts in-flight transaction execution with given signal and resubmits a new one.
commit := func(noempty bool, s int32) {
if interrupt != nil {
- atomic.StoreInt32(interrupt, s)
+ interrupt.Store(s)
}
- interrupt = new(int32)
+ interrupt = new(atomic.Int32)
select {
case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}:
case <-w.exitCh:
return
}
timer.Reset(recommit)
- atomic.StoreInt32(&w.newTxs, 0)
+ w.newTxs.Store(0)
}
// clearPending cleans the stale pending tasks.
clearPending := func(number uint64) {
@@ -508,7 +508,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
// higher priced transactions. Disable this overhead for pending blocks.
if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
// Short circuit if no new transaction arrives.
- if atomic.LoadInt32(&w.newTxs) == 0 {
+ if w.newTxs.Load() == 0 {
timer.Reset(recommit)
continue
}
@@ -650,7 +650,7 @@ func (w *worker) mainLoop() {
w.commitWork(nil, true, time.Now().Unix())
}
}
- atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
+ w.newTxs.Add(int32(len(ev.Txs)))
// System stopped
case <-w.exitCh:
@@ -877,7 +877,7 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*
return receipt.Logs, nil
}
-func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) error {
+func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *atomic.Int32) error {
gasLimit := env.header.GasLimit
if env.gasPool == nil {
env.gasPool = new(core.GasPool).AddGas(gasLimit)
@@ -887,7 +887,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
for {
// Check interruption signal and abort building if it's fired.
if interrupt != nil {
- if signal := atomic.LoadInt32(interrupt); signal != commitInterruptNone {
+ if signal := interrupt.Load(); signal != commitInterruptNone {
return signalToErr(signal)
}
}
@@ -918,37 +918,22 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP
logs, err := w.commitTransaction(env, tx)
switch {
- case errors.Is(err, core.ErrGasLimitReached):
- // Pop the current out-of-gas transaction without shifting in the next from the account
- log.Trace("Gas limit exceeded for current block", "sender", from)
- txs.Pop()
-
case errors.Is(err, core.ErrNonceTooLow):
// New head notification data race between the transaction pool and miner, shift
log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
txs.Shift()
- case errors.Is(err, core.ErrNonceTooHigh):
- // Reorg notification data race between the transaction pool and miner, skip account =
- log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce())
- txs.Pop()
-
case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account
coalescedLogs = append(coalescedLogs, logs...)
env.tcount++
txs.Shift()
- case errors.Is(err, types.ErrTxTypeNotSupported):
- // Pop the unsupported transaction without shifting in the next from the account
- log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
- txs.Pop()
-
default:
- // Strange error, discard the transaction and get the next in line (note, the
- // nonce-too-high clause will prevent us from executing in vain).
+ // Transaction is regarded as invalid; drop all consecutive transactions from
+ // the same sender because of the `nonce-too-high` clause.
log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
- txs.Shift()
+ txs.Pop()
}
}
if !w.isRunning() && len(coalescedLogs) > 0 {
@@ -1067,7 +1052,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
// fillTransactions retrieves the pending transactions from the txpool and fills them
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
-func (w *worker) fillTransactions(interrupt *int32, env *environment) error {
+func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) error {
// Split the pending transactions into locals and remotes
// Fill the block with all available pending transactions.
pending := w.eth.TxPool().Pending(true)
@@ -1102,9 +1087,9 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e
defer work.discard()
if !params.noTxs {
- interrupt := new(int32)
+ interrupt := new(atomic.Int32)
timer := time.AfterFunc(w.newpayloadTimeout, func() {
- atomic.StoreInt32(interrupt, commitInterruptTimeout)
+ interrupt.Store(commitInterruptTimeout)
})
defer timer.Stop()
@@ -1122,7 +1107,7 @@ func (w *worker) generateWork(params *generateParams) (*types.Block, *big.Int, e
// commitWork generates several new sealing tasks based on the parent block
// and submit them to the sealer.
-func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
+func (w *worker) commitWork(interrupt *atomic.Int32, noempty bool, timestamp int64) {
start := time.Now()
// Set the coinbase if the worker is running or it's required
@@ -1143,7 +1128,7 @@ func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
}
// Create an empty block based on temporary copied state for
// sealing in advance without waiting block execution finished.
- if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
+ if !noempty && !w.noempty.Load() {
w.commit(work.copy(), nil, false, start)
}
// Fill pending transactions from the txpool into the block.
diff --git a/miner/worker_test.go b/miner/worker_test.go
index e60de6793..9db64a240 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -454,11 +454,11 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
progress = make(chan struct{}, 10)
result = make([]float64, 0, 10)
index = 0
- start uint32
+ start atomic.Bool
)
w.resubmitHook = func(minInterval time.Duration, recommitInterval time.Duration) {
// Short circuit if interval checking hasn't started.
- if atomic.LoadUint32(&start) == 0 {
+ if !start.Load() {
return
}
var wantMinInterval, wantRecommitInterval time.Duration
@@ -493,7 +493,7 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
w.start()
time.Sleep(time.Second) // Ensure two tasks have been submitted due to start opt
- atomic.StoreUint32(&start, 1)
+ start.Store(true)
w.setRecommitInterval(3 * time.Second)
select {
diff --git a/node/node_test.go b/node/node_test.go
index 560d487fa..04810a815 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -615,6 +615,7 @@ func doHTTPRequest(t *testing.T, req *http.Request) *http.Response {
if err != nil {
t.Fatalf("could not issue a GET request to the given endpoint: %v", err)
}
+ t.Cleanup(func() { resp.Body.Close() })
return resp
}
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index 4d10e61e2..0790dddec 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -320,6 +320,7 @@ func baseRpcRequest(t *testing.T, url, bodyStr string, extraHeaders ...string) *
if err != nil {
t.Fatal(err)
}
+ t.Cleanup(func() { resp.Body.Close() })
return resp
}
diff --git a/p2p/server.go b/p2p/server.go
index 610b82d78..f7bf948b6 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -94,7 +94,6 @@ type Config struct {
DiscoveryV5 bool `toml:",omitempty"`
// Name sets the node name of this server.
- // Use common.MakeName to create a name that follows existing conventions.
Name string `toml:"-"`
// BootstrapNodes are used to establish connectivity
@@ -444,7 +443,7 @@ func (srv *Server) Start() (err error) {
return errors.New("server already running")
}
srv.running = true
- srv.log = srv.Config.Logger
+ srv.log = srv.Logger
if srv.log == nil {
srv.log = log.Root()
}
@@ -502,7 +501,7 @@ func (srv *Server) setupLocalNode() error {
sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps))
// Create the local node.
- db, err := enode.OpenDB(srv.Config.NodeDatabase)
+ db, err := enode.OpenDB(srv.NodeDatabase)
if err != nil {
return err
}
diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go
index 7bfa8aab6..1d812514d 100644
--- a/p2p/simulations/adapters/exec.go
+++ b/p2p/simulations/adapters/exec.go
@@ -428,9 +428,11 @@ func execP2PNode() {
// Send status to the host.
statusJSON, _ := json.Marshal(status)
- if _, err := http.Post(statusURL, "application/json", bytes.NewReader(statusJSON)); err != nil {
+ resp, err := http.Post(statusURL, "application/json", bytes.NewReader(statusJSON))
+ if err != nil {
log.Crit("Can't post startup info", "url", statusURL, "err", err)
}
+ resp.Body.Close()
if stackErr != nil {
os.Exit(1)
}
diff --git a/p2p/simulations/mocker_test.go b/p2p/simulations/mocker_test.go
index 56d81942b..0112ee5cf 100644
--- a/p2p/simulations/mocker_test.go
+++ b/p2p/simulations/mocker_test.go
@@ -124,6 +124,7 @@ func TestMocker(t *testing.T) {
if err != nil {
t.Fatalf("Could not start mocker: %s", err)
}
+ resp.Body.Close()
if resp.StatusCode != 200 {
t.Fatalf("Invalid Status Code received for starting mocker, expected 200, got %d", resp.StatusCode)
}
@@ -145,15 +146,17 @@ func TestMocker(t *testing.T) {
if err != nil {
t.Fatalf("Could not stop mocker: %s", err)
}
+ resp.Body.Close()
if resp.StatusCode != 200 {
t.Fatalf("Invalid Status Code received for stopping mocker, expected 200, got %d", resp.StatusCode)
}
//reset the network
- _, err = http.Post(s.URL+"/reset", "", nil)
+ resp, err = http.Post(s.URL+"/reset", "", nil)
if err != nil {
t.Fatalf("Could not reset network: %s", err)
}
+ resp.Body.Close()
//now the number of nodes in the network should be zero
nodesInfo, err = client.GetNodes()
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 45e27c441..4ae94cfbd 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -31,10 +31,12 @@ var MainnetBootnodes = []string{
// SepoliaBootnodes are the enode URLs of the P2P bootstrap nodes running on the
// Sepolia test network.
var SepoliaBootnodes = []string{
- // geth
- "enode://9246d00bc8fd1742e5ad2428b80fc4dc45d786283e05ef6edbd9002cbc335d40998444732fbe921cb88e1d2c73d1b1de53bae6a2237996e9bfe14f871baf7066@18.168.182.86:30303",
- // besu
- "enode://ec66ddcf1a974950bd4c782789a7e04f8aa7110a72569b6e65fcd51e937e74eed303b1ea734e4d19cfaec9fbff9b6ee65bf31dcb50ba79acce9dd63a6aca61c7@52.14.151.177:30303",
+ // EF DevOps
+ "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3
+ "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3
+ "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1
+ "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1
+ "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3
}
// RinkebyBootnodes are the enode URLs of the P2P bootstrap nodes running on the
diff --git a/params/config.go b/params/config.go
index e04b16673..82d00bc8f 100644
--- a/params/config.go
+++ b/params/config.go
@@ -62,7 +62,6 @@ var (
DAOForkBlock: big.NewInt(1_920_000),
DAOForkSupport: true,
EIP150Block: big.NewInt(2_463_000),
- EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
EIP155Block: big.NewInt(2_675_000),
EIP158Block: big.NewInt(2_675_000),
ByzantiumBlock: big.NewInt(4_370_000),
@@ -139,7 +138,6 @@ var (
DAOForkBlock: nil,
DAOForkSupport: true,
EIP150Block: big.NewInt(2),
- EIP150Hash: common.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(1_035_301),
@@ -231,7 +229,6 @@ var (
DAOForkBlock: nil,
DAOForkSupport: false,
EIP150Block: big.NewInt(0),
- EIP150Hash: common.Hash{},
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
@@ -261,7 +258,6 @@ var (
DAOForkBlock: nil,
DAOForkSupport: false,
EIP150Block: big.NewInt(0),
- EIP150Hash: common.Hash{},
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
@@ -291,7 +287,6 @@ var (
DAOForkBlock: nil,
DAOForkSupport: false,
EIP150Block: big.NewInt(0),
- EIP150Hash: common.Hash{},
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
@@ -321,7 +316,6 @@ var (
DAOForkBlock: nil,
DAOForkSupport: false,
EIP150Block: nil,
- EIP150Hash: common.Hash{},
EIP155Block: nil,
EIP158Block: nil,
ByzantiumBlock: nil,
@@ -415,9 +409,7 @@ type ChainConfig struct {
DAOForkSupport bool `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork
// EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150)
- EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork)
- EIP150Hash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (needed for header only clients as only gas pricing changed)
-
+ EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork)
EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block
EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block
@@ -956,7 +948,7 @@ type Rules struct {
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
IsBerlin, IsLondon bool
- IsMerge, IsShanghai, isCancun, isPrague bool
+ IsMerge, IsShanghai, IsCancun, IsPrague bool
}
// Rules ensures c's ChainID is not nil.
@@ -979,7 +971,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules
IsLondon: c.IsLondon(num),
IsMerge: isMerge,
IsShanghai: c.IsShanghai(timestamp),
- isCancun: c.IsCancun(timestamp),
- isPrague: c.IsPrague(timestamp),
+ IsCancun: c.IsCancun(timestamp),
+ IsPrague: c.IsPrague(timestamp),
}
}
diff --git a/params/protocol_params.go b/params/protocol_params.go
index bb703d0b7..ab5ed13c0 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -159,6 +159,9 @@ const (
// up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529
RefundQuotient uint64 = 2
RefundQuotientEIP3529 uint64 = 5
+
+ BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs
+ BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price
)
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
diff --git a/params/version.go b/params/version.go
index 2ac4a554b..734809304 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 11 // Minor version component of the current release
- VersionPatch = 5 // Patch version component of the current release
+ VersionPatch = 6 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
index ff3b8385a..3b4f5df28 100644
--- a/rlp/rlpgen/gen_test.go
+++ b/rlp/rlpgen/gen_test.go
@@ -75,7 +75,7 @@ func TestOutput(t *testing.T) {
t.Fatal("error loading expected test output:", err)
}
if !bytes.Equal(output, wantOutput) {
- t.Fatal("output mismatch:\n", string(output))
+ t.Fatalf("output mismatch, want: %v got %v", string(wantOutput), string(output))
}
})
}
diff --git a/rpc/http_test.go b/rpc/http_test.go
index 528e1bcfc..584842a9a 100644
--- a/rpc/http_test.go
+++ b/rpc/http_test.go
@@ -94,6 +94,7 @@ func confirmHTTPRequestYieldsStatusCode(t *testing.T, method, contentType, body
if err != nil {
t.Fatalf("request failed: %v", err)
}
+ resp.Body.Close()
confirmStatusCode(t, resp.StatusCode, expectedStatusCode)
}
diff --git a/signer/core/testdata/expfail_extradata-2.json b/signer/core/testdata/expfail_extradata.json
similarity index 100%
rename from signer/core/testdata/expfail_extradata-2.json
rename to signer/core/testdata/expfail_extradata.json
diff --git a/signer/core/testdata/expfail_extradata-1.json b/signer/core/testdata/expfail_nonexistant_type2.json
similarity index 100%
rename from signer/core/testdata/expfail_extradata-1.json
rename to signer/core/testdata/expfail_nonexistant_type2.json
diff --git a/signer/core/uiapi.go b/signer/core/uiapi.go
index 59466d8fa..924203a13 100644
--- a/signer/core/uiapi.go
+++ b/signer/core/uiapi.go
@@ -111,7 +111,11 @@ func (s *UIServerAPI) DeriveAccount(url string, path string, pin *bool) (account
// fetchKeystore retrieves the encrypted keystore from the account manager.
func fetchKeystore(am *accounts.Manager) *keystore.KeyStore {
- return am.Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
+ ks := am.Backends(keystore.KeyStoreType)
+ if len(ks) == 0 {
+ return nil
+ }
+ return ks[0].(*keystore.KeyStore)
}
// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go
index a8273303e..03e14df7c 100644
--- a/tests/difficulty_test.go
+++ b/tests/difficulty_test.go
@@ -20,7 +20,6 @@ import (
"math/big"
"testing"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
)
@@ -31,7 +30,6 @@ var (
DAOForkBlock: big.NewInt(1920000),
DAOForkSupport: true,
EIP150Block: big.NewInt(2463000),
- EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
EIP155Block: big.NewInt(2675000),
EIP158Block: big.NewInt(2675000),
ByzantiumBlock: big.NewInt(4370000),
@@ -43,7 +41,6 @@ var (
DAOForkBlock: nil,
DAOForkSupport: true,
EIP150Block: big.NewInt(0),
- EIP150Hash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"),
EIP155Block: big.NewInt(10),
EIP158Block: big.NewInt(10),
ByzantiumBlock: big.NewInt(1_700_000),
diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go
index 924a749e5..926de0458 100644
--- a/tests/fuzzers/les/les-fuzzer.go
+++ b/tests/fuzzers/les/les-fuzzer.go
@@ -93,13 +93,13 @@ func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [
// The element in CHT is ->
key := make([]byte, 8)
binary.BigEndian.PutUint64(key, uint64(i+1))
- chtTrie.Update(key, []byte{0x1, 0xf})
+ chtTrie.MustUpdate(key, []byte{0x1, 0xf})
chtKeys = append(chtKeys, key)
// The element in Bloom trie is <2 byte bit index> + -> bloom
key2 := make([]byte, 10)
binary.BigEndian.PutUint64(key2[2:], uint64(i+1))
- bloomTrie.Update(key2, []byte{0x2, 0xe})
+ bloomTrie.MustUpdate(key2, []byte{0x2, 0xe})
bloomKeys = append(bloomKeys, key2)
}
return
diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
index bca93bbe1..2881c7a7c 100644
--- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
+++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
@@ -69,8 +69,8 @@ func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
for i := byte(0); i < byte(size); i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
- trie.Update(value.k, value.v)
- trie.Update(value2.k, value2.v)
+ trie.MustUpdate(value.k, value.v)
+ trie.MustUpdate(value2.k, value2.v)
vals[string(value.k)] = value
vals[string(value2.k)] = value2
}
@@ -82,7 +82,7 @@ func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
k := f.randBytes(32)
v := f.randBytes(20)
value := &kv{k, v, false}
- trie.Update(k, v)
+ trie.MustUpdate(k, v)
vals[string(k)] = value
if f.exhausted {
return nil, nil
diff --git a/tests/fuzzers/rlp/rlp_fuzzer.go b/tests/fuzzers/rlp/rlp_fuzzer.go
index ac02e1651..9fcdb5776 100644
--- a/tests/fuzzers/rlp/rlp_fuzzer.go
+++ b/tests/fuzzers/rlp/rlp_fuzzer.go
@@ -19,9 +19,11 @@ package rlp
import (
"bytes"
"fmt"
+ "math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/uint256"
)
func decodeEncode(input []byte, val interface{}, i int) {
@@ -126,5 +128,16 @@ func Fuzz(input []byte) int {
var rs types.Receipts
decodeEncode(input, &rs, i)
}
+ {
+ i++
+ var v struct {
+ AnIntPtr *big.Int
+ AnInt big.Int
+ AnU256Ptr *uint256.Int
+ AnU256 uint256.Int
+ NotAnU256 [4]uint64
+ }
+ decodeEncode(input, &v, i)
+ }
return 1
}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 435aa3a47..809dba8ce 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -175,7 +175,7 @@ func (f *fuzzer) fuzz() int {
}
keys[string(k)] = struct{}{}
vals = append(vals, kv{k: k, v: v})
- trieA.Update(k, v)
+ trieA.MustUpdate(k, v)
useful = true
}
if !useful {
@@ -195,7 +195,7 @@ func (f *fuzzer) fuzz() int {
if f.debugging {
fmt.Printf("{\"%#x\" , \"%#x\"} // stacktrie.Update\n", kv.k, kv.v)
}
- trieB.Update(kv.k, kv.v)
+ trieB.MustUpdate(kv.k, kv.v)
}
rootB := trieB.Hash()
trieB.Commit()
@@ -223,7 +223,7 @@ func (f *fuzzer) fuzz() int {
checked int
)
for _, kv := range vals {
- trieC.Update(kv.k, kv.v)
+ trieC.MustUpdate(kv.k, kv.v)
}
rootC, _ := trieC.Commit()
if rootA != rootC {
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 40dec76b8..c0cbceff3 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -147,13 +147,13 @@ func runRandTest(rt randTest) error {
for i, step := range rt {
switch step.op {
case opUpdate:
- tr.Update(step.key, step.value)
+ tr.MustUpdate(step.key, step.value)
values[string(step.key)] = string(step.value)
case opDelete:
- tr.Delete(step.key)
+ tr.MustDelete(step.key)
delete(values, string(step.key))
case opGet:
- v := tr.Get(step.key)
+ v := tr.MustGet(step.key)
want := values[string(step.key)]
if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
@@ -176,7 +176,7 @@ func runRandTest(rt randTest) error {
checktr := trie.NewEmpty(triedb)
it := trie.NewIterator(tr.NodeIterator(nil))
for it.Next() {
- checktr.Update(it.Key, it.Value)
+ checktr.MustUpdate(it.Key, it.Value)
}
if tr.Hash() != checktr.Hash() {
return fmt.Errorf("hash mismatch in opItercheckhash")
diff --git a/tests/state_test.go b/tests/state_test.go
index 787427f01..9d3862e1d 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -115,8 +115,7 @@ func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
}
buf := new(bytes.Buffer)
w := bufio.NewWriter(buf)
- tracer := logger.NewJSONLogger(&logger.Config{}, w)
- config.Debug, config.Tracer = true, tracer
+ config.Tracer = logger.NewJSONLogger(&logger.Config{}, w)
err2 := test(config)
if !reflect.DeepEqual(err, err2) {
t.Errorf("different error for second run: %v", err2)
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 98acc468a..14b6fe534 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
@@ -284,7 +285,7 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
}
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {
- sdb := state.NewDatabase(db)
+ sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
diff --git a/trie/errors.go b/trie/errors.go
index afe344bed..bd82b950a 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete)
+// MissingNodeError is returned by the trie functions (Get, Update, Delete)
// in the case where a trie node is not present in the local database. It contains
// information necessary for retrieving the missing node.
type MissingNodeError struct {
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index ac2a3d2a4..6dc38db6f 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -58,7 +58,7 @@ func TestIterator(t *testing.T) {
all := make(map[string]string)
for _, val := range vals {
all[val.k] = val.v
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -89,8 +89,8 @@ func TestIteratorLargeData(t *testing.T) {
for i := byte(0); i < 255; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false}
- trie.Update(value.k, value.v)
- trie.Update(value2.k, value2.v)
+ trie.MustUpdate(value.k, value.v)
+ trie.MustUpdate(value2.k, value2.v)
vals[string(value.k)] = value
vals[string(value2.k)] = value2
}
@@ -178,7 +178,7 @@ var testdata2 = []kvs{
func TestIteratorSeek(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for _, val := range testdata1 {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
// Seek to the middle.
@@ -220,7 +220,7 @@ func TestDifferenceIterator(t *testing.T) {
dba := NewDatabase(rawdb.NewMemoryDatabase())
triea := NewEmpty(dba)
for _, val := range testdata1 {
- triea.Update([]byte(val.k), []byte(val.v))
+ triea.MustUpdate([]byte(val.k), []byte(val.v))
}
rootA, nodesA := triea.Commit(false)
dba.Update(NewWithNodeSet(nodesA))
@@ -229,7 +229,7 @@ func TestDifferenceIterator(t *testing.T) {
dbb := NewDatabase(rawdb.NewMemoryDatabase())
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
- trieb.Update([]byte(val.k), []byte(val.v))
+ trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
rootB, nodesB := trieb.Commit(false)
dbb.Update(NewWithNodeSet(nodesB))
@@ -262,7 +262,7 @@ func TestUnionIterator(t *testing.T) {
dba := NewDatabase(rawdb.NewMemoryDatabase())
triea := NewEmpty(dba)
for _, val := range testdata1 {
- triea.Update([]byte(val.k), []byte(val.v))
+ triea.MustUpdate([]byte(val.k), []byte(val.v))
}
rootA, nodesA := triea.Commit(false)
dba.Update(NewWithNodeSet(nodesA))
@@ -271,7 +271,7 @@ func TestUnionIterator(t *testing.T) {
dbb := NewDatabase(rawdb.NewMemoryDatabase())
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
- trieb.Update([]byte(val.k), []byte(val.v))
+ trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
rootB, nodesB := trieb.Commit(false)
dbb.Update(NewWithNodeSet(nodesB))
@@ -314,7 +314,7 @@ func TestUnionIterator(t *testing.T) {
func TestIteratorNoDups(t *testing.T) {
tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for _, val := range testdata1 {
- tr.Update([]byte(val.k), []byte(val.v))
+ tr.MustUpdate([]byte(val.k), []byte(val.v))
}
checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
}
@@ -329,7 +329,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
tr := NewEmpty(triedb)
for _, val := range testdata1 {
- tr.Update([]byte(val.k), []byte(val.v))
+ tr.MustUpdate([]byte(val.k), []byte(val.v))
}
_, nodes := tr.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
@@ -421,7 +421,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
ctr := NewEmpty(triedb)
for _, val := range testdata1 {
- ctr.Update([]byte(val.k), []byte(val.v))
+ ctr.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes := ctr.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
@@ -540,7 +540,7 @@ func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) {
binary.BigEndian.PutUint64(val, uint64(i))
key = crypto.Keccak256(key)
val = crypto.Keccak256(val)
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
}
_, nodes := trie.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
@@ -580,7 +580,7 @@ func TestIteratorNodeBlob(t *testing.T) {
all := make(map[string]string)
for _, val := range vals {
all[val.k] = val.v
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
_, nodes := trie.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
diff --git a/trie/proof.go b/trie/proof.go
index f11dfc47a..65df7577b 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -498,7 +498,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
if proof == nil {
tr := NewStackTrie(nil)
for index, key := range keys {
- tr.TryUpdate(key, values[index])
+ tr.Update(key, values[index])
}
if have, want := tr.Hash(), rootHash; have != want {
return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
@@ -568,7 +568,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
tr.root = nil
}
for index, key := range keys {
- tr.TryUpdate(key, values[index])
+ tr.Update(key, values[index])
}
if tr.Hash() != rootHash {
return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 6b23bcdb2..69e3f8e9c 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -403,7 +403,7 @@ func TestOneElementRangeProof(t *testing.T) {
// Test the mini trie with only a single element.
tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
entry := &kv{randBytes(32), randBytes(20), false}
- tinyTrie.Update(entry.k, entry.v)
+ tinyTrie.MustUpdate(entry.k, entry.v)
first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
last = entry.k
@@ -477,7 +477,7 @@ func TestSingleSideRangeProof(t *testing.T) {
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
- trie.Update(value.k, value.v)
+ trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
sort.Sort(entries)
@@ -512,7 +512,7 @@ func TestReverseSingleSideRangeProof(t *testing.T) {
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
- trie.Update(value.k, value.v)
+ trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
sort.Sort(entries)
@@ -619,7 +619,7 @@ func TestGappedRangeProof(t *testing.T) {
var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- trie.Update(value.k, value.v)
+ trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
first, last := 2, 8
@@ -693,7 +693,7 @@ func TestHasRightElement(t *testing.T) {
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
- trie.Update(value.k, value.v)
+ trie.MustUpdate(value.k, value.v)
entries = append(entries, value)
}
sort.Sort(entries)
@@ -1047,14 +1047,14 @@ func randomTrie(n int) (*Trie, map[string]*kv) {
for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
- trie.Update(value.k, value.v)
- trie.Update(value2.k, value2.v)
+ trie.MustUpdate(value.k, value.v)
+ trie.MustUpdate(value2.k, value2.v)
vals[string(value.k)] = value
vals[string(value2.k)] = value2
}
for i := 0; i < n; i++ {
value := &kv{randBytes(32), randBytes(20), false}
- trie.Update(value.k, value.v)
+ trie.MustUpdate(value.k, value.v)
vals[string(value.k)] = value
}
return trie, vals
@@ -1071,7 +1071,7 @@ func nonRandomTrie(n int) (*Trie, map[string]*kv) {
binary.LittleEndian.PutUint64(value, i-max)
//value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
elem := &kv{key, value, false}
- trie.Update(elem.k, elem.v)
+ trie.MustUpdate(elem.k, elem.v)
vals[string(elem.k)] = elem
}
return trie, vals
@@ -1088,7 +1088,7 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
}
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i, key := range keys {
- trie.Update(key, vals[i])
+ trie.MustUpdate(key, vals[i])
}
root := trie.Hash()
proof := memorydb.New()
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 83b92cebd..5bfd24650 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -19,7 +19,6 @@ package trie
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -72,29 +71,28 @@ func NewStateTrie(id *ID, db *Database) (*StateTrie, error) {
return &StateTrie{trie: *trie, preimages: db.preimages}, nil
}
-// Get returns the value for key stored in the trie.
+// MustGet returns the value for key stored in the trie.
// The value bytes must not be modified by the caller.
-func (t *StateTrie) Get(key []byte) []byte {
- res, err := t.TryGet(key)
- if err != nil {
- log.Error("Unhandled trie error in StateTrie.Get", "err", err)
- }
- return res
+//
+// This function will omit any encountered error but just
+// print out an error message.
+func (t *StateTrie) MustGet(key []byte) []byte {
+ return t.trie.MustGet(t.hashKey(key))
}
-// TryGet returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-// If the specified node is not in the trie, nil will be returned.
+// GetStorage attempts to retrieve a storage slot with provided account address
+// and slot key. The value bytes must not be modified by the caller.
+// If the specified storage slot is not in the trie, nil will be returned.
// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) TryGet(key []byte) ([]byte, error) {
- return t.trie.TryGet(t.hashKey(key))
+func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
+ return t.trie.Get(t.hashKey(key))
}
-// TryGetAccount attempts to retrieve an account with provided account address.
+// GetAccount attempts to retrieve an account with provided account address.
// If the specified account is not in the trie, nil will be returned.
// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) TryGetAccount(address common.Address) (*types.StateAccount, error) {
- res, err := t.trie.TryGet(t.hashKey(address.Bytes()))
+func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
+ res, err := t.trie.Get(t.hashKey(address.Bytes()))
if res == nil || err != nil {
return nil, err
}
@@ -103,11 +101,11 @@ func (t *StateTrie) TryGetAccount(address common.Address) (*types.StateAccount,
return ret, err
}
-// TryGetAccountByHash does the same thing as TryGetAccount, however
-// it expects an account hash that is the hash of address. This constitutes an
-// abstraction leak, since the client code needs to know the key format.
-func (t *StateTrie) TryGetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) {
- res, err := t.trie.TryGet(addrHash.Bytes())
+// GetAccountByHash does the same thing as GetAccount, however it expects an
+// account hash that is the hash of address. This constitutes an abstraction
+// leak, since the client code needs to know the key format.
+func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) {
+ res, err := t.trie.Get(addrHash.Bytes())
if res == nil || err != nil {
return nil, err
}
@@ -116,27 +114,30 @@ func (t *StateTrie) TryGetAccountByHash(addrHash common.Hash) (*types.StateAccou
return ret, err
}
-// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not
+// GetNode attempts to retrieve a trie node by compact-encoded path. It is not
// possible to use keybyte-encoding as the path might contain odd nibbles.
// If the specified trie node is not in the trie, nil will be returned.
// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) TryGetNode(path []byte) ([]byte, int, error) {
- return t.trie.TryGetNode(path)
+func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) {
+ return t.trie.GetNode(path)
}
-// Update associates key with value in the trie. Subsequent calls to
+// MustUpdate associates key with value in the trie. Subsequent calls to
// Get will return value. If value has length zero, any existing value
// is deleted from the trie and calls to Get will return nil.
//
// The value bytes must not be modified by the caller while they are
// stored in the trie.
-func (t *StateTrie) Update(key, value []byte) {
- if err := t.TryUpdate(key, value); err != nil {
- log.Error("Unhandled trie error in StateTrie.Update", "err", err)
- }
+//
+// This function will omit any encountered error but just print out an
+// error message.
+func (t *StateTrie) MustUpdate(key, value []byte) {
+ hk := t.hashKey(key)
+ t.trie.MustUpdate(hk, value)
+ t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
}
-// TryUpdate associates key with value in the trie. Subsequent calls to
+// UpdateStorage associates key with value in the trie. Subsequent calls to
// Get will return value. If value has length zero, any existing value
// is deleted from the trie and calls to Get will return nil.
//
@@ -144,9 +145,9 @@ func (t *StateTrie) Update(key, value []byte) {
// stored in the trie.
//
// If a node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) TryUpdate(key, value []byte) error {
+func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
hk := t.hashKey(key)
- err := t.trie.TryUpdate(hk, value)
+ err := t.trie.Update(hk, value)
if err != nil {
return err
}
@@ -154,42 +155,42 @@ func (t *StateTrie) TryUpdate(key, value []byte) error {
return nil
}
-// TryUpdateAccount account will abstract the write of an account to the
-// secure trie.
-func (t *StateTrie) TryUpdateAccount(address common.Address, acc *types.StateAccount) error {
+// UpdateAccount will abstract the write of an account to the secure trie.
+func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error {
hk := t.hashKey(address.Bytes())
data, err := rlp.EncodeToBytes(acc)
if err != nil {
return err
}
- if err := t.trie.TryUpdate(hk, data); err != nil {
+ if err := t.trie.Update(hk, data); err != nil {
return err
}
t.getSecKeyCache()[string(hk)] = address.Bytes()
return nil
}
-// Delete removes any existing value for key from the trie.
-func (t *StateTrie) Delete(key []byte) {
- if err := t.TryDelete(key); err != nil {
- log.Error("Unhandled trie error in StateTrie.Delete", "err", err)
- }
-}
-
-// TryDelete removes any existing value for key from the trie.
-// If the specified trie node is not in the trie, nothing will be changed.
-// If a node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) TryDelete(key []byte) error {
+// MustDelete removes any existing value for key from the trie. This function
+// will omit any encountered error but just print out an error message.
+func (t *StateTrie) MustDelete(key []byte) {
hk := t.hashKey(key)
delete(t.getSecKeyCache(), string(hk))
- return t.trie.TryDelete(hk)
+ t.trie.MustDelete(hk)
}
-// TryDeleteAccount abstracts an account deletion from the trie.
-func (t *StateTrie) TryDeleteAccount(address common.Address) error {
+// DeleteStorage removes any existing storage slot from the trie.
+// If the specified trie node is not in the trie, nothing will be changed.
+// If a node is not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error {
+ hk := t.hashKey(key)
+ delete(t.getSecKeyCache(), string(hk))
+ return t.trie.Delete(hk)
+}
+
+// DeleteAccount abstracts an account deletion from the trie.
+func (t *StateTrie) DeleteAccount(address common.Address) error {
hk := t.hashKey(address.Bytes())
delete(t.getSecKeyCache(), string(hk))
- return t.trie.TryDelete(hk)
+ return t.trie.Delete(hk)
}
// GetKey returns the sha3 preimage of a hashed key that was
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index d3e6c6706..a55c10a60 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -45,17 +45,17 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
// Map the same data under multiple keys
key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
// Add some other data to inflate the trie
for j := byte(3); j < 13; j++ {
key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
}
}
root, nodes := trie.Commit(false)
@@ -81,9 +81,9 @@ func TestSecureDelete(t *testing.T) {
}
for _, val := range vals {
if val.v != "" {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
} else {
- trie.Delete([]byte(val.k))
+ trie.MustDelete([]byte(val.k))
}
}
hash := trie.Hash()
@@ -95,13 +95,13 @@ func TestSecureDelete(t *testing.T) {
func TestSecureGetKey(t *testing.T) {
trie := newEmptySecure()
- trie.Update([]byte("foo"), []byte("bar"))
+ trie.MustUpdate([]byte("foo"), []byte("bar"))
key := []byte("foo")
value := []byte("bar")
seckey := crypto.Keccak256(key)
- if !bytes.Equal(trie.Get(key), value) {
+ if !bytes.Equal(trie.MustGet(key), value) {
t.Errorf("Get did not return bar")
}
if k := trie.GetKey(seckey); !bytes.Equal(k, key) {
@@ -128,15 +128,15 @@ func TestStateTrieConcurrency(t *testing.T) {
for j := byte(0); j < 255; j++ {
// Map the same data under multiple keys
key, val := common.LeftPadBytes([]byte{byte(index), 1, j}, 32), []byte{j}
- tries[index].Update(key, val)
+ tries[index].MustUpdate(key, val)
key, val = common.LeftPadBytes([]byte{byte(index), 2, j}, 32), []byte{j}
- tries[index].Update(key, val)
+ tries[index].MustUpdate(key, val)
// Add some other data to inflate the trie
for k := byte(3); k < 13; k++ {
key, val = common.LeftPadBytes([]byte{byte(index), k, j}, 32), []byte{k, j}
- tries[index].Update(key, val)
+ tries[index].MustUpdate(key, val)
}
}
tries[index].Commit(false)
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 83034e29a..030d2a596 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -202,8 +202,8 @@ const (
hashedNode
)
-// TryUpdate inserts a (key, value) pair into the stack trie
-func (st *StackTrie) TryUpdate(key, value []byte) error {
+// Update inserts a (key, value) pair into the stack trie.
+func (st *StackTrie) Update(key, value []byte) error {
k := keybytesToHex(key)
if len(value) == 0 {
panic("deletion not supported")
@@ -212,8 +212,10 @@ func (st *StackTrie) TryUpdate(key, value []byte) error {
return nil
}
-func (st *StackTrie) Update(key, value []byte) {
- if err := st.TryUpdate(key, value); err != nil {
+// MustUpdate is a wrapper of Update and will omit any encountered error but
+// just print out an error message.
+func (st *StackTrie) MustUpdate(key, value []byte) {
+ if err := st.Update(key, value); err != nil {
log.Error("Unhandled trie error in StackTrie.Update", "err", err)
}
}
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index 3e6cc8cd5..ea3eef788 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -174,7 +174,7 @@ func TestStackTrieInsertAndHash(t *testing.T) {
st.Reset()
for j := 0; j < l; j++ {
kv := &test[j]
- if err := st.TryUpdate(common.FromHex(kv.K), []byte(kv.V)); err != nil {
+ if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil {
t.Fatal(err)
}
}
@@ -193,8 +193,8 @@ func TestSizeBug(t *testing.T) {
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
- nt.TryUpdate(leaf, value)
- st.TryUpdate(leaf, value)
+ nt.Update(leaf, value)
+ st.Update(leaf, value)
if nt.Hash() != st.Hash() {
t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
@@ -218,8 +218,8 @@ func TestEmptyBug(t *testing.T) {
}
for _, kv := range kvs {
- nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
- st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+ nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
+ st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
}
if nt.Hash() != st.Hash() {
@@ -241,8 +241,8 @@ func TestValLength56(t *testing.T) {
}
for _, kv := range kvs {
- nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
- st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+ nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
+ st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
}
if nt.Hash() != st.Hash() {
@@ -263,8 +263,8 @@ func TestUpdateSmallNodes(t *testing.T) {
{"65", "3000"}, // stacktrie.Update
}
for _, kv := range kvs {
- nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
- st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+ nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
+ st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
}
if nt.Hash() != st.Hash() {
t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
@@ -291,8 +291,8 @@ func TestUpdateVariableKeys(t *testing.T) {
{"0x3330353463653239356131303167617430", "313131"},
}
for _, kv := range kvs {
- nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
- st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V))
+ nt.Update(common.FromHex(kv.K), common.FromHex(kv.V))
+ st.Update(common.FromHex(kv.K), common.FromHex(kv.V))
}
if nt.Hash() != st.Hash() {
t.Fatalf("error %x != %x", st.Hash(), nt.Hash())
@@ -309,7 +309,7 @@ func TestStacktrieNotModifyValues(t *testing.T) {
value := make([]byte, 1, 100)
value[0] = 0x2
want := common.CopyBytes(value)
- st.TryUpdate([]byte{0x01}, value)
+ st.Update([]byte{0x01}, value)
st.Hash()
if have := value; !bytes.Equal(have, want) {
t.Fatalf("tiny trie: have %#x want %#x", have, want)
@@ -330,7 +330,7 @@ func TestStacktrieNotModifyValues(t *testing.T) {
for i := 0; i < 1000; i++ {
key := common.BigToHash(keyB)
value := getValue(i)
- st.TryUpdate(key.Bytes(), value)
+ st.Update(key.Bytes(), value)
vals = append(vals, value)
keyB = keyB.Add(keyB, keyDelta)
keyDelta.Add(keyDelta, common.Big1)
@@ -371,7 +371,7 @@ func TestStacktrieSerialization(t *testing.T) {
keyDelta.Add(keyDelta, common.Big1)
}
for i, k := range keys {
- nt.TryUpdate(k, common.CopyBytes(vals[i]))
+ nt.Update(k, common.CopyBytes(vals[i]))
}
for i, k := range keys {
@@ -384,7 +384,7 @@ func TestStacktrieSerialization(t *testing.T) {
t.Fatal(err)
}
st = newSt
- st.TryUpdate(k, common.CopyBytes(vals[i]))
+ st.Update(k, common.CopyBytes(vals[i]))
}
if have, want := st.Hash(), nt.Hash(); have != want {
t.Fatalf("have %#x want %#x", have, want)
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 8fec37833..70898604f 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -40,17 +40,17 @@ func makeTestTrie() (*Database, *StateTrie, map[string][]byte) {
// Map the same data under multiple keys
key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
// Add some other data to inflate the trie
for j := byte(3); j < 13; j++ {
key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
}
}
root, nodes := trie.Commit(false)
@@ -74,7 +74,7 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
t.Fatalf("inconsistent trie at %x: %v", root, err)
}
for key, val := range content {
- if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
+ if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) {
t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
}
}
@@ -154,7 +154,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
}
} else {
for i, element := range elements {
- data, _, err := srcTrie.TryGetNode(element.syncPath[len(element.syncPath)-1])
+ data, _, err := srcTrie.GetNode(element.syncPath[len(element.syncPath)-1])
if err != nil {
t.Fatalf("failed to retrieve node data for path %x: %v", element.path, err)
}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index 5e627c89c..1b9f44108 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -64,7 +64,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
// Determine all new nodes are tracked
for _, val := range vals {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
@@ -82,7 +82,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
// Determine all deletions are tracked
trie, _ = New(TrieID(root), db)
for _, val := range vals {
- trie.Delete([]byte(val.k))
+ trie.MustDelete([]byte(val.k))
}
insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes)
if !compareSet(insertSet, nil) {
@@ -104,10 +104,10 @@ func TestTrieTracerNoop(t *testing.T) {
func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for _, val := range vals {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
for _, val := range vals {
- trie.Delete([]byte(val.k))
+ trie.MustDelete([]byte(val.k))
}
if len(trie.tracer.inserts) != 0 {
t.Fatal("Unexpected insertion set")
@@ -132,7 +132,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
)
// Create trie from scratch
for _, val := range vals {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -146,7 +146,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, val := range vals {
- trie.Update([]byte(val.k), randBytes(32))
+ trie.MustUpdate([]byte(val.k), randBytes(32))
}
root, nodes = trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -163,7 +163,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
for i := 0; i < 30; i++ {
key := randBytes(32)
keys = append(keys, string(key))
- trie.Update(key, randBytes(32))
+ trie.MustUpdate(key, randBytes(32))
}
root, nodes = trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -177,7 +177,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, key := range keys {
- trie.Update([]byte(key), nil)
+ trie.MustUpdate([]byte(key), nil)
}
root, nodes = trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -191,7 +191,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, val := range vals {
- trie.Update([]byte(val.k), nil)
+ trie.MustUpdate([]byte(val.k), nil)
}
root, nodes = trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -210,7 +210,7 @@ func TestAccessListLeak(t *testing.T) {
)
// Create trie from scratch
for _, val := range standard {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
@@ -260,7 +260,7 @@ func TestTinyTree(t *testing.T) {
trie = NewEmpty(db)
)
for _, val := range tiny {
- trie.Update([]byte(val.k), randBytes(32))
+ trie.MustUpdate([]byte(val.k), randBytes(32))
}
root, set := trie.Commit(false)
db.Update(NewWithNodeSet(set))
@@ -268,7 +268,7 @@ func TestTinyTree(t *testing.T) {
trie, _ = New(TrieID(root), db)
orig := trie.Copy()
for _, val := range tiny {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, set = trie.Commit(false)
db.Update(NewWithNodeSet(set))
diff --git a/trie/trie.go b/trie/trie.go
index 17bacba00..4b6f6a55b 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -105,28 +105,30 @@ func (t *Trie) NodeIterator(start []byte) NodeIterator {
return newNodeIterator(t, start)
}
-// Get returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-func (t *Trie) Get(key []byte) []byte {
- res, err := t.TryGet(key)
+// MustGet is a wrapper of Get and will omit any encountered error but just
+// print out an error message.
+func (t *Trie) MustGet(key []byte) []byte {
+ res, err := t.Get(key)
if err != nil {
log.Error("Unhandled trie error in Trie.Get", "err", err)
}
return res
}
-// TryGet returns the value for key stored in the trie.
+// Get returns the value for key stored in the trie.
// The value bytes must not be modified by the caller.
-// If a node was not found in the database, a MissingNodeError is returned.
-func (t *Trie) TryGet(key []byte) ([]byte, error) {
- value, newroot, didResolve, err := t.tryGet(t.root, keybytesToHex(key), 0)
+//
+// If the requested node is not present in trie, no error will be returned.
+// If the trie is corrupted, a MissingNodeError is returned.
+func (t *Trie) Get(key []byte) ([]byte, error) {
+ value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0)
if err == nil && didResolve {
t.root = newroot
}
return value, err
}
-func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) {
+func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) {
switch n := (origNode).(type) {
case nil:
return nil, nil, false, nil
@@ -137,14 +139,14 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
// key not found in trie
return nil, n, false, nil
}
- value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key))
+ value, newnode, didResolve, err = t.get(n.Val, key, pos+len(n.Key))
if err == nil && didResolve {
n = n.copy()
n.Val = newnode
}
return value, n, didResolve, err
case *fullNode:
- value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1)
+ value, newnode, didResolve, err = t.get(n.Children[key[pos]], key, pos+1)
if err == nil && didResolve {
n = n.copy()
n.Children[key[pos]] = newnode
@@ -155,17 +157,30 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
if err != nil {
return nil, n, true, err
}
- value, newnode, _, err := t.tryGet(child, key, pos)
+ value, newnode, _, err := t.get(child, key, pos)
return value, newnode, true, err
default:
panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
}
}
-// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not
-// possible to use keybyte-encoding as the path might contain odd nibbles.
-func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) {
- item, newroot, resolved, err := t.tryGetNode(t.root, compactToHex(path), 0)
+// MustGetNode is a wrapper of GetNode and will omit any encountered error but
+// just print out an error message.
+func (t *Trie) MustGetNode(path []byte) ([]byte, int) {
+ item, resolved, err := t.GetNode(path)
+ if err != nil {
+ log.Error("Unhandled trie error in Trie.GetNode", "err", err)
+ }
+ return item, resolved
+}
+
+// GetNode retrieves a trie node by compact-encoded path. It is not possible
+// to use keybyte-encoding as the path might contain odd nibbles.
+//
+// If the requested node is not present in trie, no error will be returned.
+// If the trie is corrupted, a MissingNodeError is returned.
+func (t *Trie) GetNode(path []byte) ([]byte, int, error) {
+ item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0)
if err != nil {
return nil, resolved, err
}
@@ -175,10 +190,10 @@ func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) {
if item == nil {
return nil, resolved, nil
}
- return item, resolved, err
+ return item, resolved, nil
}
-func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
+func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
// If non-existent path requested, abort
if origNode == nil {
return nil, nil, 0, nil
@@ -211,7 +226,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
// Path branches off from short node
return nil, n, 0, nil
}
- item, newnode, resolved, err = t.tryGetNode(n.Val, path, pos+len(n.Key))
+ item, newnode, resolved, err = t.getNode(n.Val, path, pos+len(n.Key))
if err == nil && resolved > 0 {
n = n.copy()
n.Val = newnode
@@ -219,7 +234,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
return item, n, resolved, err
case *fullNode:
- item, newnode, resolved, err = t.tryGetNode(n.Children[path[pos]], path, pos+1)
+ item, newnode, resolved, err = t.getNode(n.Children[path[pos]], path, pos+1)
if err == nil && resolved > 0 {
n = n.copy()
n.Children[path[pos]] = newnode
@@ -231,7 +246,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
if err != nil {
return nil, n, 1, err
}
- item, newnode, resolved, err := t.tryGetNode(child, path, pos)
+ item, newnode, resolved, err := t.getNode(child, path, pos)
return item, newnode, resolved + 1, err
default:
@@ -239,33 +254,28 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
}
}
+// MustUpdate is a wrapper of Update and will omit any encountered error but
+// just print out an error message.
+func (t *Trie) MustUpdate(key, value []byte) {
+ if err := t.Update(key, value); err != nil {
+ log.Error("Unhandled trie error in Trie.Update", "err", err)
+ }
+}
+
// Update associates key with value in the trie. Subsequent calls to
// Get will return value. If value has length zero, any existing value
// is deleted from the trie and calls to Get will return nil.
//
// The value bytes must not be modified by the caller while they are
// stored in the trie.
-func (t *Trie) Update(key, value []byte) {
- if err := t.TryUpdate(key, value); err != nil {
- log.Error("Unhandled trie error in Trie.Update", "err", err)
- }
+//
+// If the requested node is not present in trie, no error will be returned.
+// If the trie is corrupted, a MissingNodeError is returned.
+func (t *Trie) Update(key, value []byte) error {
+ return t.update(key, value)
}
-// TryUpdate associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// If a node was not found in the database, a MissingNodeError is returned.
-func (t *Trie) TryUpdate(key, value []byte) error {
- return t.tryUpdate(key, value)
-}
-
-// tryUpdate expects an RLP-encoded value and performs the core function
-// for TryUpdate and TryUpdateAccount.
-func (t *Trie) tryUpdate(key, value []byte) error {
+func (t *Trie) update(key, value []byte) error {
t.unhashed++
k := keybytesToHex(key)
if len(value) != 0 {
@@ -363,16 +373,19 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
}
}
-// Delete removes any existing value for key from the trie.
-func (t *Trie) Delete(key []byte) {
- if err := t.TryDelete(key); err != nil {
+// MustDelete is a wrapper of Delete and will omit any encountered error but
+// just print out an error message.
+func (t *Trie) MustDelete(key []byte) {
+ if err := t.Delete(key); err != nil {
log.Error("Unhandled trie error in Trie.Delete", "err", err)
}
}
-// TryDelete removes any existing value for key from the trie.
-// If a node was not found in the database, a MissingNodeError is returned.
-func (t *Trie) TryDelete(key []byte) error {
+// Delete removes any existing value for key from the trie.
+//
+// If the requested node is not present in trie, no error will be returned.
+// If the trie is corrupted, a MissingNodeError is returned.
+func (t *Trie) Delete(key []byte) error {
t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 089bb44a9..82ead8b44 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -56,8 +56,8 @@ func TestNull(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
key := make([]byte, 32)
value := []byte("test")
- trie.Update(key, value)
- if !bytes.Equal(trie.Get(key), value) {
+ trie.MustUpdate(key, value)
+ if !bytes.Equal(trie.MustGet(key), value) {
t.Fatal("wrong value")
}
}
@@ -90,27 +90,27 @@ func testMissingNode(t *testing.T, memonly bool) {
}
trie, _ = New(TrieID(root), triedb)
- _, err := trie.TryGet([]byte("120000"))
+ _, err := trie.Get([]byte("120000"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- _, err = trie.TryGet([]byte("120099"))
+ _, err = trie.Get([]byte("120099"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- _, err = trie.TryGet([]byte("123456"))
+ _, err = trie.Get([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
+ err = trie.Update([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- err = trie.TryDelete([]byte("123456"))
+ err = trie.Delete([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
@@ -123,27 +123,27 @@ func testMissingNode(t *testing.T, memonly bool) {
}
trie, _ = New(TrieID(root), triedb)
- _, err = trie.TryGet([]byte("120000"))
+ _, err = trie.Get([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- _, err = trie.TryGet([]byte("120099"))
+ _, err = trie.Get([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- _, err = trie.TryGet([]byte("123456"))
+ _, err = trie.Get([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
+ err = trie.Update([]byte("120099"), []byte("zxcv"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
trie, _ = New(TrieID(root), triedb)
- err = trie.TryDelete([]byte("123456"))
+ err = trie.Delete([]byte("123456"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
@@ -311,8 +311,8 @@ func TestReplication(t *testing.T) {
func TestLargeValue(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
- trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
+ trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99})
+ trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash()
}
@@ -460,13 +460,13 @@ func runRandTest(rt randTest) bool {
switch step.op {
case opUpdate:
- tr.Update(step.key, step.value)
+ tr.MustUpdate(step.key, step.value)
values[string(step.key)] = string(step.value)
case opDelete:
- tr.Delete(step.key)
+ tr.MustDelete(step.key)
delete(values, string(step.key))
case opGet:
- v := tr.Get(step.key)
+ v := tr.MustGet(step.key)
want := values[string(step.key)]
if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
@@ -509,7 +509,7 @@ func runRandTest(rt randTest) bool {
checktr := NewEmpty(triedb)
it := NewIterator(tr.NodeIterator(nil))
for it.Next() {
- checktr.Update(it.Key, it.Value)
+ checktr.MustUpdate(it.Key, it.Value)
}
if tr.Hash() != checktr.Hash() {
rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
@@ -595,13 +595,13 @@ func benchGet(b *testing.B) {
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
binary.LittleEndian.PutUint64(k, uint64(i))
- trie.Update(k, k)
+ trie.MustUpdate(k, k)
}
binary.LittleEndian.PutUint64(k, benchElemCount/2)
b.ResetTimer()
for i := 0; i < b.N; i++ {
- trie.Get(k)
+ trie.MustGet(k)
}
b.StopTimer()
}
@@ -612,7 +612,7 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
e.PutUint64(k, uint64(i))
- trie.Update(k, k)
+ trie.MustUpdate(k, k)
}
return trie
}
@@ -640,11 +640,11 @@ func BenchmarkHash(b *testing.B) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
i := 0
for ; i < len(addresses)/2; i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
trie.Hash()
for ; i < len(addresses); i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
b.ResetTimer()
b.ReportAllocs()
@@ -670,7 +670,7 @@ func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
addresses, accounts := makeAccounts(b.N)
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Insert the accounts into the trie and hash it
trie.Hash()
@@ -683,22 +683,22 @@ func TestTinyTrie(t *testing.T) {
// Create a realistic account trie to hash
_, accounts := makeAccounts(5)
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
+ trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
t.Errorf("1: got %x, exp %x", root, exp)
}
- trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
+ trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
if exp, root := common.HexToHash("ec63b967e98a5720e7f720482151963982890d82c9093c0d486b7eb8883a66b1"), trie.Hash(); exp != root {
t.Errorf("2: got %x, exp %x", root, exp)
}
- trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
+ trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp)
}
checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
it := NewIterator(trie.NodeIterator(nil))
for it.Next() {
- checktr.Update(it.Key, it.Value)
+ checktr.MustUpdate(it.Key, it.Value)
}
if troot, itroot := trie.Hash(), checktr.Hash(); troot != itroot {
t.Fatalf("hash mismatch in opItercheckhash, trie: %x, check: %x", troot, itroot)
@@ -710,7 +710,7 @@ func TestCommitAfterHash(t *testing.T) {
addresses, accounts := makeAccounts(1000)
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Insert the accounts into the trie and hash it
trie.Hash()
@@ -820,7 +820,7 @@ func TestCommitSequence(t *testing.T) {
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
root, nodes := trie.Commit(false)
@@ -861,7 +861,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
}
prng.Read(key)
prng.Read(val)
- trie.Update(key, val)
+ trie.MustUpdate(key, val)
}
// Flush trie -> database
root, nodes := trie.Commit(false)
@@ -899,8 +899,8 @@ func TestCommitSequenceStackTrie(t *testing.T) {
val = make([]byte, 1+prng.Intn(1024))
}
prng.Read(val)
- trie.TryUpdate(key, val)
- stTrie.TryUpdate(key, val)
+ trie.Update(key, val)
+ stTrie.Update(key, val)
}
// Flush trie -> database
root, nodes := trie.Commit(false)
@@ -948,8 +948,8 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
// Add a single small-element to the trie(s)
key := make([]byte, 5)
key[0] = 1
- trie.TryUpdate(key, []byte{0x1})
- stTrie.TryUpdate(key, []byte{0x1})
+ trie.Update(key, []byte{0x1})
+ stTrie.Update(key, []byte{0x1})
// Flush trie -> database
root, nodes := trie.Commit(false)
// Flush memdb -> disk (sponge)
@@ -1017,7 +1017,7 @@ func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byt
b.ReportAllocs()
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Insert the accounts into the trie and hash it
b.StartTimer()
@@ -1068,7 +1068,7 @@ func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accou
b.ReportAllocs()
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Insert the accounts into the trie and hash it
trie.Hash()
@@ -1121,7 +1121,7 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
triedb := NewDatabase(rawdb.NewMemoryDatabase())
trie := NewEmpty(triedb)
for i := 0; i < len(addresses); i++ {
- trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
_, nodes := trie.Commit(false)
@@ -1132,15 +1132,15 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
}
func getString(trie *Trie, k string) []byte {
- return trie.Get([]byte(k))
+ return trie.MustGet([]byte(k))
}
func updateString(trie *Trie, k, v string) {
- trie.Update([]byte(k), []byte(v))
+ trie.MustUpdate([]byte(k), []byte(v))
}
func deleteString(trie *Trie, k string) {
- trie.Delete([]byte(k))
+ trie.MustDelete([]byte(k))
}
func TestDecodeNode(t *testing.T) {