Merge commit '3f907d6a6' into merge/geth-v1.13.4
Commit 5d2d49229b
build/checksums.txt

@@ -1,25 +1,30 @@
 # This file contains sha256 checksums of optional build dependencies.
 
+# version:spec-tests 1.0.5
 # https://github.com/ethereum/execution-spec-tests/releases
-24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz
+# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.5/
+d4fd06a0e5f94beb970f3c68374b38ef9de82d4be77517d326bcf739c3cbf3a2 fixtures_develop.tar.gz
 
+# version:golang 1.21.3
 # https://go.dev/dl/
-bfa36bf75e9a1e9cbbdb9abcf9d1707e479bd3a07880a8ae3564caee5711cb99 go1.21.1.src.tar.gz
-809f5b0ef4f7dcdd5f51e9630a5b2e5a1006f22a047126d61560cdc365678a19 go1.21.1.darwin-amd64.tar.gz
-ffd40391a1e995855488b008ad9326ff8c2e81803a6e80894401003bae47fcf1 go1.21.1.darwin-arm64.tar.gz
-9919a9a4dc82371aba3da5b7c830bcb6249fc1502cd26d959eb340a60e41ee01 go1.21.1.freebsd-386.tar.gz
-2571f10f6047e04d87c1f5986a05e5e8f7b511faf98803ef12b66d563845d2a1 go1.21.1.freebsd-amd64.tar.gz
-b93850666cdadbd696a986cf7b03111fe99db8c34a9aaa113d7c96d0081e1901 go1.21.1.linux-386.tar.gz
-b3075ae1ce5dab85f89bc7905d1632de23ca196bd8336afd93fa97434cfa55ae go1.21.1.linux-amd64.tar.gz
-7da1a3936a928fd0b2602ed4f3ef535b8cd1990f1503b8d3e1acc0fa0759c967 go1.21.1.linux-arm64.tar.gz
-f3716a43f59ae69999841d6007b42c9e286e8d8ce470656fb3e70d7be2d7ca85 go1.21.1.linux-armv6l.tar.gz
-eddf018206f8a5589bda75252b72716d26611efebabdca5d0083ec15e9e41ab7 go1.21.1.linux-ppc64le.tar.gz
-a83b3e8eb4dbf76294e773055eb51397510ff4d612a247bad9903560267bba6d go1.21.1.linux-s390x.tar.gz
-170256c820f466f29d64876f25f4dfa4029ed9902a0a9095d8bd603aecf4d83b go1.21.1.windows-386.zip
-10a4f5b63215d11d1770453733dbcbf024f3f74872f84e28d7ea59f0250316c6 go1.21.1.windows-amd64.zip
-41135ce6e0ced4bc1e459cb96bd4090c9dc2062e24179c3f337d855af9b560ef go1.21.1.windows-arm64.zip
+186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21.3.src.tar.gz
+27014fc69e301d7588a169ca239b3cc609f0aa1abf38528bf0d20d3b259211eb go1.21.3.darwin-amd64.tar.gz
+65302a7a9f7a4834932b3a7a14cb8be51beddda757b567a2f9e0cbd0d7b5a6ab go1.21.3.darwin-arm64.tar.gz
+8e0cd2f66cf1bde9d07b4aee01e3d7c3cfdd14e20650488e1683da4b8492594a go1.21.3.freebsd-386.tar.gz
+6e74f65f586e93d1f3947894766f69e9b2ebda488592a09df61f36f06bfe58a8 go1.21.3.freebsd-amd64.tar.gz
+fb209fd070db500a84291c5a95251cceeb1723e8f6142de9baca5af70a927c0e go1.21.3.linux-386.tar.gz
+1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8 go1.21.3.linux-amd64.tar.gz
+fc90fa48ae97ba6368eecb914343590bbb61b388089510d0c56c2dde52987ef3 go1.21.3.linux-arm64.tar.gz
+a1ddcaaf0821a12a800884c14cb4268ce1c1f5a0301e9060646f1e15e611c6c7 go1.21.3.linux-armv6l.tar.gz
+3b0e10a3704f164a6e85e0377728ec5fd21524fabe4c925610e34076586d5826 go1.21.3.linux-ppc64le.tar.gz
+4c78e2e6f4c684a3d5a9bdc97202729053f44eb7be188206f0627ef3e18716b6 go1.21.3.linux-s390x.tar.gz
+e36737f4f2fadb4d2f919ec4ce517133a56e06064cca6e82fc883bb000c4d56c go1.21.3.windows-386.zip
+27c8daf157493f288d42a6f38debc6a2cb391f6543139eba9152fceca0be2a10 go1.21.3.windows-amd64.zip
+bfb7a5c56f9ded07d8ae0e0b3702ac07b65e68fa8f33da24ed6df4ce01fe2c5c go1.21.3.windows-arm64.zip
 
-# https://github.com/golangci/golangci-lint/releases
+# version:golangci 1.51.1
+# https://github.com/golangci/golangci-lint/releases/
+# https://github.com/golangci/golangci-lint/releases/download/v1.51.1/
 fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
 e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz
@@ -48,4 +53,12 @@ bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-
 cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip
 
 # This is the builder on PPA that will build Go itself (inception-y), don't modify!
+#
+# This version is fine to be old and full of security holes, we just use it
+# to build the latest Go. Don't change it. If it ever becomes insufficient,
+# we need to switch over to a recursive builder to jump across supported
+# versions.
+#
+# version:ppa-builder 1.19.6
+# https://go.dev/dl/
 d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
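The `# version:<component> <version>` annotations introduced above are the hinge of this whole build change: ci.go stops hard-coding `dlgoVersion`, `gobootVersion`, and `executionSpecTestsVersion` and instead asks `build.Version(csdb, ...)` to read them from this file, so a dependency bump touches checksums.txt alone. The internal/build side of the change is not part of this diff; a minimal sketch of what such a lookup could look like, assuming `ChecksumDB` simply retains the file's raw lines:

```go
package build

import (
	"fmt"
	"strings"
)

// ChecksumDB is assumed here to hold the raw lines of build/checksums.txt;
// the real type lives in internal/build and may differ.
type ChecksumDB struct {
	allChecksums []string
}

// Version scans for a "# version:<component> <version>" annotation and
// returns the recorded version string.
func Version(csdb *ChecksumDB, component string) (string, error) {
	for _, line := range csdb.allChecksums {
		if !strings.HasPrefix(line, "# version:") {
			continue
		}
		parts := strings.Fields(strings.TrimPrefix(line, "# version:"))
		if len(parts) == 2 && parts[0] == component {
			return parts[1], nil
		}
	}
	return "", fmt.Errorf("no version entry for %q", component)
}
```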
build/ci.go (48 changed lines)
@@ -136,23 +136,6 @@ var (
         "golang-go": "/usr/lib/go",
     }
 
-    // This is the version of Go that will be downloaded by
-    //
-    // go run ci.go install -dlgo
-    dlgoVersion = "1.21.1"
-
-    // This is the version of Go that will be used to bootstrap the PPA builder.
-    //
-    // This version is fine to be old and full of security holes, we just use it
-    // to build the latest Go. Don't change it. If it ever becomes insufficient,
-    // we need to switch over to a recursive builder to jumpt across supported
-    // versions.
-    gobootVersion = "1.19.6"
-
-    // This is the version of execution-spec-tests that we are using.
-    // When updating, you must also update build/checksums.txt.
-    executionSpecTestsVersion = "1.0.2"
-
     // This is where the tests should be unpacked.
     executionSpecTestsDir = "tests/spec-tests"
 )
@@ -192,6 +175,8 @@ func main() {
         doWindowsInstaller(os.Args[2:])
     case "purge":
         doPurge(os.Args[2:])
+    case "sanitycheck":
+        doSanityCheck()
     default:
         log.Fatal("unknown command ", os.Args[1])
     }
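Judging by the invocation style in the deleted comment above (`go run ci.go install -dlgo`), the new branch would presumably be exercised in CI as `go run build/ci.go sanitycheck`; `doSanityCheck` itself is added at the bottom of this file's diff and just downloads and verifies every entry of build/checksums.txt.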
@@ -213,9 +198,8 @@ func doInstall(cmdline []string) {
     tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
     if *dlgo {
         csdb := build.MustLoadChecksums("build/checksums.txt")
-        tc.Root = build.DownloadGo(csdb, dlgoVersion)
+        tc.Root = build.DownloadGo(csdb)
     }
-
     // Disable CLI markdown doc generation in release builds.
     buildTags := []string{"urfave_cli_no_docs"}
 
@@ -312,7 +296,7 @@ func doTest(cmdline []string) {
     // Configure the toolchain.
     tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
     if *dlgo {
-        tc.Root = build.DownloadGo(csdb, dlgoVersion)
+        tc.Root = build.DownloadGo(csdb)
     }
     gotest := tc.Go("test")
 
@@ -345,8 +329,12 @@ func doTest(cmdline []string) {
 
 // downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures.
 func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
+    executionSpecTestsVersion, err := build.Version(csdb, "spec-tests")
+    if err != nil {
+        log.Fatal(err)
+    }
     ext := ".tar.gz"
-    base := "fixtures" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
+    base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
     url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext)
     archivePath := filepath.Join(cachedir, base+ext)
     if err := csdb.DownloadFile(url, archivePath); err != nil {
@@ -377,9 +365,11 @@ func doLint(cmdline []string) {
 
 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-    const version = "1.51.1"
-
     csdb := build.MustLoadChecksums("build/checksums.txt")
+    version, err := build.Version(csdb, "golangci")
+    if err != nil {
+        log.Fatal(err)
+    }
     arch := runtime.GOARCH
     ext := ".tar.gz"
 
@@ -761,6 +751,10 @@ func doDebianSource(cmdline []string) {
 // to bootstrap the builder Go.
 func downloadGoBootstrapSources(cachedir string) string {
     csdb := build.MustLoadChecksums("build/checksums.txt")
+    gobootVersion, err := build.Version(csdb, "ppa-builder")
+    if err != nil {
+        log.Fatal(err)
+    }
     file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
     url := "https://dl.google.com/go/" + file
     dst := filepath.Join(cachedir, file)
@@ -773,6 +767,10 @@ func downloadGoBootstrapSources(cachedir string) string {
 // downloadGoSources downloads the Go source tarball.
 func downloadGoSources(cachedir string) string {
     csdb := build.MustLoadChecksums("build/checksums.txt")
+    dlgoVersion, err := build.Version(csdb, "golang")
+    if err != nil {
+        log.Fatal(err)
+    }
     file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion)
     url := "https://dl.google.com/go/" + file
     dst := filepath.Join(cachedir, file)
@@ -1099,3 +1097,7 @@ func doPurge(cmdline []string) {
         log.Fatal(err)
     }
 }
+
+func doSanityCheck() {
+    build.DownloadAndVerifyChecksums(build.MustLoadChecksums("build/checksums.txt"))
+}
@@ -1206,7 +1206,7 @@ func GenDoc(ctx *cli.Context) error {
                 URL: accounts.URL{Path: ".. ignored .."},
             },
             {
-                Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
+                Address: common.MaxAddress,
             },
         }})
     }
@@ -114,7 +114,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
     records = lrecords
 
     log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name))
-    entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
+    entries, _, err := c.ListDNSRecords(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), cloudflare.ListDNSRecordsParams{Type: "TXT"})
     if err != nil {
         return err
     }
@@ -141,14 +141,25 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
         if path != name {
             ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
         }
-        record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
-        _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
+        record := cloudflare.CreateDNSRecordParams{Type: "TXT", Name: path, Content: val, TTL: ttl}
+        _, err = c.CreateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), record)
     } else if old.Content != val {
         // Entry already exists, only change its content.
         log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
         updated++
-        old.Content = val
-        err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
+        record := cloudflare.UpdateDNSRecordParams{
+            Type:     old.Type,
+            Name:     old.Name,
+            Content:  val,
+            Data:     old.Data,
+            ID:       old.ID,
+            Priority: old.Priority,
+            TTL:      old.TTL,
+            Proxied:  old.Proxied,
+            Tags:     old.Tags,
+        }
+        _, err = c.UpdateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), record)
     } else {
         skipped++
         log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
@@ -168,7 +179,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
             // Stale entry, nuke it.
             log.Debug(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
             deleted++
-            if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
+            if err := c.DeleteDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), entry.ID); err != nil {
                 return fmt.Errorf("failed to delete %s: %v", path, err)
             }
         }
     }
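All four rewritten call sites follow one migration pattern: the newer cloudflare-go API identifies the zone through a `*ResourceContainer` built by `cloudflare.ZoneIdentifier(zoneID)` and takes a purpose-specific params struct per operation, where the old API took a bare zone-ID string and a mutable `DNSRecord`. A self-contained sketch of that calling convention, restricted to the methods visible in this hunk (the exact params fields, e.g. `Name` on `ListDNSRecordsParams`, are assumptions about the library version):

```go
package main

import (
	"context"

	"github.com/cloudflare/cloudflare-go"
)

// refreshTXT upserts a TXT record in the params-struct style used above.
func refreshTXT(api *cloudflare.API, zoneID, name, val string) error {
	zone := cloudflare.ZoneIdentifier(zoneID) // *ResourceContainer wrapping the zone ID
	records, _, err := api.ListDNSRecords(context.Background(), zone,
		cloudflare.ListDNSRecordsParams{Type: "TXT", Name: name})
	if err != nil {
		return err
	}
	if len(records) == 0 {
		// No record yet: create one (TTL 1 means "automatic" on Cloudflare).
		_, err = api.CreateDNSRecord(context.Background(), zone,
			cloudflare.CreateDNSRecordParams{Type: "TXT", Name: name, Content: val, TTL: 1})
		return err
	}
	// Record exists: issue an update carrying the old record's identity.
	old := records[0]
	_, err = api.UpdateDNSRecord(context.Background(), zone, cloudflare.UpdateDNSRecordParams{
		ID:      old.ID,
		Type:    old.Type,
		Name:    old.Name,
		Content: val,
		TTL:     old.TTL,
	})
	return err
}
```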
@@ -58,7 +58,7 @@ type accRangeTest struct {
 func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
     var (
         root = s.chain.RootAt(999)
-        ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+        ffHash = common.MaxHash
         zero = common.Hash{}
         firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
         firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
@@ -125,7 +125,7 @@ type stRangesTest struct {
 // TestSnapGetStorageRanges various forms of GetStorageRanges requests.
 func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
     var (
-        ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+        ffHash = common.MaxHash
         zero = common.Hash{}
         firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
         secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
@@ -536,11 +536,7 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
     }
     proofdb := nodes.Set()
 
-    var end []byte
-    if len(keys) > 0 {
-        end = keys[len(keys)-1]
-    }
-    _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
+    _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], keys, accounts, proofdb)
     return err
 }
 
@@ -21,6 +21,7 @@ import (
     "errors"
     "fmt"
     "os"
+    "sort"
 
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/vm"
@@ -60,9 +61,16 @@ func blockTestCmd(ctx *cli.Context) error {
     if err = json.Unmarshal(src, &tests); err != nil {
         return err
     }
-    for i, test := range tests {
+    // run them in order
+    var keys []string
+    for key := range tests {
+        keys = append(keys, key)
+    }
+    sort.Strings(keys)
+    for _, name := range keys {
+        test := tests[name]
         if err := test.Run(false, rawdb.HashScheme, tracer); err != nil {
-            return fmt.Errorf("test %v: %w", i, err)
+            return fmt.Errorf("test %v: %w", name, err)
         }
     }
     return nil
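The loop rewrite above fixes reproducibility rather than style: Go randomizes map iteration order, so a failing block test used to surface under an unstable index `i`. Collecting the keys, sorting them, and indexing back into the map is the standard idiom; a minimal standalone illustration:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	tests := map[string]int{"c": 3, "a": 1, "b": 2}

	// Ranging over the map directly would visit entries in a different
	// order on every run; sorting the keys first makes the order stable.
	var keys []string
	for key := range tests {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, name := range keys {
		fmt.Println(name, tests[name]) // always: a 1, b 2, c 3
	}
}
```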
cmd/evm/testdata/8/readme.md (vendored, 2 changed lines)

@@ -32,7 +32,7 @@ dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json
 {"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memSize":0,"stack":["0x3"],"depth":1,"refund":0,"opName":"SLOAD"}
 ```
 
-Simlarly, we can provide the input transactions via `stdin` instead of as file:
+Similarly, we can provide the input transactions via `stdin` instead of as file:
 
 ```
 $ dir=./testdata/8 \
@@ -969,17 +969,12 @@ var (
         DataDirFlag,
         AncientFlag,
         RemoteDBFlag,
+        DBEngineFlag,
         StateSchemeFlag,
         HttpHeaderFlag,
     }
 )
 
-func init() {
-    if rawdb.PebbleEnabled {
-        DatabaseFlags = append(DatabaseFlags, DBEngineFlag)
-    }
-}
-
 // MakeDataDir retrieves the currently requested data directory, terminating
 // if none (or the empty string) is specified. If the node is starting a testnet,
 // then a subdirectory of the specified datadir will be used.
@@ -44,6 +44,12 @@ const (
 var (
     hashT    = reflect.TypeOf(Hash{})
     addressT = reflect.TypeOf(Address{})
+
+    // MaxAddress represents the maximum possible address value.
+    MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
+
+    // MaxHash represents the maximum possible hash value.
+    MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
 )
 
 // Hash represents the 32 byte Keccak256 hash of arbitrary data.
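These two sentinels are what the rest of this merge substitutes for the repeated `0xff…ff` literals in the clef, devp2p, and snap-sync hunks. A quick standalone check of the equivalence (it needs a go-ethereum version that already contains this change):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// MaxHash is byte-for-byte the value the old inline literals spelled out.
	literal := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	fmt.Println(common.MaxHash == literal) // true

	// Likewise MaxAddress, used e.g. as an upper bound in range queries.
	fmt.Println(common.MaxAddress == common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")) // true
}
```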
@@ -30,6 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/ethdb/leveldb"
     "github.com/ethereum/go-ethereum/ethdb/memorydb"
+    "github.com/ethereum/go-ethereum/ethdb/pebble"
     "github.com/ethereum/go-ethereum/log"
     "github.com/olekukonko/tablewriter"
 )
@@ -321,6 +322,16 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
     return NewDatabase(db), nil
 }
 
+// NewPebbleDBDatabase creates a persistent key-value database without a freezer
+// moving immutable chain segments into cold storage.
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
+    db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
+    if err != nil {
+        return nil, err
+    }
+    return NewDatabase(db), nil
+}
+
 const (
     dbPebble  = "pebble"
     dbLeveldb = "leveldb"
@@ -375,26 +386,16 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
         return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
     }
     if o.Type == dbPebble || existingDb == dbPebble {
-        if PebbleEnabled {
         log.Info("Using pebble as the backing database")
         return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
-        } else {
-            return nil, errors.New("db.engine 'pebble' not supported on this platform")
-        }
     }
     if o.Type == dbLeveldb || existingDb == dbLeveldb {
         log.Info("Using leveldb as the backing database")
         return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
     }
-    // No pre-existing database, no user-requested one either. Default to Pebble
-    // on supported platforms and LevelDB on anything else.
-    if PebbleEnabled {
+    // No pre-existing database, no user-requested one either. Default to Pebble.
     log.Info("Defaulting to pebble as the backing database")
     return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
-    } else {
-        log.Info("Defaulting to leveldb as the backing database")
-        return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
-    }
 }
 
 // Open opens both a disk-based key-value database such as leveldb or pebble, but also
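Together with the two build-tagged files deleted just below, this removes the `PebbleEnabled` flag entirely: pebble now compiles on every platform geth supports, so `NewPebbleDBDatabase` can live unconditionally in database.go and the `db.engine` selection loses its platform branch. A hedged usage sketch of the constructor (the cache/handle sizes and metrics namespace here are illustrative, not values from the diff):

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// file, cache (MB), handles, namespace, readonly, ephemeral — matching
	// the signature shown in the hunk above.
	db, err := rawdb.NewPebbleDBDatabase("./chaindata", 512, 1024, "eth/db/chaindata/", false, false)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}
}
```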
@@ -1,37 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
-
-//go:build (arm64 || amd64) && !openbsd
-
-package rawdb
-
-import (
-    "github.com/ethereum/go-ethereum/ethdb"
-    "github.com/ethereum/go-ethereum/ethdb/pebble"
-)
-
-// Pebble is unsupported on 32bit architecture
-const PebbleEnabled = true
-
-// NewPebbleDBDatabase creates a persistent key-value database without a freezer
-// moving immutable chain segments into cold storage.
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
-    db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
-    if err != nil {
-        return nil, err
-    }
-    return NewDatabase(db), nil
-}
@@ -1,34 +0,0 @@
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-//go:build !((arm64 || amd64) && !openbsd)
-
-package rawdb
-
-import (
-    "errors"
-
-    "github.com/ethereum/go-ethereum/ethdb"
-)
-
-// Pebble is unsuported on 32bit architecture
-const PebbleEnabled = false
-
-// NewPebbleDBDatabase creates a persistent key-value database without a freezer
-// moving immutable chain segments into cold storage.
-func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
-    return nil, errors.New("pebble is not supported on this platform")
-}
@@ -247,11 +247,6 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
         ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
         return nil, errMissingTrie
     }
-    // Firstly find out the key of last iterated element.
-    var last []byte
-    if len(keys) > 0 {
-        last = keys[len(keys)-1]
-    }
     // Generate the Merkle proofs for the first and last element
     if origin == nil {
         origin = common.Hash{}.Bytes()
@@ -266,9 +261,9 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
             tr: tr,
         }, nil
     }
-    if last != nil {
-        if err := tr.Prove(last, proof); err != nil {
-            log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
+    if len(keys) > 0 {
+        if err := tr.Prove(keys[len(keys)-1], proof); err != nil {
+            log.Debug("Failed to prove range", "kind", kind, "last", keys[len(keys)-1], "err", err)
             return &proofResult{
                 keys: keys,
                 vals: vals,
@@ -280,7 +275,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
     }
     // Verify the snapshot segment with range prover, ensure that all flat states
     // in this range correspond to merkle trie.
-    cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
+    cont, err := trie.VerifyRangeProof(root, origin, keys, vals, proof)
     return &proofResult{
         keys: keys,
         vals: vals,
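This is the same `trie.VerifyRangeProof` signature change that recurs in the devp2p test and snap-sync hunks of this merge: the explicit `last` argument is gone and the verifier derives the right edge of the proven range from `keys[len(keys)-1]` itself, which is why the `var end []byte` scaffolding vanishes at every call site. A compile-oriented sketch of the new call shape (the `ethdb.KeyValueReader` type of the proof parameter is an assumption about this geth generation):

```go
package rangeproof

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// Old call shape, kept as a comment for contrast — every caller first had to
// compute end = keys[len(keys)-1]:
//
//	cont, err := trie.VerifyRangeProof(root, origin, end, keys, vals, proofdb)

// verify wraps the post-change call: the range end is implied by keys.
func verify(root common.Hash, origin []byte, keys, vals [][]byte, proofdb ethdb.KeyValueReader) (bool, error) {
	return trie.VerifyRangeProof(root, origin, keys, vals, proofdb)
}
```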
@@ -132,7 +132,7 @@ func TestStateProcessorErrors(t *testing.T) {
     )
 
     defer blockchain.Stop()
-    bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
+    bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes())
     tooBigNumber := new(big.Int).Set(bigNumber)
     tooBigNumber.Add(tooBigNumber, common.Big1)
     for i, tt := range []struct {
@@ -61,7 +61,7 @@ const (
     maxTxUnderpricedSetSize = 32768
 
     // maxTxUnderpricedTimeout is the max time a transaction should be stuck in the underpriced set.
-    maxTxUnderpricedTimeout = int64(5 * time.Minute)
+    maxTxUnderpricedTimeout = 5 * time.Minute
 
     // txArriveTimeout is the time allowance before an announced transaction is
     // explicitly requested.
@@ -167,7 +167,7 @@ type TxFetcher struct {
     drop chan *txDrop
     quit chan struct{}
 
-    underpriced *lru.Cache[common.Hash, int64] // Transactions discarded as too cheap (don't re-fetch)
+    underpriced *lru.Cache[common.Hash, time.Time] // Transactions discarded as too cheap (don't re-fetch)
 
     // Stage 1: Waiting lists for newly discovered transactions that might be
     // broadcast without needing explicit request/reply round trips.
@@ -222,7 +222,7 @@ func NewTxFetcherForTests(
     fetching: make(map[common.Hash]string),
     requests: make(map[string]*txRequest),
     alternates: make(map[common.Hash]map[string]struct{}),
-    underpriced: lru.NewCache[common.Hash, int64](maxTxUnderpricedSetSize),
+    underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize),
     hasTx: hasTx,
     addTxs: addTxs,
    fetchTxs: fetchTxs,
@@ -284,7 +284,7 @@ func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []c
 // isKnownUnderpriced reports whether a transaction hash was recently found to be underpriced.
 func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool {
     prevTime, ok := f.underpriced.Peek(hash)
-    if ok && prevTime+maxTxUnderpricedTimeout < time.Now().Unix() {
+    if ok && prevTime.Before(time.Now().Add(-maxTxUnderpricedTimeout)) {
         f.underpriced.Remove(hash)
         return false
     }
@@ -335,7 +335,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
             // Avoid re-request this transaction when we receive another
             // announcement.
             if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) {
-                f.underpriced.Add(batch[j].Hash(), batch[j].Time().Unix())
+                f.underpriced.Add(batch[j].Hash(), batch[j].Time())
             }
             // Track a few interesting failure types
             switch {
@@ -593,8 +593,9 @@ func (f *TxFetcher) loop() {
                     log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
                     f.dropPeer(peer)
                 } else if delivery.metas[i].size != meta.size {
-                    log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
                     if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
+                        log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
+
                         // Normally we should drop a peer considering this is a protocol violation.
                         // However, due to the RLP vs consensus format messyness, allow a few bytes
                         // wiggle-room where we only warn, but don't drop.
@@ -618,8 +619,9 @@ func (f *TxFetcher) loop() {
                     log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
                     f.dropPeer(peer)
                 } else if delivery.metas[i].size != meta.size {
-                    log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
                     if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
+                        log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
+
                         // Normally we should drop a peer considering this is a protocol violation.
                         // However, due to the RLP vs consensus format messyness, allow a few bytes
                         // wiggle-room where we only warn, but don't drop.
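Beyond tidiness, the `int64` → `time.Time` switch removes what looks like a units bug in the old expiry check: `int64(5 * time.Minute)` is a count of nanoseconds (3×10¹¹), but it was added to `time.Now().Unix()` seconds, so underpriced entries could effectively never age out. A standalone demonstration of the two formulations:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The old constant: int64(5 * time.Minute) counts nanoseconds.
	oldTimeout := int64(5 * time.Minute)
	fmt.Println(oldTimeout) // 300000000000

	// Old check: nanoseconds added to Unix seconds, so even an hour-old
	// entry never looked expired.
	prev := time.Now().Add(-time.Hour).Unix()
	fmt.Println(prev+oldTimeout < time.Now().Unix()) // false

	// New check: plain time arithmetic, as in isKnownUnderpriced above.
	stamp := time.Now().Add(-time.Hour)
	fmt.Println(stamp.Before(time.Now().Add(-5 * time.Minute))) // true
}
```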
@@ -1993,3 +1993,38 @@ func containsHash(slice []common.Hash, hash common.Hash) bool {
     }
     return false
 }
+
+// Tests that a transaction is forgotten after the timeout.
+func TestTransactionForgotten(t *testing.T) {
+    fetcher := NewTxFetcher(
+        func(common.Hash) bool { return false },
+        func(txs []*types.Transaction) []error {
+            errs := make([]error, len(txs))
+            for i := 0; i < len(errs); i++ {
+                errs[i] = txpool.ErrUnderpriced
+            }
+            return errs
+        },
+        func(string, []common.Hash) error { return nil },
+        func(string) {},
+    )
+    fetcher.Start()
+    defer fetcher.Stop()
+    // Create one TX which is 5 minutes old, and one which is recent
+    tx1 := types.NewTx(&types.LegacyTx{Nonce: 0})
+    tx1.SetTime(time.Now().Add(-maxTxUnderpricedTimeout - 1*time.Second))
+    tx2 := types.NewTx(&types.LegacyTx{Nonce: 1})
+
+    // Enqueue both in the fetcher. They will be immediately tagged as underpriced
+    if err := fetcher.Enqueue("asdf", []*types.Transaction{tx1, tx2}, false); err != nil {
+        t.Fatal(err)
+    }
+    // isKnownUnderpriced should trigger removal of the first tx (no longer be known underpriced)
+    if fetcher.isKnownUnderpriced(tx1.Hash()) {
+        t.Fatal("transaction should be forgotten by now")
+    }
+    // isKnownUnderpriced should not trigger removal of the second
+    if !fetcher.isKnownUnderpriced(tx2.Hash()) {
+        t.Fatal("transaction should be known underpriced")
+    }
+}
@@ -367,7 +367,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
     if len(req.Origin) > 0 {
         origin, req.Origin = common.BytesToHash(req.Origin), nil
     }
-    var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+    var limit = common.MaxHash
     if len(req.Limit) > 0 {
         limit, req.Limit = common.BytesToHash(req.Limit), nil
     }
@@ -67,7 +67,7 @@ func (r *hashRange) End() common.Hash {
     // If the end overflows (non divisible range), return a shorter interval
     next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
     if overflow {
-        return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+        return common.MaxHash
     }
     return next.SubUint64(next, 1).Bytes32()
 }
@@ -45,7 +45,7 @@ func TestHashRanges(t *testing.T) {
                 common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
                 common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
                 common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+                common.MaxHash,
             },
         },
         // Split a divisible part of the hash range up into 2 chunks
@@ -58,7 +58,7 @@ func TestHashRanges(t *testing.T) {
             },
             ends: []common.Hash{
                 common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+                common.MaxHash,
             },
         },
         // Split the entire hash range into a non divisible 3 chunks
@@ -73,7 +73,7 @@ func TestHashRanges(t *testing.T) {
             ends: []common.Hash{
                 common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
                 common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+                common.MaxHash,
             },
         },
         // Split a part of hash range into a non divisible 3 chunks
@@ -88,7 +88,7 @@ func TestHashRanges(t *testing.T) {
             ends: []common.Hash{
                 common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
                 common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+                common.MaxHash,
             },
         },
         // Split a part of hash range into a non divisible 3 chunks, but with a
@@ -108,7 +108,7 @@ func TestHashRanges(t *testing.T) {
             ends: []common.Hash{
                 common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
                 common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
-                common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+                common.MaxHash,
             },
         },
     }
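The table entries above pin down the splitter's rounding: when 2^256 is not evenly divisible by the chunk count, every chunk but the last gets the rounded-up step, and the final `End()` clamps to `common.MaxHash` on overflow. A toy reproduction of the three-chunk case with the same uint256 operations (assuming step = ceil(2^256/n), which the 0x5555…55 / 0xaaaa…ab ends imply):

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	n := uint64(3)

	// step = ceil(2^256 / n): (2^256 - 1) / 3 divides exactly to 0x5555…55,
	// so adding 1 yields the rounded-up step 0x5555…56.
	step := new(uint256.Int).Div(new(uint256.Int).SetAllOne(), uint256.NewInt(n))
	step.AddUint64(step, 1)

	cur := new(uint256.Int)
	for i := uint64(0); i < n; i++ {
		next, overflow := new(uint256.Int).AddOverflow(cur, step)
		if overflow {
			// Non-divisible remainder: clamp, as hashRange.End does.
			fmt.Printf("chunk %d end: common.MaxHash\n", i)
			break
		}
		end := new(uint256.Int).SubUint64(next, 1)
		fmt.Printf("chunk %d end: %#x\n", i, end.Bytes32())
		cur = next
	}
}
```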
@@ -798,7 +798,7 @@ func (s *Syncer) loadSyncStatus() {
             last := common.BigToHash(new(big.Int).Add(next.Big(), step))
             if i == accountConcurrency-1 {
                 // Make sure we don't overflow if the step is not a proper divisor
-                last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+                last = common.MaxHash
             }
             batch := ethdb.HookedBatch{
                 Batch: s.db.NewBatch(),
@@ -1874,7 +1874,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
         return
     }
     // Some accounts are incomplete, leave as is for the storage and contract
-    // task assigners to pick up and fill.
+    // task assigners to pick up and fill
 }
 
 // processBytecodeResponse integrates an already validated bytecode response
@@ -2401,13 +2401,7 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco
     for i, node := range proof {
         nodes[i] = node
     }
-    proofdb := nodes.Set()
-
-    var end []byte
-    if len(keys) > 0 {
-        end = keys[len(keys)-1]
-    }
-    cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
+    cont, err := trie.VerifyRangeProof(root, req.origin[:], keys, accounts, nodes.Set())
     if err != nil {
         logger.Warn("Account range failed proof", "err", err)
         // Signal this request as failed, and ready for rescheduling
@@ -2624,7 +2618,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
     // the requested data. For storage range queries that means the state being
     // retrieved was either already pruned remotely, or the peer is not yet
     // synced to our head.
-    if len(hashes) == 0 {
+    if len(hashes) == 0 && len(proof) == 0 {
         logger.Debug("Peer rejected storage request")
         s.statelessPeers[peer.ID()] = struct{}{}
         s.lock.Unlock()
@@ -2636,6 +2630,13 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
     // Reconstruct the partial tries from the response and verify them
     var cont bool
 
+    // If a proof was attached while the response is empty, it indicates that the
+    // requested range specified with 'origin' is empty. Construct an empty state
+    // response locally to finalize the range.
+    if len(hashes) == 0 && len(proof) > 0 {
+        hashes = append(hashes, []common.Hash{})
+        slots = append(slots, [][]byte{})
+    }
     for i := 0; i < len(hashes); i++ {
         // Convert the keys and proofs into an internal format
         keys := make([][]byte, len(hashes[i]))
@@ -2652,7 +2653,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
         if len(nodes) == 0 {
             // No proof has been attached, the response must cover the entire key
             // space and hash to the origin root.
-            _, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
+            _, err = trie.VerifyRangeProof(req.roots[i], nil, keys, slots[i], nil)
             if err != nil {
                 s.scheduleRevertStorageRequest(req) // reschedule request
                 logger.Warn("Storage slots failed proof", "err", err)
@@ -2663,11 +2664,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
             // returned data is indeed part of the storage trie
             proofdb := nodes.Set()
 
-            var end []byte
-            if len(keys) > 0 {
-                end = keys[len(keys)-1]
-            }
-            cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
+            cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], keys, slots[i], proofdb)
             if err != nil {
                 s.scheduleRevertStorageRequest(req) // reschedule request
                 logger.Warn("Storage range failed proof", "err", err)
@ -22,6 +22,7 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
mrand "math/rand"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -34,6 +35,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
"github.com/ethereum/go-ethereum/trie/testutil"
|
||||||
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
|
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
|
||||||
"github.com/ethereum/go-ethereum/trie/trienode"
|
"github.com/ethereum/go-ethereum/trie/trienode"
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
@ -253,7 +255,7 @@ func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, orig
|
|||||||
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
|
func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {
|
||||||
var size uint64
|
var size uint64
|
||||||
if limit == (common.Hash{}) {
|
if limit == (common.Hash{}) {
|
||||||
limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
limit = common.MaxHash
|
||||||
}
|
}
|
||||||
for _, entry := range t.accountValues {
|
for _, entry := range t.accountValues {
|
||||||
if size > cap {
|
if size > cap {
|
||||||
@ -318,7 +320,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
|
|||||||
if len(origin) > 0 {
|
if len(origin) > 0 {
|
||||||
originHash = common.BytesToHash(origin)
|
originHash = common.BytesToHash(origin)
|
||||||
}
|
}
|
||||||
var limitHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
var limitHash = common.MaxHash
|
||||||
if len(limit) > 0 {
|
if len(limit) > 0 {
|
||||||
limitHash = common.BytesToHash(limit)
|
limitHash = common.BytesToHash(limit)
|
||||||
}
|
}
|
||||||
@ -762,7 +764,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false)
|
sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false, false)
|
||||||
|
|
||||||
mkSource := func(name string) *testPeer {
|
mkSource := func(name string) *testPeer {
|
||||||
source := newTestPeer(name, t, term)
|
source := newTestPeer(name, t, term)
|
||||||
@ -772,7 +774,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
|
|||||||
source.storageValues = storageElems
|
source.storageValues = storageElems
|
||||||
return source
|
return source
|
||||||
}
|
}
|
||||||
syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
|
syncer := setupSyncer(scheme, mkSource("sourceA"))
|
||||||
done := checkStall(t, term)
|
done := checkStall(t, term)
|
||||||
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
|
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
|
||||||
t.Fatalf("sync failed: %v", err)
|
t.Fatalf("sync failed: %v", err)
|
||||||
@@ -799,7 +801,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
 
 	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -821,7 +823,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
 	}
 
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("full", true, true, true),
 		mkSource("noAccounts", false, true, true),
 		mkSource("noStorage", true, false, true),
@@ -853,7 +855,7 @@ func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
 
 	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -875,7 +877,7 @@ func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
 	}
 
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("full", true, true, true),
 		mkSource("noAccounts", false, true, true),
 		mkSource("noStorage", true, false, true),
@@ -912,7 +914,7 @@ func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
 
 	mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -934,7 +936,7 @@ func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
 	}
 
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("full", true, true, true),
 		mkSource("noAccounts", false, true, true),
 		mkSource("noStorage", true, false, true),
@@ -1215,7 +1217,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true, false)
 
 	mkSource := func(name string) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -1226,7 +1228,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
 		return source
 	}
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("peer-a"),
 		mkSource("peer-b"),
 	)
@@ -1257,7 +1259,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false, false)
 
 	mkSource := func(name string, slow bool) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -1273,7 +1275,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
 	}
 
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("nice-a", false),
 		mkSource("slow", true),
 	)
@@ -1304,7 +1306,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
 
 	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -1317,7 +1319,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
 	}
 
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("nice-a", defaultStorageRequestHandler),
 		mkSource("nice-b", defaultStorageRequestHandler),
 		mkSource("nice-c", defaultStorageRequestHandler),
@@ -1348,7 +1350,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
 			})
 		}
 	)
-	nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false, false)
 
 	mkSource := func(name string, handler storageHandlerFunc) *testPeer {
 		source := newTestPeer(name, t, term)
@@ -1360,7 +1362,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
 		return source
 	}
 	syncer := setupSyncer(
-		nodeScheme,
+		scheme,
 		mkSource("nice-a", defaultStorageRequestHandler),
 		mkSource("nice-b", defaultStorageRequestHandler),
 		mkSource("nice-c", defaultStorageRequestHandler),
@@ -1413,6 +1415,45 @@ func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
 	verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
 }
 
+// TestSyncWithUnevenStorage tests sync where the storage trie is not even
+// and with a few empty ranges.
+func TestSyncWithUnevenStorage(t *testing.T) {
+	t.Parallel()
+
+	testSyncWithUnevenStorage(t, rawdb.HashScheme)
+	testSyncWithUnevenStorage(t, rawdb.PathScheme)
+}
+
+func testSyncWithUnevenStorage(t *testing.T, scheme string) {
+	var (
+		once   sync.Once
+		cancel = make(chan struct{})
+		term   = func() {
+			once.Do(func() {
+				close(cancel)
+			})
+		}
+	)
+	accountTrie, accounts, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 256, false, false, true)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, term)
+		source.accountTrie = accountTrie.Copy()
+		source.accountValues = accounts
+		source.setStorageTries(storageTries)
+		source.storageValues = storageElems
+		source.storageRequestHandler = func(t *testPeer, reqId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {
+			return defaultStorageRequestHandler(t, reqId, root, accounts, origin, limit, 128) // retrieve storage in large mode
+		}
+		return source
+	}
+	syncer := setupSyncer(scheme, mkSource("source"))
+	if err := syncer.Sync(accountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	verifyTrie(scheme, syncer.db, accountTrie.Hash(), t)
+}
+
 type kv struct {
 	k, v []byte
 }
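The new test reuses the suite's termination idiom: every peer shares a term closure that closes the cancel channel through sync.Once, so any number of peers can signal a fatal condition without triggering a double-close panic. The idiom in isolation, independent of the test harness:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		var (
			once   sync.Once
			cancel = make(chan struct{})
			term   = func() {
				once.Do(func() {
					close(cancel) // safe: executed at most once
				})
			}
		)
		// Many goroutines may all signal termination concurrently.
		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				term() // no panic even though all five call it
			}()
		}
		wg.Wait()
		<-cancel // channel is closed, receive returns immediately
		fmt.Println("terminated exactly once")
	}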
@@ -1511,7 +1552,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
 	for i := 0; i < accountConcurrency; i++ {
 		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
 		if i == accountConcurrency-1 {
-			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+			last = common.MaxHash
 		}
 		boundaries = append(boundaries, last)
 		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
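common.MaxHash is the named constant form of the all-ones 32-byte hash, so the substitution is purely cosmetic. Assuming the usual go-ethereum definition, the two spellings compare equal:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
	)

	func main() {
		// common.MaxHash is assumed to be defined as the all-ones 32-byte hash.
		literal := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		fmt.Println(common.MaxHash == literal) // true
	}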
@@ -1608,7 +1649,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots
 }
 
 // makeAccountTrieWithStorage spits out a trie, along with the leafs
-func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
+func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
 	var (
 		db      = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
 		accTrie = trie.NewEmpty(db)
@@ -1633,6 +1674,8 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
 		)
 		if boundary {
 			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
+		} else if uneven {
+			stRoot, stNodes, stEntries = makeUnevenStorageTrie(common.BytesToHash(key), slots, db)
 		} else {
 			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
 		}
@@ -1675,7 +1718,7 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
 		}
 		storageTries[common.BytesToHash(key)] = trie
 	}
-	return db.Scheme(), accTrie, entries, storageTries, storageEntries
+	return accTrie, entries, storageTries, storageEntries
 }
 
 // makeStorageTrieWithSeed fills a storage trie with n items, returning the
@@ -1721,7 +1764,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
 	for i := 0; i < accountConcurrency; i++ {
 		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
 		if i == accountConcurrency-1 {
-			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+			last = common.MaxHash
 		}
 		boundaries = append(boundaries, last)
 		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@@ -1752,6 +1795,38 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
 	return root, nodes, entries
 }
 
+// makeUnevenStorageTrie constructs a storage trie with states distributed
+// unevenly across different key ranges.
+func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+	var (
+		entries []*kv
+		tr, _   = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
+		chosen  = make(map[byte]struct{})
+	)
+	for i := 0; i < 3; i++ {
+		var n int
+		for {
+			n = mrand.Intn(15) // the last range is set empty deliberately
+			if _, ok := chosen[byte(n)]; ok {
+				continue
+			}
+			chosen[byte(n)] = struct{}{}
+			break
+		}
+		for j := 0; j < slots/3; j++ {
+			key := append([]byte{byte(n)}, testutil.RandBytes(31)...)
+			val, _ := rlp.EncodeToBytes(testutil.RandBytes(32))
+
+			elem := &kv{key, val}
+			tr.MustUpdate(elem.k, elem.v)
+			entries = append(entries, elem)
+		}
+	}
+	slices.SortFunc(entries, (*kv).cmp)
+	root, nodes, _ := tr.Commit(false)
+	return root, nodes, entries
+}
+
 func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
 	t.Helper()
 	triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
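makeUnevenStorageTrie concentrates all slots under three distinct first-byte prefixes drawn from 0x00 to 0x0e (mrand.Intn(15)), leaving everything from 0x0f upward empty on purpose, so range requests observe both dense and vacant regions. A hypothetical test-style check of that shape, reusing the helper's returned entries:

	// Hypothetical assertion over the entries returned by makeUnevenStorageTrie:
	// exactly three occupied first-byte prefixes, all below 0x0f.
	func assertUnevenShape(t *testing.T, entries []*kv) {
		prefixes := make(map[byte]struct{})
		for _, e := range entries {
			prefixes[e.k[0]] = struct{}{}
		}
		if len(prefixes) != 3 {
			t.Fatalf("want 3 occupied prefixes, have %d", len(prefixes))
		}
		for p := range prefixes {
			if p >= 0x0f {
				t.Fatalf("prefix %#x should be empty by construction", p)
			}
		}
	}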
@@ -41,6 +41,7 @@ import (
 var (
 	testKey, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 	testAddr     = crypto.PubkeyToAddress(testKey.PublicKey)
+	testContract = common.HexToAddress("0xbeef")
 	testSlot     = common.HexToHash("0xdeadbeef")
 	testValue    = crypto.Keccak256Hash(testSlot[:])
 	testBalance  = big.NewInt(2e15)
@@ -79,7 +80,8 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
 func generateTestChain() (*core.Genesis, []*types.Block) {
 	genesis := &core.Genesis{
 		Config: params.AllEthashProtocolChanges,
-		Alloc:  core.GenesisAlloc{testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}}},
+		Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}},
+			testContract: {Nonce: 1, Code: []byte{0x13, 0x37}}},
 		ExtraData: []byte("test genesis"),
 		Timestamp: 9000,
 	}
@@ -103,8 +105,11 @@ func TestGethClient(t *testing.T) {
 		test func(t *testing.T)
 	}{
 		{
-			"TestGetProof",
-			func(t *testing.T) { testGetProof(t, client) },
+			"TestGetProof1",
+			func(t *testing.T) { testGetProof(t, client, testAddr) },
+		}, {
+			"TestGetProof2",
+			func(t *testing.T) { testGetProof(t, client, testContract) },
 		}, {
 			"TestGetProofCanonicalizeKeys",
 			func(t *testing.T) { testGetProofCanonicalizeKeys(t, client) },
@@ -201,39 +206,42 @@ func testAccessList(t *testing.T, client *rpc.Client) {
 	}
 }
 
-func testGetProof(t *testing.T, client *rpc.Client) {
+func testGetProof(t *testing.T, client *rpc.Client, addr common.Address) {
 	ec := New(client)
 	ethcl := ethclient.NewClient(client)
-	result, err := ec.GetProof(context.Background(), testAddr, []string{testSlot.String()}, nil)
+	result, err := ec.GetProof(context.Background(), addr, []string{testSlot.String()}, nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !bytes.Equal(result.Address[:], testAddr[:]) {
-		t.Fatalf("unexpected address, want: %v got: %v", testAddr, result.Address)
+	if result.Address != addr {
+		t.Fatalf("unexpected address, have: %v want: %v", result.Address, addr)
 	}
 	// test nonce
-	nonce, _ := ethcl.NonceAt(context.Background(), result.Address, nil)
-	if result.Nonce != nonce {
+	if nonce, _ := ethcl.NonceAt(context.Background(), addr, nil); result.Nonce != nonce {
 		t.Fatalf("invalid nonce, want: %v got: %v", nonce, result.Nonce)
 	}
 	// test balance
-	balance, _ := ethcl.BalanceAt(context.Background(), result.Address, nil)
-	if result.Balance.Cmp(balance) != 0 {
+	if balance, _ := ethcl.BalanceAt(context.Background(), addr, nil); result.Balance.Cmp(balance) != 0 {
 		t.Fatalf("invalid balance, want: %v got: %v", balance, result.Balance)
 	}
 
 	// test storage
 	if len(result.StorageProof) != 1 {
 		t.Fatalf("invalid storage proof, want 1 proof, got %v proof(s)", len(result.StorageProof))
 	}
-	proof := result.StorageProof[0]
-	slotValue, _ := ethcl.StorageAt(context.Background(), testAddr, testSlot, nil)
-	if !bytes.Equal(slotValue, proof.Value.Bytes()) {
-		t.Fatalf("invalid storage proof value, want: %v, got: %v", slotValue, proof.Value.Bytes())
-	}
-	if proof.Key != testSlot.String() {
-		t.Fatalf("invalid storage proof key, want: %q, got: %q", testSlot.String(), proof.Key)
+	for _, proof := range result.StorageProof {
+		if proof.Key != testSlot.String() {
+			t.Fatalf("invalid storage proof key, want: %q, got: %q", testSlot.String(), proof.Key)
+		}
+		slotValue, _ := ethcl.StorageAt(context.Background(), addr, common.HexToHash(proof.Key), nil)
+		if have, want := common.BigToHash(proof.Value), common.BytesToHash(slotValue); have != want {
+			t.Fatalf("addr %x, invalid storage proof value: have: %v, want: %v", addr, have, want)
+		}
+	}
+	// test code
+	code, _ := ethcl.CodeAt(context.Background(), addr, nil)
+	if have, want := result.CodeHash, crypto.Keccak256Hash(code); have != want {
+		t.Fatalf("codehash wrong, have %v want %v ", have, want)
 	}
 }
 
 func testGetProofCanonicalizeKeys(t *testing.T, client *rpc.Client) {
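The second proof target is the testContract account seeded in genesis with the two-byte code {0x13, 0x37}, so the new code-hash check pins result.CodeHash to the Keccak-256 of the deployed bytes. A standalone illustration of that expectation:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto"
	)

	func main() {
		// The genesis alloc above gives the 0xbeef account the code {0x13, 0x37};
		// eth_getProof must report keccak256(code) as the account's code hash.
		codeHash := crypto.Keccak256Hash([]byte{0x13, 0x37})
		fmt.Println(codeHash.Hex())
	}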
@@ -14,8 +14,6 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-//go:build (arm64 || amd64) && !openbsd
-
 // Package pebble implements the key-value database layer based on pebble.
 package pebble
 
@@ -148,8 +146,15 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e
 
 	// The max memtable size is limited by the uint32 offsets stored in
 	// internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry.
-	// Taken from https://github.com/cockroachdb/pebble/blob/master/open.go#L38
-	maxMemTableSize := 4<<30 - 1 // Capped by 4 GB
+	//
+	// - MaxUint32 on 64-bit platforms;
+	// - MaxInt on 32-bit platforms.
+	//
+	// It is used when slices are limited to Uint32 on 64-bit platforms (the
+	// length limit for slices is naturally MaxInt on 32-bit platforms).
+	//
+	// Taken from https://github.com/cockroachdb/pebble/blob/master/internal/constants/constants.go
+	maxMemTableSize := (1<<31)<<(^uint(0)>>63) - 1
 
 	// Two memory tables is configured which is identical to leveldb,
 	// including a frozen memory table and another live one.
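The replacement constant is a branch-free way to pick a platform-dependent cap: ^uint(0)>>63 is 1 when uint is 64 bits wide and 0 when it is 32 bits wide, so the expression evaluates to (1<<31)<<1 - 1 = MaxUint32 on 64-bit targets and (1<<31)<<0 - 1 = MaxInt32 on 32-bit ones. A small demonstration of the arithmetic:

	package main

	import "fmt"

	func main() {
		// ^uint(0) is a word of all one-bits; shifting it right by 63 yields 1
		// when uint is 64 bits wide and 0 when it is 32 bits wide. The whole
		// expression is evaluated at compile time for the target platform.
		const shift = ^uint(0) >> 63
		const maxMemTableSize = (1 << 31) << shift - 1

		fmt.Println(shift)           // 1 on a 64-bit target, 0 on a 32-bit target
		fmt.Println(maxMemTableSize) // 4294967295 (MaxUint32) resp. 2147483647 (MaxInt32)
	}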
@@ -14,8 +14,6 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-//go:build (arm64 || amd64) && !openbsd
-
 package pebble
 
 import (
@@ -22,6 +22,7 @@ import (
 	"os"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
 )
 
 // AzureBlobstoreConfig is an authentication and configuration struct containing
@@ -48,8 +49,8 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
 	if err != nil {
 		return err
 	}
-	u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
-	container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+	a := fmt.Sprintf("https://%s.blob.core.windows.net/", config.Account)
+	client, err := azblob.NewClientWithSharedKeyCredential(a, credential, nil)
 	if err != nil {
 		return err
 	}
@@ -60,38 +61,38 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
 	}
 	defer in.Close()
 
-	blockblob := container.NewBlockBlobClient(name)
-	_, err = blockblob.Upload(context.Background(), in, nil)
+	_, err = client.UploadFile(context.Background(), config.Container, name, in, nil)
 	return err
 }
 
 // AzureBlobstoreList lists all the files contained within an azure blobstore.
-func AzureBlobstoreList(config AzureBlobstoreConfig) ([]*azblob.BlobItemInternal, error) {
+func AzureBlobstoreList(config AzureBlobstoreConfig) ([]*container.BlobItem, error) {
 	// Create an authenticated client against the Azure cloud
 	credential, err := azblob.NewSharedKeyCredential(config.Account, config.Token)
 	if err != nil {
 		return nil, err
 	}
-	u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
-	container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+	a := fmt.Sprintf("https://%s.blob.core.windows.net/", config.Account)
+	client, err := azblob.NewClientWithSharedKeyCredential(a, credential, nil)
 	if err != nil {
 		return nil, err
 	}
-	var maxResults int32 = 5000
-	pager := container.ListBlobsFlat(&azblob.ContainerListBlobFlatSegmentOptions{
-		Maxresults: &maxResults,
-	})
-	var allBlobs []*azblob.BlobItemInternal
-	for pager.NextPage(context.Background()) {
-		res := pager.PageResponse()
-		allBlobs = append(allBlobs, res.ContainerListBlobFlatSegmentResult.Segment.BlobItems...)
+	pager := client.NewListBlobsFlatPager(config.Container, nil)
+
+	var blobs []*container.BlobItem
+	for pager.More() {
+		page, err := pager.NextPage(context.TODO())
+		if err != nil {
+			return nil, err
+		}
+		blobs = append(blobs, page.Segment.BlobItems...)
 	}
-	return allBlobs, pager.Err()
+	return blobs, nil
 }
 
 // AzureBlobstoreDelete iterates over a list of files to delete and removes them
 // from the blobstore.
-func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*azblob.BlobItemInternal) error {
+func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*container.BlobItem) error {
 	if *DryRunFlag {
 		for _, blob := range blobs {
 			fmt.Printf("would delete %s (%s) from %s/%s\n", *blob.Name, blob.Properties.LastModified, config.Account, config.Container)
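The migration moves from the deprecated container-client style of the Azure SDK to the current azblob.Client plus the More()/NextPage() pager idiom. A minimal, self-contained sketch of the listing pattern; the account name, key and container are placeholders:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	)

	func main() {
		// Placeholders: substitute a real account name, shared key and container.
		cred, err := azblob.NewSharedKeyCredential("myaccount", "bXktYmFzZTY0LWtleQ==")
		if err != nil {
			log.Fatal(err)
		}
		client, err := azblob.NewClientWithSharedKeyCredential("https://myaccount.blob.core.windows.net/", cred, nil)
		if err != nil {
			log.Fatal(err)
		}
		pager := client.NewListBlobsFlatPager("mycontainer", nil)
		for pager.More() { // More reports whether another page can be fetched
			page, err := pager.NextPage(context.Background())
			if err != nil {
				log.Fatal(err)
			}
			for _, item := range page.Segment.BlobItems {
				fmt.Println(*item.Name)
			}
		}
	}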
@@ -103,15 +104,14 @@ func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []*azblob.BlobItemI
 	if err != nil {
 		return err
 	}
-	u := fmt.Sprintf("https://%s.blob.core.windows.net/%s", config.Account, config.Container)
-	container, err := azblob.NewContainerClientWithSharedKey(u, credential, nil)
+	a := fmt.Sprintf("https://%s.blob.core.windows.net/", config.Account)
+	client, err := azblob.NewClientWithSharedKeyCredential(a, credential, nil)
 	if err != nil {
 		return err
 	}
 	// Iterate over the blobs and delete them
 	for _, blob := range blobs {
-		blockblob := container.NewBlockBlobClient(*blob.Name)
-		if _, err := blockblob.Delete(context.Background(), &azblob.DeleteBlobOptions{}); err != nil {
+		if _, err := client.DeleteBlob(context.Background(), config.Container, *blob.Name, nil); err != nil {
 			return err
 		}
 		fmt.Printf("deleted %s (%s)\n", *blob.Name, blob.Properties.LastModified)
@@ -84,7 +84,11 @@ func (g *GoToolchain) goTool(command string, args ...string) *exec.Cmd {
 
 // DownloadGo downloads the Go binary distribution and unpacks it into a temporary
 // directory. It returns the GOROOT of the unpacked toolchain.
-func DownloadGo(csdb *ChecksumDB, version string) string {
+func DownloadGo(csdb *ChecksumDB) string {
+	version, err := Version(csdb, "golang")
+	if err != nil {
+		log.Fatal(err)
+	}
 	// Shortcut: if the Go version that runs this script matches the
 	// requested version exactly, there is no need to download anything.
 	activeGo := strings.TrimPrefix(runtime.Version(), "go")
@@ -126,3 +130,52 @@ func DownloadGo(csdb *ChecksumDB, version string) string {
 	}
 	return goroot
 }
+
+// Version returns the versions defined in the checksumdb.
+func Version(csdb *ChecksumDB, version string) (string, error) {
+	for _, l := range csdb.allChecksums {
+		if !strings.HasPrefix(l, "# version:") {
+			continue
+		}
+		v := strings.Split(l, ":")[1]
+		parts := strings.Split(v, " ")
+		if len(parts) != 2 {
+			log.Print("Erroneous version-string", "v", l)
+			continue
+		}
+		if parts[0] == version {
+			log.Printf("Found version %q", parts[1])
+			return parts[1], nil
+		}
+	}
+	return "", fmt.Errorf("no version found for '%v'", version)
+}
+
+// DownloadAndVerifyChecksums downloads all files and checks that they match
+// the checksum given in checksums.txt.
+// This task can be used to sanity-check new checksums.
+func DownloadAndVerifyChecksums(csdb *ChecksumDB) {
+	var (
+		base   = ""
+		ucache = os.TempDir()
+	)
+	for _, l := range csdb.allChecksums {
+		if strings.HasPrefix(l, "# https://") {
+			base = l[2:]
+			continue
+		}
+		if strings.HasPrefix(l, "#") {
+			continue
+		}
+		hashFile := strings.Split(l, "  ")
+		if len(hashFile) != 2 {
+			continue
+		}
+		file := hashFile[1]
+		url := base + file
+		dst := filepath.Join(ucache, file)
+		if err := csdb.DownloadFile(url, dst); err != nil {
+			log.Print(err)
+		}
+	}
+}
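Version scans the checksum file for marker comments of the form "# version:<component> <version>"; splitting on ":" and then on a space yields the component name and its pinned version. A quick illustration of the parse on one such line (hypothetical input, same logic):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		line := "# version:golang 1.21.3"
		v := strings.Split(line, ":")[1] // "golang 1.21.3"
		parts := strings.Split(v, " ")   // ["golang", "1.21.3"]
		fmt.Println(parts[0], "->", parts[1])
	}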
@@ -675,10 +675,6 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
 		keys         = make([]common.Hash, len(storageKeys))
 		keyLengths   = make([]int, len(storageKeys))
 		storageProof = make([]StorageResult, len(storageKeys))
-
-		storageTrie state.Trie
-		storageHash = types.EmptyRootHash
-		codeHash    = types.EmptyCodeHash
 	)
 	// Deserialize all keys. This prevents state access on invalid input.
 	for i, hexKey := range storageKeys {
@@ -688,23 +684,22 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
 			return nil, err
 		}
 	}
-	state, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
-	if state == nil || err != nil {
+	statedb, header, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+	if statedb == nil || err != nil {
 		return nil, err
 	}
-	if storageRoot := state.GetStorageRoot(address); storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) {
-		id := trie.StorageTrieID(header.Root, crypto.Keccak256Hash(address.Bytes()), storageRoot)
-		tr, err := trie.NewStateTrie(id, state.Database().TrieDB())
-		if err != nil {
-			return nil, err
-		}
-		storageTrie = tr
-	}
-	// If we have a storageTrie, the account exists and we must update
-	// the storage root hash and the code hash.
-	if storageTrie != nil {
-		storageHash = storageTrie.Hash()
-		codeHash = state.GetCodeHash(address)
-	}
-	// Create the proofs for the storageKeys.
-	for i, key := range keys {
+	codeHash := statedb.GetCodeHash(address)
+	storageRoot := statedb.GetStorageRoot(address)
+
+	if len(keys) > 0 {
+		var storageTrie state.Trie
+		if storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) {
+			id := trie.StorageTrieID(header.Root, crypto.Keccak256Hash(address.Bytes()), storageRoot)
+			st, err := trie.NewStateTrie(id, statedb.Database().TrieDB())
+			if err != nil {
+				return nil, err
+			}
+			storageTrie = st
+		}
+		// Create the proofs for the storageKeys.
+		for i, key := range keys {
@@ -718,7 +713,6 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
 		} else {
 			outputKey = hexutil.Encode(key[:])
 		}
-
 		if storageTrie == nil {
 			storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}}
 			continue
@@ -727,12 +721,12 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
 		if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil {
 			return nil, err
 		}
-		value := (*hexutil.Big)(state.GetState(address, key).Big())
+		value := (*hexutil.Big)(statedb.GetState(address, key).Big())
 		storageProof[i] = StorageResult{outputKey, value, proof}
 	}
+	}
 	// Create the accountProof.
-	tr, err := trie.NewStateTrie(trie.StateTrieID(header.Root), state.Database().TrieDB())
+	tr, err := trie.NewStateTrie(trie.StateTrieID(header.Root), statedb.Database().TrieDB())
 	if err != nil {
 		return nil, err
 	}
@@ -743,12 +737,12 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st
 	return &AccountResult{
 		Address:      address,
 		AccountProof: accountProof,
-		Balance:      (*hexutil.Big)(state.GetBalance(address)),
+		Balance:      (*hexutil.Big)(statedb.GetBalance(address)),
 		CodeHash:     codeHash,
-		Nonce:        hexutil.Uint64(state.GetNonce(address)),
-		StorageHash:  storageHash,
+		Nonce:        hexutil.Uint64(statedb.GetNonce(address)),
+		StorageHash:  storageRoot,
 		StorageProof: storageProof,
-	}, state.Error()
+	}, statedb.Error()
 }
 
 // decodeHash parses a hex-encoded 32-byte hash. The input may optionally
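Net effect of the GetProof rework: the state variable is renamed statedb (avoiding the clash with the imported state package), the code hash and storage root are read unconditionally from the account, and the storage trie is only opened when the caller actually asked for storage keys; StorageHash now reports the account's storage root directly instead of a recomputed trie hash. A condensed sketch of the resulting control flow, with hypothetical helpers standing in for the inlined logic:

	// Sketch of the reworked flow, not the verbatim implementation.
	codeHash := statedb.GetCodeHash(address)
	storageRoot := statedb.GetStorageRoot(address)

	if len(keys) > 0 { // open the storage trie only when proofs were requested
		var storageTrie state.Trie
		if storageRoot != types.EmptyRootHash && storageRoot != (common.Hash{}) {
			storageTrie, _ = openStorageTrie(header.Root, address, storageRoot) // hypothetical helper
		}
		for i, key := range keys {
			if storageTrie == nil {
				storageProof[i] = emptyStorageResult(key) // hypothetical helper: zero value, no proof
				continue
			}
			storageProof[i] = proveSlot(storageTrie, statedb, address, key) // hypothetical helper
		}
	}
	// The result then reports the account's storage root as-is.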
@@ -33,7 +33,7 @@ import (
 
 // BuildPayloadArgs contains the provided parameters for building payload.
 // Check engine-api specification for more details.
-// https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#payloadattributesv1
+// https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#payloadattributesv3
 type BuildPayloadArgs struct {
 	Parent    common.Hash // The parent block to build payload on top
 	Timestamp uint64      // The provided timestamp of generated payload
@@ -23,7 +23,7 @@ import (
 const (
 	VersionMajor = 1        // Major version component of the current release
 	VersionMinor = 13       // Minor version component of the current release
-	VersionPatch = 3        // Patch version component of the current release
+	VersionPatch = 4        // Patch version component of the current release
 	VersionMeta  = "stable" // Version metadata to append to the version string
 )
@@ -63,10 +63,6 @@ func TestExecutionSpec(t *testing.T) {
 	}
 	bt := new(testMatcher)
 
-	// cancun tests are not complete yet
-	bt.skipLoad(`^cancun/`)
-	bt.skipLoad(`-fork=Cancun`)
-
 	bt.walk(t, executionSpecDir, func(t *testing.T, name string, test *BlockTest) {
 		execBlockTest(t, bt, test)
 	})
@@ -75,14 +71,18 @@ func TestExecutionSpec(t *testing.T) {
 func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
 	if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil)); err != nil {
 		t.Errorf("test in hash mode without snapshotter failed: %v", err)
+		return
 	}
 	if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil)); err != nil {
 		t.Errorf("test in hash mode with snapshotter failed: %v", err)
+		return
 	}
 	if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil)); err != nil {
 		t.Errorf("test in path mode without snapshotter failed: %v", err)
+		return
 	}
 	if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil)); err != nil {
 		t.Errorf("test in path mode with snapshotter failed: %v", err)
+		return
 	}
 }
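The added return statements stop a block test after its first failing scheme/snapshot combination, presumably because the remaining variants would mostly restate the same root cause. The pattern in miniature:

	// Sketch: run variants in order and stop at the first failure instead of
	// reporting follow-on failures for the same underlying bug.
	func runVariants(t *testing.T, variants []func() error) {
		for i, run := range variants {
			if err := run(); err != nil {
				t.Errorf("variant %d failed: %v", i, err)
				return // later variants would only add noise
			}
		}
	}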
@@ -128,7 +128,7 @@ func (f *fuzzer) fuzz() int {
 	if len(keys) == 0 {
 		return 0
 	}
-	var first, last = keys[0], keys[len(keys)-1]
+	var first = keys[0]
 	testcase %= 6
 	switch testcase {
 	case 0:
@@ -165,7 +165,7 @@ func (f *fuzzer) fuzz() int {
 	}
 	ok = 1
 	//nodes, subtrie
-	hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, last, keys, vals, proof)
+	hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, keys, vals, proof)
 	if err != nil {
 		if hasMore {
 			panic("err != nil && hasMore == true")
@@ -481,7 +481,7 @@ func hasRightElement(node node, key []byte) bool {
 // Note: This method does not verify that the proof is of minimal form. If the input
 // proofs are 'bloated' with neighbour leaves or random data, aside from the 'useful'
 // data, then the proof will still be accepted.
-func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
+func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (bool, error) {
 	if len(keys) != len(values) {
 		return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
 	}
@@ -520,6 +520,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
 		}
 		return false, nil
 	}
+	var lastKey = keys[len(keys)-1]
 	// Special case, there is only one element and two edge keys are same.
 	// In this case, we can't construct two edge paths. So handle it here.
 	if len(keys) == 1 && bytes.Equal(firstKey, lastKey) {
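With lastKey gone from the exported signature, the right edge of a range is always the last supplied key, derived internally as shown above; every caller throughout the tree simply drops the argument. A self-contained usage sketch against the assumed go-ethereum v1.13.4 module API:

	package main

	import (
		"fmt"
		"log"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/ethdb/memorydb"
		"github.com/ethereum/go-ethereum/trie"
	)

	func main() {
		// Build a tiny trie with three sorted 32-byte keys.
		tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
		keys := [][]byte{
			common.HexToHash("0x01").Bytes(),
			common.HexToHash("0x02").Bytes(),
			common.HexToHash("0x03").Bytes(),
		}
		vals := [][]byte{[]byte("v1"), []byte("v2"), []byte("v3")}
		for i := range keys {
			tr.MustUpdate(keys[i], vals[i])
		}
		// Prove both edges; the right edge is simply the last key itself.
		proof := memorydb.New()
		if err := tr.Prove(keys[0], proof); err != nil {
			log.Fatal(err)
		}
		if err := tr.Prove(keys[len(keys)-1], proof); err != nil {
			log.Fatal(err)
		}
		// New signature: no explicit lastKey argument anymore.
		hasMore, err := trie.VerifyRangeProof(tr.Hash(), keys[0], keys, vals, proof)
		fmt.Println(hasMore, err) // false <nil>: nothing exists right of the range
	}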
@@ -191,7 +191,7 @@ func TestRangeProof(t *testing.T) {
 			keys = append(keys, entries[i].k)
 			vals = append(vals, entries[i].v)
 		}
-		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
+		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
 		if err != nil {
 			t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
 		}
@@ -221,19 +221,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
 		if bytes.Compare(first, entries[start].k) > 0 {
 			continue
 		}
-		// Short circuit if the increased key is same with the next key
-		last := increaseKey(common.CopyBytes(entries[end-1].k))
-		if end != len(entries) && bytes.Equal(last, entries[end].k) {
-			continue
-		}
-		// Short circuit if the increased key is overflow
-		if bytes.Compare(last, entries[end-1].k) < 0 {
-			continue
-		}
 		if err := trie.Prove(first, proof); err != nil {
 			t.Fatalf("Failed to prove the first node %v", err)
 		}
-		if err := trie.Prove(last, proof); err != nil {
+		if err := trie.Prove(entries[end-1].k, proof); err != nil {
 			t.Fatalf("Failed to prove the last node %v", err)
 		}
 		var keys [][]byte
@@ -242,36 +233,15 @@ func TestRangeProofWithNonExistentProof(t *testing.T) {
 			keys = append(keys, entries[i].k)
 			vals = append(vals, entries[i].v)
 		}
-		_, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof)
+		_, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
 		if err != nil {
 			t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
 		}
 	}
-	// Special case, two edge proofs for two edge key.
-	proof := memorydb.New()
-	first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
-	last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
-	if err := trie.Prove(first, proof); err != nil {
-		t.Fatalf("Failed to prove the first node %v", err)
-	}
-	if err := trie.Prove(last, proof); err != nil {
-		t.Fatalf("Failed to prove the last node %v", err)
-	}
-	var k [][]byte
-	var v [][]byte
-	for i := 0; i < len(entries); i++ {
-		k = append(k, entries[i].k)
-		v = append(v, entries[i].v)
-	}
-	_, err := VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
-	if err != nil {
-		t.Fatal("Failed to verify whole rang with non-existent edges")
-	}
 }
 
 // TestRangeProofWithInvalidNonExistentProof tests such scenarios:
 // - There exists a gap between the first element and the left edge proof
-// - There exists a gap between the last element and the right edge proof
 func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
 	trie, vals := randomTrie(4096)
 	var entries []*kv
@@ -298,29 +268,7 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) {
 		k = append(k, entries[i].k)
 		v = append(v, entries[i].v)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), first, k[len(k)-1], k, v, proof)
-	if err == nil {
-		t.Fatalf("Expected to detect the error, got nil")
-	}
-
-	// Case 2
-	start, end = 100, 200
-	last := increaseKey(common.CopyBytes(entries[end-1].k))
-	proof = memorydb.New()
-	if err := trie.Prove(entries[start].k, proof); err != nil {
-		t.Fatalf("Failed to prove the first node %v", err)
-	}
-	if err := trie.Prove(last, proof); err != nil {
-		t.Fatalf("Failed to prove the last node %v", err)
-	}
-	end = 195 // Capped slice
-	k = make([][]byte, 0)
-	v = make([][]byte, 0)
-	for i := start; i < end; i++ {
-		k = append(k, entries[i].k)
-		v = append(v, entries[i].v)
-	}
-	_, err = VerifyRangeProof(trie.Hash(), k[0], last, k, v, proof)
+	_, err := VerifyRangeProof(trie.Hash(), first, k, v, proof)
 	if err == nil {
 		t.Fatalf("Expected to detect the error, got nil")
 	}
@@ -344,7 +292,7 @@ func TestOneElementRangeProof(t *testing.T) {
 	if err := trie.Prove(entries[start].k, proof); err != nil {
 		t.Fatalf("Failed to prove the first node %v", err)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+	_, err := VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -359,7 +307,7 @@ func TestOneElementRangeProof(t *testing.T) {
 	if err := trie.Prove(entries[start].k, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+	_, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -374,7 +322,7 @@ func TestOneElementRangeProof(t *testing.T) {
 	if err := trie.Prove(last, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+	_, err = VerifyRangeProof(trie.Hash(), entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -389,7 +337,7 @@ func TestOneElementRangeProof(t *testing.T) {
 	if err := trie.Prove(last, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
+	_, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -408,7 +356,7 @@ func TestOneElementRangeProof(t *testing.T) {
 	if err := tinyTrie.Prove(last, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof)
+	_, err = VerifyRangeProof(tinyTrie.Hash(), first, [][]byte{entry.k}, [][]byte{entry.v}, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -430,7 +378,7 @@ func TestAllElementsProof(t *testing.T) {
 		k = append(k, entries[i].k)
 		v = append(v, entries[i].v)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), nil, nil, k, v, nil)
+	_, err := VerifyRangeProof(trie.Hash(), nil, k, v, nil)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -443,7 +391,7 @@ func TestAllElementsProof(t *testing.T) {
 	if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof)
+	_, err = VerifyRangeProof(trie.Hash(), k[0], k, v, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -451,14 +399,13 @@ func TestAllElementsProof(t *testing.T) {
 	// Even with non-existent edge proofs, it should still work.
 	proof = memorydb.New()
 	first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes()
-	last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes()
 	if err := trie.Prove(first, proof); err != nil {
 		t.Fatalf("Failed to prove the first node %v", err)
 	}
-	if err := trie.Prove(last, proof); err != nil {
+	if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof)
+	_, err = VerifyRangeProof(trie.Hash(), first, k, v, proof)
 	if err != nil {
 		t.Fatalf("Expected no error, got %v", err)
 	}
@@ -491,43 +438,7 @@ func TestSingleSideRangeProof(t *testing.T) {
 			k = append(k, entries[i].k)
 			v = append(v, entries[i].v)
 		}
-		_, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k[len(k)-1], k, v, proof)
-		if err != nil {
-			t.Fatalf("Expected no error, got %v", err)
-		}
-	}
-}
-
-// TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff.
-func TestReverseSingleSideRangeProof(t *testing.T) {
-	for i := 0; i < 64; i++ {
-		trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
-		var entries []*kv
-		for i := 0; i < 4096; i++ {
-			value := &kv{randBytes(32), randBytes(20), false}
-			trie.MustUpdate(value.k, value.v)
-			entries = append(entries, value)
-		}
-		slices.SortFunc(entries, (*kv).cmp)
-
-		var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1}
-		for _, pos := range cases {
-			proof := memorydb.New()
-			if err := trie.Prove(entries[pos].k, proof); err != nil {
-				t.Fatalf("Failed to prove the first node %v", err)
-			}
-			last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
-			if err := trie.Prove(last.Bytes(), proof); err != nil {
-				t.Fatalf("Failed to prove the last node %v", err)
-			}
-			k := make([][]byte, 0)
-			v := make([][]byte, 0)
-			for i := pos; i < len(entries); i++ {
-				k = append(k, entries[i].k)
-				v = append(v, entries[i].v)
-			}
-			_, err := VerifyRangeProof(trie.Hash(), k[0], last.Bytes(), k, v, proof)
+		_, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k, v, proof)
 		if err != nil {
 			t.Fatalf("Expected no error, got %v", err)
 		}
@@ -561,7 +472,7 @@ func TestBadRangeProof(t *testing.T) {
 			keys = append(keys, entries[i].k)
 			vals = append(vals, entries[i].v)
 		}
-		var first, last = keys[0], keys[len(keys)-1]
+		var first = keys[0]
 		testcase := mrand.Intn(6)
 		var index int
 		switch testcase {
@@ -576,7 +487,7 @@ func TestBadRangeProof(t *testing.T) {
 		case 2:
 			// Gapped entry slice
 			index = mrand.Intn(end - start)
-			if (index == 0 && start < 100) || (index == end-start-1 && end <= 100) {
+			if (index == 0 && start < 100) || (index == end-start-1) {
 				continue
 			}
 			keys = append(keys[:index], keys[index+1:]...)
@@ -599,7 +510,7 @@ func TestBadRangeProof(t *testing.T) {
 			index = mrand.Intn(end - start)
 			vals[index] = nil
 		}
-		_, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof)
+		_, err := VerifyRangeProof(trie.Hash(), first, keys, vals, proof)
 		if err == nil {
 			t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1)
 		}
@@ -633,7 +544,7 @@ func TestGappedRangeProof(t *testing.T) {
 		keys = append(keys, entries[i].k)
 		vals = append(vals, entries[i].v)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
+	_, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
 	if err == nil {
 		t.Fatal("expect error, got nil")
 	}
@@ -649,24 +560,22 @@ func TestSameSideProofs(t *testing.T) {
 	slices.SortFunc(entries, (*kv).cmp)
 
 	pos := 1000
-	first := decreaseKey(common.CopyBytes(entries[pos].k))
-	first = decreaseKey(first)
-	last := decreaseKey(common.CopyBytes(entries[pos].k))
+	first := common.CopyBytes(entries[0].k)
 
 	proof := memorydb.New()
 	if err := trie.Prove(first, proof); err != nil {
 		t.Fatalf("Failed to prove the first node %v", err)
 	}
-	if err := trie.Prove(last, proof); err != nil {
-		t.Fatalf("Failed to prove the last node %v", err)
+	if err := trie.Prove(entries[2000].k, proof); err != nil {
+		t.Fatalf("Failed to prove the first node %v", err)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
+	_, err := VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
 	if err == nil {
 		t.Fatalf("Expected error, got nil")
 	}
 
 	first = increaseKey(common.CopyBytes(entries[pos].k))
-	last = increaseKey(common.CopyBytes(entries[pos].k))
+	last := increaseKey(common.CopyBytes(entries[pos].k))
 	last = increaseKey(last)
 
 	proof = memorydb.New()
@@ -676,7 +585,7 @@ func TestSameSideProofs(t *testing.T) {
 	if err := trie.Prove(last, proof); err != nil {
 		t.Fatalf("Failed to prove the last node %v", err)
 	}
-	_, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
+	_, err = VerifyRangeProof(trie.Hash(), first, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof)
 	if err == nil {
 		t.Fatalf("Expected error, got nil")
 	}
@@ -703,15 +612,12 @@ func TestHasRightElement(t *testing.T) {
 		{50, 100, true},
 		{50, len(entries), false}, // No more element expected
 		{len(entries) - 1, len(entries), false}, // Single last element with two existent proofs(point to same key)
-		{len(entries) - 1, -1, false}, // Single last element with non-existent right proof
 		{0, len(entries), false}, // The whole set with existent left proof
 		{-1, len(entries), false}, // The whole set with non-existent left proof
-		{-1, -1, false}, // The whole set with non-existent left/right proof
 	}
 	for _, c := range cases {
 		var (
 			firstKey []byte
-			lastKey  []byte
 			start    = c.start
 			end      = c.end
 			proof    = memorydb.New()
@@ -727,24 +633,16 @@ func TestHasRightElement(t *testing.T) {
 				t.Fatalf("Failed to prove the first node %v", err)
 			}
 		}
-		if c.end == -1 {
-			lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries)
-			if err := trie.Prove(lastKey, proof); err != nil {
-				t.Fatalf("Failed to prove the first node %v", err)
-			}
-		} else {
-			lastKey = entries[c.end-1].k
-			if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
-				t.Fatalf("Failed to prove the first node %v", err)
-			}
+		if err := trie.Prove(entries[c.end-1].k, proof); err != nil {
+			t.Fatalf("Failed to prove the first node %v", err)
 		}
 		k := make([][]byte, 0)
 		v := make([][]byte, 0)
 		for i := start; i < end; i++ {
 			k = append(k, entries[i].k)
 			v = append(v, entries[i].v)
 		}
-		hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, lastKey, k, v, proof)
+		hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, k, v, proof)
 		if err != nil {
 			t.Fatalf("Expected no error, got %v", err)
 		}
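
TestHasRightElement pins down the boolean that VerifyRangeProof returns: whether the trie still holds elements to the right of the verified range, now judged against the last supplied key rather than a caller-provided bound. A hedged sketch of how a caller might drive chunked iteration off that flag; the chunk size and key layout here are illustrative only, not part of the diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// A toy trie with six sorted keys, paged through two at a time.
	tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
	var keys, vals [][]byte
	for i := byte(1); i <= 6; i++ {
		k := common.BytesToHash([]byte{i}).Bytes()
		tr.MustUpdate(k, []byte{i})
		keys = append(keys, k)
		vals = append(vals, []byte{i})
	}
	root := tr.Hash()

	for start := 0; ; start += 2 {
		k, v := keys[start:start+2], vals[start:start+2]
		// Edge proofs cover this chunk only.
		proof := memorydb.New()
		if err := tr.Prove(k[0], proof); err != nil {
			panic(err)
		}
		if err := tr.Prove(k[len(k)-1], proof); err != nil {
			panic(err)
		}
		more, err := trie.VerifyRangeProof(root, k[0], k, v, proof)
		if err != nil {
			panic(err)
		}
		fmt.Printf("chunk [%d,%d] verified, more=%v\n", start, start+1, more)
		if !more {
			break // the proof guarantees nothing remains to the right
		}
	}
}
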
@@ -777,7 +675,7 @@ func TestEmptyRangeProof(t *testing.T) {
 		if err := trie.Prove(first, proof); err != nil {
 			t.Fatalf("Failed to prove the first node %v", err)
 		}
-		_, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof)
+		_, err := VerifyRangeProof(trie.Hash(), first, nil, nil, proof)
 		if c.err && err == nil {
 			t.Fatalf("Expected error, got nil")
 		}
@@ -817,7 +715,7 @@ func TestBloatedProof(t *testing.T) {
 	trie.Prove(keys[0], want)
 	trie.Prove(keys[len(keys)-1], want)
 
-	if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil {
+	if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof); err != nil {
 		t.Fatalf("expected bloated proof to succeed, got %v", err)
 	}
 }
@@ -860,7 +758,7 @@ func TestEmptyValueRangeProof(t *testing.T) {
 		keys = append(keys, entries[i].k)
 		vals = append(vals, entries[i].v)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof)
+	_, err := VerifyRangeProof(trie.Hash(), keys[0], keys, vals, proof)
 	if err == nil {
 		t.Fatalf("Expected failure on noop entry")
 	}
@@ -895,7 +793,7 @@ func TestAllElementsEmptyValueRangeProof(t *testing.T) {
 		keys = append(keys, entries[i].k)
 		vals = append(vals, entries[i].v)
 	}
-	_, err := VerifyRangeProof(trie.Hash(), nil, nil, keys, vals, nil)
+	_, err := VerifyRangeProof(trie.Hash(), nil, keys, vals, nil)
 	if err == nil {
 		t.Fatalf("Expected failure on noop entry")
 	}
@@ -1001,7 +899,7 @@ func benchmarkVerifyRangeProof(b *testing.B, size int) {
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, proof)
+		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, proof)
 		if err != nil {
 			b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err)
 		}
@@ -1028,7 +926,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, nil)
+		_, err := VerifyRangeProof(trie.Hash(), keys[0], keys, values, nil)
 		if err != nil {
 			b.Fatalf("Expected no error, got %v", err)
 		}
@@ -1087,15 +985,14 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
 	root := trie.Hash()
 	proof := memorydb.New()
 	start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
-	end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
 	if err := trie.Prove(start, proof); err != nil {
 		t.Fatalf("failed to prove start: %v", err)
 	}
-	if err := trie.Prove(end, proof); err != nil {
+	if err := trie.Prove(keys[len(keys)-1], proof); err != nil {
 		t.Fatalf("failed to prove end: %v", err)
 	}
 
-	more, err := VerifyRangeProof(root, start, end, keys, vals, proof)
+	more, err := VerifyRangeProof(root, start, keys, vals, proof)
 	if err != nil {
 		t.Fatalf("failed to verify range proof: %v", err)
 	}