diff --git a/Makefile b/Makefile
index 92413dda7..6a366b86c 100644
--- a/Makefile
+++ b/Makefile
@@ -56,7 +56,11 @@ ios:
.PHONY: statedifftest
statedifftest: | $(GOOSE)
- MODE=statediff go test ./statediff/... -v
+ MODE=statediff go test -p 1 ./statediff/... -v
+
+.PHONY: statediff_filetest
+statediff_filetest: | $(GOOSE)
+ MODE=statediff STATEDIFF_DB=file go test -p 1 ./statediff/... -v
test: all
$(GORUN) build/ci.go test
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 1a8ee4025..d77e261f9 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -18,15 +18,16 @@ package main
import (
"bufio"
+ "context"
"errors"
"fmt"
"math/big"
"os"
"reflect"
+ "time"
"unicode"
- "github.com/ethereum/go-ethereum/eth/downloader"
- "github.com/ethereum/go-ethereum/statediff"
+ "github.com/naoina/toml"
"gopkg.in/urfave/cli.v1"
"github.com/ethereum/go-ethereum/accounts/external"
@@ -35,13 +36,19 @@ import (
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth/catalyst"
+ "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
- "github.com/naoina/toml"
+ "github.com/ethereum/go-ethereum/statediff"
+ dumpdb "github.com/ethereum/go-ethereum/statediff/indexer/database/dump"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/file"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
)
var (
@@ -182,27 +189,86 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
}
if ctx.GlobalBool(utils.StateDiffFlag.Name) {
- var dbParams *statediff.DBParams
- if ctx.GlobalIsSet(utils.StateDiffDBFlag.Name) {
- dbParams = new(statediff.DBParams)
- dbParams.ConnectionURL = ctx.GlobalString(utils.StateDiffDBFlag.Name)
+ var indexerConfig interfaces.Config
+ var clientName, nodeID string
+ if ctx.GlobalIsSet(utils.StateDiffWritingFlag.Name) {
+ clientName = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name)
if ctx.GlobalIsSet(utils.StateDiffDBNodeIDFlag.Name) {
- dbParams.ID = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name)
+ nodeID = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name)
} else {
utils.Fatalf("Must specify node ID for statediff DB output")
}
- if ctx.GlobalIsSet(utils.StateDiffDBClientNameFlag.Name) {
- dbParams.ClientName = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name)
- } else {
- utils.Fatalf("Must specify client name for statediff DB output")
+
+ dbTypeStr := ctx.GlobalString(utils.StateDiffDBTypeFlag.Name)
+ dbType, err := shared.ResolveDBType(dbTypeStr)
+ if err != nil {
+ utils.Fatalf("%v", err)
}
- } else {
- if ctx.GlobalBool(utils.StateDiffWritingFlag.Name) {
- utils.Fatalf("Must pass DB parameters if enabling statediff write loop")
+ switch dbType {
+ case shared.FILE:
+ indexerConfig = file.Config{
+ FilePath: ctx.GlobalString(utils.StateDiffFilePath.Name),
+ }
+ case shared.POSTGRES:
+ driverTypeStr := ctx.GlobalString(utils.StateDiffDBDriverTypeFlag.Name)
+ driverType, err := postgres.ResolveDriverType(driverTypeStr)
+ if err != nil {
+ utils.Fatalf("%v", err)
+ }
+ pgConfig := postgres.Config{
+ Hostname: ctx.GlobalString(utils.StateDiffDBHostFlag.Name),
+ Port: ctx.GlobalInt(utils.StateDiffDBPortFlag.Name),
+ DatabaseName: ctx.GlobalString(utils.StateDiffDBNameFlag.Name),
+ Username: ctx.GlobalString(utils.StateDiffDBUserFlag.Name),
+ Password: ctx.GlobalString(utils.StateDiffDBPasswordFlag.Name),
+ ID: nodeID,
+ ClientName: clientName,
+ Driver: driverType,
+ }
+ if ctx.GlobalIsSet(utils.StateDiffDBMinConns.Name) {
+ pgConfig.MinConns = ctx.GlobalInt(utils.StateDiffDBMinConns.Name)
+ }
+ if ctx.GlobalIsSet(utils.StateDiffDBMaxConns.Name) {
+ pgConfig.MaxConns = ctx.GlobalInt(utils.StateDiffDBMaxConns.Name)
+ }
+ if ctx.GlobalIsSet(utils.StateDiffDBMaxIdleConns.Name) {
+ pgConfig.MaxIdle = ctx.GlobalInt(utils.StateDiffDBMaxIdleConns.Name)
+ }
+ if ctx.GlobalIsSet(utils.StateDiffDBMaxConnLifetime.Name) {
+ pgConfig.MaxConnLifetime = ctx.GlobalDuration(utils.StateDiffDBMaxConnLifetime.Name) * time.Second
+ }
+ if ctx.GlobalIsSet(utils.StateDiffDBMaxConnIdleTime.Name) {
+ pgConfig.MaxConnIdleTime = ctx.GlobalDuration(utils.StateDiffDBMaxConnIdleTime.Name) * time.Second
+ }
+ if ctx.GlobalIsSet(utils.StateDiffDBConnTimeout.Name) {
+ pgConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffDBConnTimeout.Name) * time.Second
+ }
+ indexerConfig = pgConfig
+ case shared.DUMP:
+ dumpTypeStr := ctx.GlobalString(utils.StateDiffDBDumpDst.Name)
+ dumpType, err := dumpdb.ResolveDumpType(dumpTypeStr)
+ if err != nil {
+ utils.Fatalf("%v", err)
+ }
+ switch dumpType {
+ case dumpdb.STDERR:
+ indexerConfig = dumpdb.Config{Dump: os.Stderr}
+ case dumpdb.STDOUT:
+ indexerConfig = dumpdb.Config{Dump: os.Stdout}
+ case dumpdb.DISCARD:
+ indexerConfig = dumpdb.Config{Dump: dumpdb.NewDiscardWriterCloser()}
+ default:
+ utils.Fatalf("unrecognized dump destination: %s", dumpType)
+ }
+ default:
+ utils.Fatalf("unrecognized database type: %s", dbType)
}
}
- p := statediff.ServiceParams{
- DBParams: dbParams,
+ p := statediff.Config{
+ IndexerConfig: indexerConfig,
+ ID: nodeID,
+ ClientName: clientName,
+ Context: context.Background(),
EnableWriteLoop: ctx.GlobalBool(utils.StateDiffWritingFlag.Name),
NumWorkers: ctx.GlobalUint(utils.StateDiffWorkersFlag.Name),
}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 8432b8005..990b40a60 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -149,11 +149,25 @@ var (
utils.GpoIgnoreGasPriceFlag,
utils.MinerNotifyFullFlag,
utils.StateDiffFlag,
- utils.StateDiffDBFlag,
+ utils.StateDiffDBTypeFlag,
+ utils.StateDiffDBDriverTypeFlag,
+ utils.StateDiffDBDumpDst,
+ utils.StateDiffDBNameFlag,
+ utils.StateDiffDBPasswordFlag,
+ utils.StateDiffDBUserFlag,
+ utils.StateDiffDBHostFlag,
+ utils.StateDiffDBPortFlag,
+ utils.StateDiffDBMaxConnLifetime,
+ utils.StateDiffDBMaxConnIdleTime,
+ utils.StateDiffDBMaxConns,
+ utils.StateDiffDBMinConns,
+ utils.StateDiffDBMaxIdleConns,
+ utils.StateDiffDBConnTimeout,
utils.StateDiffDBNodeIDFlag,
utils.StateDiffDBClientNameFlag,
utils.StateDiffWritingFlag,
utils.StateDiffWorkersFlag,
+ utils.StateDiffFilePath,
configFileFlag,
utils.CatalystFlag,
}
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index e61d2927c..885cc2c16 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -225,11 +225,25 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Name: "STATE DIFF",
Flags: []cli.Flag{
utils.StateDiffFlag,
- utils.StateDiffDBFlag,
+ utils.StateDiffDBTypeFlag,
+ utils.StateDiffDBDriverTypeFlag,
+ utils.StateDiffDBDumpDst,
+ utils.StateDiffDBNameFlag,
+ utils.StateDiffDBPasswordFlag,
+ utils.StateDiffDBUserFlag,
+ utils.StateDiffDBHostFlag,
+ utils.StateDiffDBPortFlag,
+ utils.StateDiffDBMaxConnLifetime,
+ utils.StateDiffDBMaxConnIdleTime,
+ utils.StateDiffDBMaxConns,
+ utils.StateDiffDBMinConns,
+ utils.StateDiffDBMaxIdleConns,
+ utils.StateDiffDBConnTimeout,
utils.StateDiffDBNodeIDFlag,
utils.StateDiffDBClientNameFlag,
utils.StateDiffWritingFlag,
utils.StateDiffWorkersFlag,
+ utils.StateDiffFilePath,
},
},
{
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index d7a0b7a6a..ccc9ac89e 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -786,17 +786,80 @@ var (
Name: "statediff",
Usage: "Enables the processing of state diffs between each block",
}
- StateDiffDBFlag = cli.StringFlag{
- Name: "statediff.db",
- Usage: "PostgreSQL database connection string for writing state diffs",
+ StateDiffDBTypeFlag = cli.StringFlag{
+ Name: "statediff.db.type",
+ Usage: "Statediff database type (current options: postgres, file, dump)",
+ Value: "postgres",
+ }
+ StateDiffDBDriverTypeFlag = cli.StringFlag{
+ Name: "statediff.db.driver",
+ Usage: "Statediff database driver type",
+ Value: "pgx",
+ }
+ StateDiffDBDumpDst = cli.StringFlag{
+ Name: "statediff.dump.dst",
+ Usage: "Statediff database dump destination (default is stdout)",
+ Value: "stdout",
+ }
+ StateDiffDBHostFlag = cli.StringFlag{
+ Name: "statediff.db.host",
+ Usage: "Statediff database hostname/ip",
+ Value: "localhost",
+ }
+ StateDiffDBPortFlag = cli.IntFlag{
+ Name: "statediff.db.port",
+ Usage: "Statediff database port",
+ Value: 5432,
+ }
+ StateDiffDBNameFlag = cli.StringFlag{
+ Name: "statediff.db.name",
+ Usage: "Statediff database name",
+ }
+ StateDiffDBPasswordFlag = cli.StringFlag{
+ Name: "statediff.db.password",
+ Usage: "Statediff database password",
+ }
+ StateDiffDBUserFlag = cli.StringFlag{
+ Name: "statediff.db.user",
+ Usage: "Statediff database username",
+ Value: "postgres",
+ }
+ StateDiffDBMaxConnLifetime = cli.DurationFlag{
+ Name: "statediff.db.maxconnlifetime",
+ Usage: "Statediff database maximum connection lifetime (in seconds)",
+ }
+ StateDiffDBMaxConnIdleTime = cli.DurationFlag{
+ Name: "statediff.db.maxconnidletime",
+ Usage: "Statediff database maximum connection idle time (in seconds)",
+ }
+ StateDiffDBMaxConns = cli.IntFlag{
+ Name: "statediff.db.maxconns",
+ Usage: "Statediff database maximum connections",
+ }
+ StateDiffDBMinConns = cli.IntFlag{
+ Name: "statediff.db.minconns",
+ Usage: "Statediff database minimum connections",
+ }
+ StateDiffDBMaxIdleConns = cli.IntFlag{
+ Name: "statediff.db.maxidleconns",
+ Usage: "Statediff database maximum idle connections",
+ }
+ StateDiffDBConnTimeout = cli.DurationFlag{
+ Name: "statediff.db.conntimeout",
+ Usage: "Statediff database connection timeout (in seconds)",
}
StateDiffDBNodeIDFlag = cli.StringFlag{
- Name: "statediff.dbnodeid",
+ Name: "statediff.db.nodeid",
Usage: "Node ID to use when writing state diffs to database",
}
+ StateDiffFilePath = cli.StringFlag{
+ Name: "statediff.file.path",
+ Usage: "Full path (including filename) to write statediff data out to when operating in file mode",
+ }
StateDiffDBClientNameFlag = cli.StringFlag{
- Name: "statediff.dbclientname",
+ Name: "statediff.db.clientname",
Usage: "Client name to use when writing state diffs to database",
+ Value: "go-ethereum",
}
StateDiffWritingFlag = cli.BoolFlag{
Name: "statediff.writing",
@@ -804,7 +867,8 @@ var (
}
StateDiffWorkersFlag = cli.UintFlag{
Name: "statediff.workers",
- Usage: "Number of concurrent workers to use during statediff processing (0 = 1)",
+ Usage: "Number of concurrent workers to use during statediff processing (default 1)",
+ Value: 1,
}
)
@@ -1761,7 +1825,7 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.C
}
// RegisterStateDiffService configures and registers a service to stream state diff data over RPC
-func RegisterStateDiffService(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params statediff.ServiceParams) {
+func RegisterStateDiffService(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params statediff.Config) {
if err := statediff.New(stack, ethServ, cfg, params); err != nil {
Fatalf("Failed to register the Statediff service: %v", err)
}
diff --git a/docker-compose.yml b/docker-compose.yml
index f1a37ddcb..d305d1dc1 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -3,7 +3,7 @@ version: '3.2'
services:
ipld-eth-db:
restart: always
- image: vulcanize/ipld-eth-db:v0.2.0
+ image: vulcanize/ipld-eth-db:v0.3.1
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_public"
@@ -14,4 +14,4 @@ services:
- "127.0.0.1:5432:5432"
volumes:
- geth_node:
\ No newline at end of file
+ geth_node:
diff --git a/go.mod b/go.mod
index 0f94c2611..da726b7c4 100644
--- a/go.mod
+++ b/go.mod
@@ -25,6 +25,7 @@ require (
github.com/fatih/color v1.7.0
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
+ github.com/georgysavva/scany v0.2.9
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-stack/stack v1.8.0
github.com/golang/protobuf v1.4.3
@@ -46,6 +47,10 @@ require (
github.com/ipfs/go-ipfs-blockstore v1.0.1
github.com/ipfs/go-ipfs-ds-help v1.0.0
github.com/ipfs/go-ipld-format v0.2.0
+ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
+ github.com/jackc/pgconn v1.10.0
+ github.com/jackc/pgx v3.6.2+incompatible
+ github.com/jackc/pgx/v4 v4.13.0
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/jmoiron/sqlx v1.2.0
@@ -55,7 +60,6 @@ require (
github.com/lib/pq v1.10.2
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.12
- github.com/mattn/go-sqlite3 v1.14.7 // indirect
github.com/multiformats/go-multihash v0.0.14
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
@@ -70,7 +74,7 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
- golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
+ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
golang.org/x/text v0.3.6
diff --git a/go.sum b/go.sum
index 0097b96f7..cf5e867db 100644
--- a/go.sum
+++ b/go.sum
@@ -42,6 +42,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
@@ -100,10 +102,17 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.0.3 h1:ZA346ACHIZctef6trOTwBAEvPVm1k0uLm/bb2Atc+S8=
+github.com/cockroachdb/cockroach-go/v2 v2.0.3/go.mod h1:hAuDgiVgDVkfirP9JnhXEfcXEPRKBpYdGz+l7mvYSzw=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -116,6 +125,7 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
+github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
@@ -132,6 +142,7 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
@@ -142,6 +153,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/georgysavva/scany v0.2.9 h1:Xt6rjYpHnMClTm/g+oZTnoSxUwiln5GqMNU+QeLNHQU=
+github.com/georgysavva/scany v0.2.9/go.mod h1:yeOeC1BdIdl6hOwy8uefL2WNSlseFzbhlG/frrh65SA=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -152,9 +165,11 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -166,11 +181,15 @@ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZp
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -281,6 +300,80 @@ github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
+github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
+github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU=
+github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs=
+github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
+github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570=
+github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw=
@@ -288,6 +381,9 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
@@ -313,12 +409,13 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@@ -329,6 +426,11 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.4.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
@@ -338,6 +440,7 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -346,6 +449,7 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpu
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
@@ -355,8 +459,8 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA=
-github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
+github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
@@ -441,14 +545,25 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v0.0.0-20200419222939-1884f454f8ea/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -461,6 +576,8 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57N
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -487,29 +604,45 @@ github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -555,6 +688,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -589,13 +723,16 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -618,6 +755,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -648,25 +786,31 @@ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -725,6 +869,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
diff --git a/params/version.go b/params/version.go
index 367fbf8a3..77dfbf312 100644
--- a/params/version.go
+++ b/params/version.go
@@ -21,10 +21,10 @@ import (
)
const (
- VersionMajor = 1 // Major version component of the current release
- VersionMinor = 10 // Minor version component of the current release
- VersionPatch = 11 // Patch version component of the current release
- VersionMeta = "statediff-0.0.27" // Version metadata to append to the version string
+ VersionMajor = 1 // Major version component of the current release
+ VersionMinor = 10 // Minor version component of the current release
+ VersionPatch = 11 // Patch version component of the current release
+ VersionMeta = "statediff-0.1.0" // Version metadata to append to the version string
)
// Version holds the textual version string.
diff --git a/statediff/README.md b/statediff/README.md
index 74c82f2d2..92c8ef387 100644
--- a/statediff/README.md
+++ b/statediff/README.md
@@ -73,25 +73,44 @@ type Payload struct {
## Usage
This state diffing service runs as an auxiliary service concurrent to the regular syncing process of the geth node.
-
### CLI configuration
This service introduces a CLI flag namespace `statediff`
`--statediff` flag is used to turn on the service
`--statediff.writing` is used to tell the service to write state diff objects it produces from synced ChainEvents directly to a configured Postgres database
`--statediff.workers` is used to set the number of concurrent workers to process state diff objects and write them into the database
-`--statediff.db` is the connection string for the Postgres database to write to
-`--statediff.db.init` indicates whether we need to initialize a new database; set true if its the first time running the process on a given database
-`--statediff.dbnodeid` is the node id to use in the Postgres database
-`--statediff.dbclientname` is the client name to use in the Postgres database
+`--statediff.db.type` is the type of database we write out to (current options: postgres, dump, file)
+`--statediff.dump.dst` is the destination to write to when operating in database dump mode (stdout, stderr, discard)
+`--statediff.db.driver` is the specific driver to use for the database (current options for postgres: pgx and sqlx)
+`--statediff.db.host` is the hostname/ip to dial to connect to the database
+`--statediff.db.port` is the port to dial to connect to the database
+`--statediff.db.name` is the name of the database to connect to
+`--statediff.db.user` is the user to connect to the database as
+`--statediff.db.password` is the password to use to connect to the database
+`--statediff.db.conntimeout` is the connection timeout (in seconds)
+`--statediff.db.maxconns` is the maximum number of database connections
+`--statediff.db.minconns` is the minimum number of database connections
+`--statediff.db.maxidleconns` is the maximum number of idle connections
+`--statediff.db.maxconnidletime` is the maximum amount of time a connection may remain idle (in seconds)
+`--statediff.db.maxconnlifetime` is the maximum lifetime for a connection (in seconds)
+`--statediff.db.nodeid` is the node id to use in the Postgres database
+`--statediff.db.clientname` is the client name to use in the Postgres database
+`--statediff.file.path` is the full path (including filename) to write statediff data out to when operating in file mode
The service can only operate in full sync mode (`--syncmode=full`), but only the historical RPC endpoints require an archive node (`--gcmode=archive`)
e.g.
`
-./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db=postgres://localhost:5432/vulcanize_testing?sslmode=disable --statediff.db.init=true --statediff.dbnodeid={nodeId} --statediff.dbclientname={dbClientName}
+./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.driver=sqlx --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=vulcanize_test --statediff.db.user=postgres --statediff.db.nodeid=nodeid --statediff.db.clientname=clientname
`
+When operating in `--statediff.db.type=file` mode, the service will write SQL statements out to the file designated by
+`--statediff.file.path`. Please note that it writes out SQL statements with all `ON CONFLICT` constraint checks dropped.
+This is done so that we can scale out the production of the SQL statements horizontally, merge the separate SQL files produced,
+de-duplicate using unix tools (`sort statediff.sql | uniq` or `sort -u statediff.sql`), bulk load using psql
+(`psql db_name --set ON_ERROR_STOP=on -f statediff.sql`), and then add our primary and foreign key constraints and indexes
+back afterwards.
+
### RPC endpoints
The state diffing service exposes both a WS subscription endpoint, and a number of HTTP unary endpoints.
diff --git a/statediff/api.go b/statediff/api.go
index 923a0073f..5c534cddb 100644
--- a/statediff/api.go
+++ b/statediff/api.go
@@ -19,11 +19,11 @@ package statediff
import (
"context"
- "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/statediff/types"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
- . "github.com/ethereum/go-ethereum/statediff/types"
)
// APIName is the namespace used for the state diffing service API
@@ -117,7 +117,7 @@ func (api *PublicStateDiffAPI) StreamCodeAndCodeHash(ctx context.Context, blockN
// create subscription and start waiting for events
rpcSub := notifier.CreateSubscription()
- payloadChan := make(chan CodeAndCodeHash, chainEventChanSize)
+ payloadChan := make(chan types.CodeAndCodeHash, chainEventChanSize)
quitChan := make(chan bool)
api.sds.StreamCodeAndCodeHash(blockNumber, payloadChan, quitChan)
go func() {
diff --git a/statediff/builder.go b/statediff/builder.go
index 7befb6b3c..7811c3e82 100644
--- a/statediff/builder.go
+++ b/statediff/builder.go
@@ -29,8 +29,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
- sdtrie "github.com/ethereum/go-ethereum/statediff/trie"
- . "github.com/ethereum/go-ethereum/statediff/types"
+ "github.com/ethereum/go-ethereum/statediff/trie_helpers"
+ types2 "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
)
@@ -43,9 +43,9 @@ var (
// Builder interface exposes the method for building a state diff between two blocks
type Builder interface {
- BuildStateDiffObject(args Args, params Params) (StateObject, error)
- BuildStateTrieObject(current *types.Block) (StateObject, error)
- WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error
+ BuildStateDiffObject(args Args, params Params) (types2.StateObject, error)
+ BuildStateTrieObject(current *types.Block) (types2.StateObject, error)
+ WriteStateDiffObject(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error
}
type builder struct {
@@ -53,20 +53,20 @@ type builder struct {
}
// convenience
-func stateNodeAppender(nodes *[]StateNode) StateNodeSink {
- return func(node StateNode) error {
+func stateNodeAppender(nodes *[]types2.StateNode) types2.StateNodeSink {
+ return func(node types2.StateNode) error {
*nodes = append(*nodes, node)
return nil
}
}
-func storageNodeAppender(nodes *[]StorageNode) StorageNodeSink {
- return func(node StorageNode) error {
+func storageNodeAppender(nodes *[]types2.StorageNode) types2.StorageNodeSink {
+ return func(node types2.StorageNode) error {
*nodes = append(*nodes, node)
return nil
}
}
-func codeMappingAppender(codeAndCodeHashes *[]CodeAndCodeHash) CodeSink {
- return func(c CodeAndCodeHash) error {
+func codeMappingAppender(codeAndCodeHashes *[]types2.CodeAndCodeHash) types2.CodeSink {
+ return func(c types2.CodeAndCodeHash) error {
*codeAndCodeHashes = append(*codeAndCodeHashes, c)
return nil
}
@@ -80,17 +80,17 @@ func NewBuilder(stateCache state.Database) Builder {
}
// BuildStateTrieObject builds a state trie object from the provided block
-func (sdb *builder) BuildStateTrieObject(current *types.Block) (StateObject, error) {
+func (sdb *builder) BuildStateTrieObject(current *types.Block) (types2.StateObject, error) {
currentTrie, err := sdb.stateCache.OpenTrie(current.Root())
if err != nil {
- return StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err)
+ return types2.StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err)
}
it := currentTrie.NodeIterator([]byte{})
stateNodes, codeAndCodeHashes, err := sdb.buildStateTrie(it)
if err != nil {
- return StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err)
+ return types2.StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err)
}
- return StateObject{
+ return types2.StateObject{
BlockNumber: current.Number(),
BlockHash: current.Hash(),
Nodes: stateNodes,
@@ -98,20 +98,20 @@ func (sdb *builder) BuildStateTrieObject(current *types.Block) (StateObject, err
}, nil
}
-func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAndCodeHash, error) {
- stateNodes := make([]StateNode, 0)
- codeAndCodeHashes := make([]CodeAndCodeHash, 0)
+func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]types2.StateNode, []types2.CodeAndCodeHash, error) {
+ stateNodes := make([]types2.StateNode, 0)
+ codeAndCodeHashes := make([]types2.CodeAndCodeHash, 0)
for it.Next(true) {
// skip value nodes
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return nil, nil, err
}
switch node.NodeType {
- case Leaf:
+ case types2.Leaf:
var account types.StateAccount
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
@@ -122,7 +122,7 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
leafKey := encodedPath[1:]
node.LeafKey = leafKey
if !bytes.Equal(account.CodeHash, nullCodeHash) {
- var storageNodes []StorageNode
+ var storageNodes []types2.StorageNode
err := sdb.buildStorageNodesEventual(account.Root, nil, true, storageNodeAppender(&storageNodes))
if err != nil {
return nil, nil, fmt.Errorf("failed building eventual storage diffs for account %+v\r\nerror: %v", account, err)
@@ -134,13 +134,13 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
if err != nil {
return nil, nil, fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
}
- codeAndCodeHashes = append(codeAndCodeHashes, CodeAndCodeHash{
+ codeAndCodeHashes = append(codeAndCodeHashes, types2.CodeAndCodeHash{
Hash: codeHash,
Code: code,
})
}
stateNodes = append(stateNodes, node)
- case Extension, Branch:
+ case types2.Extension, types2.Branch:
stateNodes = append(stateNodes, node)
default:
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
@@ -150,16 +150,16 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
}
// BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
-func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject, error) {
- var stateNodes []StateNode
- var codeAndCodeHashes []CodeAndCodeHash
+func (sdb *builder) BuildStateDiffObject(args Args, params Params) (types2.StateObject, error) {
+ var stateNodes []types2.StateNode
+ var codeAndCodeHashes []types2.CodeAndCodeHash
err := sdb.WriteStateDiffObject(
- StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot},
+ types2.StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot},
params, stateNodeAppender(&stateNodes), codeMappingAppender(&codeAndCodeHashes))
if err != nil {
- return StateObject{}, err
+ return types2.StateObject{}, err
}
- return StateObject{
+ return types2.StateObject{
BlockHash: args.BlockHash,
BlockNumber: args.BlockNumber,
Nodes: stateNodes,
@@ -167,8 +167,8 @@ func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject,
}, nil
}
-// Writes a statediff object to output callback
-func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
+// WriteStateDiffObject writes a statediff object to output callback
+func (sdb *builder) WriteStateDiffObject(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
if !params.IntermediateStateNodes || len(params.WatchedAddresses) > 0 {
// if we are watching only specific accounts then we are only diffing leaf nodes
return sdb.buildStateDiffWithoutIntermediateStateNodes(args, params, output, codeOutput)
@@ -177,7 +177,7 @@ func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output
}
}
-func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
+func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
// Load tries for old and new states
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
if err != nil {
@@ -208,14 +208,14 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, pa
}
// collect and sort the leafkey keys for both account mappings into a slice
- createKeys := sortKeys(diffAccountsAtB)
- deleteKeys := sortKeys(diffAccountsAtA)
+ createKeys := trie_helpers.SortKeys(diffAccountsAtB)
+ deleteKeys := trie_helpers.SortKeys(diffAccountsAtA)
// and then find the intersection of these keys
// these are the leafkeys for the accounts which exist at both A and B but are different
// this also mutates the passed in createKeys and deleteKeys, removing the intersection keys
// and leaving the truly created or deleted keys in place
- updatedKeys := findIntersection(createKeys, deleteKeys)
+ updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys)
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
err = sdb.buildAccountUpdates(
@@ -232,7 +232,7 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, pa
return nil
}
-func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
+func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
// Load tries for old (A) and new (B) states
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
if err != nil {
@@ -262,14 +262,14 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots,
}
// collect and sort the leafkeys for both account mappings into a slice
- createKeys := sortKeys(diffAccountsAtB)
- deleteKeys := sortKeys(diffAccountsAtA)
+ createKeys := trie_helpers.SortKeys(diffAccountsAtB)
+ deleteKeys := trie_helpers.SortKeys(diffAccountsAtA)
// and then find the intersection of these keys
// these are the leafkeys for the accounts which exist at both A and B but are different
// this also mutates the passed in createKeys and deleteKeys, removing in intersection keys
// and leaving the truly created or deleted keys in place
- updatedKeys := findIntersection(createKeys, deleteKeys)
+ updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys)
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
err = sdb.buildAccountUpdates(
@@ -289,20 +289,20 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots,
// createdAndUpdatedState returns
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
// and a slice of the paths for all of the nodes included in both
-func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddresses []common.Address) (AccountMap, map[string]bool, error) {
+func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddresses []common.Address) (types2.AccountMap, map[string]bool, error) {
diffPathsAtB := make(map[string]bool)
- diffAcountsAtB := make(AccountMap)
+ diffAcountsAtB := make(types2.AccountMap)
it, _ := trie.NewDifferenceIterator(a, b)
for it.Next(true) {
// skip value nodes
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return nil, nil, err
}
- if node.NodeType == Leaf {
+ if node.NodeType == types2.Leaf {
// created vs updated is important for leaf nodes since we need to diff their storage
// so we need to map all changed accounts at B to their leafkey, since account can change pathes but not leafkey
var account types.StateAccount
@@ -314,7 +314,7 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
if isWatchedAddress(watchedAddresses, leafKey) {
- diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
+ diffAcountsAtB[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -333,21 +333,21 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres
// a slice of all the intermediate nodes that exist in a different state at B than A
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
// and a slice of the paths for all of the nodes included in both
-func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output StateNodeSink) (AccountMap, map[string]bool, error) {
+func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output types2.StateNodeSink) (types2.AccountMap, map[string]bool, error) {
diffPathsAtB := make(map[string]bool)
- diffAcountsAtB := make(AccountMap)
+ diffAcountsAtB := make(types2.AccountMap)
it, _ := trie.NewDifferenceIterator(a, b)
for it.Next(true) {
// skip value nodes
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return nil, nil, err
}
switch node.NodeType {
- case Leaf:
+ case types2.Leaf:
// created vs updated is important for leaf nodes since we need to diff their storage
// so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey
var account types.StateAccount
@@ -358,17 +358,17 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt
valueNodePath := append(node.Path, partialPath...)
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
- diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
+ diffAcountsAtB[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
LeafKey: leafKey,
Account: &account,
}
- case Extension, Branch:
+ case types2.Extension, types2.Branch:
// create a diff for any intermediate node that has changed at b
// created vs updated makes no difference for intermediate nodes since we do not need to diff storage
- if err := output(StateNode{
+ if err := output(types2.StateNode{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -386,20 +386,20 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt
// deletedOrUpdatedState returns a slice of all the pathes that are emptied at B
// and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
-func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output StateNodeSink) (AccountMap, error) {
- diffAccountAtA := make(AccountMap)
+func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output types2.StateNodeSink) (types2.AccountMap, error) {
+ diffAccountAtA := make(types2.AccountMap)
it, _ := trie.NewDifferenceIterator(b, a)
for it.Next(true) {
// skip value nodes
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return nil, err
}
switch node.NodeType {
- case Leaf:
+ case types2.Leaf:
// map all different accounts at A to their leafkey
var account types.StateAccount
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
@@ -409,7 +409,7 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
valueNodePath := append(node.Path, partialPath...)
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
- diffAccountAtA[common.Bytes2Hex(leafKey)] = accountWrapper{
+ diffAccountAtA[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -420,24 +420,24 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
// that means the node at this path was deleted (or moved) in B
// emit an empty "removed" diff to signify as such
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
- if err := output(StateNode{
+ if err := output(types2.StateNode{
Path: node.Path,
NodeValue: []byte{},
- NodeType: Removed,
+ NodeType: types2.Removed,
LeafKey: leafKey,
}); err != nil {
return nil, err
}
}
- case Extension, Branch:
+ case types2.Extension, types2.Branch:
// if this node's path did not show up in diffPathsAtB
// that means the node at this path was deleted (or moved) in B
// emit an empty "removed" diff to signify as such
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
- if err := output(StateNode{
+ if err := output(types2.StateNode{
Path: node.Path,
NodeValue: []byte{},
- NodeType: Removed,
+ NodeType: types2.Removed,
}); err != nil {
return nil, err
}
@@ -454,13 +454,13 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
// to generate the statediff node objects for all of the accounts that existed at both A and B but in different states
// needs to be called before building account creations and deletions as this mutates
// those account maps to remove the accounts which were updated
-func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updatedKeys []string,
- watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink) error {
+func (sdb *builder) buildAccountUpdates(creations, deletions types2.AccountMap, updatedKeys []string,
+ watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output types2.StateNodeSink) error {
var err error
for _, key := range updatedKeys {
createdAcc := creations[key]
deletedAcc := deletions[key]
- var storageDiffs []StorageNode
+ var storageDiffs []types2.StorageNode
if deletedAcc.Account != nil && createdAcc.Account != nil {
oldSR := deletedAcc.Account.Root
newSR := createdAcc.Account.Root
@@ -471,7 +471,7 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated
return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
}
}
- if err = output(StateNode{
+ if err = output(types2.StateNode{
NodeType: createdAcc.NodeType,
Path: createdAcc.Path,
NodeValue: createdAcc.NodeValue,
@@ -489,9 +489,9 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated
// buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A
// it also returns the code and codehash for created contract accounts
-func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink, codeOutput CodeSink) error {
+func (sdb *builder) buildAccountCreations(accounts types2.AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
for _, val := range accounts {
- diff := StateNode{
+ diff := types2.StateNode{
NodeType: val.NodeType,
Path: val.Path,
LeafKey: val.LeafKey,
@@ -499,7 +499,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
}
if !bytes.Equal(val.Account.CodeHash, nullCodeHash) {
// For contract creations, any storage node contained is a diff
- var storageDiffs []StorageNode
+ var storageDiffs []types2.StorageNode
err := sdb.buildStorageNodesEventual(val.Account.Root, watchedStorageKeys, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
if err != nil {
return fmt.Errorf("failed building eventual storage diffs for node %x\r\nerror: %v", val.Path, err)
@@ -511,7 +511,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
if err != nil {
return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
}
- if err := codeOutput(CodeAndCodeHash{
+ if err := codeOutput(types2.CodeAndCodeHash{
Hash: codeHash,
Code: code,
}); err != nil {
@@ -528,7 +528,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
// buildStorageNodesEventual builds the storage diff node objects for a created account
// i.e. it returns all the storage nodes at this state, since there is no previous state
-func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
return nil
}
@@ -549,24 +549,24 @@ func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys
// buildStorageNodesFromTrie returns all the storage diff node objects in the provided node interator
// if any storage keys are provided it will only return those leaf nodes
// including intermediate nodes can be turned on or off
-func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
for it.Next(true) {
// skip value nodes
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return err
}
switch node.NodeType {
- case Leaf:
+ case types2.Leaf:
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
valueNodePath := append(node.Path, partialPath...)
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
if isWatchedStorageKey(watchedStorageKeys, leafKey) {
- if err := output(StorageNode{
+ if err := output(types2.StorageNode{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -575,9 +575,9 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStora
return err
}
}
- case Extension, Branch:
+ case types2.Extension, types2.Branch:
if intermediateNodes {
- if err := output(StorageNode{
+ if err := output(types2.StorageNode{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -593,7 +593,7 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStora
}
// buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
-func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
return nil
}
@@ -621,7 +621,7 @@ func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common
return nil
}
-func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) (map[string]bool, error) {
+func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) (map[string]bool, error) {
diffPathsAtB := make(map[string]bool)
it, _ := trie.NewDifferenceIterator(a, b)
for it.Next(true) {
@@ -629,18 +629,18 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return nil, err
}
switch node.NodeType {
- case Leaf:
+ case types2.Leaf:
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
valueNodePath := append(node.Path, partialPath...)
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
if isWatchedStorageKey(watchedKeys, leafKey) {
- if err := output(StorageNode{
+ if err := output(types2.StorageNode{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -649,9 +649,9 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys
return nil, err
}
}
- case Extension, Branch:
+ case types2.Extension, types2.Branch:
if intermediateNodes {
- if err := output(StorageNode{
+ if err := output(types2.StorageNode{
NodeType: node.NodeType,
Path: node.Path,
NodeValue: node.NodeValue,
@@ -667,14 +667,14 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys
return diffPathsAtB, it.Error()
}
-func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
it, _ := trie.NewDifferenceIterator(b, a)
for it.Next(true) {
// skip value nodes
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
- node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+ node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
if err != nil {
return err
}
@@ -685,14 +685,14 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB
continue
}
switch node.NodeType {
- case Leaf:
+ case types2.Leaf:
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
valueNodePath := append(node.Path, partialPath...)
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
if isWatchedStorageKey(watchedKeys, leafKey) {
- if err := output(StorageNode{
- NodeType: Removed,
+ if err := output(types2.StorageNode{
+ NodeType: types2.Removed,
Path: node.Path,
NodeValue: []byte{},
LeafKey: leafKey,
@@ -700,10 +700,10 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB
return err
}
}
- case Extension, Branch:
+ case types2.Extension, types2.Branch:
if intermediateNodes {
- if err := output(StorageNode{
- NodeType: Removed,
+ if err := output(types2.StorageNode{
+ NodeType: types2.Removed,
Path: node.Path,
NodeValue: []byte{},
}); err != nil {
diff --git a/statediff/builder_test.go b/statediff/builder_test.go
index 6a88bbba0..d4d67940e 100644
--- a/statediff/builder_test.go
+++ b/statediff/builder_test.go
@@ -24,23 +24,24 @@ import (
"sort"
"testing"
+ types2 "github.com/ethereum/go-ethereum/statediff/types"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
- "github.com/ethereum/go-ethereum/statediff/testhelpers"
- sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+ "github.com/ethereum/go-ethereum/statediff/test_helpers"
)
var (
contractLeafKey []byte
- emptyDiffs = make([]sdtypes.StateNode, 0)
- emptyStorage = make([]sdtypes.StorageNode, 0)
+ emptyDiffs = make([]types2.StateNode, 0)
+ emptyStorage = make([]types2.StorageNode, 0)
block0, block1, block2, block3, block4, block5, block6 *types.Block
builder statediff.Builder
minerAddress = common.HexToAddress("0x0")
- minerLeafKey = testhelpers.AddressToLeafKey(minerAddress)
+ minerLeafKey = test_helpers.AddressToLeafKey(minerAddress)
slot0 = common.HexToHash("0")
slot1 = common.HexToHash("1")
@@ -122,8 +123,8 @@ var (
minerAccountAtBlock1, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(2000002625000000000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
minerAccountAtBlock1LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"),
@@ -132,8 +133,8 @@ var (
minerAccountAtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(4000111203461610525),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
minerAccountAtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"),
@@ -142,9 +143,9 @@ var (
account1AtBlock1, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
- Balance: testhelpers.Block1Account1Balance,
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ Balance: test_helpers.Block1Account1Balance,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account1AtBlock1LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"),
@@ -153,8 +154,8 @@ var (
account1AtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 2,
Balance: big.NewInt(999555797000009000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account1AtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"),
@@ -163,8 +164,8 @@ var (
account1AtBlock5, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 2,
Balance: big.NewInt(2999566008847709960),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account1AtBlock5LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"),
@@ -173,8 +174,8 @@ var (
account1AtBlock6, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 3,
Balance: big.NewInt(2999537516847709960),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account1AtBlock6LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"),
@@ -184,8 +185,8 @@ var (
account2AtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(1000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account2AtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"),
@@ -194,8 +195,8 @@ var (
account2AtBlock3, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(2000013574009435976),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account2AtBlock3LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"),
@@ -204,8 +205,8 @@ var (
account2AtBlock4, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(4000048088163070348),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account2AtBlock4LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"),
@@ -214,8 +215,8 @@ var (
account2AtBlock6, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(6000063293259748636),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
account2AtBlock6LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"),
@@ -224,33 +225,33 @@ var (
bankAccountAtBlock0, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
- Balance: big.NewInt(testhelpers.TestBankFunds.Int64()),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ Balance: big.NewInt(test_helpers.TestBankFunds.Int64()),
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock0LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("2000bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
bankAccountAtBlock0,
})
- block1BankBalance = big.NewInt(testhelpers.TestBankFunds.Int64() - testhelpers.BalanceChange10000 - testhelpers.GasFees)
+ block1BankBalance = big.NewInt(test_helpers.TestBankFunds.Int64() - test_helpers.BalanceChange10000 - test_helpers.GasFees)
bankAccountAtBlock1, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 1,
Balance: block1BankBalance,
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock1LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
bankAccountAtBlock1,
})
- block2BankBalance = block1BankBalance.Int64() - testhelpers.BalanceChange1Ether - testhelpers.GasFees
+ block2BankBalance = block1BankBalance.Int64() - test_helpers.BalanceChange1Ether - test_helpers.GasFees
bankAccountAtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 2,
Balance: big.NewInt(block2BankBalance),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
@@ -259,8 +260,8 @@ var (
bankAccountAtBlock3, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 3,
Balance: big.NewInt(999914255999990000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock3LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
@@ -269,8 +270,8 @@ var (
bankAccountAtBlock4, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 6,
Balance: big.NewInt(999826859999990000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock4LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
@@ -279,8 +280,8 @@ var (
bankAccountAtBlock5, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 7,
Balance: big.NewInt(999805027999990000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock5LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
@@ -469,10 +470,10 @@ func init() {
}
func TestBuilder(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
block2 = blocks[1]
block3 = blocks[2]
@@ -482,7 +483,7 @@ func TestBuilder(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testEmptyDiff",
@@ -492,7 +493,7 @@ func TestBuilder(t *testing.T) {
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
Nodes: emptyDiffs,
@@ -502,19 +503,19 @@ func TestBuilder(t *testing.T) {
"testBlock0",
//10000 transferred from testBankAddress to account1Addr
statediff.Args{
- OldStateRoot: testhelpers.NullHash,
+ OldStateRoot: test_helpers.NullHash,
NewStateRoot: block0.Root(),
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock0LeafNode,
StorageNodes: emptyStorage,
},
@@ -530,28 +531,28 @@ func TestBuilder(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock1LeafNode,
StorageNodes: emptyStorage,
},
@@ -569,46 +570,46 @@ func TestBuilder(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock2LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot0StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
@@ -616,16 +617,16 @@ func TestBuilder(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -640,26 +641,26 @@ func TestBuilder(t *testing.T) {
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock3LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock3LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot3StorageKey.Bytes(),
NodeValue: slot3StorageLeafNode,
},
@@ -667,8 +668,8 @@ func TestBuilder(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock3LeafNode,
StorageNodes: emptyStorage,
},
@@ -700,10 +701,10 @@ func TestBuilder(t *testing.T) {
}
func TestBuilderWithIntermediateNodes(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
block2 = blocks[1]
block3 = blocks[2]
@@ -717,7 +718,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testEmptyDiff",
@@ -727,7 +728,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
Nodes: emptyDiffs,
@@ -737,19 +738,19 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
"testBlock0",
//10000 transferred from testBankAddress to account1Addr
statediff.Args{
- OldStateRoot: testhelpers.NullHash,
+ OldStateRoot: test_helpers.NullHash,
NewStateRoot: block0.Root(),
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock0LeafNode,
StorageNodes: emptyStorage,
},
@@ -765,34 +766,34 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block1BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock1LeafNode,
StorageNodes: emptyStorage,
},
@@ -810,57 +811,57 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block2BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock2LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block2StorageBranchRootNode,
},
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot0StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
@@ -868,16 +869,16 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -892,37 +893,37 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block3BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock3LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock3LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block3StorageBranchRootNode,
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot3StorageKey.Bytes(),
NodeValue: slot3StorageLeafNode,
},
@@ -930,8 +931,8 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock3LeafNode,
StorageNodes: emptyStorage,
},
@@ -977,22 +978,22 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
}
func TestBuilderWithWatchedAddressList(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
block2 = blocks[1]
block3 = blocks[2]
params := statediff.Params{
- WatchedAddresses: []common.Address{testhelpers.Account1Addr, testhelpers.ContractAddr},
+ WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.ContractAddr},
}
builder = statediff.NewBuilder(chain.StateCache())
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testEmptyDiff",
@@ -1002,7 +1003,7 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
Nodes: emptyDiffs,
@@ -1012,12 +1013,12 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
"testBlock0",
//10000 transferred from testBankAddress to account1Addr
statediff.Args{
- OldStateRoot: testhelpers.NullHash,
+ OldStateRoot: test_helpers.NullHash,
NewStateRoot: block0.Root(),
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
Nodes: emptyDiffs,
@@ -1032,14 +1033,14 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock1LeafNode,
StorageNodes: emptyStorage,
},
@@ -1056,25 +1057,25 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock2LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot0StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
@@ -1082,16 +1083,16 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -1106,19 +1107,19 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock3LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot3StorageKey.Bytes(),
NodeValue: slot3StorageLeafNode,
},
@@ -1152,15 +1153,15 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
}
func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
block2 = blocks[1]
block3 = blocks[2]
params := statediff.Params{
- WatchedAddresses: []common.Address{testhelpers.Account1Addr, testhelpers.ContractAddr},
+ WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.ContractAddr},
WatchedStorageSlots: []common.Hash{slot1StorageKey},
}
builder = statediff.NewBuilder(chain.StateCache())
@@ -1168,7 +1169,7 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testEmptyDiff",
@@ -1178,7 +1179,7 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
Nodes: emptyDiffs,
@@ -1188,12 +1189,12 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
"testBlock0",
//10000 transferred from testBankAddress to account1Addr
statediff.Args{
- OldStateRoot: testhelpers.NullHash,
+ OldStateRoot: test_helpers.NullHash,
NewStateRoot: block0.Root(),
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block0.Number(),
BlockHash: block0.Hash(),
Nodes: emptyDiffs,
@@ -1208,14 +1209,14 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock1LeafNode,
StorageNodes: emptyStorage,
},
@@ -1232,19 +1233,19 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock2LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
@@ -1252,16 +1253,16 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -1276,13 +1277,13 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock3LeafNode,
StorageNodes: emptyStorage,
@@ -1315,8 +1316,8 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) {
}
func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(6, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(6, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
block3 = blocks[2]
block4 = blocks[3]
@@ -1331,7 +1332,7 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
// blocks 0-3 are the same as in TestBuilderWithIntermediateNodes
{
@@ -1342,49 +1343,49 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
BlockNumber: block4.Number(),
BlockHash: block4.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block4.Number(),
BlockHash: block4.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block4BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock4LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock4LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block4StorageBranchRootNode,
},
{
Path: []byte{'\x04'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot2StorageKey.Bytes(),
NodeValue: slot2StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: []byte{},
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot3StorageKey.Bytes(),
NodeValue: []byte{},
},
@@ -1392,8 +1393,8 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock4LeafNode,
StorageNodes: emptyStorage,
},
@@ -1408,44 +1409,44 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
BlockNumber: block5.Number(),
BlockHash: block5.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block5.Number(),
BlockHash: block5.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block5BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock5LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock5LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
NodeValue: slot0StorageLeafRootNode,
LeafKey: slot0StorageKey.Bytes(),
},
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: []byte{},
},
{
Path: []byte{'\x04'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot2StorageKey.Bytes(),
NodeValue: []byte{},
},
@@ -1453,8 +1454,8 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock5LeafNode,
StorageNodes: emptyStorage,
},
@@ -1469,34 +1470,34 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
BlockNumber: block6.Number(),
BlockHash: block6.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block6.Number(),
BlockHash: block6.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block6BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: contractLeafKey,
NodeValue: []byte{},
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock6LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock6LeafNode,
StorageNodes: emptyStorage,
},
@@ -1528,8 +1529,8 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
}
func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(6, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(6, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
block3 = blocks[2]
block4 = blocks[3]
@@ -1544,7 +1545,7 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
// blocks 0-3 are the same as in TestBuilderWithIntermediateNodes
{
@@ -1555,38 +1556,38 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
BlockNumber: block4.Number(),
BlockHash: block4.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block4.Number(),
BlockHash: block4.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock4LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock4LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x04'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot2StorageKey.Bytes(),
NodeValue: slot2StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: []byte{},
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot3StorageKey.Bytes(),
NodeValue: []byte{},
},
@@ -1594,8 +1595,8 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock4LeafNode,
StorageNodes: emptyStorage,
},
@@ -1610,38 +1611,38 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
BlockNumber: block5.Number(),
BlockHash: block5.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block5.Number(),
BlockHash: block5.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock5LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock5LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot0StorageLeafRootNode,
},
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: []byte{},
},
{
Path: []byte{'\x04'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: slot2StorageKey.Bytes(),
NodeValue: []byte{},
},
@@ -1649,8 +1650,8 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock5LeafNode,
StorageNodes: emptyStorage,
},
@@ -1665,27 +1666,27 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.
BlockNumber: block6.Number(),
BlockHash: block6.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block6.Number(),
BlockHash: block6.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: contractLeafKey,
NodeValue: []byte{},
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock6LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock6LeafNode,
StorageNodes: emptyStorage,
},
@@ -1740,8 +1741,8 @@ var (
bankAccountAtBlock01, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 1,
Balance: big.NewInt(3999629697375000000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock01LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
@@ -1750,8 +1751,8 @@ var (
bankAccountAtBlock02, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 2,
Balance: big.NewInt(5999607323457344852),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
bankAccountAtBlock02LeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("2000bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
@@ -1800,10 +1801,10 @@ var (
)
func TestBuilderWithMovedAccount(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(2, testhelpers.Genesis, testhelpers.TestSelfDestructChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(2, test_helpers.Genesis, test_helpers.TestSelfDestructChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
block2 = blocks[1]
params := statediff.Params{
@@ -1815,7 +1816,7 @@ func TestBuilderWithMovedAccount(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testBlock1",
@@ -1825,53 +1826,53 @@ func TestBuilderWithMovedAccount(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block01BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock01LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x01'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock01LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block01StorageBranchRootNode,
},
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot00StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
},
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -1884,27 +1885,27 @@ func TestBuilderWithMovedAccount(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock02LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x01'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: contractLeafKey,
NodeValue: []byte{},
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Removed,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Removed,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: []byte{},
},
},
@@ -1936,10 +1937,10 @@ func TestBuilderWithMovedAccount(t *testing.T) {
}
func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(2, testhelpers.Genesis, testhelpers.TestSelfDestructChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(2, test_helpers.Genesis, test_helpers.TestSelfDestructChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
block2 = blocks[1]
params := statediff.Params{
@@ -1951,7 +1952,7 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testBlock1",
@@ -1961,42 +1962,42 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock01LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x01'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock01LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot00StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
},
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -2009,27 +2010,27 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock02LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x01'},
- NodeType: sdtypes.Removed,
+ NodeType: types2.Removed,
LeafKey: contractLeafKey,
NodeValue: []byte{},
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Removed,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Removed,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: []byte{},
},
},
@@ -2060,8 +2061,8 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
}
func TestBuildStateTrie(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen)
- contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr)
+ blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen)
+ contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr)
defer chain.Stop()
block1 = blocks[0]
block2 = blocks[1]
@@ -2071,39 +2072,39 @@ func TestBuildStateTrie(t *testing.T) {
var tests = []struct {
name string
block *types.Block
- expected *statediff.StateObject
+ expected *types2.StateObject
}{
{
"testBlock1",
block1,
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block1BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock1LeafNode,
StorageNodes: emptyStorage,
},
@@ -2113,57 +2114,57 @@ func TestBuildStateTrie(t *testing.T) {
{
"testBlock2",
block2,
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block2BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock2LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block2StorageBranchRootNode,
},
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot0StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
@@ -2171,16 +2172,16 @@ func TestBuildStateTrie(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
@@ -2188,63 +2189,63 @@ func TestBuildStateTrie(t *testing.T) {
{
"testBlock3",
block3,
- &statediff.StateObject{
+ &types2.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
- Nodes: []sdtypes.StateNode{
+ Nodes: []types2.StateNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block3BranchRootNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountAtBlock3LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x05'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: minerLeafKey,
NodeValue: minerAccountAtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x0e'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1AtBlock2LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x06'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: contractLeafKey,
NodeValue: contractAccountAtBlock3LeafNode,
- StorageNodes: []sdtypes.StorageNode{
+ StorageNodes: []types2.StorageNode{
{
Path: []byte{},
- NodeType: sdtypes.Branch,
+ NodeType: types2.Branch,
NodeValue: block3StorageBranchRootNode,
},
{
Path: []byte{'\x02'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot0StorageKey.Bytes(),
NodeValue: slot0StorageLeafNode,
},
{
Path: []byte{'\x0b'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot1StorageKey.Bytes(),
NodeValue: slot1StorageLeafNode,
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
+ NodeType: types2.Leaf,
LeafKey: slot3StorageKey.Bytes(),
NodeValue: slot3StorageLeafNode,
},
@@ -2252,16 +2253,16 @@ func TestBuildStateTrie(t *testing.T) {
},
{
Path: []byte{'\x0c'},
- NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account2LeafKey,
+ NodeType: types2.Leaf,
+ LeafKey: test_helpers.Account2LeafKey,
NodeValue: account2AtBlock3LeafNode,
StorageNodes: emptyStorage,
},
},
- CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{
+ CodeAndCodeHashes: []types2.CodeAndCodeHash{
{
- Hash: testhelpers.CodeHash,
- Code: testhelpers.ByteCodeAfterDeployment,
+ Hash: test_helpers.CodeHash,
+ Code: test_helpers.ByteCodeAfterDeployment,
},
},
},
diff --git a/statediff/config.go b/statediff/config.go
new file mode 100644
index 000000000..dc9da579b
--- /dev/null
+++ b/statediff/config.go
@@ -0,0 +1,58 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package statediff
+
+import (
+ "context"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+)
+
+// Config contains instantiation parameters for the state diffing service
+type Config struct {
+ IndexerConfig interfaces.Config
+ // A unique ID used for this service
+ ID string
+ // Name for the client this service is running
+ ClientName string
+ // Whether to enable writing state diffs directly to track blockchain head
+ EnableWriteLoop bool
+ // Size of the worker pool
+ NumWorkers uint
+ // Context
+ Context context.Context
+}
+
+// Params contains config parameters for the state diff builder
+type Params struct {
+ IntermediateStateNodes bool
+ IntermediateStorageNodes bool
+ IncludeBlock bool
+ IncludeReceipts bool
+ IncludeTD bool
+ IncludeCode bool
+ WatchedAddresses []common.Address
+ WatchedStorageSlots []common.Hash
+}
+
+// Args bundles the arguments for the state diff builder
+type Args struct {
+ OldStateRoot, NewStateRoot, BlockHash common.Hash
+ BlockNumber *big.Int
+}
diff --git a/statediff/indexer/constructor.go b/statediff/indexer/constructor.go
new file mode 100644
index 000000000..9a66dba89
--- /dev/null
+++ b/statediff/indexer/constructor.go
@@ -0,0 +1,78 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package indexer
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/dump"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/file"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+)
+
+// NewStateDiffIndexer creates and returns an implementation of the StateDiffIndexer interface
+func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, nodeInfo node.Info, config interfaces.Config) (interfaces.StateDiffIndexer, error) {
+ switch config.Type() {
+ case shared.FILE:
+ log.Info("Starting statediff service in SQL file writing mode")
+ fc, ok := config.(file.Config)
+ if !ok {
+ return nil, fmt.Errorf("file config is not the correct type: got %T, expected %T", config, file.Config{})
+ }
+ fc.NodeInfo = nodeInfo
+ return file.NewStateDiffIndexer(ctx, chainConfig, fc)
+ case shared.POSTGRES:
+ log.Info("Starting statediff service in Postgres writing mode")
+ pgc, ok := config.(postgres.Config)
+ if !ok {
+ return nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{})
+ }
+ var err error
+ var driver sql.Driver
+ switch pgc.Driver {
+ case postgres.PGX:
+ driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo)
+ if err != nil {
+ return nil, err
+ }
+ case postgres.SQLX:
+ driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unrecongized Postgres driver type: %s", pgc.Driver)
+ }
+ return sql.NewStateDiffIndexer(ctx, chainConfig, postgres.NewPostgresDB(driver))
+ case shared.DUMP:
+ log.Info("Starting statediff service in data dump mode")
+ dumpc, ok := config.(dump.Config)
+ if !ok {
+ return nil, fmt.Errorf("dump config is not the correct type: got %T, expected %T", config, dump.Config{})
+ }
+ return dump.NewStateDiffIndexer(chainConfig, dumpc), nil
+ default:
+ return nil, fmt.Errorf("unrecognized database type: %s", config.Type())
+ }
+}
diff --git a/statediff/indexer/database/dump/batch_tx.go b/statediff/indexer/database/dump/batch_tx.go
new file mode 100644
index 000000000..f1754b907
--- /dev/null
+++ b/statediff/indexer/database/dump/batch_tx.go
@@ -0,0 +1,94 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dump
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ blockstore "github.com/ipfs/go-ipfs-blockstore"
+ dshelp "github.com/ipfs/go-ipfs-ds-help"
+ node "github.com/ipfs/go-ipld-format"
+)
+
+// BatchTx wraps a void (an io.Writer dump destination) with the state necessary for building the tx concurrently during trie difference iteration
+type BatchTx struct {
+ BlockNumber uint64
+ dump io.Writer
+ quit chan struct{}
+ iplds chan models.IPLDModel
+ ipldCache models.IPLDBatch
+
+ submit func(blockTx *BatchTx, err error) error
+}
+
+// Submit satisfies indexer.AtomicTx
+func (tx *BatchTx) Submit(err error) error {
+ return tx.submit(tx, err)
+}
+
+func (tx *BatchTx) flush() error {
+ if _, err := fmt.Fprintf(tx.dump, "%+v\r\n", tx.ipldCache); err != nil {
+ return err
+ }
+ tx.ipldCache = models.IPLDBatch{}
+ return nil
+}
+
+// run in background goroutine to synchronize concurrent appends to the ipldCache
+func (tx *BatchTx) cache() {
+ for {
+ select {
+ case i := <-tx.iplds:
+ tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
+ tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
+ case <-tx.quit:
+ tx.ipldCache = models.IPLDBatch{}
+ return
+ }
+ }
+}
+
+func (tx *BatchTx) cacheDirect(key string, value []byte) {
+ tx.iplds <- models.IPLDModel{
+ Key: key,
+ Data: value,
+ }
+}
+
+func (tx *BatchTx) cacheIPLD(i node.Node) {
+ tx.iplds <- models.IPLDModel{
+ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
+ Data: i.RawData(),
+ }
+}
+
+func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
+ c, err := ipld.RawdataToCid(codec, raw, mh)
+ if err != nil {
+ return "", "", err
+ }
+ prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
+ tx.iplds <- models.IPLDModel{
+ Key: prefixedKey,
+ Data: raw,
+ }
+ return c.String(), prefixedKey, err
+}
diff --git a/statediff/indexer/database/dump/config.go b/statediff/indexer/database/dump/config.go
new file mode 100644
index 000000000..6fb1f0a9e
--- /dev/null
+++ b/statediff/indexer/database/dump/config.go
@@ -0,0 +1,79 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dump
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+)
+
+// DumpType to explicitly type the dump destination
+type DumpType string
+
+const (
+ STDOUT = "Stdout"
+ STDERR = "Stderr"
+ DISCARD = "Discard"
+ UNKNOWN = "Unknown"
+)
+
+// ResolveDumpType resolves the dump type for the provided string
+func ResolveDumpType(str string) (DumpType, error) {
+ switch strings.ToLower(str) {
+ case "stdout", "out", "std out":
+ return STDOUT, nil
+ case "stderr", "err", "std err":
+ return STDERR, nil
+ case "discard", "void", "devnull", "dev null":
+ return DISCARD, nil
+ default:
+ return UNKNOWN, fmt.Errorf("unrecognized dump type: %s", str)
+ }
+}
+
+// Config for data dump
+type Config struct {
+ Dump io.WriteCloser
+}
+
+// Type satisfies interfaces.Config
+func (c Config) Type() shared.DBType {
+ return shared.DUMP
+}
+
+// NewDiscardWriterCloser returns a discardWrapper wrapping io.Discard
+func NewDiscardWriterCloser() io.WriteCloser {
+ return discardWrapper{blackhole: io.Discard}
+}
+
+// discardWrapper wraps io.Discard with io.Closer
+type discardWrapper struct {
+ blackhole io.Writer
+}
+
+// Write satisfies io.Writer
+func (dw discardWrapper) Write(b []byte) (int, error) {
+ return dw.blackhole.Write(b)
+}
+
+// Close satisfies io.Closer
+func (dw discardWrapper) Close() error {
+ return nil
+}
diff --git a/statediff/indexer/database/dump/indexer.go b/statediff/indexer/database/dump/indexer.go
new file mode 100644
index 000000000..e450f941a
--- /dev/null
+++ b/statediff/indexer/database/dump/indexer.go
@@ -0,0 +1,498 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dump
+
+import (
+ "fmt"
+ "io"
+ "math/big"
+ "time"
+
+ ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+ sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+)
+
+var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
+
+var (
+ indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry)
+)
+
+// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of a void
+type StateDiffIndexer struct {
+ dump io.WriteCloser
+ chainConfig *params.ChainConfig
+}
+
+// NewStateDiffIndexer creates a void implementation of interfaces.StateDiffIndexer
+func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config) *StateDiffIndexer {
+ return &StateDiffIndexer{
+ dump: config.Dump,
+ chainConfig: chainConfig,
+ }
+}
+
+// ReportDBMetrics has nothing to report for dump
+func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {}
+
+// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
+// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
+func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
+ start, t := time.Now(), time.Now()
+ blockHash := block.Hash()
+ blockHashStr := blockHash.String()
+ height := block.NumberU64()
+ traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr)
+ transactions := block.Transactions()
+ // Derive any missing fields
+ if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
+ return nil, err
+ }
+
+ // Generate the block iplds
+ headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+ if err != nil {
+ return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
+ }
+
+ if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
+ return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ }
+ if len(txTrieNodes) != len(rctTrieNodes) {
+ return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+ }
+
+ // Calculate reward
+ var reward *big.Int
+ // in PoA networks block reward is 0
+ if sdi.chainConfig.Clique != nil {
+ reward = big.NewInt(0)
+ } else {
+ reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
+ }
+ t = time.Now()
+
+ blockTx := &BatchTx{
+ BlockNumber: height,
+ dump: sdi.dump,
+ iplds: make(chan models.IPLDModel),
+ quit: make(chan struct{}),
+ ipldCache: models.IPLDBatch{},
+ submit: func(self *BatchTx, err error) error {
+ close(self.quit)
+ close(self.iplds)
+ tDiff := time.Since(t)
+ indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+ if err := self.flush(); err != nil {
+ traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
+ log.Debug(traceMsg)
+ return err
+ }
+ tDiff = time.Since(t)
+ indexerMetrics.tPostgresCommit.Update(tDiff)
+ traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
+ traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
+ log.Debug(traceMsg)
+ return err
+ },
+ }
+ go blockTx.cache()
+
+ tDiff := time.Since(t)
+ indexerMetrics.tFreePostgres.Update(tDiff)
+
+ traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String())
+ t = time.Now()
+
+ // Publish and index header, collect headerID
+ var headerID string
+ headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty)
+ if err != nil {
+ return nil, err
+ }
+ tDiff = time.Since(t)
+ indexerMetrics.tHeaderProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+ // Publish and index uncles
+ err = sdi.processUncles(blockTx, headerID, height, uncleNodes)
+ if err != nil {
+ return nil, err
+ }
+ tDiff = time.Since(t)
+ indexerMetrics.tUncleProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+ // Publish and index receipts and txs
+ err = sdi.processReceiptsAndTxs(blockTx, processArgs{
+ headerID: headerID,
+ blockNumber: block.Number(),
+ receipts: receipts,
+ txs: transactions,
+ rctNodes: rctNodes,
+ rctTrieNodes: rctTrieNodes,
+ txNodes: txNodes,
+ txTrieNodes: txTrieNodes,
+ logTrieNodes: logTrieNodes,
+ logLeafNodeCIDs: logLeafNodeCIDs,
+ rctLeafNodeCIDs: rctLeafNodeCIDs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ tDiff = time.Since(t)
+ indexerMetrics.tTxAndRecProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+
+ return blockTx, err
+}
+
+// processHeader publishes and indexes a header IPLD in Postgres
+// it returns the headerID
+func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (string, error) {
+ tx.cacheIPLD(headerNode)
+
+ headerID := header.Hash().String()
+ mod := models.HeaderModel{
+ CID: headerNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
+ ParentHash: header.ParentHash.String(),
+ BlockNumber: header.Number.String(),
+ BlockHash: headerID,
+ TotalDifficulty: td.String(),
+ Reward: reward.String(),
+ Bloom: header.Bloom.Bytes(),
+ StateRoot: header.Root.String(),
+ RctRoot: header.ReceiptHash.String(),
+ TxRoot: header.TxHash.String(),
+ UncleRoot: header.UncleHash.String(),
+ Timestamp: header.Time,
+ Coinbase: header.Coinbase.String(),
+ }
+ _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", mod)
+ return headerID, err
+}
+
+// processUncles publishes and indexes uncle IPLDs in Postgres
+func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error {
+ // publish and index uncles
+ for _, uncleNode := range uncleNodes {
+ tx.cacheIPLD(uncleNode)
+ var uncleReward *big.Int
+ // in PoA networks uncle reward is 0
+ if sdi.chainConfig.Clique != nil {
+ uncleReward = big.NewInt(0)
+ } else {
+ uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
+ }
+ uncle := models.UncleModel{
+ HeaderID: headerID,
+ CID: uncleNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
+ ParentHash: uncleNode.ParentHash.String(),
+ BlockHash: uncleNode.Hash().String(),
+ Reward: uncleReward.String(),
+ }
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", uncle); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// processArgs bundles arguments to processReceiptsAndTxs
+type processArgs struct {
+ headerID string
+ blockNumber *big.Int
+ receipts types.Receipts
+ txs types.Transactions
+ rctNodes []*ipld2.EthReceipt
+ rctTrieNodes []*ipld2.EthRctTrie
+ txNodes []*ipld2.EthTx
+ txTrieNodes []*ipld2.EthTxTrie
+ logTrieNodes [][]node.Node
+ logLeafNodeCIDs [][]cid.Cid
+ rctLeafNodeCIDs []cid.Cid
+}
+
+// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
+func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error {
+ // Process receipts and txs
+ signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
+ for i, receipt := range args.receipts {
+ for _, logTrieNode := range args.logTrieNodes[i] {
+ tx.cacheIPLD(logTrieNode)
+ }
+ txNode := args.txNodes[i]
+ tx.cacheIPLD(txNode)
+
+ // Indexing
+ // index tx
+ trx := args.txs[i]
+ trxID := trx.Hash().String()
+
+ var val string
+ if trx.Value() != nil {
+ val = trx.Value().String()
+ }
+
+ // derive sender for the tx that corresponds with this receipt
+ from, err := types.Sender(signer, trx)
+ if err != nil {
+ return fmt.Errorf("error deriving tx sender: %v", err)
+ }
+ txModel := models.TxModel{
+ HeaderID: args.headerID,
+ Dst: shared.HandleZeroAddrPointer(trx.To()),
+ Src: shared.HandleZeroAddr(from),
+ TxHash: trxID,
+ Index: int64(i),
+ Data: trx.Data(),
+ CID: txNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+ Type: trx.Type(),
+ Value: val,
+ }
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", txModel); err != nil {
+ return err
+ }
+
+ // index access list if this is one
+ for j, accessListElement := range trx.AccessList() {
+ storageKeys := make([]string, len(accessListElement.StorageKeys))
+ for k, storageKey := range accessListElement.StorageKeys {
+ storageKeys[k] = storageKey.Hex()
+ }
+ accessListElementModel := models.AccessListElementModel{
+ TxID: trxID,
+ Index: int64(j),
+ Address: accessListElement.Address.Hex(),
+ StorageKeys: storageKeys,
+ }
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", accessListElementModel); err != nil {
+ return err
+ }
+ }
+
+ // this is the contract address if this receipt is for a contract creation tx
+ contract := shared.HandleZeroAddr(receipt.ContractAddress)
+ var contractHash string
+ if contract != "" {
+ contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
+ }
+
+ // index the receipt
+ if !args.rctLeafNodeCIDs[i].Defined() {
+ return fmt.Errorf("invalid receipt leaf node cid")
+ }
+
+ rctModel := &models.ReceiptModel{
+ TxID: trxID,
+ Contract: contract,
+ ContractHash: contractHash,
+ LeafCID: args.rctLeafNodeCIDs[i].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
+ LogRoot: args.rctNodes[i].LogRoot.String(),
+ }
+ if len(receipt.PostState) == 0 {
+ rctModel.PostStatus = receipt.Status
+ } else {
+ rctModel.PostState = common.Bytes2Hex(receipt.PostState)
+ }
+
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", rctModel); err != nil {
+ return err
+ }
+
+ logDataSet := make([]*models.LogsModel, len(receipt.Logs))
+ for idx, l := range receipt.Logs {
+ topicSet := make([]string, 4)
+ for ti, topic := range l.Topics {
+ topicSet[ti] = topic.Hex()
+ }
+
+ if !args.logLeafNodeCIDs[i][idx].Defined() {
+ return fmt.Errorf("invalid log cid")
+ }
+
+ logDataSet[idx] = &models.LogsModel{
+ ReceiptID: trxID,
+ Address: l.Address.String(),
+ Index: int64(l.Index),
+ Data: l.Data,
+ LeafCID: args.logLeafNodeCIDs[i][idx].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
+ Topic0: topicSet[0],
+ Topic1: topicSet[1],
+ Topic2: topicSet[2],
+ Topic3: topicSet[3],
+ }
+ }
+
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", logDataSet); err != nil {
+ return err
+ }
+ }
+
+ // publish trie nodes, these aren't indexed directly
+ for i, n := range args.txTrieNodes {
+ tx.cacheIPLD(n)
+ tx.cacheIPLD(args.rctTrieNodes[i])
+ }
+
+ return nil
+}
+
+// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) to the dump destination
+func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
+ tx, ok := batch.(*BatchTx)
+ if !ok {
+ return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
+ }
+ // publish the state node
+ if stateNode.NodeType == sdtypes.Removed {
+ // short circuit if it is a Removed node
+ // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
+ stateModel := models.StateNodeModel{
+ HeaderID: headerID,
+ Path: stateNode.Path,
+ StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+ CID: shared.RemovedNodeStateCID,
+ MhKey: shared.RemovedNodeMhKey,
+ NodeType: stateNode.NodeType.Int(),
+ }
+ _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", stateModel)
+ return err
+ }
+ stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+ if err != nil {
+ return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
+ }
+ stateModel := models.StateNodeModel{
+ HeaderID: headerID,
+ Path: stateNode.Path,
+ StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+ CID: stateCIDStr,
+ MhKey: stateMhKey,
+ NodeType: stateNode.NodeType.Int(),
+ }
+ // index the state node, collect the stateID to reference by FK
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", stateModel); err != nil {
+ return err
+ }
+ // if we have a leaf, decode and index the account data
+ if stateNode.NodeType == sdtypes.Leaf {
+ var i []interface{}
+ if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
+ return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
+ }
+ if len(i) != 2 {
+ return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
+ }
+ var account types.StateAccount
+ if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
+ return fmt.Errorf("error decoding state account rlp: %s", err.Error())
+ }
+ accountModel := models.StateAccountModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
+ Balance: account.Balance.String(),
+ Nonce: account.Nonce,
+ CodeHash: account.CodeHash,
+ StorageRoot: account.Root.String(),
+ }
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", accountModel); err != nil {
+ return err
+ }
+ }
+ // if there are any storage nodes associated with this node, publish and index them
+ for _, storageNode := range stateNode.StorageNodes {
+ if storageNode.NodeType == sdtypes.Removed {
+ // short circuit if it is a Removed node
+ // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
+ storageModel := models.StorageNodeModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
+ Path: storageNode.Path,
+ StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
+ CID: shared.RemovedNodeStorageCID,
+ MhKey: shared.RemovedNodeMhKey,
+ NodeType: storageNode.NodeType.Int(),
+ }
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", storageModel); err != nil {
+ return err
+ }
+ continue
+ }
+ storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
+ if err != nil {
+ return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
+ }
+ storageModel := models.StorageNodeModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
+ Path: storageNode.Path,
+ StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
+ CID: storageCIDStr,
+ MhKey: storageMhKey,
+ NodeType: storageNode.NodeType.Int(),
+ }
+ if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", storageModel); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// PushCodeAndCodeHash publishes code and codehash pairs to the dump destination
+func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
+ tx, ok := batch.(*BatchTx)
+ if !ok {
+ return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
+ }
+ // codec doesn't matter since db key is multihash-based
+ mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
+ if err != nil {
+ return fmt.Errorf("error deriving multihash key from codehash: %v", err)
+ }
+ tx.cacheDirect(mhKey, codeAndCodeHash.Code)
+ return nil
+}
+
+// Close satisfies io.Closer
+func (sdi *StateDiffIndexer) Close() error {
+ return sdi.dump.Close()
+}
diff --git a/statediff/indexer/database/dump/metrics.go b/statediff/indexer/database/dump/metrics.go
new file mode 100644
index 000000000..700e42dc0
--- /dev/null
+++ b/statediff/indexer/database/dump/metrics.go
@@ -0,0 +1,94 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package dump
+
+import (
+ "strings"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+const (
+ namespace = "statediff"
+)
+
+// Build a fully qualified metric name
+func metricName(subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ parts := []string{namespace, name}
+ if subsystem != "" {
+ parts = []string{namespace, subsystem, name}
+ }
+ // Prometheus uses '_' as the separator, but geth metrics uses '/'; the exporter replaces it on export
+ return strings.Join(parts, "/")
+}
+
+type indexerMetricsHandles struct {
+ // The total number of processed blocks
+ blocks metrics.Counter
+ // The total number of processed transactions
+ transactions metrics.Counter
+ // The total number of processed receipts
+ receipts metrics.Counter
+ // The total number of processed logs
+ logs metrics.Counter
+ // The total number of access list entries processed
+ accessListEntries metrics.Counter
+ // Time spent waiting for free postgres tx
+ tFreePostgres metrics.Timer
+ // Postgres transaction commit duration
+ tPostgresCommit metrics.Timer
+ // Header processing time
+ tHeaderProcessing metrics.Timer
+ // Uncle processing time
+ tUncleProcessing metrics.Timer
+ // Tx and receipt processing time
+ tTxAndRecProcessing metrics.Timer
+ // State, storage, and code combined processing time
+ tStateStoreCodeProcessing metrics.Timer
+}
+
+func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles {
+ ctx := indexerMetricsHandles{
+ blocks: metrics.NewCounter(),
+ transactions: metrics.NewCounter(),
+ receipts: metrics.NewCounter(),
+ logs: metrics.NewCounter(),
+ accessListEntries: metrics.NewCounter(),
+ tFreePostgres: metrics.NewTimer(),
+ tPostgresCommit: metrics.NewTimer(),
+ tHeaderProcessing: metrics.NewTimer(),
+ tUncleProcessing: metrics.NewTimer(),
+ tTxAndRecProcessing: metrics.NewTimer(),
+ tStateStoreCodeProcessing: metrics.NewTimer(),
+ }
+ subsys := "indexer"
+ reg.Register(metricName(subsys, "blocks"), ctx.blocks)
+ reg.Register(metricName(subsys, "transactions"), ctx.transactions)
+ reg.Register(metricName(subsys, "receipts"), ctx.receipts)
+ reg.Register(metricName(subsys, "logs"), ctx.logs)
+ reg.Register(metricName(subsys, "access_list_entries"), ctx.accessListEntries)
+ reg.Register(metricName(subsys, "t_free_postgres"), ctx.tFreePostgres)
+ reg.Register(metricName(subsys, "t_postgres_commit"), ctx.tPostgresCommit)
+ reg.Register(metricName(subsys, "t_header_processing"), ctx.tHeaderProcessing)
+ reg.Register(metricName(subsys, "t_uncle_processing"), ctx.tUncleProcessing)
+ reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.tTxAndRecProcessing)
+ reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.tStateStoreCodeProcessing)
+ return ctx
+}
diff --git a/statediff/indexer/ipfs/models.go b/statediff/indexer/database/file/batch_tx.go
similarity index 65%
rename from statediff/indexer/ipfs/models.go
rename to statediff/indexer/database/file/batch_tx.go
index eb0312beb..39e5d3713 100644
--- a/statediff/indexer/ipfs/models.go
+++ b/statediff/indexer/database/file/batch_tx.go
@@ -1,5 +1,5 @@
// VulcanizeDB
-// Copyright © 2019 Vulcanize
+// Copyright © 2021 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
@@ -14,9 +14,16 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package ipfs
+package file
-type BlockModel struct {
- CID string `db:"key"`
- Data []byte `db:"data"`
+// BatchTx wraps the state necessary for building the tx concurrently during trie difference iteration
+type BatchTx struct {
+ BlockNumber uint64
+
+ submit func(blockTx *BatchTx, err error) error
+}
+
+// Submit satisfies indexer.AtomicTx
+func (tx *BatchTx) Submit(err error) error {
+ return tx.submit(tx, err)
}
diff --git a/statediff/indexer/database/file/config.go b/statediff/indexer/database/file/config.go
new file mode 100644
index 000000000..c2c6804c0
--- /dev/null
+++ b/statediff/indexer/database/file/config.go
@@ -0,0 +1,45 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package file
+
+import (
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+)
+
+// Config holds params for writing sql statements out to a file
+type Config struct {
+ FilePath string
+ NodeInfo node.Info
+}
+
+// Type satisfies interfaces.Config
+func (c Config) Type() shared.DBType {
+ return shared.FILE
+}
+
+// TestConfig config for unit tests
+var TestConfig = Config{
+ FilePath: "./statediffing_test_file.sql",
+ NodeInfo: node.Info{
+ GenesisBlock: "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3",
+ NetworkID: "1",
+ ChainID: 1,
+ ID: "mockNodeID",
+ ClientName: "go-ethereum",
+ },
+}
diff --git a/statediff/indexer/database/file/helpers.go b/statediff/indexer/database/file/helpers.go
new file mode 100644
index 000000000..dc635110c
--- /dev/null
+++ b/statediff/indexer/database/file/helpers.go
@@ -0,0 +1,60 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package file
+
+import "bytes"
+
+// formatPostgresStringArray parses an array of strings into the proper Postgres string representation of that array
+func formatPostgresStringArray(a []string) string {
+ if a == nil {
+ return ""
+ }
+
+ if n := len(a); n > 0 {
+ // There will be at least two curly brackets, 2*N bytes of quotes,
+ // and N-1 bytes of delimiters.
+ b := make([]byte, 1, 1+3*n)
+ b[0] = '{'
+
+ b = appendArrayQuotedBytes(b, []byte(a[0]))
+ for i := 1; i < n; i++ {
+ b = append(b, ',')
+ b = appendArrayQuotedBytes(b, []byte(a[i]))
+ }
+
+ return string(append(b, '}'))
+ }
+
+ return "{}"
+}
+
+func appendArrayQuotedBytes(b, v []byte) []byte {
+ b = append(b, '"')
+ for {
+ i := bytes.IndexAny(v, `"\`)
+ if i < 0 {
+ b = append(b, v...)
+ break
+ }
+ if i > 0 {
+ b = append(b, v[:i]...)
+ }
+ b = append(b, '\\', v[i])
+ v = v[i+1:]
+ }
+ return append(b, '"')
+}
diff --git a/statediff/indexer/database/file/indexer.go b/statediff/indexer/database/file/indexer.go
new file mode 100644
index 000000000..870c1f259
--- /dev/null
+++ b/statediff/indexer/database/file/indexer.go
@@ -0,0 +1,480 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package file
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+ sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+)
+
+const defaultFilePath = "./statediff.sql"
+
+var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
+
+var (
+ indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry)
+)
+
+// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects, writing insert SQL statements out to a file
+type StateDiffIndexer struct {
+ fileWriter *SQLWriter
+ chainConfig *params.ChainConfig
+ nodeID string
+ wg *sync.WaitGroup
+}
+
+// NewStateDiffIndexer creates a file-writing implementation of interfaces.StateDiffIndexer
+func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, config Config) (*StateDiffIndexer, error) {
+ filePath := config.FilePath
+ if filePath == "" {
+ filePath = defaultFilePath
+ }
+ if _, err := os.Stat(filePath); !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("cannot create file, file (%s) already exists", filePath)
+ }
+ file, err := os.Create(filePath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create file (%s), err: %v", filePath, err)
+ }
+ log.Info("Writing statediff SQL statements to file", "file", filePath)
+ w := NewSQLWriter(file)
+ wg := new(sync.WaitGroup)
+ w.Loop()
+ w.upsertNode(config.NodeInfo)
+ w.upsertIPLDDirect(shared.RemovedNodeMhKey, []byte{})
+ return &StateDiffIndexer{
+ fileWriter: w,
+ chainConfig: chainConfig,
+ nodeID: config.NodeInfo.ID,
+ wg: wg,
+ }, nil
+}
+
+// ReportDBMetrics has nothing to report for the file indexer
+func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {}
+
+// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
+// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
+func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
+ start, t := time.Now(), time.Now()
+ blockHash := block.Hash()
+ blockHashStr := blockHash.String()
+ height := block.NumberU64()
+ traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr)
+ transactions := block.Transactions()
+ // Derive any missing fields
+ if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
+ return nil, err
+ }
+
+ // Generate the block iplds
+ headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+ if err != nil {
+ return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
+ }
+
+ if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
+ return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ }
+ if len(txTrieNodes) != len(rctTrieNodes) {
+ return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+ }
+
+ // Calculate reward
+ var reward *big.Int
+ // in PoA networks block reward is 0
+ if sdi.chainConfig.Clique != nil {
+ reward = big.NewInt(0)
+ } else {
+ reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
+ }
+ t = time.Now()
+
+ blockTx := &BatchTx{
+ BlockNumber: height,
+ submit: func(self *BatchTx, err error) error {
+ tDiff := time.Since(t)
+ indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+ sdi.fileWriter.Flush()
+ tDiff = time.Since(t)
+ indexerMetrics.tPostgresCommit.Update(tDiff)
+ traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
+ traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
+ log.Debug(traceMsg)
+ return err
+ },
+ }
+ tDiff := time.Since(t)
+ indexerMetrics.tFreePostgres.Update(tDiff)
+ traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String())
+ t = time.Now()
+
+ // write header, collect headerID
+ headerID := sdi.processHeader(block.Header(), headerNode, reward, totalDifficulty)
+ tDiff = time.Since(t)
+ indexerMetrics.tHeaderProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+
+ // write uncles
+ sdi.processUncles(headerID, height, uncleNodes)
+ tDiff = time.Since(t)
+ indexerMetrics.tUncleProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+
+ // write receipts and txs
+ err = sdi.processReceiptsAndTxs(processArgs{
+ headerID: headerID,
+ blockNumber: block.Number(),
+ receipts: receipts,
+ txs: transactions,
+ rctNodes: rctNodes,
+ rctTrieNodes: rctTrieNodes,
+ txNodes: txNodes,
+ txTrieNodes: txTrieNodes,
+ logTrieNodes: logTrieNodes,
+ logLeafNodeCIDs: logLeafNodeCIDs,
+ rctLeafNodeCIDs: rctLeafNodeCIDs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ tDiff = time.Since(t)
+ indexerMetrics.tTxAndRecProcessing.Update(tDiff)
+ traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
+ t = time.Now()
+
+ return blockTx, err
+}
+
+// processHeader writes a header IPLD insert SQL stmt to a file
+// it returns the headerID
+func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node.Node, reward, td *big.Int) string {
+ sdi.fileWriter.upsertIPLDNode(headerNode)
+
+ var baseFee *string
+ if header.BaseFee != nil {
+ baseFee = new(string)
+ *baseFee = header.BaseFee.String()
+ }
+ headerID := header.Hash().String()
+ sdi.fileWriter.upsertHeaderCID(models.HeaderModel{
+ NodeID: sdi.nodeID,
+ CID: headerNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
+ ParentHash: header.ParentHash.String(),
+ BlockNumber: header.Number.String(),
+ BlockHash: headerID,
+ TotalDifficulty: td.String(),
+ Reward: reward.String(),
+ Bloom: header.Bloom.Bytes(),
+ StateRoot: header.Root.String(),
+ RctRoot: header.ReceiptHash.String(),
+ TxRoot: header.TxHash.String(),
+ UncleRoot: header.UncleHash.String(),
+ Timestamp: header.Time,
+ Coinbase: header.Coinbase.String(),
+ })
+ return headerID
+}
+
+// processUncles writes uncle IPLD insert SQL stmts to a file
+func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber uint64, uncleNodes []*ipld2.EthHeader) {
+ // publish and index uncles
+ for _, uncleNode := range uncleNodes {
+ sdi.fileWriter.upsertIPLDNode(uncleNode)
+ var uncleReward *big.Int
+ // in PoA networks uncle reward is 0
+ if sdi.chainConfig.Clique != nil {
+ uncleReward = big.NewInt(0)
+ } else {
+ uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
+ }
+ sdi.fileWriter.upsertUncleCID(models.UncleModel{
+ HeaderID: headerID,
+ CID: uncleNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
+ ParentHash: uncleNode.ParentHash.String(),
+ BlockHash: uncleNode.Hash().String(),
+ Reward: uncleReward.String(),
+ })
+ }
+}
+
+// processArgs bundles arguments to processReceiptsAndTxs
+type processArgs struct {
+ headerID string
+ blockNumber *big.Int
+ receipts types.Receipts
+ txs types.Transactions
+ rctNodes []*ipld2.EthReceipt
+ rctTrieNodes []*ipld2.EthRctTrie
+ txNodes []*ipld2.EthTx
+ txTrieNodes []*ipld2.EthTxTrie
+ logTrieNodes [][]node.Node
+ logLeafNodeCIDs [][]cid.Cid
+ rctLeafNodeCIDs []cid.Cid
+}
+
+// processReceiptsAndTxs writes receipt and tx IPLD insert SQL stmts to a file
+func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
+ // Process receipts and txs
+ signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
+ for i, receipt := range args.receipts {
+ for _, logTrieNode := range args.logTrieNodes[i] {
+ sdi.fileWriter.upsertIPLDNode(logTrieNode)
+ }
+ txNode := args.txNodes[i]
+ sdi.fileWriter.upsertIPLDNode(txNode)
+
+ // index tx
+ trx := args.txs[i]
+ txID := trx.Hash().String()
+
+ var val string
+ if trx.Value() != nil {
+ val = trx.Value().String()
+ }
+
+ // derive sender for the tx that corresponds with this receipt
+ from, err := types.Sender(signer, trx)
+ if err != nil {
+ return fmt.Errorf("error deriving tx sender: %v", err)
+ }
+ txModel := models.TxModel{
+ HeaderID: args.headerID,
+ Dst: shared.HandleZeroAddrPointer(trx.To()),
+ Src: shared.HandleZeroAddr(from),
+ TxHash: txID,
+ Index: int64(i),
+ Data: trx.Data(),
+ CID: txNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+ Type: trx.Type(),
+ Value: val,
+ }
+ sdi.fileWriter.upsertTransactionCID(txModel)
+
+ // index access list if this is one
+ for j, accessListElement := range trx.AccessList() {
+ storageKeys := make([]string, len(accessListElement.StorageKeys))
+ for k, storageKey := range accessListElement.StorageKeys {
+ storageKeys[k] = storageKey.Hex()
+ }
+ accessListElementModel := models.AccessListElementModel{
+ TxID: txID,
+ Index: int64(j),
+ Address: accessListElement.Address.Hex(),
+ StorageKeys: storageKeys,
+ }
+ sdi.fileWriter.upsertAccessListElement(accessListElementModel)
+ }
+
+ // this is the contract address if this receipt is for a contract creation tx
+ contract := shared.HandleZeroAddr(receipt.ContractAddress)
+ var contractHash string
+ if contract != "" {
+ contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
+ }
+
+ // index receipt
+ if !args.rctLeafNodeCIDs[i].Defined() {
+ return fmt.Errorf("invalid receipt leaf node cid")
+ }
+
+ rctModel := &models.ReceiptModel{
+ TxID: txID,
+ Contract: contract,
+ ContractHash: contractHash,
+ LeafCID: args.rctLeafNodeCIDs[i].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
+ LogRoot: args.rctNodes[i].LogRoot.String(),
+ }
+ if len(receipt.PostState) == 0 {
+ rctModel.PostStatus = receipt.Status
+ } else {
+ rctModel.PostState = common.Bytes2Hex(receipt.PostState)
+ }
+ sdi.fileWriter.upsertReceiptCID(rctModel)
+
+ // index logs
+ logDataSet := make([]*models.LogsModel, len(receipt.Logs))
+ for idx, l := range receipt.Logs {
+ topicSet := make([]string, 4)
+ for ti, topic := range l.Topics {
+ topicSet[ti] = topic.Hex()
+ }
+
+ if !args.logLeafNodeCIDs[i][idx].Defined() {
+ return fmt.Errorf("invalid log cid")
+ }
+
+ logDataSet[idx] = &models.LogsModel{
+ ReceiptID: txID,
+ Address: l.Address.String(),
+ Index: int64(l.Index),
+ Data: l.Data,
+ LeafCID: args.logLeafNodeCIDs[i][idx].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
+ Topic0: topicSet[0],
+ Topic1: topicSet[1],
+ Topic2: topicSet[2],
+ Topic3: topicSet[3],
+ }
+ }
+ sdi.fileWriter.upsertLogCID(logDataSet)
+ }
+
+ // publish trie nodes, these aren't indexed directly
+ for i, n := range args.txTrieNodes {
+ sdi.fileWriter.upsertIPLDNode(n)
+ sdi.fileWriter.upsertIPLDNode(args.rctTrieNodes[i])
+ }
+
+ return nil
+}
+
+// PushStateNode writes a state diff node object (including any child storage nodes) IPLD insert SQL stmt to a file
+func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
+ // publish the state node
+ if stateNode.NodeType == sdtypes.Removed {
+ // short circuit if it is a Removed node
+ // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
+ stateModel := models.StateNodeModel{
+ HeaderID: headerID,
+ Path: stateNode.Path,
+ StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+ CID: shared.RemovedNodeStateCID,
+ MhKey: shared.RemovedNodeMhKey,
+ NodeType: stateNode.NodeType.Int(),
+ }
+ sdi.fileWriter.upsertStateCID(stateModel)
+ return nil
+ }
+ stateCIDStr, stateMhKey, err := sdi.fileWriter.upsertIPLDRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+ if err != nil {
+ return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
+ }
+ stateModel := models.StateNodeModel{
+ HeaderID: headerID,
+ Path: stateNode.Path,
+ StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+ CID: stateCIDStr,
+ MhKey: stateMhKey,
+ NodeType: stateNode.NodeType.Int(),
+ }
+ // index the state node
+ sdi.fileWriter.upsertStateCID(stateModel)
+ // if we have a leaf, decode and index the account data
+ if stateNode.NodeType == sdtypes.Leaf {
+ var i []interface{}
+ if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
+ return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
+ }
+ if len(i) != 2 {
+ return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
+ }
+ var account types.StateAccount
+ if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
+ return fmt.Errorf("error decoding state account rlp: %s", err.Error())
+ }
+ accountModel := models.StateAccountModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
+ Balance: account.Balance.String(),
+ Nonce: account.Nonce,
+ CodeHash: account.CodeHash,
+ StorageRoot: account.Root.String(),
+ }
+ sdi.fileWriter.upsertStateAccount(accountModel)
+ }
+ // if there are any storage nodes associated with this node, publish and index them
+ for _, storageNode := range stateNode.StorageNodes {
+ if storageNode.NodeType == sdtypes.Removed {
+ // short circuit if it is a Removed node
+ // this assumes the db has been initialized and a public.blocks entry for the Removed node is present
+ storageModel := models.StorageNodeModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
+ Path: storageNode.Path,
+ StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
+ CID: shared.RemovedNodeStorageCID,
+ MhKey: shared.RemovedNodeMhKey,
+ NodeType: storageNode.NodeType.Int(),
+ }
+ sdi.fileWriter.upsertStorageCID(storageModel)
+ continue
+ }
+ storageCIDStr, storageMhKey, err := sdi.fileWriter.upsertIPLDRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
+ if err != nil {
+ return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
+ }
+ storageModel := models.StorageNodeModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
+ Path: storageNode.Path,
+ StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
+ CID: storageCIDStr,
+ MhKey: storageMhKey,
+ NodeType: storageNode.NodeType.Int(),
+ }
+ sdi.fileWriter.upsertStorageCID(storageModel)
+ }
+
+ return nil
+}
+
+// PushCodeAndCodeHash writes code and codehash pairs insert SQL stmts to a file
+func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
+ // codec doesn't matter since db key is multihash-based
+ mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
+ if err != nil {
+ return fmt.Errorf("error deriving multihash key from codehash: %v", err)
+ }
+ sdi.fileWriter.upsertIPLDDirect(mhKey, codeAndCodeHash.Code)
+ return nil
+}
+
+// Close satisfies io.Closer
+func (sdi *StateDiffIndexer) Close() error {
+ return sdi.fileWriter.Close()
+}
diff --git a/statediff/indexer/database/file/indexer_legacy_test.go b/statediff/indexer/database/file/indexer_legacy_test.go
new file mode 100644
index 000000000..56bca2683
--- /dev/null
+++ b/statediff/indexer/database/file/indexer_legacy_test.go
@@ -0,0 +1,132 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package file_test
+
+import (
+ "context"
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/ipfs/go-cid"
+ "github.com/jmoiron/sqlx"
+ "github.com/multiformats/go-multihash"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/file"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+var (
+ legacyData = mocks.NewLegacyData()
+ mockLegacyBlock *types.Block
+ legacyHeaderCID cid.Cid
+)
+
+func setupLegacy(t *testing.T) {
+ mockLegacyBlock = legacyData.MockBlock
+ legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
+ if _, err := os.Stat(file.TestConfig.FilePath); !errors.Is(err, os.ErrNotExist) {
+ err := os.Remove(file.TestConfig.FilePath)
+ require.NoError(t, err)
+ }
+ ind, err := file.NewStateDiffIndexer(context.Background(), legacyData.Config, file.TestConfig)
+ require.NoError(t, err)
+ var tx interfaces.Batch
+ tx, err = ind.PushBlock(
+ mockLegacyBlock,
+ legacyData.MockReceipts,
+ legacyData.MockBlock.Difficulty())
+ require.NoError(t, err)
+
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ if err := ind.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range legacyData.StateDiffs {
+ err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String())
+ require.NoError(t, err)
+ }
+
+ test_helpers.ExpectEqual(t, tx.(*file.BatchTx).BlockNumber, legacyData.BlockNumber.Uint64())
+
+ connStr := postgres.DefaultConfig.DbConnectionString()
+
+ sqlxdb, err = sqlx.Connect("postgres", connStr)
+ if err != nil {
+ t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err)
+ }
+}
+
+func dumpData(t *testing.T) {
+ sqlFileBytes, err := os.ReadFile(file.TestConfig.FilePath)
+ require.NoError(t, err)
+
+ _, err = sqlxdb.Exec(string(sqlFileBytes))
+ require.NoError(t, err)
+}
+
+func tearDown(t *testing.T) {
+ file.TearDownDB(t, sqlxdb)
+ err := os.Remove(file.TestConfig.FilePath)
+ require.NoError(t, err)
+ err = sqlxdb.Close()
+ require.NoError(t, err)
+}
+
+func expectTrue(t *testing.T, value bool) {
+ if !value {
+ t.Fatalf("Assertion failed")
+ }
+}
+
+func TestFileIndexerLegacy(t *testing.T) {
+ t.Run("Publish and index header IPLDs", func(t *testing.T) {
+ setupLegacy(t)
+ dumpData(t)
+ defer tearDown(t)
+ pgStr := `SELECT cid, td, reward, block_hash, coinbase
+ FROM eth.header_cids
+ WHERE block_number = $1`
+ // check header was properly indexed
+ type res struct {
+ CID string
+ TD string
+ Reward string
+ BlockHash string `db:"block_hash"`
+ Coinbase string `db:"coinbase"`
+ }
+ header := new(res)
+ err = sqlxdb.QueryRowx(pgStr, legacyData.BlockNumber.Uint64()).StructScan(header)
+ require.NoError(t, err)
+
+ test_helpers.ExpectEqual(t, header.CID, legacyHeaderCID.String())
+ test_helpers.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String())
+ test_helpers.ExpectEqual(t, header.Reward, "5000000000000011250")
+ test_helpers.ExpectEqual(t, header.Coinbase, legacyData.MockBlock.Coinbase().String())
+ require.Nil(t, legacyData.MockHeader.BaseFee)
+ })
+}
diff --git a/statediff/indexer/indexer_test.go b/statediff/indexer/database/file/indexer_test.go
similarity index 52%
rename from statediff/indexer/indexer_test.go
rename to statediff/indexer/database/file/indexer_test.go
index 67645a12d..cd6e89b20 100644
--- a/statediff/indexer/indexer_test.go
+++ b/statediff/indexer/database/file/indexer_test.go
@@ -14,50 +14,51 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package indexer_test
+package file_test
import (
"bytes"
+ "context"
+ "errors"
"fmt"
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/statediff/indexer"
- "github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
- "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
+
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
+ "github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/file"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
var (
- db *postgres.DB
+ sqlxdb *sqlx.DB
err error
- ind *indexer.StateDiffIndexer
+ ind interfaces.StateDiffIndexer
ipfsPgGet = `SELECT data FROM public.blocks
WHERE key = $1`
tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
mockBlock *types.Block
headerCID, trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
+ rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte
state1CID, state2CID, storageCID cid.Cid
)
-func expectTrue(t *testing.T, value bool) {
- if !value {
- t.Fatalf("Assertion failed")
- }
-}
-
func init() {
if os.Getenv("MODE") != "statediff" {
fmt.Println("Skipping statediff test")
@@ -124,24 +125,52 @@ func init() {
trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256)
trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256)
trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256)
- rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct1, multihash.KECCAK_256)
- rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct2, multihash.KECCAK_256)
- rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct3, multihash.KECCAK_256)
- rct4CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct4, multihash.KECCAK_256)
- rct5CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct5, multihash.KECCAK_256)
state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256)
state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)
+
+ receiptTrie := ipld.NewRctTrie()
+
+ receiptTrie.Add(0, rct1)
+ receiptTrie.Add(1, rct2)
+ receiptTrie.Add(2, rct3)
+ receiptTrie.Add(3, rct4)
+ receiptTrie.Add(4, rct5)
+
+ rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
+
+ rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
+ orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
+ for i, rln := range rctLeafNodes {
+ var idx uint
+
+ r := bytes.NewReader(keys[i].TrieKey)
+ rlp.Decode(r, &idx)
+ rctleafNodeCids[idx] = rln.Cid()
+ orderedRctLeafNodes[idx] = rln.RawData()
+ }
+
+ rct1CID = rctleafNodeCids[0]
+ rct2CID = rctleafNodeCids[1]
+ rct3CID = rctleafNodeCids[2]
+ rct4CID = rctleafNodeCids[3]
+ rct5CID = rctleafNodeCids[4]
+
+ rctLeaf1 = orderedRctLeafNodes[0]
+ rctLeaf2 = orderedRctLeafNodes[1]
+ rctLeaf3 = orderedRctLeafNodes[2]
+ rctLeaf4 = orderedRctLeafNodes[3]
+ rctLeaf5 = orderedRctLeafNodes[4]
}
func setup(t *testing.T) {
- db, err = shared.SetupDB()
- if err != nil {
- t.Fatal(err)
+ if _, err := os.Stat(file.TestConfig.FilePath); !errors.Is(err, os.ErrNotExist) {
+ err := os.Remove(file.TestConfig.FilePath)
+ require.NoError(t, err)
}
- ind, err = indexer.NewStateDiffIndexer(mocks.TestConfig, db)
+ ind, err = file.NewStateDiffIndexer(context.Background(), mocks.TestConfig, file.TestConfig)
require.NoError(t, err)
- var tx *indexer.BlockTx
+ var tx interfaces.Batch
tx, err = ind.PushBlock(
mockBlock,
mocks.MockReceipts,
@@ -149,45 +178,55 @@ func setup(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer tx.Close(err)
- for _, node := range mocks.StateDiffs {
- err = ind.PushStateNode(tx, node)
- if err != nil {
+ defer func() {
+ if err := tx.Submit(err); err != nil {
t.Fatal(err)
}
+ if err := ind.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range mocks.StateDiffs {
+ err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
+ require.NoError(t, err)
}
- shared.ExpectEqual(t, tx.BlockNumber, mocks.BlockNumber.Uint64())
+ test_helpers.ExpectEqual(t, tx.(*file.BatchTx).BlockNumber, mocks.BlockNumber.Uint64())
+
+ connStr := postgres.DefaultConfig.DbConnectionString()
+
+ sqlxdb, err = sqlx.Connect("postgres", connStr)
+ if err != nil {
+ t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err)
+ }
}
-func tearDown(t *testing.T) {
- indexer.TearDownDB(t, db)
-}
-
-func TestPublishAndIndexer(t *testing.T) {
+func TestFileIndexer(t *testing.T) {
t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
setup(t)
+ dumpData(t)
defer tearDown(t)
- pgStr := `SELECT cid, td, reward, id, base_fee
+ pgStr := `SELECT cid, td, reward, block_hash, coinbase
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
- CID string
- TD string
- Reward string
- ID int
- BaseFee *int64 `db:"base_fee"`
+ CID string
+ TD string
+ Reward string
+ BlockHash string `db:"block_hash"`
+ Coinbase string `db:"coinbase"`
}
header := new(res)
- err = db.QueryRowx(pgStr, mocks.BlockNumber.Uint64()).StructScan(header)
+ err = sqlxdb.QueryRowx(pgStr, mocks.BlockNumber.Uint64()).StructScan(header)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, header.CID, headerCID.String())
- shared.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
- shared.ExpectEqual(t, header.Reward, "2000000000000021250")
- shared.ExpectEqual(t, *header.BaseFee, mocks.MockHeader.BaseFee.Int64())
+
+ test_helpers.ExpectEqual(t, header.CID, headerCID.String())
+ test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
+ test_helpers.ExpectEqual(t, header.Reward, "2000000000000021250")
+ test_helpers.ExpectEqual(t, header.Coinbase, mocks.MockHeader.Coinbase.String())
dc, err := cid.Decode(header.CID)
if err != nil {
t.Fatal(err)
@@ -195,31 +234,37 @@ func TestPublishAndIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, data, mocks.MockHeaderRlp)
+ test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp)
})
-
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
setup(t)
+ dumpData(t)
defer tearDown(t)
- // check that txs were properly indexed
+
+ // check that txs were properly indexed and published
trxs := make([]string, 0)
- pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
+ pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1`
- err = db.Select(&trxs, pgStr, mocks.BlockNumber.Uint64())
+ err = sqlxdb.Select(&trxs, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, len(trxs), 5)
- expectTrue(t, shared.ListContainsString(trxs, trx1CID.String()))
- expectTrue(t, shared.ListContainsString(trxs, trx2CID.String()))
- expectTrue(t, shared.ListContainsString(trxs, trx3CID.String()))
- expectTrue(t, shared.ListContainsString(trxs, trx4CID.String()))
- expectTrue(t, shared.ListContainsString(trxs, trx5CID.String()))
- // and published
+ test_helpers.ExpectEqual(t, len(trxs), 5)
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String()))
+
+ transactions := mocks.MockBlock.Transactions()
+ type txResult struct {
+ TxType uint8 `db:"tx_type"`
+ Value string
+ }
for _, c := range trxs {
dc, err := cid.Decode(c)
if err != nil {
@@ -228,58 +273,67 @@ func TestPublishAndIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
+ txTypeAndValueStr := `SELECT tx_type, value FROM eth.transaction_cids WHERE cid = $1`
switch c {
case trx1CID.String():
- shared.ExpectEqual(t, data, tx1)
- var txType *uint8
- pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
- err = db.Get(&txType, pgStr, c)
+ test_helpers.ExpectEqual(t, data, tx1)
+ txRes := new(txResult)
+ err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes)
if err != nil {
t.Fatal(err)
}
- if txType != nil {
- t.Fatalf("expected nil tx_type, got %d", *txType)
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[0].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value)
}
case trx2CID.String():
- shared.ExpectEqual(t, data, tx2)
- var txType *uint8
- pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
- err = db.Get(&txType, pgStr, c)
+ test_helpers.ExpectEqual(t, data, tx2)
+ txRes := new(txResult)
+ err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes)
if err != nil {
t.Fatal(err)
}
- if txType != nil {
- t.Fatalf("expected nil tx_type, got %d", *txType)
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[1].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value)
}
case trx3CID.String():
- shared.ExpectEqual(t, data, tx3)
- var txType *uint8
- pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
- err = db.Get(&txType, pgStr, c)
+ test_helpers.ExpectEqual(t, data, tx3)
+ txRes := new(txResult)
+ err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes)
if err != nil {
t.Fatal(err)
}
- if txType != nil {
- t.Fatalf("expected nil tx_type, got %d", *txType)
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[2].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value)
}
case trx4CID.String():
- shared.ExpectEqual(t, data, tx4)
- var txType *uint8
- pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
- err = db.Get(&txType, pgStr, c)
+ test_helpers.ExpectEqual(t, data, tx4)
+ txRes := new(txResult)
+ err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes)
if err != nil {
t.Fatal(err)
}
- if *txType != types.AccessListTxType {
- t.Fatalf("expected AccessListTxType (1), got %d", *txType)
+ if txRes.TxType != types.AccessListTxType {
+ t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[3].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
}
accessListElementModels := make([]models.AccessListElementModel, 0)
- pgStr = `SELECT access_list_element.* FROM eth.access_list_element INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.id) WHERE cid = $1 ORDER BY access_list_element.index ASC`
- err = db.Select(&accessListElementModels, pgStr, c)
+ pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC`
+ err = sqlxdb.Select(&accessListElementModels, pgStr, c)
if err != nil {
t.Fatal(err)
}
@@ -295,18 +349,20 @@ func TestPublishAndIndexer(t *testing.T) {
Address: accessListElementModels[1].Address,
StorageKeys: accessListElementModels[1].StorageKeys,
}
- shared.ExpectEqual(t, model1, mocks.AccessListEntry1Model)
- shared.ExpectEqual(t, model2, mocks.AccessListEntry2Model)
+ test_helpers.ExpectEqual(t, model1, mocks.AccessListEntry1Model)
+ test_helpers.ExpectEqual(t, model2, mocks.AccessListEntry2Model)
case trx5CID.String():
- shared.ExpectEqual(t, data, tx5)
- var txType *uint8
- pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
- err = db.Get(&txType, pgStr, c)
+ test_helpers.ExpectEqual(t, data, tx5)
+ txRes := new(txResult)
+ err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes)
if err != nil {
t.Fatal(err)
}
- if *txType != types.DynamicFeeTxType {
- t.Fatalf("expected DynamicFeeTxType (2), got %d", *txType)
+ if txRes.TxType != types.DynamicFeeTxType {
+ t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[4].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value)
}
}
}
@@ -314,15 +370,16 @@ func TestPublishAndIndexer(t *testing.T) {
t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
setup(t)
+ dumpData(t)
defer tearDown(t)
rcts := make([]string, 0)
pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
- WHERE receipt_cids.tx_id = transaction_cids.id
- AND transaction_cids.header_id = header_cids.id
+ WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+ AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
- err = db.Select(&rcts, pgStr, mocks.BlockNumber.Uint64())
+ err = sqlxdb.Select(&rcts, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@@ -336,16 +393,16 @@ func TestPublishAndIndexer(t *testing.T) {
}
for i := range rcts {
results := make([]logIPLD, 0)
- pgStr = `SELECT log_cids.index, log_cids.address, log_cids.Topic0, log_cids.Topic1, data FROM eth.log_cids
- INNER JOIN eth.receipt_cids ON (log_cids.receipt_id = receipt_cids.id)
+ pgStr = `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
+ INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
- err = db.Select(&results, pgStr, rcts[i])
+ err = sqlxdb.Select(&results, pgStr, rcts[i])
require.NoError(t, err)
// expecting MockLog1 and MockLog2 for mockReceipt4
expectedLogs := mocks.MockReceipts[i].Logs
- shared.ExpectEqual(t, len(results), len(expectedLogs))
+ test_helpers.ExpectEqual(t, len(results), len(expectedLogs))
var nodeElements []interface{}
for idx, r := range results {
@@ -357,34 +414,40 @@ func TestPublishAndIndexer(t *testing.T) {
require.NoError(t, err)
// 2nd element of the leaf node contains the encoded log data.
- shared.ExpectEqual(t, logRaw, nodeElements[1].([]byte))
+ test_helpers.ExpectEqual(t, logRaw, nodeElements[1].([]byte))
}
}
})
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
setup(t)
+ dumpData(t)
defer tearDown(t)
- // check receipts were properly indexed
+ // check receipts were properly indexed and published
rcts := make([]string, 0)
pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
- WHERE receipt_cids.tx_id = transaction_cids.id
- AND transaction_cids.header_id = header_cids.id
- AND header_cids.block_number = $1 order by transaction_cids.id`
- err = db.Select(&rcts, pgStr, mocks.BlockNumber.Uint64())
+ WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+ AND transaction_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1 order by transaction_cids.index`
+ err = sqlxdb.Select(&rcts, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, len(rcts), 5)
+ test_helpers.ExpectEqual(t, len(rcts), 5)
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
- for idx, rctLeafCID := range rcts {
- result := make([]ipfs.BlockModel, 0)
+ for idx, c := range rcts {
+ result := make([]models.IPLDModel, 0)
pgStr = `SELECT data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
WHERE receipt_cids.leaf_cid = $1`
- err = db.Select(&result, pgStr, rctLeafCID)
+ err = sqlxdb.Select(&result, pgStr, c)
if err != nil {
t.Fatal(err)
}
@@ -397,11 +460,8 @@ func TestPublishAndIndexer(t *testing.T) {
expectedRct, err := mocks.MockReceipts[idx].MarshalBinary()
require.NoError(t, err)
- shared.ExpectEqual(t, expectedRct, nodeElements[1].([]byte))
- }
+ test_helpers.ExpectEqual(t, expectedRct, nodeElements[1].([]byte))
- // and published
- for _, c := range rcts {
dc, err := cid.Decode(c)
if err != nil {
t.Fatal(err)
@@ -409,74 +469,72 @@ func TestPublishAndIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
-
+ postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
switch c {
case rct1CID.String():
- shared.ExpectEqual(t, data, rct1)
+ test_helpers.ExpectEqual(t, data, rctLeaf1)
var postStatus uint64
pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1`
- err = db.Get(&postStatus, pgStr, c)
+ err = sqlxdb.Get(&postStatus, pgStr, c)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus)
+ test_helpers.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus)
case rct2CID.String():
- shared.ExpectEqual(t, data, rct2)
+ test_helpers.ExpectEqual(t, data, rctLeaf2)
var postState string
- pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
- err = db.Get(&postState, pgStr, c)
+ err = sqlxdb.Get(&postState, postStatePgStr, c)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, postState, mocks.ExpectedPostState1)
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState1)
case rct3CID.String():
- shared.ExpectEqual(t, data, rct3)
+ test_helpers.ExpectEqual(t, data, rctLeaf3)
var postState string
- pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
- err = db.Get(&postState, pgStr, c)
+ err = sqlxdb.Get(&postState, postStatePgStr, c)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, postState, mocks.ExpectedPostState2)
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState2)
case rct4CID.String():
- shared.ExpectEqual(t, data, rct4)
+ test_helpers.ExpectEqual(t, data, rctLeaf4)
var postState string
- pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
- err = db.Get(&postState, pgStr, c)
+ err = sqlxdb.Get(&postState, postStatePgStr, c)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, postState, mocks.ExpectedPostState3)
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
case rct5CID.String():
- shared.ExpectEqual(t, data, rct5)
+ test_helpers.ExpectEqual(t, data, rctLeaf5)
var postState string
- pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
- err = db.Get(&postState, pgStr, c)
+ err = sqlxdb.Get(&postState, postStatePgStr, c)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, postState, mocks.ExpectedPostState3)
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
}
}
})
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
setup(t)
+ dumpData(t)
defer tearDown(t)
+
// check that state nodes were properly indexed and published
stateNodes := make([]models.StateNodeModel, 0)
- pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
- FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+ pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type != 3`
- err = db.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64())
+ err = sqlxdb.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, len(stateNodes), 2)
+ test_helpers.ExpectEqual(t, len(stateNodes), 2)
for _, stateNode := range stateNodes {
var data []byte
dc, err := cid.Decode(stateNode.CID)
@@ -485,24 +543,24 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
- pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
+ pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
var account models.StateAccountModel
- err = db.Get(&account, pgStr, stateNode.ID)
+ err = sqlxdb.Get(&account, pgStr, stateNode.HeaderID, stateNode.Path)
if err != nil {
t.Fatal(err)
}
if stateNode.CID == state1CID.String() {
- shared.ExpectEqual(t, stateNode.NodeType, 2)
- shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
- shared.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
- shared.ExpectEqual(t, data, mocks.ContractLeafNode)
- shared.ExpectEqual(t, account, models.StateAccountModel{
- ID: account.ID,
- StateID: stateNode.ID,
+ test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
+ test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
+ test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
+ test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ HeaderID: account.HeaderID,
+ StatePath: stateNode.Path,
Balance: "0",
CodeHash: mocks.ContractCodeHash.Bytes(),
StorageRoot: mocks.ContractRoot,
@@ -510,13 +568,13 @@ func TestPublishAndIndexer(t *testing.T) {
})
}
if stateNode.CID == state2CID.String() {
- shared.ExpectEqual(t, stateNode.NodeType, 2)
- shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
- shared.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
- shared.ExpectEqual(t, data, mocks.AccountLeafNode)
- shared.ExpectEqual(t, account, models.StateAccountModel{
- ID: account.ID,
- StateID: stateNode.ID,
+ test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
+ test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
+ test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
+ test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ HeaderID: account.HeaderID,
+ StatePath: stateNode.Path,
Balance: "1000",
CodeHash: mocks.AccountCodeHash.Bytes(),
StorageRoot: mocks.AccountRoot,
@@ -527,14 +585,14 @@ func TestPublishAndIndexer(t *testing.T) {
// check that Removed state nodes were properly indexed and published
stateNodes = make([]models.StateNodeModel, 0)
- pgStr = `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
- FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+ pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type = 3`
- err = db.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64())
+ err = sqlxdb.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, len(stateNodes), 1)
+ test_helpers.ExpectEqual(t, len(stateNodes), 1)
stateNode := stateNodes[0]
var data []byte
dc, err := cid.Decode(stateNode.CID)
@@ -543,33 +601,35 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
- shared.ExpectEqual(t, prefixedKey, indexer.RemovedNodeMhKey)
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, stateNode.CID, indexer.RemovedNodeStateCID)
- shared.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
- shared.ExpectEqual(t, data, []byte{})
+ test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
+ test_helpers.ExpectEqual(t, data, []byte{})
})
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
setup(t)
+ dumpData(t)
defer tearDown(t)
+
// check that storage nodes were properly indexed
storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
- WHERE storage_cids.state_id = state_cids.id
- AND state_cids.header_id = header_cids.id
+ WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
+ AND state_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
AND storage_cids.node_type != 3`
- err = db.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64())
+ err = sqlxdb.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, len(storageNodes), 1)
- shared.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, len(storageNodes), 1)
+ test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
CID: storageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
@@ -583,27 +643,27 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, data, mocks.StorageLeafNode)
+ test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)
// check that Removed storage nodes were properly indexed
storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
- WHERE storage_cids.state_id = state_cids.id
- AND state_cids.header_id = header_cids.id
+ WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
+ AND state_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
AND storage_cids.node_type = 3`
- err = db.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64())
+ err = sqlxdb.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, len(storageNodes), 1)
- shared.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
- CID: indexer.RemovedNodeStorageCID,
+ test_helpers.ExpectEqual(t, len(storageNodes), 1)
+ test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ CID: shared.RemovedNodeStorageCID,
NodeType: 3,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
@@ -615,11 +675,11 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey = dshelp.MultihashToDsKey(dc.Hash())
prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
- shared.ExpectEqual(t, prefixedKey, indexer.RemovedNodeMhKey)
- err = db.Get(&data, ipfsPgGet, prefixedKey)
+ test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+ err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
- shared.ExpectEqual(t, data, []byte{})
+ test_helpers.ExpectEqual(t, data, []byte{})
})
}
diff --git a/statediff/indexer/database/file/mainnet_tests/indexer_test.go b/statediff/indexer/database/file/mainnet_tests/indexer_test.go
new file mode 100644
index 000000000..b297b82d1
--- /dev/null
+++ b/statediff/indexer/database/file/mainnet_tests/indexer_test.go
@@ -0,0 +1,134 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package mainnet_tests
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "os"
+ "testing"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/file"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+var (
+ sqlxdb *sqlx.DB
+ chainConf = params.MainnetChainConfig
+)
+
+func init() {
+ if os.Getenv("MODE") != "statediff" {
+ fmt.Println("Skipping statediff test")
+ os.Exit(0)
+ }
+ if os.Getenv("STATEDIFF_DB") != "file" {
+ fmt.Println("Skipping statediff .sql file writing mode test")
+ os.Exit(0)
+ }
+}
+
+func TestPushBlockAndState(t *testing.T) {
+ conf := test_helpers.DefaultTestConfig
+ rawURL := os.Getenv(test_helpers.TEST_RAW_URL)
+ if rawURL == "" {
+ fmt.Printf("Warning: no raw url configured for statediffing mainnet tests, will look for local file and"+
+ "then try default endpoint (%s)\r\n", test_helpers.DefaultTestConfig.RawURL)
+ } else {
+ conf.RawURL = rawURL
+ }
+ for _, blockNumber := range test_helpers.ProblemBlocks {
+ conf.BlockNumber = big.NewInt(blockNumber)
+ tb, trs, err := test_helpers.TestBlockAndReceipts(conf)
+ require.NoError(t, err)
+ testPushBlockAndState(t, tb, trs)
+ }
+ testBlock, testReceipts, err := test_helpers.TestBlockAndReceiptsFromEnv(conf)
+ require.NoError(t, err)
+ testPushBlockAndState(t, testBlock, testReceipts)
+}
+
+func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Receipts) {
+ t.Run("Test PushBlock and PushStateNode", func(t *testing.T) {
+ setup(t, block, receipts)
+ dumpData(t)
+ tearDown(t)
+ })
+}
+
+func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) {
+ if _, err := os.Stat(file.TestConfig.FilePath); !errors.Is(err, os.ErrNotExist) {
+ err := os.Remove(file.TestConfig.FilePath)
+ require.NoError(t, err)
+ }
+ ind, err := file.NewStateDiffIndexer(context.Background(), chainConf, file.TestConfig)
+ require.NoError(t, err)
+ var tx interfaces.Batch
+ tx, err = ind.PushBlock(
+ testBlock,
+ testReceipts,
+ testBlock.Difficulty())
+ require.NoError(t, err)
+
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ if err := ind.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range mocks.StateDiffs {
+ err = ind.PushStateNode(tx, node, testBlock.Hash().String())
+ require.NoError(t, err)
+ }
+
+ test_helpers.ExpectEqual(t, tx.(*file.BatchTx).BlockNumber, testBlock.Number().Uint64())
+
+ connStr := postgres.DefaultConfig.DbConnectionString()
+
+ sqlxdb, err = sqlx.Connect("postgres", connStr)
+ if err != nil {
+ t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err)
+ }
+}
+
+func dumpData(t *testing.T) {
+ sqlFileBytes, err := os.ReadFile(file.TestConfig.FilePath)
+ require.NoError(t, err)
+
+ _, err = sqlxdb.Exec(string(sqlFileBytes))
+ require.NoError(t, err)
+}
+
+func tearDown(t *testing.T) {
+ file.TearDownDB(t, sqlxdb)
+ err := os.Remove(file.TestConfig.FilePath)
+ require.NoError(t, err)
+ err = sqlxdb.Close()
+ require.NoError(t, err)
+}
diff --git a/statediff/indexer/database/file/metrics.go b/statediff/indexer/database/file/metrics.go
new file mode 100644
index 000000000..ca6e88f2b
--- /dev/null
+++ b/statediff/indexer/database/file/metrics.go
@@ -0,0 +1,94 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package file
+
+import (
+ "strings"
+
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+const (
+ namespace = "statediff"
+)
+
+// Build a fully qualified metric name
+func metricName(subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ parts := []string{namespace, name}
+ if subsystem != "" {
+ parts = []string{namespace, subsystem, name}
+ }
+ // Prometheus uses _ as the separator, but geth metrics uses / and replaces it on export
+ return strings.Join(parts, "/")
+}
+
+type indexerMetricsHandles struct {
+ // The total number of processed blocks
+ blocks metrics.Counter
+ // The total number of processed transactions
+ transactions metrics.Counter
+ // The total number of processed receipts
+ receipts metrics.Counter
+ // The total number of processed logs
+ logs metrics.Counter
+ // The total number of access list entries processed
+ accessListEntries metrics.Counter
+ // Time spent waiting for free postgres tx
+ tFreePostgres metrics.Timer
+ // Postgres transaction commit duration
+ tPostgresCommit metrics.Timer
+ // Header processing time
+ tHeaderProcessing metrics.Timer
+ // Uncle processing time
+ tUncleProcessing metrics.Timer
+ // Tx and receipt processing time
+ tTxAndRecProcessing metrics.Timer
+ // State, storage, and code combined processing time
+ tStateStoreCodeProcessing metrics.Timer
+}
+
+func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles {
+ ctx := indexerMetricsHandles{
+ blocks: metrics.NewCounter(),
+ transactions: metrics.NewCounter(),
+ receipts: metrics.NewCounter(),
+ logs: metrics.NewCounter(),
+ accessListEntries: metrics.NewCounter(),
+ tFreePostgres: metrics.NewTimer(),
+ tPostgresCommit: metrics.NewTimer(),
+ tHeaderProcessing: metrics.NewTimer(),
+ tUncleProcessing: metrics.NewTimer(),
+ tTxAndRecProcessing: metrics.NewTimer(),
+ tStateStoreCodeProcessing: metrics.NewTimer(),
+ }
+ subsys := "indexer"
+ reg.Register(metricName(subsys, "blocks"), ctx.blocks)
+ reg.Register(metricName(subsys, "transactions"), ctx.transactions)
+ reg.Register(metricName(subsys, "receipts"), ctx.receipts)
+ reg.Register(metricName(subsys, "logs"), ctx.logs)
+ reg.Register(metricName(subsys, "access_list_entries"), ctx.accessListEntries)
+ reg.Register(metricName(subsys, "t_free_postgres"), ctx.tFreePostgres)
+ reg.Register(metricName(subsys, "t_postgres_commit"), ctx.tPostgresCommit)
+ reg.Register(metricName(subsys, "t_header_processing"), ctx.tHeaderProcessing)
+ reg.Register(metricName(subsys, "t_uncle_processing"), ctx.tUncleProcessing)
+ reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.tTxAndRecProcessing)
+ reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.tStateStoreCodeProcessing)
+ return ctx
+}
diff --git a/statediff/indexer/database/file/test_helpers.go b/statediff/indexer/database/file/test_helpers.go
new file mode 100644
index 000000000..27d204d55
--- /dev/null
+++ b/statediff/indexer/database/file/test_helpers.go
@@ -0,0 +1,64 @@
+package file
+
+import (
+ "testing"
+
+ "github.com/jmoiron/sqlx"
+)
+
+// TearDownDB is used to tear down the watcher dbs after tests
+func TearDownDB(t *testing.T, db *sqlx.DB) {
+ tx, err := db.Begin()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = tx.Exec(`DELETE FROM eth.header_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.uncle_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.state_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.storage_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.state_accounts`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.access_list_elements`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM eth.log_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM blocks`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(`DELETE FROM nodes`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = tx.Commit()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/statediff/indexer/database/file/writer.go b/statediff/indexer/database/file/writer.go
new file mode 100644
index 000000000..48de0853d
--- /dev/null
+++ b/statediff/indexer/database/file/writer.go
@@ -0,0 +1,255 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package file
+
+import (
+ "fmt"
+ "io"
+
+ blockstore "github.com/ipfs/go-ipfs-blockstore"
+ dshelp "github.com/ipfs/go-ipfs-ds-help"
+ node "github.com/ipfs/go-ipld-format"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
+)
+
+var (
+ nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
+ pipeSize = 65336 // min(linuxPipeSize, macOSPipeSize)
+ writeBufferSize = pipeSize * 16 * 96
+)
+
+// SQLWriter writes sql statements to a file
+type SQLWriter struct {
+ wc io.WriteCloser
+ stmts chan []byte
+ collatedStmt []byte
+ collationIndex int
+
+ flushChan chan struct{}
+ flushFinished chan struct{}
+ quitChan chan struct{}
+ doneChan chan struct{}
+}
+
+// NewSQLWriter creates a new pointer to a Writer
+func NewSQLWriter(wc io.WriteCloser) *SQLWriter {
+ return &SQLWriter{
+ wc: wc,
+ stmts: make(chan []byte),
+ collatedStmt: make([]byte, writeBufferSize),
+ flushChan: make(chan struct{}),
+ flushFinished: make(chan struct{}),
+ quitChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ }
+}
+
+// Loop enables concurrent writes to the underlying os.File
+// since os.File does not buffer, it utilizes an internal buffer sized as a multiple of the unix pipe size
+// by using copy() and tracking the index/size of the buffer, we require only the initial memory allocation
+func (sqw *SQLWriter) Loop() {
+ sqw.collationIndex = 0
+ go func() {
+ defer close(sqw.doneChan)
+ var l int
+ for {
+ select {
+ case stmt := <-sqw.stmts:
+ l = len(stmt)
+ if sqw.collationIndex+l > writeBufferSize {
+ if err := sqw.flush(); err != nil {
+ panic(fmt.Sprintf("error writing sql stmts buffer to file: %v", err))
+ }
+ if l > writeBufferSize {
+ if _, err := sqw.wc.Write(stmt); err != nil {
+ panic(fmt.Sprintf("error writing large sql stmt to file: %v", err))
+ }
+ continue
+ }
+ }
+ copy(sqw.collatedStmt[sqw.collationIndex:sqw.collationIndex+l], stmt)
+ sqw.collationIndex += l
+ case <-sqw.quitChan:
+ if err := sqw.flush(); err != nil {
+ panic(fmt.Sprintf("error writing sql stmts buffer to file: %v", err))
+ }
+ return
+ case <-sqw.flushChan:
+ if err := sqw.flush(); err != nil {
+ panic(fmt.Sprintf("error writing sql stmts buffer to file: %v", err))
+ }
+ sqw.flushFinished <- struct{}{}
+ }
+ }
+ }()
+}
+
+// Close satisfies io.Closer
+func (sqw *SQLWriter) Close() error {
+ close(sqw.quitChan)
+ <-sqw.doneChan
+ return sqw.wc.Close()
+}
+
+// Flush sends a flush signal to the looping process
+func (sqw *SQLWriter) Flush() {
+ sqw.flushChan <- struct{}{}
+ <-sqw.flushFinished
+}
+
+func (sqw *SQLWriter) flush() error {
+ if _, err := sqw.wc.Write(sqw.collatedStmt[0:sqw.collationIndex]); err != nil {
+ return err
+ }
+ sqw.collationIndex = 0
+ return nil
+}
+
+const (
+ nodeInsert = "INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES " +
+ "('%s', '%s', '%s', '%s', %d);\n"
+
+ ipldInsert = "INSERT INTO public.blocks (key, data) VALUES ('%s', '\\x%x');\n"
+
+ headerInsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, " +
+ "state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES " +
+ "('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\\x%x', %d, '%s', %d, '%s');\n"
+
+ uncleInsert = "INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES " +
+ "('%s', '%s', '%s', '%s', '%s', '%s');\n"
+
+ txInsert = "INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, " +
+ "value) VALUES ('%s', '%s', '%s', '%s', '%s', %d, '%s', '\\x%x', %d, '%s');\n"
+
+ alInsert = "INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ('%s', %d, '%s', '%s');\n"
+
+ rctInsert = "INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, " +
+ "post_status, log_root) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %d, '%s');\n"
+
+ logInsert = "INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, " +
+ "topic3, log_data) VALUES ('%s', '%s', '%s', '%s', %d, '%s', '%s', '%s', '%s', '\\x%x');\n"
+
+ stateInsert = "INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) " +
+ "VALUES ('%s', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
+
+ accountInsert = "INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) " +
+ "VALUES ('%s', '\\x%x', '%s', %d, '\\x%x', '%s');\n"
+
+ storageInsert = "INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, " +
+ "node_type, diff, mh_key) VALUES ('%s', '\\x%x', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
+)
+
+func (sqw *SQLWriter) upsertNode(node nodeinfo.Info) {
+ sqw.stmts <- []byte(fmt.Sprintf(nodeInsert, node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID))
+}
+
+func (sqw *SQLWriter) upsertIPLD(ipld models.IPLDModel) {
+ sqw.stmts <- []byte(fmt.Sprintf(ipldInsert, ipld.Key, ipld.Data))
+}
+
+func (sqw *SQLWriter) upsertIPLDDirect(key string, value []byte) {
+ sqw.upsertIPLD(models.IPLDModel{
+ Key: key,
+ Data: value,
+ })
+}
+
+func (sqw *SQLWriter) upsertIPLDNode(i node.Node) {
+ sqw.upsertIPLD(models.IPLDModel{
+ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
+ Data: i.RawData(),
+ })
+}
+
+func (sqw *SQLWriter) upsertIPLDRaw(codec, mh uint64, raw []byte) (string, string, error) {
+ c, err := ipld.RawdataToCid(codec, raw, mh)
+ if err != nil {
+ return "", "", err
+ }
+ prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
+ sqw.upsertIPLD(models.IPLDModel{
+ Key: prefixedKey,
+ Data: raw,
+ })
+ return c.String(), prefixedKey, err
+}
+
+func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) {
+ stmt := fmt.Sprintf(headerInsert, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
+ header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot,
+ header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase)
+ sqw.stmts <- []byte(stmt)
+ indexerMetrics.blocks.Inc(1)
+}
+
+func (sqw *SQLWriter) upsertUncleCID(uncle models.UncleModel) {
+ sqw.stmts <- []byte(fmt.Sprintf(uncleInsert, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
+ uncle.Reward, uncle.MhKey))
+}
+
+func (sqw *SQLWriter) upsertTransactionCID(transaction models.TxModel) {
+ sqw.stmts <- []byte(fmt.Sprintf(txInsert, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst,
+ transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value))
+ indexerMetrics.transactions.Inc(1)
+}
+
+func (sqw *SQLWriter) upsertAccessListElement(accessListElement models.AccessListElementModel) {
+ sqw.stmts <- []byte(fmt.Sprintf(alInsert, accessListElement.TxID, accessListElement.Index, accessListElement.Address,
+ formatPostgresStringArray(accessListElement.StorageKeys)))
+ indexerMetrics.accessListEntries.Inc(1)
+}
+
+func (sqw *SQLWriter) upsertReceiptCID(rct *models.ReceiptModel) {
+ sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey,
+ rct.PostState, rct.PostStatus, rct.LogRoot))
+ indexerMetrics.receipts.Inc(1)
+}
+
+func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) {
+ for _, l := range logs {
+ sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.LeafCID, l.LeafMhKey, l.ReceiptID, l.Address, l.Index, l.Topic0,
+ l.Topic1, l.Topic2, l.Topic3, l.Data))
+ indexerMetrics.logs.Inc(1)
+ }
+}
+
+func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) {
+ var stateKey string
+ if stateNode.StateKey != nullHash.String() {
+ stateKey = stateNode.StateKey
+ }
+ sqw.stmts <- []byte(fmt.Sprintf(stateInsert, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path,
+ stateNode.NodeType, true, stateNode.MhKey))
+}
+
+func (sqw *SQLWriter) upsertStateAccount(stateAccount models.StateAccountModel) {
+ sqw.stmts <- []byte(fmt.Sprintf(accountInsert, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
+ stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot))
+}
+
+func (sqw *SQLWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
+ var storageKey string
+ if storageCID.StorageKey != nullHash.String() {
+ storageKey = storageCID.StorageKey
+ }
+ sqw.stmts <- []byte(fmt.Sprintf(storageInsert, storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID,
+ storageCID.Path, storageCID.NodeType, true, storageCID.MhKey))
+}
diff --git a/statediff/indexer/database/sql/batch_tx.go b/statediff/indexer/database/sql/batch_tx.go
new file mode 100644
index 000000000..fb1b289a1
--- /dev/null
+++ b/statediff/indexer/database/sql/batch_tx.go
@@ -0,0 +1,105 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql
+
+import (
+ "context"
+
+ blockstore "github.com/ipfs/go-ipfs-blockstore"
+ dshelp "github.com/ipfs/go-ipfs-ds-help"
+ node "github.com/ipfs/go-ipld-format"
+ "github.com/lib/pq"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+)
+
+// BatchTx wraps a sql tx with the state necessary for building the tx concurrently during trie difference iteration
+type BatchTx struct {
+ BlockNumber uint64
+ ctx context.Context
+ dbtx Tx
+ stm string
+ quit chan struct{}
+ iplds chan models.IPLDModel
+ ipldCache models.IPLDBatch
+
+ submit func(blockTx *BatchTx, err error) error
+}
+
+// Submit satisfies indexer.AtomicTx
+func (tx *BatchTx) Submit(err error) error {
+ return tx.submit(tx, err)
+}
+
+func (tx *BatchTx) flush() error {
+ _, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values))
+ if err != nil {
+ return err
+ }
+ tx.ipldCache = models.IPLDBatch{}
+ return nil
+}
+
+// run in background goroutine to synchronize concurrent appends to the ipldCache
+func (tx *BatchTx) cache() {
+ for {
+ select {
+ case i := <-tx.iplds:
+ tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
+ tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
+ case <-tx.quit:
+ tx.ipldCache = models.IPLDBatch{}
+ return
+ }
+ }
+}
+
+func (tx *BatchTx) cacheDirect(key string, value []byte) {
+ tx.iplds <- models.IPLDModel{
+ Key: key,
+ Data: value,
+ }
+}
+
+func (tx *BatchTx) cacheIPLD(i node.Node) {
+ tx.iplds <- models.IPLDModel{
+ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
+ Data: i.RawData(),
+ }
+}
+
+func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
+ c, err := ipld.RawdataToCid(codec, raw, mh)
+ if err != nil {
+ return "", "", err
+ }
+ prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
+ tx.iplds <- models.IPLDModel{
+ Key: prefixedKey,
+ Data: raw,
+ }
+ return c.String(), prefixedKey, err
+}
+
+// rollback sql transaction and log any error
+func rollback(ctx context.Context, tx Tx) {
+ if err := tx.Rollback(ctx); err != nil {
+ log.Error(err.Error())
+ }
+}
diff --git a/statediff/indexer/indexer.go b/statediff/indexer/database/sql/indexer.go
similarity index 64%
rename from statediff/indexer/indexer.go
rename to statediff/indexer/database/sql/indexer.go
index bab5fa938..c8d526d6f 100644
--- a/statediff/indexer/indexer.go
+++ b/statediff/indexer/database/sql/indexer.go
@@ -14,18 +14,21 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-// Package indexer provides an interface for pushing and indexing IPLD objects into a Postgres database
+// Package sql provides an interface for pushing and indexing IPLD objects into a sql database
// Metrics for reporting processing and connection stats are defined in ./metrics.go
-package indexer
+
+package sql
import (
+ "context"
"fmt"
"math/big"
"time"
+ ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
- "github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common"
@@ -35,58 +38,39 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
+var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
+
var (
indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry)
dbMetrics = RegisterDBMetrics(metrics.DefaultRegistry)
)
-const (
- RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
- RemovedNodeStateCID = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
- RemovedNodeMhKey = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A"
-)
-
-// Indexer interface to allow substitution of mocks for testing
-type Indexer interface {
- PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error)
- PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error
- PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error
- ReportDBMetrics(delay time.Duration, quit <-chan bool)
-}
-
-// StateDiffIndexer satisfies the Indexer interface for ethereum statediff objects
+// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of an SQL database
type StateDiffIndexer struct {
+ ctx context.Context
chainConfig *params.ChainConfig
- dbWriter *PostgresCIDWriter
- init bool
+ dbWriter *Writer
}
-// NewStateDiffIndexer creates a pointer to a new PayloadConverter which satisfies the PayloadConverter interface
-func NewStateDiffIndexer(chainConfig *params.ChainConfig, db *postgres.DB) (*StateDiffIndexer, error) {
+// NewStateDiffIndexer creates a sql implementation of interfaces.StateDiffIndexer
+func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, db Database) (*StateDiffIndexer, error) {
// Write the removed node to the db on init
- if err := shared.PublishDirectWithDB(db, RemovedNodeMhKey, []byte{}); err != nil {
+ if _, err := db.Exec(ctx, db.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil {
return nil, err
}
return &StateDiffIndexer{
+ ctx: ctx,
chainConfig: chainConfig,
- dbWriter: NewPostgresCIDWriter(db),
+ dbWriter: NewWriter(db),
}, nil
}
-type BlockTx struct {
- dbtx *sqlx.Tx
- BlockNumber uint64
- headerID int64
- Close func(err error) error
-}
-
// ReportDBMetrics is a reporting function to run as goroutine
func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bool) {
if !metrics.Enabled {
@@ -106,9 +90,9 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo
}()
}
-// PushBlock pushes and indexes block data in database, except state & storage nodes (includes header, uncles, transactions & receipts)
+// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
-func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error) {
+func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
start, t := time.Now(), time.Now()
blockHash := block.Hash()
blockHashStr := blockHash.String()
@@ -121,13 +105,16 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
}
// Generate the block iplds
- headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld.FromBlockAndReceipts(block, receipts)
+ headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
if err != nil {
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
- return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d)to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ }
+ if len(txTrieNodes) != len(rctTrieNodes) {
+ return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
}
// Calculate reward
@@ -136,37 +123,52 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
if sdi.chainConfig.Clique != nil {
reward = big.NewInt(0)
} else {
- reward = CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
+ reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
}
t = time.Now()
+
// Begin new db tx for everything
- tx, err := sdi.dbWriter.db.Beginx()
+ tx, err := sdi.dbWriter.db.Begin(sdi.ctx)
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
- shared.Rollback(tx)
+ rollback(sdi.ctx, tx)
panic(p)
} else if err != nil {
- shared.Rollback(tx)
+ rollback(sdi.ctx, tx)
}
}()
- blockTx := &BlockTx{
- dbtx: tx,
+ blockTx := &BatchTx{
+ ctx: sdi.ctx,
+ BlockNumber: height,
+ stm: sdi.dbWriter.db.InsertIPLDsStm(),
+ iplds: make(chan models.IPLDModel),
+ quit: make(chan struct{}),
+ ipldCache: models.IPLDBatch{},
+ dbtx: tx,
// handle transaction commit or rollback for any return case
- Close: func(err error) error {
+ submit: func(self *BatchTx, err error) error {
+ close(self.quit)
+ close(self.iplds)
if p := recover(); p != nil {
- shared.Rollback(tx)
+ rollback(sdi.ctx, tx)
panic(p)
} else if err != nil {
- shared.Rollback(tx)
+ rollback(sdi.ctx, tx)
} else {
tDiff := time.Since(t)
indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
t = time.Now()
- err = tx.Commit()
+ if err := self.flush(); err != nil {
+ rollback(sdi.ctx, tx)
+ traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
+ log.Debug(traceMsg)
+ return err
+ }
+ err = tx.Commit(sdi.ctx)
tDiff = time.Since(t)
indexerMetrics.tPostgresCommit.Update(tDiff)
traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
@@ -176,6 +178,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
return err
},
}
+ go blockTx.cache()
+
tDiff := time.Since(t)
indexerMetrics.tFreePostgres.Update(tDiff)
@@ -183,8 +187,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
t = time.Now()
// Publish and index header, collect headerID
- var headerID int64
- headerID, err = sdi.processHeader(tx, block.Header(), headerNode, reward, totalDifficulty)
+ var headerID string
+ headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty)
if err != nil {
return nil, err
}
@@ -193,7 +197,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
t = time.Now()
// Publish and index uncles
- err = sdi.processUncles(tx, headerID, height, uncleNodes)
+ err = sdi.processUncles(blockTx, headerID, height, uncleNodes)
if err != nil {
return nil, err
}
@@ -202,7 +206,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
t = time.Now()
// Publish and index receipts and txs
- err = sdi.processReceiptsAndTxs(tx, processArgs{
+ err = sdi.processReceiptsAndTxs(blockTx, processArgs{
headerID: headerID,
blockNumber: block.Number(),
receipts: receipts,
@@ -223,32 +227,27 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
t = time.Now()
- blockTx.BlockNumber = height
- blockTx.headerID = headerID
return blockTx, err
}
// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
-func (sdi *StateDiffIndexer) processHeader(tx *sqlx.Tx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) {
- // publish header
- if err := shared.PublishIPLD(tx, headerNode); err != nil {
- return 0, fmt.Errorf("error publishing header IPLD: %v", err)
- }
+func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (string, error) {
+ tx.cacheIPLD(headerNode)
- var baseFee *int64
+ var baseFee *string
if header.BaseFee != nil {
- baseFee = new(int64)
- *baseFee = header.BaseFee.Int64()
+ baseFee = new(string)
+ *baseFee = header.BaseFee.String()
}
-
+ headerID := header.Hash().String()
// index header
- return sdi.dbWriter.upsertHeaderCID(tx, models.HeaderModel{
+ return headerID, sdi.dbWriter.upsertHeaderCID(tx.dbtx, models.HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: header.ParentHash.String(),
BlockNumber: header.Number.String(),
- BlockHash: header.Hash().String(),
+ BlockHash: headerID,
TotalDifficulty: td.String(),
Reward: reward.String(),
Bloom: header.Bloom.Bytes(),
@@ -257,32 +256,31 @@ func (sdi *StateDiffIndexer) processHeader(tx *sqlx.Tx, header *types.Header, he
TxRoot: header.TxHash.String(),
UncleRoot: header.UncleHash.String(),
Timestamp: header.Time,
- BaseFee: baseFee,
+ Coinbase: header.Coinbase.String(),
})
}
// processUncles publishes and indexes uncle IPLDs in Postgres
-func (sdi *StateDiffIndexer) processUncles(tx *sqlx.Tx, headerID int64, blockNumber uint64, uncleNodes []*ipld.EthHeader) error {
+func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error {
// publish and index uncles
for _, uncleNode := range uncleNodes {
- if err := shared.PublishIPLD(tx, uncleNode); err != nil {
- return fmt.Errorf("error publishing uncle IPLD: %v", err)
- }
+ tx.cacheIPLD(uncleNode)
var uncleReward *big.Int
// in PoA networks uncle reward is 0
if sdi.chainConfig.Clique != nil {
uncleReward = big.NewInt(0)
} else {
- uncleReward = CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
+ uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
}
uncle := models.UncleModel{
+ HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
Reward: uncleReward.String(),
}
- if err := sdi.dbWriter.upsertUncleCID(tx, uncle, headerID); err != nil {
+ if err := sdi.dbWriter.upsertUncleCID(tx.dbtx, uncle); err != nil {
return err
}
}
@@ -291,98 +289,57 @@ func (sdi *StateDiffIndexer) processUncles(tx *sqlx.Tx, headerID int64, blockNum
// processArgs bundles arguments to processReceiptsAndTxs
type processArgs struct {
- headerID int64
+ headerID string
blockNumber *big.Int
receipts types.Receipts
txs types.Transactions
- rctNodes []*ipld.EthReceipt
- rctTrieNodes []*ipld.EthRctTrie
- txNodes []*ipld.EthTx
- txTrieNodes []*ipld.EthTxTrie
- logTrieNodes [][]*ipld.EthLogTrie
+ rctNodes []*ipld2.EthReceipt
+ rctTrieNodes []*ipld2.EthRctTrie
+ txNodes []*ipld2.EthTx
+ txTrieNodes []*ipld2.EthTxTrie
+ logTrieNodes [][]node.Node
logLeafNodeCIDs [][]cid.Cid
rctLeafNodeCIDs []cid.Cid
}
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
-func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *sqlx.Tx, args processArgs) error {
+func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error {
// Process receipts and txs
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
for i, receipt := range args.receipts {
- // tx that corresponds with this receipt
+ for _, logTrieNode := range args.logTrieNodes[i] {
+ tx.cacheIPLD(logTrieNode)
+ }
+ txNode := args.txNodes[i]
+ tx.cacheIPLD(txNode)
+
+ // index tx
trx := args.txs[i]
+ txID := trx.Hash().String()
+
+ var val string
+ if trx.Value() != nil {
+ val = trx.Value().String()
+ }
+
+ // derive sender for the tx that corresponds with this receipt
from, err := types.Sender(signer, trx)
if err != nil {
return fmt.Errorf("error deriving tx sender: %v", err)
}
-
- for _, trie := range args.logTrieNodes[i] {
- if err = shared.PublishIPLD(tx, trie); err != nil {
- return fmt.Errorf("error publishing log trie node IPLD: %w", err)
- }
- }
-
- // publish the txs and receipts
- txNode := args.txNodes[i]
- if err := shared.PublishIPLD(tx, txNode); err != nil {
- return fmt.Errorf("error publishing tx IPLD: %v", err)
- }
-
- // Indexing
- // extract topic and contract data from the receipt for indexing
- mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses
- logDataSet := make([]*models.LogsModel, len(receipt.Logs))
- for idx, l := range receipt.Logs {
- topicSet := make([]string, 4)
- for ti, topic := range l.Topics {
- topicSet[ti] = topic.Hex()
- }
-
- if !args.logLeafNodeCIDs[i][idx].Defined() {
- return fmt.Errorf("invalid log cid")
- }
-
- mappedContracts[l.Address.String()] = true
- logDataSet[idx] = &models.LogsModel{
- ID: 0,
- Address: l.Address.String(),
- Index: int64(l.Index),
- Data: l.Data,
- LeafCID: args.logLeafNodeCIDs[i][idx].String(),
- LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
- Topic0: topicSet[0],
- Topic1: topicSet[1],
- Topic2: topicSet[2],
- Topic3: topicSet[3],
- }
- }
- // these are the contracts seen in the logs
- logContracts := make([]string, 0, len(mappedContracts))
- for addr := range mappedContracts {
- logContracts = append(logContracts, addr)
- }
- // this is the contract address if this receipt is for a contract creation tx
- contract := shared.HandleZeroAddr(receipt.ContractAddress)
- var contractHash string
- if contract != "" {
- contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
- }
- // index tx first so that the receipt can reference it by FK
txModel := models.TxModel{
- Dst: shared.HandleZeroAddrPointer(trx.To()),
- Src: shared.HandleZeroAddr(from),
- TxHash: trx.Hash().String(),
- Index: int64(i),
- Data: trx.Data(),
- CID: txNode.Cid().String(),
- MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+ HeaderID: args.headerID,
+ Dst: shared.HandleZeroAddrPointer(trx.To()),
+ Src: shared.HandleZeroAddr(from),
+ TxHash: txID,
+ Index: int64(i),
+ Data: trx.Data(),
+ CID: txNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+ Type: trx.Type(),
+ Value: val,
}
- txType := trx.Type()
- if txType != types.LegacyTxType {
- txModel.Type = &txType
- }
- txID, err := sdi.dbWriter.upsertTransactionCID(tx, txModel, args.headerID)
- if err != nil {
+ if err := sdi.dbWriter.upsertTransactionCID(tx.dbtx, txModel); err != nil {
return err
}
@@ -393,21 +350,30 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *sqlx.Tx, args processArgs
storageKeys[k] = storageKey.Hex()
}
accessListElementModel := models.AccessListElementModel{
+ TxID: txID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
StorageKeys: storageKeys,
}
- if err := sdi.dbWriter.upsertAccessListElement(tx, accessListElementModel, txID); err != nil {
+ if err := sdi.dbWriter.upsertAccessListElement(tx.dbtx, accessListElementModel); err != nil {
return err
}
}
- // index the receipt
+ // this is the contract address if this receipt is for a contract creation tx
+ contract := shared.HandleZeroAddr(receipt.ContractAddress)
+ var contractHash string
+ if contract != "" {
+ contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
+ }
+
+ // index receipt
if !args.rctLeafNodeCIDs[i].Defined() {
return fmt.Errorf("invalid receipt leaf node cid")
}
rctModel := &models.ReceiptModel{
+ TxID: txID,
Contract: contract,
ContractHash: contractHash,
LeafCID: args.rctLeafNodeCIDs[i].String(),
@@ -420,62 +386,84 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *sqlx.Tx, args processArgs
rctModel.PostState = common.Bytes2Hex(receipt.PostState)
}
- receiptID, err := sdi.dbWriter.upsertReceiptCID(tx, rctModel, txID)
- if err != nil {
+ if err := sdi.dbWriter.upsertReceiptCID(tx.dbtx, rctModel); err != nil {
return err
}
- if err = sdi.dbWriter.upsertLogCID(tx, logDataSet, receiptID); err != nil {
+ // index logs
+ logDataSet := make([]*models.LogsModel, len(receipt.Logs))
+ for idx, l := range receipt.Logs {
+ topicSet := make([]string, 4)
+ for ti, topic := range l.Topics {
+ topicSet[ti] = topic.Hex()
+ }
+
+ if !args.logLeafNodeCIDs[i][idx].Defined() {
+ return fmt.Errorf("invalid log cid")
+ }
+
+ logDataSet[idx] = &models.LogsModel{
+ ReceiptID: txID,
+ Address: l.Address.String(),
+ Index: int64(l.Index),
+ Data: l.Data,
+ LeafCID: args.logLeafNodeCIDs[i][idx].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
+ Topic0: topicSet[0],
+ Topic1: topicSet[1],
+ Topic2: topicSet[2],
+ Topic3: topicSet[3],
+ }
+ }
+
+ if err := sdi.dbWriter.upsertLogCID(tx.dbtx, logDataSet); err != nil {
return err
}
}
// publish trie nodes, these aren't indexed directly
- for _, n := range args.txTrieNodes {
- if err := shared.PublishIPLD(tx, n); err != nil {
- return fmt.Errorf("error publishing tx trie node IPLD: %w", err)
- }
- }
-
- for _, n := range args.rctTrieNodes {
- if err := shared.PublishIPLD(tx, n); err != nil {
- return fmt.Errorf("error publishing rct trie node IPLD: %w", err)
- }
+ for i, n := range args.txTrieNodes {
+ tx.cacheIPLD(n)
+ tx.cacheIPLD(args.rctTrieNodes[i])
}
return nil
}
-// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD database
-func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error {
+// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD SQL database
+func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
+ tx, ok := batch.(*BatchTx)
+ if !ok {
+ return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
+ }
// publish the state node
if stateNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
stateModel := models.StateNodeModel{
+ HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
- CID: RemovedNodeStateCID,
- MhKey: RemovedNodeMhKey,
+ CID: shared.RemovedNodeStateCID,
+ MhKey: shared.RemovedNodeMhKey,
NodeType: stateNode.NodeType.Int(),
}
- _, err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel, tx.headerID)
- return err
+ return sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel)
}
- stateCIDStr, stateMhKey, err := shared.PublishRaw(tx.dbtx, ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+ stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
if err != nil {
- return fmt.Errorf("error publishing state node IPLD: %v", err)
+ return fmt.Errorf("error generating and caching state node IPLD: %v", err)
}
stateModel := models.StateNodeModel{
+ HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
MhKey: stateMhKey,
NodeType: stateNode.NodeType.Int(),
}
- // index the state node, collect the stateID to reference by FK
- stateID, err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel, tx.headerID)
- if err != nil {
+ // index the state node
+ if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil {
return err
}
// if we have a leaf, decode and index the account data
@@ -492,12 +480,14 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
accountModel := models.StateAccountModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
}
- if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel, stateID); err != nil {
+ if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel); err != nil {
return err
}
}
@@ -507,29 +497,33 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
storageModel := models.StorageNodeModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
- CID: RemovedNodeStorageCID,
- MhKey: RemovedNodeMhKey,
+ CID: shared.RemovedNodeStorageCID,
+ MhKey: shared.RemovedNodeMhKey,
NodeType: storageNode.NodeType.Int(),
}
- if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel, stateID); err != nil {
+ if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
return err
}
continue
}
- storageCIDStr, storageMhKey, err := shared.PublishRaw(tx.dbtx, ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
+ storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
if err != nil {
- return fmt.Errorf("error publishing storage node IPLD: %v", err)
+ return fmt.Errorf("error generating and caching storage node IPLD: %v", err)
}
storageModel := models.StorageNodeModel{
+ HeaderID: headerID,
+ StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: storageCIDStr,
MhKey: storageMhKey,
NodeType: storageNode.NodeType.Int(),
}
- if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel, stateID); err != nil {
+ if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
return err
}
}
@@ -537,15 +531,22 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
return nil
}
-// PushCodeAndCodeHash publishes code and codehash pairs to the ipld database
-func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
+// PushCodeAndCodeHash publishes code and codehash pairs to the IPLD SQL database
+func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
+ tx, ok := batch.(*BatchTx)
+ if !ok {
+ return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
+ }
// codec doesn't matter since db key is multihash-based
mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
if err != nil {
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
}
- if err := shared.PublishDirect(tx.dbtx, mhKey, codeAndCodeHash.Code); err != nil {
- return fmt.Errorf("error publishing code IPLD: %v", err)
- }
+ tx.cacheDirect(mhKey, codeAndCodeHash.Code)
return nil
}
+
+// Close satisfies io.Closer
+func (sdi *StateDiffIndexer) Close() error {
+ return sdi.dbWriter.Close()
+}
diff --git a/statediff/indexer/database/sql/indexer_shared_test.go b/statediff/indexer/database/sql/indexer_shared_test.go
new file mode 100644
index 000000000..8bbab22ba
--- /dev/null
+++ b/statediff/indexer/database/sql/indexer_shared_test.go
@@ -0,0 +1,143 @@
+package sql_test
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/rlp"
+
+ "github.com/ipfs/go-cid"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+)
+
+var (
+ db sql.Database
+ err error
+ ind interfaces.StateDiffIndexer
+ ipfsPgGet = `SELECT data FROM public.blocks
+ WHERE key = $1`
+ tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
+ mockBlock *types.Block
+ headerCID, trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
+ rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
+ rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte
+ state1CID, state2CID, storageCID cid.Cid
+)
+
+func init() {
+ if os.Getenv("MODE") != "statediff" {
+ fmt.Println("Skipping statediff test")
+ os.Exit(0)
+ }
+
+ mockBlock = mocks.MockBlock
+ txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts
+
+ buf := new(bytes.Buffer)
+ txs.EncodeIndex(0, buf)
+ tx1 = make([]byte, buf.Len())
+ copy(tx1, buf.Bytes())
+ buf.Reset()
+
+ txs.EncodeIndex(1, buf)
+ tx2 = make([]byte, buf.Len())
+ copy(tx2, buf.Bytes())
+ buf.Reset()
+
+ txs.EncodeIndex(2, buf)
+ tx3 = make([]byte, buf.Len())
+ copy(tx3, buf.Bytes())
+ buf.Reset()
+
+ txs.EncodeIndex(3, buf)
+ tx4 = make([]byte, buf.Len())
+ copy(tx4, buf.Bytes())
+ buf.Reset()
+
+ txs.EncodeIndex(4, buf)
+ tx5 = make([]byte, buf.Len())
+ copy(tx5, buf.Bytes())
+ buf.Reset()
+
+ rcts.EncodeIndex(0, buf)
+ rct1 = make([]byte, buf.Len())
+ copy(rct1, buf.Bytes())
+ buf.Reset()
+
+ rcts.EncodeIndex(1, buf)
+ rct2 = make([]byte, buf.Len())
+ copy(rct2, buf.Bytes())
+ buf.Reset()
+
+ rcts.EncodeIndex(2, buf)
+ rct3 = make([]byte, buf.Len())
+ copy(rct3, buf.Bytes())
+ buf.Reset()
+
+ rcts.EncodeIndex(3, buf)
+ rct4 = make([]byte, buf.Len())
+ copy(rct4, buf.Bytes())
+ buf.Reset()
+
+ rcts.EncodeIndex(4, buf)
+ rct5 = make([]byte, buf.Len())
+ copy(rct5, buf.Bytes())
+ buf.Reset()
+
+ headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256)
+ trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256)
+ trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256)
+ trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256)
+ trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256)
+ trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256)
+ state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256)
+ state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
+ storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)
+
+ receiptTrie := ipld.NewRctTrie()
+
+ receiptTrie.Add(0, rct1)
+ receiptTrie.Add(1, rct2)
+ receiptTrie.Add(2, rct3)
+ receiptTrie.Add(3, rct4)
+ receiptTrie.Add(4, rct5)
+
+ rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
+
+ rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
+ orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
+ for i, rln := range rctLeafNodes {
+ var idx uint
+
+ r := bytes.NewReader(keys[i].TrieKey)
+ rlp.Decode(r, &idx)
+ rctleafNodeCids[idx] = rln.Cid()
+ orderedRctLeafNodes[idx] = rln.RawData()
+ }
+
+ rct1CID = rctleafNodeCids[0]
+ rct2CID = rctleafNodeCids[1]
+ rct3CID = rctleafNodeCids[2]
+ rct4CID = rctleafNodeCids[3]
+ rct5CID = rctleafNodeCids[4]
+
+ rctLeaf1 = orderedRctLeafNodes[0]
+ rctLeaf2 = orderedRctLeafNodes[1]
+ rctLeaf3 = orderedRctLeafNodes[2]
+ rctLeaf4 = orderedRctLeafNodes[3]
+ rctLeaf5 = orderedRctLeafNodes[4]
+}
+
+func expectTrue(t *testing.T, value bool) {
+ if !value {
+ t.Fatalf("Assertion failed")
+ }
+}
diff --git a/statediff/indexer/database/sql/interfaces.go b/statediff/indexer/database/sql/interfaces.go
new file mode 100644
index 000000000..445b35d9b
--- /dev/null
+++ b/statediff/indexer/database/sql/interfaces.go
@@ -0,0 +1,87 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql
+
+import (
+ "context"
+ "io"
+ "time"
+)
+
+// Database interfaces required by the sql indexer
+type Database interface {
+ Driver
+ Statements
+}
+
+// Driver interface has all the methods required by a driver implementation to support the sql indexer
+type Driver interface {
+ QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
+ Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
+ Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error
+ Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error
+ Begin(ctx context.Context) (Tx, error)
+ Stats() Stats
+ NodeID() string
+ Context() context.Context
+ io.Closer
+}
+
+// Statements interface to accommodate different SQL query syntax
+type Statements interface {
+ InsertHeaderStm() string
+ InsertUncleStm() string
+ InsertTxStm() string
+ InsertAccessListElementStm() string
+ InsertRctStm() string
+ InsertLogStm() string
+ InsertStateStm() string
+ InsertAccountStm() string
+ InsertStorageStm() string
+ InsertIPLDStm() string
+ InsertIPLDsStm() string
+}
+
+// Tx interface to accommodate different concrete SQL transaction types
+type Tx interface {
+ QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
+ Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
+ Commit(ctx context.Context) error
+ Rollback(ctx context.Context) error
+}
+
+// ScannableRow interface to accommodate different concrete row types
+type ScannableRow interface {
+ Scan(dest ...interface{}) error
+}
+
+// Result interface to accommodate different concrete result types
+type Result interface {
+ RowsAffected() (int64, error)
+}
+
+// Stats interface to accommodate different concrete sql stats types
+type Stats interface {
+ MaxOpen() int64
+ Open() int64
+ InUse() int64
+ Idle() int64
+ WaitCount() int64
+ WaitDuration() time.Duration
+ MaxIdleClosed() int64
+ MaxLifetimeClosed() int64
+}
diff --git a/statediff/indexer/database/sql/mainnet_tests/indexer_test.go b/statediff/indexer/database/sql/mainnet_tests/indexer_test.go
new file mode 100644
index 000000000..68c9bc464
--- /dev/null
+++ b/statediff/indexer/database/sql/mainnet_tests/indexer_test.go
@@ -0,0 +1,109 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package mainnet_tests
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+var (
+ err error
+ db sql.Database
+ ind interfaces.StateDiffIndexer
+ chainConf = params.MainnetChainConfig
+)
+
+func init() {
+ if os.Getenv("MODE") != "statediff" {
+ fmt.Println("Skipping statediff test")
+ os.Exit(0)
+ }
+}
+
+func TestPushBlockAndState(t *testing.T) {
+ conf := test_helpers.DefaultTestConfig
+ rawURL := os.Getenv(test_helpers.TEST_RAW_URL)
+ if rawURL == "" {
+ fmt.Printf("Warning: no raw url configured for statediffing mainnet tests, will look for local file and "+
+ "then try default endpoint (%s)\r\n", test_helpers.DefaultTestConfig.RawURL)
+ } else {
+ conf.RawURL = rawURL
+ }
+ for _, blockNumber := range test_helpers.ProblemBlocks {
+ conf.BlockNumber = big.NewInt(blockNumber)
+ tb, trs, err := test_helpers.TestBlockAndReceipts(conf)
+ require.NoError(t, err)
+ testPushBlockAndState(t, tb, trs)
+ }
+ testBlock, testReceipts, err := test_helpers.TestBlockAndReceiptsFromEnv(conf)
+ require.NoError(t, err)
+ testPushBlockAndState(t, testBlock, testReceipts)
+}
+
+func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Receipts) {
+ t.Run("Test PushBlock and PushStateNode", func(t *testing.T) {
+ setup(t, block, receipts)
+ tearDown(t)
+ })
+}
+
+func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) {
+ db, err = postgres.SetupSQLXDB()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ind, err = sql.NewStateDiffIndexer(context.Background(), chainConf, db)
+ require.NoError(t, err)
+ var tx interfaces.Batch
+ tx, err = ind.PushBlock(
+ testBlock,
+ testReceipts,
+ testBlock.Difficulty())
+ require.NoError(t, err)
+
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range mocks.StateDiffs {
+ err = ind.PushStateNode(tx, node, testBlock.Hash().String())
+ require.NoError(t, err)
+ }
+
+ test_helpers.ExpectEqual(t, tx.(*sql.BatchTx).BlockNumber, testBlock.Number().Uint64())
+}
+
+func tearDown(t *testing.T) {
+ sql.TearDownDB(t, db)
+ err = ind.Close()
+ require.NoError(t, err)
+}
diff --git a/statediff/indexer/metrics.go b/statediff/indexer/database/sql/metrics.go
similarity index 79%
rename from statediff/indexer/metrics.go
rename to statediff/indexer/database/sql/metrics.go
index 2d37816f6..b0946a722 100644
--- a/statediff/indexer/metrics.go
+++ b/statediff/indexer/database/sql/metrics.go
@@ -1,7 +1,22 @@
-package indexer
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package sql
import (
- "database/sql"
"strings"
"github.com/ethereum/go-ethereum/metrics"
@@ -79,7 +94,7 @@ func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles {
}
type dbMetricsHandles struct {
- // Maximum number of open connections to the database
+ // Maximum number of open connections to the database
maxOpen metrics.Gauge
// The number of established connections both in use and idle
open metrics.Gauge
@@ -120,13 +135,13 @@ func RegisterDBMetrics(reg metrics.Registry) dbMetricsHandles {
return ctx
}
-func (met *dbMetricsHandles) Update(stats sql.DBStats) {
- met.maxOpen.Update(int64(stats.MaxOpenConnections))
- met.open.Update(int64(stats.OpenConnections))
- met.inUse.Update(int64(stats.InUse))
- met.idle.Update(int64(stats.Idle))
- met.waitedFor.Inc(stats.WaitCount)
- met.blockedMilliseconds.Inc(stats.WaitDuration.Milliseconds())
- met.closedMaxIdle.Inc(stats.MaxIdleClosed)
- met.closedMaxLifetime.Inc(stats.MaxLifetimeClosed)
+func (met *dbMetricsHandles) Update(stats Stats) {
+ met.maxOpen.Update(stats.MaxOpen())
+ met.open.Update(stats.Open())
+ met.inUse.Update(stats.InUse())
+ met.idle.Update(stats.Idle())
+ met.waitedFor.Inc(stats.WaitCount())
+ met.blockedMilliseconds.Inc(stats.WaitDuration().Milliseconds())
+ met.closedMaxIdle.Inc(stats.MaxIdleClosed())
+ met.closedMaxLifetime.Inc(stats.MaxLifetimeClosed())
}
diff --git a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go
new file mode 100644
index 000000000..768652b46
--- /dev/null
+++ b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go
@@ -0,0 +1,89 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/multiformats/go-multihash"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+func setupLegacyPGX(t *testing.T) {
+ mockLegacyBlock = legacyData.MockBlock
+ legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
+
+ db, err = postgres.SetupPGXDB()
+ require.NoError(t, err)
+
+ ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db)
+ require.NoError(t, err)
+ var tx interfaces.Batch
+ tx, err = ind.PushBlock(
+ mockLegacyBlock,
+ legacyData.MockReceipts,
+ legacyData.MockBlock.Difficulty())
+ require.NoError(t, err)
+
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range legacyData.StateDiffs {
+ err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String())
+ require.NoError(t, err)
+ }
+
+ test_helpers.ExpectEqual(t, tx.(*sql.BatchTx).BlockNumber, legacyData.BlockNumber.Uint64())
+}
+
+func TestPGXIndexerLegacy(t *testing.T) {
+ t.Run("Publish and index header IPLDs", func(t *testing.T) {
+ setupLegacyPGX(t)
+ defer tearDown(t)
+ pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase
+ FROM eth.header_cids
+ WHERE block_number = $1`
+ // check header was properly indexed
+ type res struct {
+ CID string
+ TD string
+ Reward string
+ BlockHash string `db:"block_hash"`
+ Coinbase string `db:"coinbase"`
+ }
+ header := new(res)
+
+ err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).Scan(
+ &header.CID, &header.TD, &header.Reward, &header.BlockHash, &header.Coinbase)
+ require.NoError(t, err)
+
+ test_helpers.ExpectEqual(t, header.CID, legacyHeaderCID.String())
+ test_helpers.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String())
+ test_helpers.ExpectEqual(t, header.Reward, "5000000000000011250")
+ test_helpers.ExpectEqual(t, header.Coinbase, legacyData.MockHeader.Coinbase.String())
+ require.Nil(t, legacyData.MockHeader.BaseFee)
+ })
+}
diff --git a/statediff/indexer/database/sql/pgx_indexer_test.go b/statediff/indexer/database/sql/pgx_indexer_test.go
new file mode 100644
index 000000000..426160cf9
--- /dev/null
+++ b/statediff/indexer/database/sql/pgx_indexer_test.go
@@ -0,0 +1,546 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ipfs/go-cid"
+ blockstore "github.com/ipfs/go-ipfs-blockstore"
+ dshelp "github.com/ipfs/go-ipfs-ds-help"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+func setupPGX(t *testing.T) {
+ db, err = postgres.SetupPGXDB()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
+ require.NoError(t, err)
+ var tx interfaces.Batch
+ tx, err = ind.PushBlock(
+ mockBlock,
+ mocks.MockReceipts,
+ mocks.MockBlock.Difficulty())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range mocks.StateDiffs {
+ err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
+ require.NoError(t, err)
+ }
+
+ test_helpers.ExpectEqual(t, tx.(*sql.BatchTx).BlockNumber, mocks.BlockNumber.Uint64())
+}
+
+func TestPGXIndexer(t *testing.T) {
+ t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
+ setupPGX(t)
+ defer tearDown(t)
+ pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase
+ FROM eth.header_cids
+ WHERE block_number = $1`
+ // check header was properly indexed
+ type res struct {
+ CID string
+ TD string
+ Reward string
+ BlockHash string `db:"block_hash"`
+ Coinbase string `db:"coinbase"`
+ }
+ header := new(res)
+ err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).Scan(
+ &header.CID,
+ &header.TD,
+ &header.Reward,
+ &header.BlockHash,
+ &header.Coinbase)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, header.CID, headerCID.String())
+ test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
+ test_helpers.ExpectEqual(t, header.Reward, "2000000000000021250")
+ test_helpers.ExpectEqual(t, header.Coinbase, mocks.MockHeader.Coinbase.String())
+ dc, err := cid.Decode(header.CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ var data []byte
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp)
+ })
+
+ t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
+ setupPGX(t)
+ defer tearDown(t)
+ // check that txs were properly indexed and published
+ trxs := make([]string, 0)
+ pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
+ WHERE header_cids.block_number = $1`
+ err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(trxs), 5)
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String()))
+
+ transactions := mocks.MockBlock.Transactions()
+ type txResult struct {
+ TxType uint8 `db:"tx_type"`
+ Value string
+ }
+ for _, c := range trxs {
+ dc, err := cid.Decode(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ var data []byte
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ txTypeAndValueStr := `SELECT tx_type, CAST(value as TEXT) FROM eth.transaction_cids WHERE cid = $1`
+ switch c {
+ case trx1CID.String():
+ test_helpers.ExpectEqual(t, data, tx1)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[0].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value)
+ }
+ case trx2CID.String():
+ test_helpers.ExpectEqual(t, data, tx2)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[1].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value)
+ }
+ case trx3CID.String():
+ test_helpers.ExpectEqual(t, data, tx3)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[2].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value)
+ }
+ case trx4CID.String():
+ test_helpers.ExpectEqual(t, data, tx4)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != types.AccessListTxType {
+ t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[3].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
+ }
+ accessListElementModels := make([]models.AccessListElementModel, 0)
+ pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC`
+ err = db.Select(context.Background(), &accessListElementModels, pgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(accessListElementModels) != 2 {
+ t.Fatalf("expected two access list entries, got %d", len(accessListElementModels))
+ }
+ model1 := models.AccessListElementModel{
+ Index: accessListElementModels[0].Index,
+ Address: accessListElementModels[0].Address,
+ }
+ model2 := models.AccessListElementModel{
+ Index: accessListElementModels[1].Index,
+ Address: accessListElementModels[1].Address,
+ StorageKeys: accessListElementModels[1].StorageKeys,
+ }
+ test_helpers.ExpectEqual(t, model1, mocks.AccessListEntry1Model)
+ test_helpers.ExpectEqual(t, model2, mocks.AccessListEntry2Model)
+ case trx5CID.String():
+ test_helpers.ExpectEqual(t, data, tx5)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != types.DynamicFeeTxType {
+ t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[4].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value)
+ }
+ }
+ }
+ })
+
+ t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
+ setupPGX(t)
+ defer tearDown(t)
+
+ rcts := make([]string, 0)
+ pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+ WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+ AND transaction_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1
+ ORDER BY transaction_cids.index`
+ err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type logIPLD struct {
+ Index int `db:"index"`
+ Address string `db:"address"`
+ Data []byte `db:"data"`
+ Topic0 string `db:"topic0"`
+ Topic1 string `db:"topic1"`
+ }
+ for i := range rcts {
+ results := make([]logIPLD, 0)
+ pgStr = `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
+ INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
+ INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
+ WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
+ err = db.Select(context.Background(), &results, pgStr, rcts[i])
+ require.NoError(t, err)
+
+ // expecting MockLog1 and MockLog2 for mockReceipt4
+ expectedLogs := mocks.MockReceipts[i].Logs
+ test_helpers.ExpectEqual(t, len(results), len(expectedLogs))
+
+ var nodeElements []interface{}
+ for idx, r := range results {
+ // Decode the log leaf node.
+ err = rlp.DecodeBytes(r.Data, &nodeElements)
+ require.NoError(t, err)
+
+ logRaw, err := rlp.EncodeToBytes(expectedLogs[idx])
+ require.NoError(t, err)
+
+ // 2nd element of the leaf node contains the encoded log data.
+ test_helpers.ExpectEqual(t, logRaw, nodeElements[1].([]byte))
+ }
+ }
+ })
+
+ t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
+ setupPGX(t)
+ defer tearDown(t)
+
+ // check receipts were properly indexed and published
+ rcts := make([]string, 0)
+ pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+ WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+ AND transaction_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1 order by transaction_cids.index`
+ err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(rcts), 5)
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
+
+ for idx, c := range rcts {
+ result := make([]models.IPLDModel, 0)
+ pgStr = `SELECT data
+ FROM eth.receipt_cids
+ INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
+ WHERE receipt_cids.leaf_cid = $1`
+ err = db.Select(context.Background(), &result, pgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Decode the log leaf node.
+ var nodeElements []interface{}
+ err = rlp.DecodeBytes(result[0].Data, &nodeElements)
+ require.NoError(t, err)
+
+ expectedRct, err := mocks.MockReceipts[idx].MarshalBinary()
+ require.NoError(t, err)
+
+ test_helpers.ExpectEqual(t, expectedRct, nodeElements[1].([]byte))
+
+ dc, err := cid.Decode(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ var data []byte
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
+ switch c {
+ case rct1CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf1)
+ var postStatus uint64
+ pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1`
+ err = db.Get(context.Background(), &postStatus, pgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus)
+ case rct2CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf2)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState1)
+ case rct3CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf3)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState2)
+ case rct4CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf4)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
+ case rct5CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf5)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
+ }
+ }
+ })
+
+ t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
+ setupPGX(t)
+ defer tearDown(t)
+ // check that state nodes were properly indexed and published
+ stateNodes := make([]models.StateNodeModel, 0)
+ pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
+ WHERE header_cids.block_number = $1 AND node_type != 3`
+ err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(stateNodes), 2)
+ for _, stateNode := range stateNodes {
+ var data []byte
+ dc, err := cid.Decode(stateNode.CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pgStr = `SELECT header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
+ var account models.StateAccountModel
+ err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if stateNode.CID == state1CID.String() {
+ test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
+ test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
+ test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
+ test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ HeaderID: account.HeaderID,
+ StatePath: stateNode.Path,
+ Balance: "0",
+ CodeHash: mocks.ContractCodeHash.Bytes(),
+ StorageRoot: mocks.ContractRoot,
+ Nonce: 1,
+ })
+ }
+ if stateNode.CID == state2CID.String() {
+ test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
+ test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
+ test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
+ test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ HeaderID: account.HeaderID,
+ StatePath: stateNode.Path,
+ Balance: "1000",
+ CodeHash: mocks.AccountCodeHash.Bytes(),
+ StorageRoot: mocks.AccountRoot,
+ Nonce: 0,
+ })
+ }
+ }
+
+ // check that Removed state nodes were properly indexed and published
+ stateNodes = make([]models.StateNodeModel, 0)
+ pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
+ WHERE header_cids.block_number = $1 AND node_type = 3`
+ err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(stateNodes), 1)
+ stateNode := stateNodes[0]
+ var data []byte
+ dc, err := cid.Decode(stateNode.CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
+ test_helpers.ExpectEqual(t, data, []byte{})
+ })
+
+ t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
+ setupPGX(t)
+ defer tearDown(t)
+ // check that storage nodes were properly indexed
+ storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
+ pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+ FROM eth.storage_cids, eth.state_cids, eth.header_cids
+ WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
+ AND state_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1
+ AND storage_cids.node_type != 3`
+ err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(storageNodes), 1)
+ test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ CID: storageCID.String(),
+ NodeType: 2,
+ StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
+ StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+ Path: []byte{},
+ })
+ var data []byte
+ dc, err := cid.Decode(storageNodes[0].CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)
+
+ // check that Removed storage nodes were properly indexed
+ storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
+ pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+ FROM eth.storage_cids, eth.state_cids, eth.header_cids
+ WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
+ AND state_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1
+ AND storage_cids.node_type = 3`
+ err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(storageNodes), 1)
+ test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ CID: shared.RemovedNodeStorageCID,
+ NodeType: 3,
+ StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
+ StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+ Path: []byte{'\x03'},
+ })
+ dc, err = cid.Decode(storageNodes[0].CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey = dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
+ test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, data, []byte{})
+ })
+}
diff --git a/statediff/indexer/database/sql/postgres/config.go b/statediff/indexer/database/sql/postgres/config.go
new file mode 100644
index 000000000..4fe2972ed
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/config.go
@@ -0,0 +1,98 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+)
+
+// DriverType to explicitly type the kind of sql driver we are using
+type DriverType string
+
+const (
+ PGX DriverType = "PGX"
+ SQLX DriverType = "SQLX"
+ Unknown DriverType = "Unknown"
+)
+
+// ResolveDriverType resolves a DriverType from a provided string
+func ResolveDriverType(str string) (DriverType, error) {
+ switch strings.ToLower(str) {
+ case "pgx", "pgxpool":
+ return PGX, nil
+ case "sqlx":
+ return SQLX, nil
+ default:
+ return Unknown, fmt.Errorf("unrecognized driver type string: %s", str)
+ }
+}
+
+// DefaultConfig holds the default parameters for connecting to a Postgres database
+var DefaultConfig = Config{
+ Hostname: "localhost",
+ Port: 5432,
+ DatabaseName: "vulcanize_public",
+ Username: "vdbm",
+ Password: "password",
+}
+
+// Config holds params for a Postgres db
+type Config struct {
+ // conn string params
+ Hostname string
+ Port int
+ DatabaseName string
+ Username string
+ Password string
+
+ // conn settings
+ MaxConns int
+ MaxIdle int
+ MinConns int
+ MaxConnIdleTime time.Duration
+ MaxConnLifetime time.Duration
+ ConnTimeout time.Duration
+
+ // node info params
+ ID string
+ ClientName string
+
+ // driver type
+ Driver DriverType
+}
+
+// Type satisfies interfaces.Config
+func (c Config) Type() shared.DBType {
+ return shared.POSTGRES
+}
+
+// DbConnectionString constructs and returns the connection string from the config
+func (c Config) DbConnectionString() string {
+ if len(c.Username) > 0 && len(c.Password) > 0 {
+ return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
+ c.Username, c.Password, c.Hostname, c.Port, c.DatabaseName)
+ }
+ if len(c.Username) > 0 && len(c.Password) == 0 {
+ return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
+ c.Username, c.Hostname, c.Port, c.DatabaseName)
+ }
+ return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", c.Hostname, c.Port, c.DatabaseName)
+}
diff --git a/statediff/indexer/database/sql/postgres/database.go b/statediff/indexer/database/sql/postgres/database.go
new file mode 100644
index 000000000..4cff518a0
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/database.go
@@ -0,0 +1,101 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres
+
+import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+
+var _ sql.Database = &DB{}
+
+const (
+ createNodeStm = `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (node_id) DO NOTHING`
+)
+
+// NewPostgresDB returns a postgres.DB using the provided driver
+func NewPostgresDB(driver sql.Driver) *DB {
+ return &DB{driver}
+}
+
+// DB implements sql.Database using a configured driver and Postgres statement syntax
+type DB struct {
+ sql.Driver
+}
+
+// InsertHeaderStm satisfies the sql.Statements interface
+func (db *DB) InsertHeaderStm() string {
+ return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
+ ON CONFLICT (block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)`
+}
+
+// InsertUncleStm satisfies the sql.Statements interface
+func (db *DB) InsertUncleStm() string {
+ return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
+ ON CONFLICT (block_hash) DO NOTHING`
+}
+
+// InsertTxStm satisfies the sql.Statements interface
+func (db *DB) InsertTxStm() string {
+ return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+ ON CONFLICT (tx_hash) DO NOTHING`
+}
+
+// InsertAccessListElementStm satisfies the sql.Statements interface
+func (db *DB) InsertAccessListElementStm() string {
+ return `INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
+ ON CONFLICT (tx_id, index) DO NOTHING`
+}
+
+// InsertRctStm satisfies the sql.Statements interface
+func (db *DB) InsertRctStm() string {
+ return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ ON CONFLICT (tx_id) DO NOTHING`
+}
+
+// InsertLogStm satisfies the sql.Statements interface
+func (db *DB) InsertLogStm() string {
+ return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+ ON CONFLICT (rct_id, index) DO NOTHING`
+}
+
+// InsertStateStm satisfies the sql.Statements interface
+func (db *DB) InsertStateStm() string {
+ return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`
+}
+
+// InsertAccountStm satisfies the sql.Statements interface
+func (db *DB) InsertAccountStm() string {
+ return `INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6)
+ ON CONFLICT (header_id, state_path) DO NOTHING`
+}
+
+// InsertStorageStm satisfies the sql.Statements interface
+func (db *DB) InsertStorageStm() string {
+ return `INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)`
+}
+
+// InsertIPLDStm satisfies the sql.Statements interface
+func (db *DB) InsertIPLDStm() string {
+ return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
+}
+
+// InsertIPLDsStm satisfies the sql.Statements interface
+func (db *DB) InsertIPLDsStm() string {
+ return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING`
+}
diff --git a/statediff/indexer/postgres/errors.go b/statediff/indexer/database/sql/postgres/errors.go
similarity index 100%
rename from statediff/indexer/postgres/errors.go
rename to statediff/indexer/database/sql/postgres/errors.go
diff --git a/statediff/indexer/database/sql/postgres/pgx.go b/statediff/indexer/database/sql/postgres/pgx.go
new file mode 100644
index 000000000..936a3765d
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/pgx.go
@@ -0,0 +1,233 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres
+
+import (
+ "context"
+ "time"
+
+ "github.com/georgysavva/scany/pgxscan"
+ "github.com/jackc/pgconn"
+ "github.com/jackc/pgx/v4"
+ "github.com/jackc/pgx/v4/pgxpool"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+)
+
+// PGXDriver driver, implements sql.Driver
+type PGXDriver struct {
+ ctx context.Context
+ pool *pgxpool.Pool
+ nodeInfo node.Info
+ nodeID string
+}
+
+// NewPGXDriver returns a new pgx driver
+// it initializes the connection pool and creates the node info table
+func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
+ pgConf, err := MakeConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ dbPool, err := pgxpool.ConnectConfig(ctx, pgConf)
+ if err != nil {
+ return nil, ErrDBConnectionFailed(err)
+ }
+ pg := &PGXDriver{ctx: ctx, pool: dbPool, nodeInfo: node}
+ nodeErr := pg.createNode()
+ if nodeErr != nil {
+ return &PGXDriver{}, ErrUnableToSetNode(nodeErr)
+ }
+ return pg, nil
+}
+
+// MakeConfig creates a pgxpool.Config from the provided Config
+func MakeConfig(config Config) (*pgxpool.Config, error) {
+ conf, err := pgxpool.ParseConfig("")
+ if err != nil {
+ return nil, err
+ }
+
+ //conf.ConnConfig.BuildStatementCache = nil
+ conf.ConnConfig.Config.Host = config.Hostname
+ conf.ConnConfig.Config.Port = uint16(config.Port)
+ conf.ConnConfig.Config.Database = config.DatabaseName
+ conf.ConnConfig.Config.User = config.Username
+ conf.ConnConfig.Config.Password = config.Password
+
+ if config.ConnTimeout != 0 {
+ conf.ConnConfig.Config.ConnectTimeout = config.ConnTimeout
+ }
+ if config.MaxConns != 0 {
+ conf.MaxConns = int32(config.MaxConns)
+ }
+ if config.MinConns != 0 {
+ conf.MinConns = int32(config.MinConns)
+ }
+ if config.MaxConnLifetime != 0 {
+ conf.MaxConnLifetime = config.MaxConnLifetime
+ }
+ if config.MaxConnIdleTime != 0 {
+ conf.MaxConnIdleTime = config.MaxConnIdleTime
+ }
+ return conf, nil
+}
+
+func (pgx *PGXDriver) createNode() error {
+ _, err := pgx.pool.Exec(
+ pgx.ctx,
+ createNodeStm,
+ pgx.nodeInfo.GenesisBlock, pgx.nodeInfo.NetworkID,
+ pgx.nodeInfo.ID, pgx.nodeInfo.ClientName,
+ pgx.nodeInfo.ChainID)
+ if err != nil {
+ return ErrUnableToSetNode(err)
+ }
+ pgx.nodeID = pgx.nodeInfo.ID
+ return nil
+}
+
+// QueryRow satisfies sql.Database
+func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
+ return pgx.pool.QueryRow(ctx, sql, args...)
+}
+
+// Exec satisfies sql.Database
+func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+ res, err := pgx.pool.Exec(ctx, sql, args...)
+ return resultWrapper{ct: res}, err
+}
+
+// Select satisfies sql.Database
+func (pgx *PGXDriver) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return pgxscan.Select(ctx, pgx.pool, dest, query, args...)
+}
+
+// Get satisfies sql.Database
+func (pgx *PGXDriver) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return pgxscan.Get(ctx, pgx.pool, dest, query, args...)
+}
+
+// Begin satisfies sql.Database
+func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) {
+ tx, err := pgx.pool.Begin(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return pgxTxWrapper{tx: tx}, nil
+}
+
+func (pgx *PGXDriver) Stats() sql.Stats {
+ stats := pgx.pool.Stat()
+ return pgxStatsWrapper{stats: stats}
+}
+
+// NodeID satisfies sql.Database
+func (pgx *PGXDriver) NodeID() string {
+ return pgx.nodeID
+}
+
+// Close satisfies sql.Database/io.Closer
+func (pgx *PGXDriver) Close() error {
+ pgx.pool.Close()
+ return nil
+}
+
+// Context satisfies sql.Database
+func (pgx *PGXDriver) Context() context.Context {
+ return pgx.ctx
+}
+
+type resultWrapper struct {
+ ct pgconn.CommandTag
+}
+
+// RowsAffected satisfies sql.Result
+func (r resultWrapper) RowsAffected() (int64, error) {
+ return r.ct.RowsAffected(), nil
+}
+
+type pgxStatsWrapper struct {
+ stats *pgxpool.Stat
+}
+
+// MaxOpen satisfies sql.Stats
+func (s pgxStatsWrapper) MaxOpen() int64 {
+ return int64(s.stats.MaxConns())
+}
+
+// Open satisfies sql.Stats
+func (s pgxStatsWrapper) Open() int64 {
+ return int64(s.stats.TotalConns())
+}
+
+// InUse satisfies sql.Stats
+func (s pgxStatsWrapper) InUse() int64 {
+ return int64(s.stats.AcquiredConns())
+}
+
+// Idle satisfies sql.Stats
+func (s pgxStatsWrapper) Idle() int64 {
+ return int64(s.stats.IdleConns())
+}
+
+// WaitCount satisfies sql.Stats
+func (s pgxStatsWrapper) WaitCount() int64 {
+ return s.stats.EmptyAcquireCount()
+}
+
+// WaitDuration satisfies sql.Stats
+func (s pgxStatsWrapper) WaitDuration() time.Duration {
+ return s.stats.AcquireDuration()
+}
+
+// MaxIdleClosed satisfies sql.Stats
+func (s pgxStatsWrapper) MaxIdleClosed() int64 {
+ // this stat isn't supported by pgxpool, but we don't want to panic
+ return 0
+}
+
+// MaxLifetimeClosed satisfies sql.Stats
+func (s pgxStatsWrapper) MaxLifetimeClosed() int64 {
+ return s.stats.CanceledAcquireCount()
+}
+
+type pgxTxWrapper struct {
+ tx pgx.Tx
+}
+
+// QueryRow satisfies sql.Tx
+func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
+ return t.tx.QueryRow(ctx, sql, args...)
+}
+
+// Exec satisfies sql.Tx
+func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+ res, err := t.tx.Exec(ctx, sql, args...)
+ return resultWrapper{ct: res}, err
+}
+
+// Commit satisfies sql.Tx
+func (t pgxTxWrapper) Commit(ctx context.Context) error {
+ return t.tx.Commit(ctx)
+}
+
+// Rollback satisfies sql.Tx
+func (t pgxTxWrapper) Rollback(ctx context.Context) error {
+ return t.tx.Rollback(ctx)
+}
diff --git a/statediff/indexer/database/sql/postgres/pgx_test.go b/statediff/indexer/database/sql/postgres/pgx_test.go
new file mode 100644
index 000000000..64616e356
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/pgx_test.go
@@ -0,0 +1,121 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres_test
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "strings"
+ "testing"
+
+ "github.com/jackc/pgx/v4/pgxpool"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+var (
+ pgConfig, _ = postgres.MakeConfig(postgres.DefaultConfig)
+ ctx = context.Background()
+)
+
+func expectContainsSubstring(t *testing.T, full string, sub string) {
+ if !strings.Contains(full, sub) {
+ t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub)
+ }
+}
+
+func TestPostgresPGX(t *testing.T) {
+ t.Run("connects to the sql", func(t *testing.T) {
+ dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig)
+ if err != nil {
+ t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err)
+ }
+ if dbPool == nil {
+ t.Fatal("DB pool is nil")
+ }
+ dbPool.Close()
+ })
+
+ t.Run("serializes big.Int to db", func(t *testing.T) {
+ // postgres driver doesn't support go big.Int type
+ // various casts in golang uint64, int64, overflow for
+ // transaction value (in wei) even though
+ // postgres numeric can handle an arbitrary
+ // sized int, so use string representation of big.Int
+ // and cast on insert
+
+ dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig)
+ if err != nil {
+ t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err)
+ }
+ defer dbPool.Close()
+
+ bi := new(big.Int)
+ bi.SetString("34940183920000000000", 10)
+ test_helpers.ExpectEqual(t, bi.String(), "34940183920000000000")
+
+ defer dbPool.Exec(ctx, `DROP TABLE IF EXISTS example`)
+ _, err = dbPool.Exec(ctx, "CREATE TABLE example ( id INTEGER, data NUMERIC )")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sqlStatement := `
+ INSERT INTO example (id, data)
+ VALUES (1, cast($1 AS NUMERIC))`
+ _, err = dbPool.Exec(ctx, sqlStatement, bi.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var data string
+ err = dbPool.QueryRow(ctx, `SELECT cast(data AS TEXT) FROM example WHERE id = 1`).Scan(&data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ test_helpers.ExpectEqual(t, data, bi.String())
+ actual := new(big.Int)
+ actual.SetString(data, 10)
+ test_helpers.ExpectEqual(t, actual, bi)
+ })
+
+ t.Run("throws error when can't connect to the database", func(t *testing.T) {
+ goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
+ _, err := postgres.NewPGXDriver(ctx, postgres.Config{}, goodInfo)
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ expectContainsSubstring(t, err.Error(), postgres.DbConnectionFailedMsg)
+ })
+
+ t.Run("throws error when can't create node", func(t *testing.T) {
+ badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
+ badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
+
+ _, err := postgres.NewPGXDriver(ctx, postgres.DefaultConfig, badInfo)
+ if err == nil {
+ t.Fatal("Expected an error")
+ }
+
+ expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg)
+ })
+}
diff --git a/statediff/indexer/postgres/postgres_suite_test.go b/statediff/indexer/database/sql/postgres/postgres_suite_test.go
similarity index 100%
rename from statediff/indexer/postgres/postgres_suite_test.go
rename to statediff/indexer/database/sql/postgres/postgres_suite_test.go
diff --git a/statediff/indexer/database/sql/postgres/sqlx.go b/statediff/indexer/database/sql/postgres/sqlx.go
new file mode 100644
index 000000000..406b44a19
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/sqlx.go
@@ -0,0 +1,190 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres
+
+import (
+ "context"
+ coresql "database/sql"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+)
+
+// SQLXDriver driver, implements sql.Driver
+type SQLXDriver struct {
+ ctx context.Context
+ db *sqlx.DB
+ nodeInfo node.Info
+ nodeID string
+}
+
+// NewSQLXDriver returns a new sqlx driver for Postgres
+// it initializes the connection pool and creates the node info table
+func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
+ db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString())
+ if err != nil {
+ return &SQLXDriver{}, ErrDBConnectionFailed(err)
+ }
+ if config.MaxConns > 0 {
+ db.SetMaxOpenConns(config.MaxConns)
+ }
+ if config.MaxIdle > 0 {
+ db.SetMaxIdleConns(config.MaxIdle)
+ }
+ if config.MaxConnLifetime > 0 {
+ lifetime := config.MaxConnLifetime
+ db.SetConnMaxLifetime(lifetime)
+ }
+ driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node}
+ if err := driver.createNode(); err != nil {
+ return &SQLXDriver{}, ErrUnableToSetNode(err)
+ }
+ return driver, nil
+}
+
+func (driver *SQLXDriver) createNode() error {
+ _, err := driver.db.Exec(
+ createNodeStm,
+ driver.nodeInfo.GenesisBlock, driver.nodeInfo.NetworkID,
+ driver.nodeInfo.ID, driver.nodeInfo.ClientName,
+ driver.nodeInfo.ChainID)
+ if err != nil {
+ return ErrUnableToSetNode(err)
+ }
+ driver.nodeID = driver.nodeInfo.ID
+ return nil
+}
+
+// QueryRow satisfies sql.Database
+func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) sql.ScannableRow {
+ return driver.db.QueryRowx(sql, args...)
+}
+
+// Exec satisfies sql.Database
+func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (sql.Result, error) {
+ return driver.db.Exec(sql, args...)
+}
+
+// Select satisfies sql.Database
+func (driver *SQLXDriver) Select(_ context.Context, dest interface{}, query string, args ...interface{}) error {
+ return driver.db.Select(dest, query, args...)
+}
+
+// Get satisfies sql.Database
+func (driver *SQLXDriver) Get(_ context.Context, dest interface{}, query string, args ...interface{}) error {
+ return driver.db.Get(dest, query, args...)
+}
+
+// Begin satisfies sql.Database
+func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) {
+ tx, err := driver.db.Beginx()
+ if err != nil {
+ return nil, err
+ }
+ return sqlxTxWrapper{tx: tx}, nil
+}
+
+func (driver *SQLXDriver) Stats() sql.Stats {
+ stats := driver.db.Stats()
+ return sqlxStatsWrapper{stats: stats}
+}
+
+// NodeID satisfies sql.Database
+func (driver *SQLXDriver) NodeID() string {
+ return driver.nodeID
+}
+
+// Close satisfies sql.Database/io.Closer
+func (driver *SQLXDriver) Close() error {
+ return driver.db.Close()
+}
+
+// Context satisfies sql.Database
+func (driver *SQLXDriver) Context() context.Context {
+ return driver.ctx
+}
+
+type sqlxStatsWrapper struct {
+ stats coresql.DBStats
+}
+
+// MaxOpen satisfies sql.Stats
+func (s sqlxStatsWrapper) MaxOpen() int64 {
+ return int64(s.stats.MaxOpenConnections)
+}
+
+// Open satisfies sql.Stats
+func (s sqlxStatsWrapper) Open() int64 {
+ return int64(s.stats.OpenConnections)
+}
+
+// InUse satisfies sql.Stats
+func (s sqlxStatsWrapper) InUse() int64 {
+ return int64(s.stats.InUse)
+}
+
+// Idle satisfies sql.Stats
+func (s sqlxStatsWrapper) Idle() int64 {
+ return int64(s.stats.Idle)
+}
+
+// WaitCount satisfies sql.Stats
+func (s sqlxStatsWrapper) WaitCount() int64 {
+ return s.stats.WaitCount
+}
+
+// WaitDuration satisfies sql.Stats
+func (s sqlxStatsWrapper) WaitDuration() time.Duration {
+ return s.stats.WaitDuration
+}
+
+// MaxIdleClosed satisfies sql.Stats
+func (s sqlxStatsWrapper) MaxIdleClosed() int64 {
+ return s.stats.MaxIdleClosed
+}
+
+// MaxLifetimeClosed satisfies sql.Stats
+func (s sqlxStatsWrapper) MaxLifetimeClosed() int64 {
+ return s.stats.MaxLifetimeClosed
+}
+
+type sqlxTxWrapper struct {
+ tx *sqlx.Tx
+}
+
+// QueryRow satisfies sql.Tx
+func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
+ return t.tx.QueryRowx(sql, args...)
+}
+
+// Exec satisfies sql.Tx
+func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+ return t.tx.Exec(sql, args...)
+}
+
+// Commit satisfies sql.Tx
+func (t sqlxTxWrapper) Commit(ctx context.Context) error {
+ return t.tx.Commit()
+}
+
+// Rollback satisfies sql.Tx
+func (t sqlxTxWrapper) Rollback(ctx context.Context) error {
+ return t.tx.Rollback()
+}
diff --git a/statediff/indexer/postgres/postgres_test.go b/statediff/indexer/database/sql/postgres/sqlx_test.go
similarity index 66%
rename from statediff/indexer/postgres/postgres_test.go
rename to statediff/indexer/database/sql/postgres/sqlx_test.go
index f3bbdffd0..03f24e9f5 100644
--- a/statediff/indexer/postgres/postgres_test.go
+++ b/statediff/indexer/database/sql/postgres/sqlx_test.go
@@ -25,40 +25,29 @@ import (
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
- "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
-var DBParams = postgres.ConnectionParams{
- Name: "vulcanize_public",
- Password: "password",
- Port: 5432,
- Hostname: "localhost",
- User: "vdbm",
-}
-
-func expectContainsSubstring(t *testing.T, full string, sub string) {
- if !strings.Contains(full, sub) {
- t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub)
- }
-}
-
-func TestPostgresDB(t *testing.T) {
+func TestPostgresSQLX(t *testing.T) {
var sqlxdb *sqlx.DB
t.Run("connects to the database", func(t *testing.T) {
var err error
- pgConfig := postgres.DbConnectionString(DBParams)
-
- sqlxdb, err = sqlx.Connect("postgres", pgConfig)
+ connStr := postgres.DefaultConfig.DbConnectionString()
+ sqlxdb, err = sqlx.Connect("postgres", connStr)
if err != nil {
- t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig, err)
+ t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err)
}
if sqlxdb == nil {
t.Fatal("DB is nil")
}
+ err = sqlxdb.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
})
t.Run("serializes big.Int to db", func(t *testing.T) {
@@ -69,18 +58,16 @@ func TestPostgresDB(t *testing.T) {
// sized int, so use string representation of big.Int
// and cast on insert
- pgConnectString := postgres.DbConnectionString(DBParams)
- db, err := sqlx.Connect("postgres", pgConnectString)
- if err != nil {
- t.Fatal(err)
- }
+ connStr := postgres.DefaultConfig.DbConnectionString()
+ db, err := sqlx.Connect("postgres", connStr)
if err != nil {
t.Fatal(err)
}
+ defer db.Close()
bi := new(big.Int)
bi.SetString("34940183920000000000", 10)
- shared.ExpectEqual(t, bi.String(), "34940183920000000000")
+ test_helpers.ExpectEqual(t, bi.String(), "34940183920000000000")
defer db.Exec(`DROP TABLE IF EXISTS example`)
_, err = db.Exec("CREATE TABLE example ( id INTEGER, data NUMERIC )")
@@ -102,19 +89,15 @@ func TestPostgresDB(t *testing.T) {
t.Fatal(err)
}
- shared.ExpectEqual(t, bi.String(), data)
+ test_helpers.ExpectEqual(t, data, bi.String())
actual := new(big.Int)
actual.SetString(data, 10)
- shared.ExpectEqual(t, actual, bi)
+ test_helpers.ExpectEqual(t, actual, bi)
})
t.Run("throws error when can't connect to the database", func(t *testing.T) {
- invalidDatabase := postgres.ConnectionParams{}
- node := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
-
- _, err := postgres.NewDB(postgres.DbConnectionString(invalidDatabase),
- postgres.ConnectionConfig{}, node)
-
+ goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
+ _, err := postgres.NewSQLXDriver(ctx, postgres.Config{}, goodInfo)
if err == nil {
t.Fatal("Expected an error")
}
@@ -124,13 +107,13 @@ func TestPostgresDB(t *testing.T) {
t.Run("throws error when can't create node", func(t *testing.T) {
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
- node := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
-
- _, err := postgres.NewDB(postgres.DbConnectionString(DBParams), postgres.ConnectionConfig{}, node)
+ badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
+ _, err := postgres.NewSQLXDriver(ctx, postgres.DefaultConfig, badInfo)
if err == nil {
t.Fatal("Expected an error")
}
+
expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg)
})
}
diff --git a/statediff/indexer/database/sql/postgres/test_helpers.go b/statediff/indexer/database/sql/postgres/test_helpers.go
new file mode 100644
index 000000000..491701c4b
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/test_helpers.go
@@ -0,0 +1,42 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+)
+
+// SetupSQLXDB is used to setup a sqlx db for tests
+func SetupSQLXDB() (sql.Database, error) {
+ driver, err := NewSQLXDriver(context.Background(), DefaultConfig, node.Info{})
+ if err != nil {
+ return nil, err
+ }
+ return NewPostgresDB(driver), nil
+}
+
+// SetupPGXDB is used to setup a pgx db for tests
+func SetupPGXDB() (sql.Database, error) {
+ driver, err := NewPGXDriver(context.Background(), DefaultConfig, node.Info{})
+ if err != nil {
+ return nil, err
+ }
+ return NewPostgresDB(driver), nil
+}
diff --git a/statediff/indexer/indexer_legacy_test.go b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go
similarity index 52%
rename from statediff/indexer/indexer_legacy_test.go
rename to statediff/indexer/database/sql/sqlx_indexer_legacy_test.go
index 4b1563190..08f3f080e 100644
--- a/statediff/indexer/indexer_legacy_test.go
+++ b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go
@@ -14,19 +14,24 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package indexer_test
+package sql_test
import (
+ "context"
"testing"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/statediff/indexer"
- "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
- "github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ipfs/go-cid"
+ "github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
var (
@@ -35,54 +40,58 @@ var (
legacyHeaderCID cid.Cid
)
-func setupLegacy(t *testing.T) {
+func setupLegacySQLX(t *testing.T) {
mockLegacyBlock = legacyData.MockBlock
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
- db, err = shared.SetupDB()
+ db, err = postgres.SetupSQLXDB()
require.NoError(t, err)
- ind, err = indexer.NewStateDiffIndexer(legacyData.Config, db)
+ ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db)
require.NoError(t, err)
- var tx *indexer.BlockTx
+ var tx interfaces.Batch
tx, err = ind.PushBlock(
mockLegacyBlock,
legacyData.MockReceipts,
legacyData.MockBlock.Difficulty())
require.NoError(t, err)
- defer tx.Close(err)
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ }()
for _, node := range legacyData.StateDiffs {
- err = ind.PushStateNode(tx, node)
+ err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String())
require.NoError(t, err)
}
- shared.ExpectEqual(t, tx.BlockNumber, legacyData.BlockNumber.Uint64())
+ test_helpers.ExpectEqual(t, tx.(*sql.BatchTx).BlockNumber, legacyData.BlockNumber.Uint64())
}
-func TestPublishAndIndexerLegacy(t *testing.T) {
- t.Run("Publish and index header IPLDs in a legacy tx", func(t *testing.T) {
- setupLegacy(t)
+func TestSQLXIndexerLegacy(t *testing.T) {
+ t.Run("Publish and index header IPLDs", func(t *testing.T) {
+ setupLegacySQLX(t)
defer tearDown(t)
- pgStr := `SELECT cid, td, reward, id, base_fee
+ pgStr := `SELECT cid, td, reward, block_hash, coinbase
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
- CID string
- TD string
- Reward string
- ID int
- BaseFee *int64 `db:"base_fee"`
+ CID string
+ TD string
+ Reward string
+ BlockHash string `db:"block_hash"`
+ Coinbase string `db:"coinbase"`
}
header := new(res)
- err = db.QueryRowx(pgStr, legacyData.BlockNumber.Uint64()).StructScan(header)
+ err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).(*sqlx.Row).StructScan(header)
require.NoError(t, err)
- shared.ExpectEqual(t, header.CID, legacyHeaderCID.String())
- shared.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String())
- shared.ExpectEqual(t, header.Reward, "5000000000000011250")
+ test_helpers.ExpectEqual(t, header.CID, legacyHeaderCID.String())
+ test_helpers.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String())
+ test_helpers.ExpectEqual(t, header.Reward, "5000000000000011250")
+ test_helpers.ExpectEqual(t, header.Coinbase, legacyData.MockHeader.Coinbase.String())
require.Nil(t, legacyData.MockHeader.BaseFee)
- require.Nil(t, header.BaseFee)
})
}
diff --git a/statediff/indexer/database/sql/sqlx_indexer_test.go b/statediff/indexer/database/sql/sqlx_indexer_test.go
new file mode 100644
index 000000000..2c7ad4da5
--- /dev/null
+++ b/statediff/indexer/database/sql/sqlx_indexer_test.go
@@ -0,0 +1,548 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ipfs/go-cid"
+ blockstore "github.com/ipfs/go-ipfs-blockstore"
+ dshelp "github.com/ipfs/go-ipfs-ds-help"
+ "github.com/jmoiron/sqlx"
+ "github.com/stretchr/testify/require"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+ "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+)
+
+func setupSQLX(t *testing.T) {
+ db, err = postgres.SetupSQLXDB()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
+ require.NoError(t, err)
+ var tx interfaces.Batch
+ tx, err = ind.PushBlock(
+ mockBlock,
+ mocks.MockReceipts,
+ mocks.MockBlock.Difficulty())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ for _, node := range mocks.StateDiffs {
+ err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
+ require.NoError(t, err)
+ }
+
+ test_helpers.ExpectEqual(t, tx.(*sql.BatchTx).BlockNumber, mocks.BlockNumber.Uint64())
+}
+
+func tearDown(t *testing.T) {
+ sql.TearDownDB(t, db)
+ if err := ind.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSQLXIndexer(t *testing.T) {
+ t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
+ setupSQLX(t)
+ defer tearDown(t)
+ pgStr := `SELECT cid, td, reward, block_hash, coinbase
+ FROM eth.header_cids
+ WHERE block_number = $1`
+ // check header was properly indexed
+ type res struct {
+ CID string
+ TD string
+ Reward string
+ BlockHash string `db:"block_hash"`
+ Coinbase string `db:"coinbase"`
+ }
+ header := new(res)
+ err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).(*sqlx.Row).StructScan(header)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, header.CID, headerCID.String())
+ test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
+ test_helpers.ExpectEqual(t, header.Reward, "2000000000000021250")
+ test_helpers.ExpectEqual(t, header.Coinbase, mocks.MockHeader.Coinbase.String())
+ dc, err := cid.Decode(header.CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ var data []byte
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp)
+ })
+
+ t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
+ setupSQLX(t)
+ defer tearDown(t)
+ // check that txs were properly indexed and published
+ trxs := make([]string, 0)
+ pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
+ WHERE header_cids.block_number = $1`
+ err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(trxs), 5)
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String()))
+
+ transactions := mocks.MockBlock.Transactions()
+ type txResult struct {
+ TxType uint8 `db:"tx_type"`
+ Value string
+ }
+ for _, c := range trxs {
+ dc, err := cid.Decode(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ var data []byte
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ txTypeAndValueStr := `SELECT tx_type, value FROM eth.transaction_cids WHERE cid = $1`
+ switch c {
+ case trx1CID.String():
+ test_helpers.ExpectEqual(t, data, tx1)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[0].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value)
+ }
+ case trx2CID.String():
+ test_helpers.ExpectEqual(t, data, tx2)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[1].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value)
+ }
+ case trx3CID.String():
+ test_helpers.ExpectEqual(t, data, tx3)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != 0 {
+ t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[2].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value)
+ }
+ case trx4CID.String():
+ test_helpers.ExpectEqual(t, data, tx4)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != types.AccessListTxType {
+ t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[3].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
+ }
+ accessListElementModels := make([]models.AccessListElementModel, 0)
+ pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC`
+ err = db.Select(context.Background(), &accessListElementModels, pgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(accessListElementModels) != 2 {
+ t.Fatalf("expected two access list entries, got %d", len(accessListElementModels))
+ }
+ model1 := models.AccessListElementModel{
+ Index: accessListElementModels[0].Index,
+ Address: accessListElementModels[0].Address,
+ }
+ model2 := models.AccessListElementModel{
+ Index: accessListElementModels[1].Index,
+ Address: accessListElementModels[1].Address,
+ StorageKeys: accessListElementModels[1].StorageKeys,
+ }
+ test_helpers.ExpectEqual(t, model1, mocks.AccessListEntry1Model)
+ test_helpers.ExpectEqual(t, model2, mocks.AccessListEntry2Model)
+ case trx5CID.String():
+ test_helpers.ExpectEqual(t, data, tx5)
+ txRes := new(txResult)
+ err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if txRes.TxType != types.DynamicFeeTxType {
+ t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType)
+ }
+ if txRes.Value != transactions[4].Value().String() {
+ t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value)
+ }
+ }
+ }
+ })
+
+ t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
+ setupSQLX(t)
+ defer tearDown(t)
+
+ rcts := make([]string, 0)
+ pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+ WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+ AND transaction_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1
+ ORDER BY transaction_cids.index`
+ err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type logIPLD struct {
+ Index int `db:"index"`
+ Address string `db:"address"`
+ Data []byte `db:"data"`
+ Topic0 string `db:"topic0"`
+ Topic1 string `db:"topic1"`
+ }
+ for i := range rcts {
+ results := make([]logIPLD, 0)
+ pgStr = `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
+ INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
+ INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
+ WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
+ err = db.Select(context.Background(), &results, pgStr, rcts[i])
+ require.NoError(t, err)
+
+ // expecting MockLog1 and MockLog2 for mockReceipt4
+ expectedLogs := mocks.MockReceipts[i].Logs
+ test_helpers.ExpectEqual(t, len(results), len(expectedLogs))
+
+ var nodeElements []interface{}
+ for idx, r := range results {
+ // Decode the log leaf node.
+ err = rlp.DecodeBytes(r.Data, &nodeElements)
+ require.NoError(t, err)
+
+ logRaw, err := rlp.EncodeToBytes(expectedLogs[idx])
+ require.NoError(t, err)
+
+ // 2nd element of the leaf node contains the encoded log data.
+ test_helpers.ExpectEqual(t, logRaw, nodeElements[1].([]byte))
+ }
+ }
+ })
+
+ t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
+ setupSQLX(t)
+ defer tearDown(t)
+
+ // check receipts were properly indexed and published
+ rcts := make([]string, 0)
+ pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+ WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+ AND transaction_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1 order by transaction_cids.index`
+ err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(rcts), 5)
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String()))
+ expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
+
+ for idx, c := range rcts {
+ result := make([]models.IPLDModel, 0)
+ pgStr = `SELECT data
+ FROM eth.receipt_cids
+ INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
+ WHERE receipt_cids.leaf_cid = $1`
+ err = db.Select(context.Background(), &result, pgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Decode the log leaf node.
+ var nodeElements []interface{}
+ err = rlp.DecodeBytes(result[0].Data, &nodeElements)
+ require.NoError(t, err)
+
+ expectedRct, err := mocks.MockReceipts[idx].MarshalBinary()
+ require.NoError(t, err)
+
+ test_helpers.ExpectEqual(t, expectedRct, nodeElements[1].([]byte))
+
+ dc, err := cid.Decode(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ var data []byte
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
+ switch c {
+ case rct1CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf1)
+ var postStatus uint64
+ pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1`
+ err = db.Get(context.Background(), &postStatus, pgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus)
+ case rct2CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf2)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState1)
+ case rct3CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf3)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState2)
+ case rct4CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf4)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
+ case rct5CID.String():
+ test_helpers.ExpectEqual(t, data, rctLeaf5)
+ var postState string
+ err = db.Get(context.Background(), &postState, postStatePgStr, c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
+ }
+ }
+ })
+
+ t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
+ setupSQLX(t)
+ defer tearDown(t)
+ // check that state nodes were properly indexed and published
+ stateNodes := make([]models.StateNodeModel, 0)
+ pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
+ WHERE header_cids.block_number = $1 AND node_type != 3`
+ err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(stateNodes), 2)
+ for _, stateNode := range stateNodes {
+ var data []byte
+ dc, err := cid.Decode(stateNode.CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
+ var account models.StateAccountModel
+ err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if stateNode.CID == state1CID.String() {
+ test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
+ test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
+ test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
+ test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ HeaderID: account.HeaderID,
+ StatePath: stateNode.Path,
+ Balance: "0",
+ CodeHash: mocks.ContractCodeHash.Bytes(),
+ StorageRoot: mocks.ContractRoot,
+ Nonce: 1,
+ })
+ }
+ if stateNode.CID == state2CID.String() {
+ test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
+ test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
+ test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
+ test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ HeaderID: account.HeaderID,
+ StatePath: stateNode.Path,
+ Balance: "1000",
+ CodeHash: mocks.AccountCodeHash.Bytes(),
+ StorageRoot: mocks.AccountRoot,
+ Nonce: 0,
+ })
+ }
+ }
+
+ // check that Removed state nodes were properly indexed and published
+ stateNodes = make([]models.StateNodeModel, 0)
+ pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
+ FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
+ WHERE header_cids.block_number = $1 AND node_type = 3`
+ err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(stateNodes), 1)
+ stateNode := stateNodes[0]
+ var data []byte
+ dc, err := cid.Decode(stateNode.CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID)
+ test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
+ test_helpers.ExpectEqual(t, data, []byte{})
+ })
+
+ t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
+ setupSQLX(t)
+ defer tearDown(t)
+ // check that storage nodes were properly indexed
+ storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
+ pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+ FROM eth.storage_cids, eth.state_cids, eth.header_cids
+ WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
+ AND state_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1
+ AND storage_cids.node_type != 3`
+ err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(storageNodes), 1)
+ test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ CID: storageCID.String(),
+ NodeType: 2,
+ StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
+ StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+ Path: []byte{},
+ })
+ var data []byte
+ dc, err := cid.Decode(storageNodes[0].CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey := dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)
+
+ // check that Removed storage nodes were properly indexed
+ storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
+ pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+ FROM eth.storage_cids, eth.state_cids, eth.header_cids
+ WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
+ AND state_cids.header_id = header_cids.block_hash
+ AND header_cids.block_number = $1
+ AND storage_cids.node_type = 3`
+ err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, len(storageNodes), 1)
+ test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ CID: shared.RemovedNodeStorageCID,
+ NodeType: 3,
+ StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
+ StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
+ Path: []byte{'\x03'},
+ })
+ dc, err = cid.Decode(storageNodes[0].CID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mhKey = dshelp.MultihashToDsKey(dc.Hash())
+ prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
+ test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey)
+ err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ test_helpers.ExpectEqual(t, data, []byte{})
+ })
+}
diff --git a/statediff/indexer/test_helpers.go b/statediff/indexer/database/sql/test_helpers.go
similarity index 54%
rename from statediff/indexer/test_helpers.go
rename to statediff/indexer/database/sql/test_helpers.go
index 024bb58f0..b1032f8ff 100644
--- a/statediff/indexer/test_helpers.go
+++ b/statediff/indexer/database/sql/test_helpers.go
@@ -14,46 +14,66 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package indexer
+package sql
import (
+ "context"
"testing"
-
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
)
// TearDownDB is used to tear down the watcher dbs after tests
-func TearDownDB(t *testing.T, db *postgres.DB) {
- tx, err := db.Beginx()
+func TearDownDB(t *testing.T, db Database) {
+ ctx := context.Background()
+ tx, err := db.Begin(ctx)
if err != nil {
t.Fatal(err)
}
- _, err = tx.Exec(`DELETE FROM eth.header_cids`)
+ _, err = tx.Exec(ctx, `DELETE FROM eth.header_cids`)
if err != nil {
t.Fatal(err)
}
- _, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
+ _, err = tx.Exec(ctx, `DELETE FROM eth.uncle_cids`)
if err != nil {
t.Fatal(err)
}
- _, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
+ _, err = tx.Exec(ctx, `DELETE FROM eth.transaction_cids`)
if err != nil {
t.Fatal(err)
}
- _, err = tx.Exec(`DELETE FROM eth.state_cids`)
+ _, err = tx.Exec(ctx, `DELETE FROM eth.receipt_cids`)
if err != nil {
t.Fatal(err)
}
- _, err = tx.Exec(`DELETE FROM eth.storage_cids`)
+ _, err = tx.Exec(ctx, `DELETE FROM eth.state_cids`)
if err != nil {
t.Fatal(err)
}
- _, err = tx.Exec(`DELETE FROM blocks`)
+ _, err = tx.Exec(ctx, `DELETE FROM eth.storage_cids`)
if err != nil {
t.Fatal(err)
}
- err = tx.Commit()
+ _, err = tx.Exec(ctx, `DELETE FROM eth.state_accounts`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(ctx, `DELETE FROM eth.access_list_elements`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(ctx, `DELETE FROM eth.log_cids`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(ctx, `DELETE FROM blocks`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = tx.Exec(ctx, `DELETE FROM nodes`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = tx.Commit(ctx)
if err != nil {
t.Fatal(err)
}
diff --git a/statediff/indexer/database/sql/writer.go b/statediff/indexer/database/sql/writer.go
new file mode 100644
index 000000000..3f1dfc0b5
--- /dev/null
+++ b/statediff/indexer/database/sql/writer.go
@@ -0,0 +1,184 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+)
+
+var (
+ nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
+)
+
+// Writer handles processing and writing of indexed IPLD objects to Postgres
+type Writer struct {
+ db Database
+}
+
+// NewWriter creates a new pointer to a Writer
+func NewWriter(db Database) *Writer {
+ return &Writer{
+ db: db,
+ }
+}
+
+// Close satisfies io.Closer
+func (w *Writer) Close() error {
+ return w.db.Close()
+}
+
+/*
+INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
+ON CONFLICT (block_hash) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
+*/
+func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertHeaderStm(),
+ header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.db.NodeID(),
+ header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom,
+ header.Timestamp, header.MhKey, 1, header.Coinbase)
+ if err != nil {
+ return fmt.Errorf("error upserting header_cids entry: %v", err)
+ }
+ indexerMetrics.blocks.Inc(1)
+ return nil
+}
+
+/*
+INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
+ON CONFLICT (block_hash) DO NOTHING
+*/
+func (w *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel) error {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertUncleStm(),
+ uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
+ if err != nil {
+ return fmt.Errorf("error upserting uncle_cids entry: %v", err)
+ }
+ return nil
+}
+
+/*
+INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+ON CONFLICT (tx_hash) DO NOTHING
+*/
+func (w *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel) error {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertTxStm(),
+ transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index,
+ transaction.MhKey, transaction.Data, transaction.Type, transaction.Value)
+ if err != nil {
+ return fmt.Errorf("error upserting transaction_cids entry: %v", err)
+ }
+ indexerMetrics.transactions.Inc(1)
+ return nil
+}
+
+/*
+INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
+ON CONFLICT (tx_id, index) DO NOTHING
+*/
+func (w *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessListElementModel) error {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertAccessListElementStm(),
+ accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
+ if err != nil {
+ return fmt.Errorf("error upserting access_list_element entry: %v", err)
+ }
+ indexerMetrics.accessListEntries.Inc(1)
+ return nil
+}
+
+/*
+INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ON CONFLICT (tx_id) DO NOTHING
+*/
+func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertRctStm(),
+ rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot)
+ if err != nil {
+ return fmt.Errorf("error upserting receipt_cids entry: %w", err)
+ }
+ indexerMetrics.receipts.Inc(1)
+ return nil
+}
+
+/*
+INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+ON CONFLICT (rct_id, index) DO NOTHING
+*/
+func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
+ for _, log := range logs {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertLogStm(),
+ log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2,
+ log.Topic3, log.Data)
+ if err != nil {
+ return fmt.Errorf("error upserting logs entry: %w", err)
+ }
+ indexerMetrics.logs.Inc(1)
+ }
+ return nil
+}
+
+/*
+INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
+*/
+func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error {
+ var stateKey string
+ if stateNode.StateKey != nullHash.String() {
+ stateKey = stateNode.StateKey
+ }
+ _, err := tx.Exec(w.db.Context(), w.db.InsertStateStm(),
+ stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey)
+ if err != nil {
+ return fmt.Errorf("error upserting state_cids entry: %v", err)
+ }
+ return nil
+}
+
+/*
+INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6)
+ON CONFLICT (header_id, state_path) DO NOTHING
+*/
+func (w *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel) error {
+ _, err := tx.Exec(w.db.Context(), w.db.InsertAccountStm(),
+ stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash,
+ stateAccount.StorageRoot)
+ if err != nil {
+ return fmt.Errorf("error upserting state_accounts entry: %v", err)
+ }
+ return nil
+}
+
+/*
+INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)
+*/
+func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel) error {
+ var storageKey string
+ if storageCID.StorageKey != nullHash.String() {
+ storageKey = storageCID.StorageKey
+ }
+ _, err := tx.Exec(w.db.Context(), w.db.InsertStorageStm(),
+ storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType,
+ true, storageCID.MhKey)
+ if err != nil {
+ return fmt.Errorf("error upserting storage_cids entry: %v", err)
+ }
+ return nil
+}
diff --git a/statediff/indexer/helpers.go b/statediff/indexer/helpers.go
deleted file mode 100644
index 4e4f30c19..000000000
--- a/statediff/indexer/helpers.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package indexer
-
-import (
- "fmt"
-
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/statediff/types"
-)
-
-// ResolveFromNodeType wrapper around NodeType.Int() so that we maintain backwards compatibility
-func ResolveFromNodeType(nodeType types.NodeType) int {
- return nodeType.Int()
-}
-
-// ChainConfig returns the appropriate ethereum chain config for the provided chain id
-func ChainConfig(chainID uint64) (*params.ChainConfig, error) {
- switch chainID {
- case 1:
- return params.MainnetChainConfig, nil
- case 3:
- return params.RopstenChainConfig, nil
- case 4:
- return params.RinkebyChainConfig, nil
- case 5:
- return params.GoerliChainConfig, nil
- default:
- return nil, fmt.Errorf("chain config for chainid %d not available", chainID)
- }
-}
diff --git a/statediff/indexer/interfaces/interfaces.go b/statediff/indexer/interfaces/interfaces.go
new file mode 100644
index 000000000..8f951230d
--- /dev/null
+++ b/statediff/indexer/interfaces/interfaces.go
@@ -0,0 +1,46 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see .
+
+package interfaces
+
+import (
+ "io"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+ sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+)
+
+// StateDiffIndexer interface required to index statediff data
+type StateDiffIndexer interface {
+ PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error)
+ PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerID string) error
+ PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error
+ ReportDBMetrics(delay time.Duration, quit <-chan bool)
+ io.Closer
+}
+
+// Batch required for indexing data atomically
+type Batch interface {
+ Submit(err error) error
+}
+
+// Config used to configure different underlying implementations
+type Config interface {
+ Type() shared.DBType
+}
diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12252078 b/statediff/indexer/ipld/eip2930_test_data/eth-block-12252078
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12252078
rename to statediff/indexer/ipld/eip2930_test_data/eth-block-12252078
diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365585 b/statediff/indexer/ipld/eip2930_test_data/eth-block-12365585
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365585
rename to statediff/indexer/ipld/eip2930_test_data/eth-block-12365585
diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365586 b/statediff/indexer/ipld/eip2930_test_data/eth-block-12365586
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365586
rename to statediff/indexer/ipld/eip2930_test_data/eth-block-12365586
diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12252078 b/statediff/indexer/ipld/eip2930_test_data/eth-receipts-12252078
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12252078
rename to statediff/indexer/ipld/eip2930_test_data/eth-receipts-12252078
diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365585 b/statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365585
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365585
rename to statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365585
diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365586 b/statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365586
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365586
rename to statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365586
diff --git a/statediff/indexer/ipfs/ipld/eth_account.go b/statediff/indexer/ipld/eth_account.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_account.go
rename to statediff/indexer/ipld/eth_account.go
diff --git a/statediff/indexer/ipfs/ipld/eth_account_test.go b/statediff/indexer/ipld/eth_account_test.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_account_test.go
rename to statediff/indexer/ipld/eth_account_test.go
diff --git a/statediff/indexer/ipfs/ipld/eth_header.go b/statediff/indexer/ipld/eth_header.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_header.go
rename to statediff/indexer/ipld/eth_header.go
index 5905bdd7e..9bc307277 100644
--- a/statediff/indexer/ipfs/ipld/eth_header.go
+++ b/statediff/indexer/ipld/eth_header.go
@@ -20,13 +20,13 @@ import (
"encoding/json"
"fmt"
- "github.com/ethereum/go-ethereum/common"
-
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
)
// EthHeader (eth-block, codec 0x90), represents an ethereum block header
diff --git a/statediff/indexer/ipfs/ipld/eth_header_test.go b/statediff/indexer/ipld/eth_header_test.go
similarity index 99%
rename from statediff/indexer/ipfs/ipld/eth_header_test.go
rename to statediff/indexer/ipld/eth_header_test.go
index d1ed13d37..ebbab2129 100644
--- a/statediff/indexer/ipfs/ipld/eth_header_test.go
+++ b/statediff/indexer/ipld/eth_header_test.go
@@ -9,10 +9,11 @@ import (
"strconv"
"testing"
- "github.com/ethereum/go-ethereum/core/types"
block "github.com/ipfs/go-block-format"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
)
func TestBlockBodyRlpParsing(t *testing.T) {
diff --git a/statediff/indexer/ipfs/ipld/eth_log.go b/statediff/indexer/ipld/eth_log.go
similarity index 99%
rename from statediff/indexer/ipfs/ipld/eth_log.go
rename to statediff/indexer/ipld/eth_log.go
index ebc762065..225c44117 100644
--- a/statediff/indexer/ipfs/ipld/eth_log.go
+++ b/statediff/indexer/ipld/eth_log.go
@@ -3,11 +3,12 @@ package ipld
import (
"fmt"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
)
// EthLog (eth-log, codec 0x9a), represents an ethereum block header
diff --git a/statediff/indexer/ipfs/ipld/eth_log_trie.go b/statediff/indexer/ipld/eth_log_trie.go
similarity index 91%
rename from statediff/indexer/ipfs/ipld/eth_log_trie.go
rename to statediff/indexer/ipld/eth_log_trie.go
index 2e36f0a68..8e8af9c79 100644
--- a/statediff/indexer/ipfs/ipld/eth_log_trie.go
+++ b/statediff/indexer/ipld/eth_log_trie.go
@@ -3,10 +3,13 @@ package ipld
import (
"fmt"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/rlp"
+ node "github.com/ipfs/go-ipld-format"
+
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
)
// EthLogTrie (eth-tx-trie codec 0x9p) represents
@@ -89,15 +92,15 @@ func newLogTrie() *logTrie {
}
// getNodes invokes the localTrie, which computes the root hash of the
-// log trie and returns its database keys, to return a slice
+// log trie and returns its sql keys, to return a slice
// of EthLogTrie nodes.
-func (rt *logTrie) getNodes() ([]*EthLogTrie, error) {
+func (rt *logTrie) getNodes() ([]node.Node, error) {
keys, err := rt.getKeys()
if err != nil {
return nil, err
}
- out := make([]*EthLogTrie, 0, len(keys))
+ out := make([]node.Node, 0, len(keys))
for _, k := range keys {
n, err := rt.getNodeFromDB(k)
if err != nil {
@@ -114,14 +117,8 @@ func (rt *logTrie) getNodeFromDB(key []byte) (*EthLogTrie, error) {
if err != nil {
return nil, err
}
-
- c, err := RawdataToCid(MEthLogTrie, rawdata, multihash.KECCAK_256)
- if err != nil {
- return nil, err
- }
-
tn := &TrieNode{
- cid: c,
+ cid: keccak256ToCid(MEthLogTrie, key),
rawdata: rawdata,
}
return &EthLogTrie{TrieNode: tn}, nil
@@ -134,7 +131,6 @@ func (rt *logTrie) getLeafNodes() ([]*EthLogTrie, []*nodeKey, error) {
if err != nil {
return nil, nil, err
}
-
out := make([]*EthLogTrie, 0, len(keys))
for _, k := range keys {
n, err := rt.getNodeFromDB(k.dbKey)
diff --git a/statediff/indexer/ipfs/ipld/eth_parser.go b/statediff/indexer/ipld/eth_parser.go
similarity index 78%
rename from statediff/indexer/ipfs/ipld/eth_parser.go
rename to statediff/indexer/ipld/eth_parser.go
index 0b4780f8a..4e08f2d24 100644
--- a/statediff/indexer/ipfs/ipld/eth_parser.go
+++ b/statediff/indexer/ipld/eth_parser.go
@@ -23,11 +23,13 @@ import (
"io"
"io/ioutil"
+ "github.com/ipfs/go-cid"
+ node "github.com/ipfs/go-ipld-format"
+ "github.com/multiformats/go-multihash"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ipfs/go-cid"
- "github.com/multiformats/go-multihash"
)
// FromBlockRLP takes an RLP message representing
@@ -123,7 +125,7 @@ func FromBlockJSON(r io.Reader) (*EthHeader, []*EthTx, []*EthTxTrie, error) {
// FromBlockAndReceipts takes a block and processes it
// to return it a set of IPLD nodes for further processing.
-func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, [][]*EthLogTrie, [][]cid.Cid, []cid.Cid, error) {
+func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
// Process the header
headerNode, err := NewEthHeader(block.Header())
if err != nil {
@@ -148,10 +150,10 @@ func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHe
}
// Process the receipts and logs
- rctNodes, tctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := processReceiptsAndLogs(receipts,
+ rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := processReceiptsAndLogs(receipts,
block.Header().ReceiptHash[:])
- return headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, tctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err
+ return headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err
}
// processTransactions will take the found transactions in a parsed block body
@@ -180,11 +182,11 @@ func processTransactions(txs []*types.Transaction, expectedTxRoot []byte) ([]*Et
// processReceiptsAndLogs will take in receipts
// to return IPLD node slices for eth-rct, eth-rct-trie, eth-log, eth-log-trie, eth-log-trie-CID, eth-rct-trie-CID
-func processReceiptsAndLogs(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, [][]*EthLogTrie, [][]cid.Cid, []cid.Cid, error) {
+func processReceiptsAndLogs(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
// Pre allocating memory.
ethRctNodes := make([]*EthReceipt, 0, len(rcts))
ethLogleafNodeCids := make([][]cid.Cid, 0, len(rcts))
- ethLogTrieNodes := make([][]*EthLogTrie, 0, len(rcts))
+ ethLogTrieAndLogNodes := make([][]node.Node, 0, len(rcts))
receiptTrie := NewRctTrie()
@@ -195,7 +197,7 @@ func processReceiptsAndLogs(rcts []*types.Receipt, expectedRctRoot []byte) ([]*E
return nil, nil, nil, nil, nil, err
}
rct.LogRoot = logTrieHash
- ethLogTrieNodes = append(ethLogTrieNodes, logTrieNodes)
+ ethLogTrieAndLogNodes = append(ethLogTrieAndLogNodes, logTrieNodes)
ethLogleafNodeCids = append(ethLogleafNodeCids, leafNodeCids)
ethRct, err := NewReceipt(rct)
@@ -235,17 +237,33 @@ func processReceiptsAndLogs(rcts []*types.Receipt, expectedRctRoot []byte) ([]*E
ethRctleafNodeCids[idx] = rln.Cid()
}
- return ethRctNodes, rctTrieNodes, ethLogTrieNodes, ethLogleafNodeCids, ethRctleafNodeCids, err
+ return ethRctNodes, rctTrieNodes, ethLogTrieAndLogNodes, ethLogleafNodeCids, ethRctleafNodeCids, err
}
-func processLogs(logs []*types.Log) ([]*EthLogTrie, []cid.Cid, common.Hash, error) {
+const keccak256Length = 32
+
+func processLogs(logs []*types.Log) ([]node.Node, []cid.Cid, common.Hash, error) {
logTr := newLogTrie()
+ shortLog := make(map[uint64]*EthLog, len(logs))
for idx, log := range logs {
- ethLog, err := NewLog(log)
+ logRaw, err := rlp.EncodeToBytes(log)
if err != nil {
return nil, nil, common.Hash{}, err
}
- if err = logTr.Add(idx, ethLog.RawData()); err != nil {
+ // if len(logRaw) <= keccak256Length it is possible this value's "leaf node"
+ // will be stored in its parent branch but only if len(partialPathOfTheNode) + len(logRaw) <= keccak256Length
+ // But we can't tell what the partial path will be until the trie is Commit()-ed
+ // So wait until we collect all the leaf nodes, and if we are missing any at the indexes we note in shortLogCIDs
+ // we know that these "leaf nodes" were internalized into their parent branch node and we move forward with
+ // using the cid.Cid we cached in shortLogCIDs
+ if len(logRaw) <= keccak256Length {
+ logNode, err := NewLog(log)
+ if err != nil {
+ return nil, nil, common.Hash{}, err
+ }
+ shortLog[uint64(idx)] = logNode
+ }
+ if err = logTr.Add(idx, logRaw); err != nil {
return nil, nil, common.Hash{}, err
}
}
@@ -259,8 +277,7 @@ func processLogs(logs []*types.Log) ([]*EthLogTrie, []cid.Cid, common.Hash, erro
if err != nil {
return nil, nil, common.Hash{}, err
}
-
- leafNodeCids := make([]cid.Cid, len(leafNodes))
+ leafNodeCids := make([]cid.Cid, len(logs))
for i, ln := range leafNodes {
var idx uint
@@ -271,6 +288,15 @@ func processLogs(logs []*types.Log) ([]*EthLogTrie, []cid.Cid, common.Hash, erro
}
leafNodeCids[idx] = ln.Cid()
}
+ // this is where we check which logs <= keccak256Length were actually internalized into parent branch node
+ // and replace those that were with the cid.Cid for the raw log IPLD
+ for i, l := range shortLog {
+ if !leafNodeCids[i].Defined() {
+ leafNodeCids[i] = l.Cid()
+ // if the leaf node was internalized, we append an IPLD for log itself to the list of IPLDs we need to publish
+ logTrieNodes = append(logTrieNodes, l)
+ }
+ }
return logTrieNodes, leafNodeCids, common.BytesToHash(logTr.rootHash()), err
}
diff --git a/statediff/indexer/ipfs/ipld/eth_parser_test.go b/statediff/indexer/ipld/eth_parser_test.go
similarity index 99%
rename from statediff/indexer/ipfs/ipld/eth_parser_test.go
rename to statediff/indexer/ipld/eth_parser_test.go
index 9cb8d4e46..bcf28efde 100644
--- a/statediff/indexer/ipfs/ipld/eth_parser_test.go
+++ b/statediff/indexer/ipld/eth_parser_test.go
@@ -21,10 +21,11 @@ import (
"path/filepath"
"testing"
+ "github.com/stretchr/testify/require"
+
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
- "github.com/stretchr/testify/require"
)
type kind string
diff --git a/statediff/indexer/ipfs/ipld/eth_receipt.go b/statediff/indexer/ipld/eth_receipt.go
similarity index 99%
rename from statediff/indexer/ipfs/ipld/eth_receipt.go
rename to statediff/indexer/ipld/eth_receipt.go
index 0eb6a2f81..ccd785515 100644
--- a/statediff/indexer/ipfs/ipld/eth_receipt.go
+++ b/statediff/indexer/ipld/eth_receipt.go
@@ -21,10 +21,11 @@ import (
"fmt"
"strconv"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/core/types"
)
type EthReceipt struct {
diff --git a/statediff/indexer/ipfs/ipld/eth_receipt_trie.go b/statediff/indexer/ipld/eth_receipt_trie.go
similarity index 95%
rename from statediff/indexer/ipfs/ipld/eth_receipt_trie.go
rename to statediff/indexer/ipld/eth_receipt_trie.go
index fc1480703..75d40eedb 100644
--- a/statediff/indexer/ipfs/ipld/eth_receipt_trie.go
+++ b/statediff/indexer/ipld/eth_receipt_trie.go
@@ -121,7 +121,7 @@ func NewRctTrie() *rctTrie {
}
// GetNodes invokes the localTrie, which computes the root hash of the
-// transaction trie and returns its database keys, to return a slice
+// transaction trie and returns its sql keys, to return a slice
// of EthRctTrie nodes.
func (rt *rctTrie) GetNodes() ([]*EthRctTrie, error) {
keys, err := rt.getKeys()
@@ -166,14 +166,8 @@ func (rt *rctTrie) getNodeFromDB(key []byte) (*EthRctTrie, error) {
if err != nil {
return nil, err
}
-
- cid, err := RawdataToCid(MEthTxReceiptTrie, rawdata, multihash.KECCAK_256)
- if err != nil {
- return nil, err
- }
-
tn := &TrieNode{
- cid: cid,
+ cid: keccak256ToCid(MEthTxReceiptTrie, key),
rawdata: rawdata,
}
diff --git a/statediff/indexer/ipfs/ipld/eth_state.go b/statediff/indexer/ipld/eth_state.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_state.go
rename to statediff/indexer/ipld/eth_state.go
diff --git a/statediff/indexer/ipfs/ipld/eth_state_test.go b/statediff/indexer/ipld/eth_state_test.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_state_test.go
rename to statediff/indexer/ipld/eth_state_test.go
diff --git a/statediff/indexer/ipfs/ipld/eth_storage.go b/statediff/indexer/ipld/eth_storage.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_storage.go
rename to statediff/indexer/ipld/eth_storage.go
diff --git a/statediff/indexer/ipfs/ipld/eth_storage_test.go b/statediff/indexer/ipld/eth_storage_test.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_storage_test.go
rename to statediff/indexer/ipld/eth_storage_test.go
diff --git a/statediff/indexer/ipfs/ipld/eth_tx.go b/statediff/indexer/ipld/eth_tx.go
similarity index 99%
rename from statediff/indexer/ipfs/ipld/eth_tx.go
rename to statediff/indexer/ipld/eth_tx.go
index 394e235a8..99b1f9dbe 100644
--- a/statediff/indexer/ipfs/ipld/eth_tx.go
+++ b/statediff/indexer/ipld/eth_tx.go
@@ -22,11 +22,12 @@ import (
"strconv"
"strings"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
)
// EthTx (eth-tx codec 0x93) represents an ethereum transaction
diff --git a/statediff/indexer/ipfs/ipld/eth_tx_test.go b/statediff/indexer/ipld/eth_tx_test.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_tx_test.go
rename to statediff/indexer/ipld/eth_tx_test.go
diff --git a/statediff/indexer/ipfs/ipld/eth_tx_trie.go b/statediff/indexer/ipld/eth_tx_trie.go
similarity index 94%
rename from statediff/indexer/ipfs/ipld/eth_tx_trie.go
rename to statediff/indexer/ipld/eth_tx_trie.go
index 7e79ff164..bb4f66df0 100644
--- a/statediff/indexer/ipfs/ipld/eth_tx_trie.go
+++ b/statediff/indexer/ipld/eth_tx_trie.go
@@ -121,7 +121,7 @@ func newTxTrie() *txTrie {
}
// getNodes invokes the localTrie, which computes the root hash of the
-// transaction trie and returns its database keys, to return a slice
+// transaction trie and returns its sql keys, to return a slice
// of EthTxTrie nodes.
func (tt *txTrie) getNodes() ([]*EthTxTrie, error) {
keys, err := tt.getKeys()
@@ -135,12 +135,8 @@ func (tt *txTrie) getNodes() ([]*EthTxTrie, error) {
if err != nil {
return nil, err
}
- c, err := RawdataToCid(MEthTxTrie, rawdata, multihash.KECCAK_256)
- if err != nil {
- return nil, err
- }
tn := &TrieNode{
- cid: c,
+ cid: keccak256ToCid(MEthTxTrie, k),
rawdata: rawdata,
}
out = append(out, &EthTxTrie{TrieNode: tn})
diff --git a/statediff/indexer/ipfs/ipld/eth_tx_trie_test.go b/statediff/indexer/ipld/eth_tx_trie_test.go
similarity index 100%
rename from statediff/indexer/ipfs/ipld/eth_tx_trie_test.go
rename to statediff/indexer/ipld/eth_tx_trie_test.go
diff --git a/statediff/indexer/ipfs/ipld/shared.go b/statediff/indexer/ipld/shared.go
similarity index 96%
rename from statediff/indexer/ipfs/ipld/shared.go
rename to statediff/indexer/ipld/shared.go
index 993e00b42..e5c22a3c6 100644
--- a/statediff/indexer/ipfs/ipld/shared.go
+++ b/statediff/indexer/ipld/shared.go
@@ -27,7 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
- sdtrie "github.com/ethereum/go-ethereum/statediff/trie"
+ sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
)
@@ -143,7 +143,7 @@ func (lt *localTrie) commit() error {
return nil
}
-// getKeys returns the stored keys of the memory database
+// getKeys returns the stored keys of the memory sql
// of the localTrie for further processing.
func (lt *localTrie) getKeys() ([][]byte, error) {
if err := lt.commit(); err != nil {
@@ -167,7 +167,7 @@ type nodeKey struct {
TrieKey []byte
}
-// getLeafKeys returns the stored leaf keys from the memory database
+// getLeafKeys returns the stored leaf keys from the memory sql
// of the localTrie for further processing.
func (lt *localTrie) getLeafKeys() ([]*nodeKey, error) {
if err := lt.commit(); err != nil {
diff --git a/statediff/indexer/ipfs/ipld/test_data/error-tx-eth-block-body-json-999999 b/statediff/indexer/ipld/test_data/error-tx-eth-block-body-json-999999
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/error-tx-eth-block-body-json-999999
rename to statediff/indexer/ipld/test_data/error-tx-eth-block-body-json-999999
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-0 b/statediff/indexer/ipld/test_data/eth-block-body-json-0
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-0
rename to statediff/indexer/ipld/test_data/eth-block-body-json-0
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-4139497 b/statediff/indexer/ipld/test_data/eth-block-body-json-4139497
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-4139497
rename to statediff/indexer/ipld/test_data/eth-block-body-json-4139497
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-997522 b/statediff/indexer/ipld/test_data/eth-block-body-json-997522
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-997522
rename to statediff/indexer/ipld/test_data/eth-block-body-json-997522
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999998 b/statediff/indexer/ipld/test_data/eth-block-body-json-999998
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999998
rename to statediff/indexer/ipld/test_data/eth-block-body-json-999998
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999999 b/statediff/indexer/ipld/test_data/eth-block-body-json-999999
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999999
rename to statediff/indexer/ipld/test_data/eth-block-body-json-999999
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-997522 b/statediff/indexer/ipld/test_data/eth-block-body-rlp-997522
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-997522
rename to statediff/indexer/ipld/test_data/eth-block-body-rlp-997522
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-999999 b/statediff/indexer/ipld/test_data/eth-block-body-rlp-999999
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-999999
rename to statediff/indexer/ipld/test_data/eth-block-body-rlp-999999
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999996 b/statediff/indexer/ipld/test_data/eth-block-header-rlp-999996
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999996
rename to statediff/indexer/ipld/test_data/eth-block-header-rlp-999996
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999997 b/statediff/indexer/ipld/test_data/eth-block-header-rlp-999997
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999997
rename to statediff/indexer/ipld/test_data/eth-block-header-rlp-999997
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999999 b/statediff/indexer/ipld/test_data/eth-block-header-rlp-999999
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999999
rename to statediff/indexer/ipld/test_data/eth-block-header-rlp-999999
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-0e8b34 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-0e8b34
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-0e8b34
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-0e8b34
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-56864f b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-56864f
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-56864f
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-56864f
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-6fc2d7 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-6fc2d7
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-6fc2d7
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-6fc2d7
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-727994 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-727994
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-727994
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-727994
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-c9070d b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-c9070d
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-c9070d
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-c9070d
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d5be90 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-d5be90
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d5be90
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-d5be90
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d7f897 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-d7f897
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d7f897
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-d7f897
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-eb2f5f b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-eb2f5f
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-eb2f5f
rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-eb2f5f
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-000dd0 b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-000dd0
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-000dd0
rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-000dd0
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-113049 b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-113049
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-113049
rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-113049
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-9d1860 b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-9d1860
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-9d1860
rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-9d1860
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffbcad b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffbcad
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffbcad
rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffbcad
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffc25c b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffc25c
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffc25c
rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffc25c
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-0 b/statediff/indexer/ipld/test_data/eth-uncle-json-997522-0
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-0
rename to statediff/indexer/ipld/test_data/eth-uncle-json-997522-0
diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-1 b/statediff/indexer/ipld/test_data/eth-uncle-json-997522-1
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-1
rename to statediff/indexer/ipld/test_data/eth-uncle-json-997522-1
diff --git a/statediff/indexer/ipfs/ipld/test_data/tx_data b/statediff/indexer/ipld/test_data/tx_data
similarity index 100%
rename from statediff/indexer/ipfs/ipld/test_data/tx_data
rename to statediff/indexer/ipld/test_data/tx_data
diff --git a/statediff/indexer/ipfs/ipld/trie_node.go b/statediff/indexer/ipld/trie_node.go
similarity index 99%
rename from statediff/indexer/ipfs/ipld/trie_node.go
rename to statediff/indexer/ipld/trie_node.go
index a344bab4f..816217064 100644
--- a/statediff/indexer/ipfs/ipld/trie_node.go
+++ b/statediff/indexer/ipld/trie_node.go
@@ -20,9 +20,10 @@ import (
"encoding/json"
"fmt"
- "github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
+
+ "github.com/ethereum/go-ethereum/rlp"
)
const (
diff --git a/statediff/indexer/mainnet_data/block_12579670.rlp b/statediff/indexer/mainnet_data/block_12579670.rlp
new file mode 100644
index 000000000..6b4f3f773
Binary files /dev/null and b/statediff/indexer/mainnet_data/block_12579670.rlp differ
diff --git a/statediff/indexer/mainnet_data/block_12600011.rlp b/statediff/indexer/mainnet_data/block_12600011.rlp
new file mode 100644
index 000000000..96032b0c2
Binary files /dev/null and b/statediff/indexer/mainnet_data/block_12600011.rlp differ
diff --git a/statediff/indexer/mainnet_data/block_12619985.rlp b/statediff/indexer/mainnet_data/block_12619985.rlp
new file mode 100644
index 000000000..0e735313f
Binary files /dev/null and b/statediff/indexer/mainnet_data/block_12619985.rlp differ
diff --git a/statediff/indexer/mainnet_data/block_12625121.rlp b/statediff/indexer/mainnet_data/block_12625121.rlp
new file mode 100644
index 000000000..d031e30ea
Binary files /dev/null and b/statediff/indexer/mainnet_data/block_12625121.rlp differ
diff --git a/statediff/indexer/mainnet_data/block_12655432.rlp b/statediff/indexer/mainnet_data/block_12655432.rlp
new file mode 100644
index 000000000..fafc6bd88
Binary files /dev/null and b/statediff/indexer/mainnet_data/block_12655432.rlp differ
diff --git a/statediff/indexer/mainnet_data/block_12914664.rlp b/statediff/indexer/mainnet_data/block_12914664.rlp
new file mode 100644
index 000000000..b8aaeaa61
Binary files /dev/null and b/statediff/indexer/mainnet_data/block_12914664.rlp differ
diff --git a/statediff/indexer/mainnet_data/receipts_12579670.rlp b/statediff/indexer/mainnet_data/receipts_12579670.rlp
new file mode 100644
index 000000000..e69de8fd9
Binary files /dev/null and b/statediff/indexer/mainnet_data/receipts_12579670.rlp differ
diff --git a/statediff/indexer/mainnet_data/receipts_12600011.rlp b/statediff/indexer/mainnet_data/receipts_12600011.rlp
new file mode 100644
index 000000000..ae6d4f0c2
Binary files /dev/null and b/statediff/indexer/mainnet_data/receipts_12600011.rlp differ
diff --git a/statediff/indexer/mainnet_data/receipts_12619985.rlp b/statediff/indexer/mainnet_data/receipts_12619985.rlp
new file mode 100644
index 000000000..a9ba84bd2
Binary files /dev/null and b/statediff/indexer/mainnet_data/receipts_12619985.rlp differ
diff --git a/statediff/indexer/mainnet_data/receipts_12625121.rlp b/statediff/indexer/mainnet_data/receipts_12625121.rlp
new file mode 100644
index 000000000..4d3a8532c
Binary files /dev/null and b/statediff/indexer/mainnet_data/receipts_12625121.rlp differ
diff --git a/statediff/indexer/mainnet_data/receipts_12655432.rlp b/statediff/indexer/mainnet_data/receipts_12655432.rlp
new file mode 100644
index 000000000..f209f01d8
Binary files /dev/null and b/statediff/indexer/mainnet_data/receipts_12655432.rlp differ
diff --git a/statediff/indexer/mainnet_data/receipts_12914664.rlp b/statediff/indexer/mainnet_data/receipts_12914664.rlp
new file mode 100644
index 000000000..3cf8e8895
Binary files /dev/null and b/statediff/indexer/mainnet_data/receipts_12914664.rlp differ
diff --git a/statediff/indexer/mocks/test_data.go b/statediff/indexer/mocks/test_data.go
index 2d544b6ea..dccf72e60 100644
--- a/statediff/indexer/mocks/test_data.go
+++ b/statediff/indexer/mocks/test_data.go
@@ -22,18 +22,16 @@ import (
"crypto/rand"
"math/big"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
-
- "github.com/ethereum/go-ethereum/trie"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/statediff/testhelpers"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+ "github.com/ethereum/go-ethereum/trie"
)
// Test variables
@@ -51,6 +49,7 @@ var (
Difficulty: big.NewInt(5000000),
Extra: []byte{},
BaseFee: big.NewInt(params.InitialBaseFee),
+ Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
}
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts(TestConfig, BlockNumber)
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts, new(trie.Trie))
@@ -111,7 +110,7 @@ var (
nonce1 = uint64(1)
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
- ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
+ ContractLeafKey = test_helpers.AddressToLeafKey(ContractAddress)
ContractAccount, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: nonce1,
Balance: big.NewInt(0),
@@ -127,8 +126,8 @@ var (
nonce0 = uint64(0)
AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
- AccountLeafKey = testhelpers.Account2LeafKey
- RemovedLeafKey = testhelpers.Account1LeafKey
+ AccountLeafKey = test_helpers.Account2LeafKey
+ RemovedLeafKey = test_helpers.Account1LeafKey
Account, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: nonce0,
Balance: big.NewInt(1000),
@@ -218,6 +217,7 @@ func NewLegacyData() *LegacyData {
ReceiptHash: common.HexToHash("0x0"),
Difficulty: big.NewInt(5000000),
Extra: []byte{},
+ Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476888"),
}
mockTransactions, mockReceipts, senderAddr := createLegacyTransactionsAndReceipts(config, blockNumber)
@@ -308,7 +308,7 @@ func createTransactionsAndReceipts(config *params.ChainConfig, blockNumber *big.
GasPrice: big.NewInt(100),
Gas: 50,
To: &AnotherAddress,
- Value: big.NewInt(1000),
+ Value: big.NewInt(999),
Data: []byte{},
AccessList: types.AccessList{
AccessListEntry1,
diff --git a/statediff/indexer/models/batch.go b/statediff/indexer/models/batch.go
new file mode 100644
index 000000000..16096f292
--- /dev/null
+++ b/statediff/indexer/models/batch.go
@@ -0,0 +1,115 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package models
+
+import "github.com/lib/pq"
+
+// IPLDBatch holds the arguments for a batch insert of IPLD data
+type IPLDBatch struct {
+ Keys []string
+ Values [][]byte
+}
+
+// UncleBatch holds the arguments for a batch insert of uncle data
+type UncleBatch struct {
+ HeaderID []string
+ BlockHashes []string
+ ParentHashes []string
+ CIDs []string
+ MhKeys []string
+ Rewards []string
+}
+
+// TxBatch holds the arguments for a batch insert of tx data
+type TxBatch struct {
+ HeaderID string
+ Indexes []int64
+ TxHashes []string
+ CIDs []string
+ MhKeys []string
+ Dsts []string
+ Srcs []string
+ Datas [][]byte
+ Types []uint8
+}
+
+// AccessListBatch holds the arguments for a batch insert of access list data
+type AccessListBatch struct {
+ Indexes []int64
+ TxIDs []string
+ Addresses []string
+ StorageKeysSets []pq.StringArray
+}
+
+// ReceiptBatch holds the arguments for a batch insert of receipt data
+type ReceiptBatch struct {
+ TxIDs []string
+ LeafCIDs []string
+ LeafMhKeys []string
+ PostStatuses []uint64
+ PostStates []string
+ Contracts []string
+ ContractHashes []string
+ LogRoots []string
+}
+
+// LogBatch holds the arguments for a batch insert of log data
+type LogBatch struct {
+ LeafCIDs []string
+ LeafMhKeys []string
+ ReceiptIDs []string
+ Addresses []string
+ Indexes []int64
+ Datas [][]byte
+ Topic0s []string
+ Topic1s []string
+ Topic2s []string
+ Topic3s []string
+}
+
+// StateBatch holds the arguments for a batch insert of state data
+type StateBatch struct {
+ HeaderID string
+ Paths [][]byte
+ StateKeys []string
+ NodeTypes []int
+ CIDs []string
+ MhKeys []string
+ Diff bool
+}
+
+// AccountBatch holds the arguments for a batch insert of account data
+type AccountBatch struct {
+ HeaderID string
+ StatePaths [][]byte
+ Balances []string
+ Nonces []uint64
+ CodeHashes [][]byte
+ StorageRoots []string
+}
+
+// StorageBatch holds the arguments for a batch insert of storage data
+type StorageBatch struct {
+ HeaderID string
+ StatePaths [][]string
+ Paths [][]byte
+ StorageKeys []string
+ NodeTypes []int
+ CIDs []string
+ MhKeys []string
+ Diff bool
+}
diff --git a/statediff/indexer/models/models.go b/statediff/indexer/models/models.go
index cb9404385..2caed1bcb 100644
--- a/statediff/indexer/models/models.go
+++ b/statediff/indexer/models/models.go
@@ -18,16 +18,21 @@ package models
import "github.com/lib/pq"
+// IPLDModel is the db model for public.blocks
+type IPLDModel struct {
+ Key string `db:"key"`
+ Data []byte `db:"data"`
+}
+
// HeaderModel is the db model for eth.header_cids
type HeaderModel struct {
- ID int64 `db:"id"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
TotalDifficulty string `db:"td"`
- NodeID int64 `db:"node_id"`
+ NodeID string `db:"node_id"`
Reward string `db:"reward"`
StateRoot string `db:"state_root"`
UncleRoot string `db:"uncle_root"`
@@ -36,13 +41,12 @@ type HeaderModel struct {
Bloom []byte `db:"bloom"`
Timestamp uint64 `db:"timestamp"`
TimesValidated int64 `db:"times_validated"`
- BaseFee *int64 `db:"base_fee"`
+ Coinbase string `db:"coinbase"`
}
// UncleModel is the db model for eth.uncle_cids
type UncleModel struct {
- ID int64 `db:"id"`
- HeaderID int64 `db:"header_id"`
+ HeaderID string `db:"header_id"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
@@ -52,8 +56,7 @@ type UncleModel struct {
// TxModel is the db model for eth.transaction_cids
type TxModel struct {
- ID int64 `db:"id"`
- HeaderID int64 `db:"header_id"`
+ HeaderID string `db:"header_id"`
Index int64 `db:"index"`
TxHash string `db:"tx_hash"`
CID string `db:"cid"`
@@ -61,22 +64,21 @@ type TxModel struct {
Dst string `db:"dst"`
Src string `db:"src"`
Data []byte `db:"tx_data"`
- Type *uint8 `db:"tx_type"`
+ Type uint8 `db:"tx_type"`
+ Value string `db:"value"`
}
// AccessListElementModel is the db model for eth.access_list_entry
type AccessListElementModel struct {
- ID int64 `db:"id"`
Index int64 `db:"index"`
- TxID int64 `db:"tx_id"`
+ TxID string `db:"tx_id"`
Address string `db:"address"`
StorageKeys pq.StringArray `db:"storage_keys"`
}
// ReceiptModel is the db model for eth.receipt_cids
type ReceiptModel struct {
- ID int64 `db:"id"`
- TxID int64 `db:"tx_id"`
+ TxID string `db:"tx_id"`
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
PostStatus uint64 `db:"post_status"`
@@ -88,8 +90,7 @@ type ReceiptModel struct {
// StateNodeModel is the db model for eth.state_cids
type StateNodeModel struct {
- ID int64 `db:"id"`
- HeaderID int64 `db:"header_id"`
+ HeaderID string `db:"header_id"`
Path []byte `db:"state_path"`
StateKey string `db:"state_leaf_key"`
NodeType int `db:"node_type"`
@@ -100,8 +101,8 @@ type StateNodeModel struct {
// StorageNodeModel is the db model for eth.storage_cids
type StorageNodeModel struct {
- ID int64 `db:"id"`
- StateID int64 `db:"state_id"`
+ HeaderID string `db:"header_id"`
+ StatePath []byte `db:"state_path"`
Path []byte `db:"storage_path"`
StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"`
@@ -112,8 +113,8 @@ type StorageNodeModel struct {
// StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key
type StorageNodeWithStateKeyModel struct {
- ID int64 `db:"id"`
- StateID int64 `db:"state_id"`
+ HeaderID string `db:"header_id"`
+ StatePath []byte `db:"state_path"`
Path []byte `db:"storage_path"`
StateKey string `db:"state_leaf_key"`
StorageKey string `db:"storage_leaf_key"`
@@ -125,8 +126,8 @@ type StorageNodeWithStateKeyModel struct {
// StateAccountModel is a db model for an eth state account (decoded value of state leaf node)
type StateAccountModel struct {
- ID int64 `db:"id"`
- StateID int64 `db:"state_id"`
+ HeaderID string `db:"header_id"`
+ StatePath []byte `db:"state_path"`
Balance string `db:"balance"`
Nonce uint64 `db:"nonce"`
CodeHash []byte `db:"code_hash"`
@@ -135,10 +136,9 @@ type StateAccountModel struct {
// LogsModel is the db model for eth.logs
type LogsModel struct {
- ID int64 `db:"id"`
+ ReceiptID string `db:"rct_id"`
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
- ReceiptID int64 `db:"receipt_id"`
Address string `db:"address"`
Index int64 `db:"index"`
Data []byte `db:"log_data"`
diff --git a/statediff/indexer/postgres/config.go b/statediff/indexer/postgres/config.go
deleted file mode 100644
index c2de0a6bf..000000000
--- a/statediff/indexer/postgres/config.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package postgres
-
-import (
- "fmt"
-)
-
-// Env variables
-const (
- DATABASE_NAME = "DATABASE_NAME"
- DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
- DATABASE_PORT = "DATABASE_PORT"
- DATABASE_USER = "DATABASE_USER"
- DATABASE_PASSWORD = "DATABASE_PASSWORD"
- DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
- DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
- DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
-)
-
-type ConnectionParams struct {
- Hostname string
- Name string
- User string
- Password string
- Port int
-}
-
-type ConnectionConfig struct {
- MaxIdle int
- MaxOpen int
- MaxLifetime int
-}
-
-func DbConnectionString(params ConnectionParams) string {
- if len(params.User) > 0 && len(params.Password) > 0 {
- return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
- params.User, params.Password, params.Hostname, params.Port, params.Name)
- }
- if len(params.User) > 0 && len(params.Password) == 0 {
- return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
- params.User, params.Hostname, params.Port, params.Name)
- }
- return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", params.Hostname, params.Port, params.Name)
-}
diff --git a/statediff/indexer/postgres/postgres.go b/statediff/indexer/postgres/postgres.go
deleted file mode 100644
index 455dac306..000000000
--- a/statediff/indexer/postgres/postgres.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package postgres
-
-import (
- "time"
-
- "github.com/jmoiron/sqlx"
- _ "github.com/lib/pq" //postgres driver
-
- "github.com/ethereum/go-ethereum/statediff/indexer/node"
-)
-
-type DB struct {
- *sqlx.DB
- Node node.Info
- NodeID int64
-}
-
-func NewDB(connectString string, config ConnectionConfig, node node.Info) (*DB, error) {
- db, connectErr := sqlx.Connect("postgres", connectString)
- if connectErr != nil {
- return &DB{}, ErrDBConnectionFailed(connectErr)
- }
- if config.MaxOpen > 0 {
- db.SetMaxOpenConns(config.MaxOpen)
- }
- if config.MaxIdle > 0 {
- db.SetMaxIdleConns(config.MaxIdle)
- }
- if config.MaxLifetime > 0 {
- lifetime := time.Duration(config.MaxLifetime) * time.Second
- db.SetConnMaxLifetime(lifetime)
- }
- pg := DB{DB: db, Node: node}
- nodeErr := pg.CreateNode(&node)
- if nodeErr != nil {
- return &DB{}, ErrUnableToSetNode(nodeErr)
- }
- return &pg, nil
-}
-
-func (db *DB) CreateNode(node *node.Info) error {
- var nodeID int64
- err := db.QueryRow(
- `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id)
- VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT (genesis_block, network_id, node_id, chain_id)
- DO UPDATE
- SET genesis_block = $1,
- network_id = $2,
- node_id = $3,
- client_name = $4,
- chain_id = $5
- RETURNING id`,
- node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID).Scan(&nodeID)
- if err != nil {
- return ErrUnableToSetNode(err)
- }
- db.NodeID = nodeID
- return nil
-}
diff --git a/statediff/indexer/shared/chain_type.go b/statediff/indexer/shared/chain_type.go
deleted file mode 100644
index c3dedfe38..000000000
--- a/statediff/indexer/shared/chain_type.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package shared
-
-import (
- "errors"
- "strings"
-)
-
-// ChainType enum for specifying blockchain
-type ChainType int
-
-const (
- UnknownChain ChainType = iota
- Ethereum
- Bitcoin
- Omni
- EthereumClassic
-)
-
-func (c ChainType) String() string {
- switch c {
- case Ethereum:
- return "Ethereum"
- case Bitcoin:
- return "Bitcoin"
- case Omni:
- return "Omni"
- case EthereumClassic:
- return "EthereumClassic"
- default:
- return ""
- }
-}
-
-func (c ChainType) API() string {
- switch c {
- case Ethereum:
- return "eth"
- case Bitcoin:
- return "btc"
- case Omni:
- return "omni"
- case EthereumClassic:
- return "etc"
- default:
- return ""
- }
-}
-
-func NewChainType(name string) (ChainType, error) {
- switch strings.ToLower(name) {
- case "ethereum", "eth":
- return Ethereum, nil
- case "bitcoin", "btc", "xbt":
- return Bitcoin, nil
- case "omni":
- return Omni, nil
- case "classic", "etc":
- return EthereumClassic, nil
- default:
- return UnknownChain, errors.New("invalid name for chain")
- }
-}
diff --git a/statediff/indexer/shared/constants.go b/statediff/indexer/shared/constants.go
index 3dc2994c4..6d1e298ad 100644
--- a/statediff/indexer/shared/constants.go
+++ b/statediff/indexer/shared/constants.go
@@ -1,5 +1,5 @@
// VulcanizeDB
-// Copyright © 2019 Vulcanize
+// Copyright © 2021 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
@@ -17,6 +17,7 @@
package shared
const (
- DefaultMaxBatchSize uint64 = 100
- DefaultMaxBatchNumber int64 = 50
+ RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
+ RemovedNodeStateCID = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
+ RemovedNodeMhKey = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A"
)
diff --git a/statediff/indexer/shared/data_type.go b/statediff/indexer/shared/data_type.go
deleted file mode 100644
index ccab92c1e..000000000
--- a/statediff/indexer/shared/data_type.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package shared
-
-import (
- "fmt"
- "strings"
-)
-
-// DataType is an enum to loosely represent type of chain data
-type DataType int
-
-const (
- UnknownDataType DataType = iota - 1
- Full
- Headers
- Uncles
- Transactions
- Receipts
- State
- Storage
-)
-
-// String() method to resolve ReSyncType enum
-func (r DataType) String() string {
- switch r {
- case Full:
- return "full"
- case Headers:
- return "headers"
- case Uncles:
- return "uncles"
- case Transactions:
- return "transactions"
- case Receipts:
- return "receipts"
- case State:
- return "state"
- case Storage:
- return "storage"
- default:
- return "unknown"
- }
-}
-
-// GenerateDataTypeFromString returns a DataType from a provided string
-func GenerateDataTypeFromString(str string) (DataType, error) {
- switch strings.ToLower(str) {
- case "full", "f":
- return Full, nil
- case "headers", "header", "h":
- return Headers, nil
- case "uncles", "u":
- return Uncles, nil
- case "transactions", "transaction", "trxs", "txs", "trx", "tx", "t":
- return Transactions, nil
- case "receipts", "receipt", "rcts", "rct", "r":
- return Receipts, nil
- case "state":
- return State, nil
- case "storage":
- return Storage, nil
- default:
- return UnknownDataType, fmt.Errorf("unrecognized resync type: %s", str)
- }
-}
-
-// SupportedDataType returns whether a DataType is supported
-func SupportedDataType(d DataType) (bool, error) {
- switch d {
- case Full:
- return true, nil
- case Headers:
- return true, nil
- case Uncles:
- return true, nil
- case Transactions:
- return true, nil
- case Receipts:
- return true, nil
- case State:
- return true, nil
- case Storage:
- return true, nil
- default:
- return true, nil
- }
-}
diff --git a/statediff/indexer/shared/db_kind.go b/statediff/indexer/shared/db_kind.go
new file mode 100644
index 000000000..7e7997f95
--- /dev/null
+++ b/statediff/indexer/shared/db_kind.go
@@ -0,0 +1,46 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package shared
+
+import (
+ "fmt"
+ "strings"
+)
+
+// DBType to explicitly type the kind of DB
+type DBType string
+
+const (
+ POSTGRES DBType = "Postgres"
+ DUMP DBType = "Dump"
+ FILE DBType = "File"
+ UNKNOWN DBType = "Unknown"
+)
+
+// ResolveDBType resolves a DBType from a provided string
+func ResolveDBType(str string) (DBType, error) {
+ switch strings.ToLower(str) {
+ case "postgres", "pg":
+ return POSTGRES, nil
+ case "dump", "d":
+ return DUMP, nil
+ case "file", "f", "fs":
+ return FILE, nil
+ default:
+ return UNKNOWN, fmt.Errorf("unrecognized db type string: %s", str)
+ }
+}
diff --git a/statediff/indexer/shared/functions.go b/statediff/indexer/shared/functions.go
index cb2ca6cea..8b0acbb54 100644
--- a/statediff/indexer/shared/functions.go
+++ b/statediff/indexer/shared/functions.go
@@ -18,15 +18,9 @@ package shared
import (
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
-
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
- format "github.com/ipfs/go-ipld-format"
- "github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
)
@@ -46,69 +40,12 @@ func HandleZeroAddr(to common.Address) string {
return to.Hex()
}
-// Rollback sql transaction and log any error
-func Rollback(tx *sqlx.Tx) {
- if err := tx.Rollback(); err != nil {
- log.Error(err.Error())
- }
-}
-
-// PublishIPLD is used to insert an IPLD into Postgres blockstore with the provided tx
-func PublishIPLD(tx *sqlx.Tx, i format.Node) error {
- dbKey := dshelp.MultihashToDsKey(i.Cid().Hash())
- prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
- raw := i.RawData()
- _, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
- return err
-}
-
-// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx and cid string
-func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
- mhKey, err := MultihashKeyFromCIDString(cid)
- if err != nil {
- return nil, err
- }
- pgStr := `SELECT data FROM public.blocks WHERE key = $1`
- var block []byte
- return block, tx.Get(&block, pgStr, mhKey)
-}
-
-// FetchIPLDByMhKey is used to retrieve an ipld from Postgres blockstore with the provided tx and mhkey string
-func FetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
- pgStr := `SELECT data FROM public.blocks WHERE key = $1`
- var block []byte
- return block, tx.Get(&block, pgStr, mhKey)
-}
-
// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
func MultihashKeyFromCID(c cid.Cid) string {
dbKey := dshelp.MultihashToDsKey(c.Hash())
return blockstore.BlockPrefix.String() + dbKey.String()
}
-// MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string
-func MultihashKeyFromCIDString(c string) (string, error) {
- dc, err := cid.Decode(c)
- if err != nil {
- return "", err
- }
- dbKey := dshelp.MultihashToDsKey(dc.Hash())
- return blockstore.BlockPrefix.String() + dbKey.String(), nil
-}
-
-// PublishRaw derives a cid from raw bytes and provided codec and multihash type, and writes it to the db tx
-// returns the CID and blockstore prefixed multihash key
-func PublishRaw(tx *sqlx.Tx, codec, mh uint64, raw []byte) (string, string, error) {
- c, err := ipld.RawdataToCid(codec, raw, mh)
- if err != nil {
- return "", "", err
- }
- dbKey := dshelp.MultihashToDsKey(c.Hash())
- prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
- _, err = tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
- return c.String(), prefixedKey, err
-}
-
// MultihashKeyFromKeccak256 converts keccak256 hash bytes into a blockstore-prefixed multihash db key string
func MultihashKeyFromKeccak256(hash common.Hash) (string, error) {
mh, err := multihash.Encode(hash.Bytes(), multihash.KECCAK_256)
@@ -118,15 +55,3 @@ func MultihashKeyFromKeccak256(hash common.Hash) (string, error) {
dbKey := dshelp.MultihashToDsKey(mh)
return blockstore.BlockPrefix.String() + dbKey.String(), nil
}
-
-// PublishDirect diretly writes a previously derived mhkey => value pair to the ipld database in the provided tx
-func PublishDirect(tx *sqlx.Tx, key string, value []byte) error {
- _, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, key, value)
- return err
-}
-
-// PublishDirectWithDB diretly writes a previously derived mhkey => value pair to the ipld database
-func PublishDirectWithDB(db *postgres.DB, key string, value []byte) error {
- _, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, key, value)
- return err
-}
diff --git a/statediff/indexer/reward.go b/statediff/indexer/shared/reward.go
similarity index 99%
rename from statediff/indexer/reward.go
rename to statediff/indexer/shared/reward.go
index 47e3f17b9..3d5752e25 100644
--- a/statediff/indexer/reward.go
+++ b/statediff/indexer/shared/reward.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package indexer
+package shared
import (
"math/big"
diff --git a/statediff/indexer/shared/types.go b/statediff/indexer/shared/types.go
deleted file mode 100644
index 1337ba68a..000000000
--- a/statediff/indexer/shared/types.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package shared
-
-import (
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
- "github.com/ethereum/go-ethereum/statediff/types"
-)
-
-// TrieNode struct used to flag node as leaf or not
-type TrieNode struct {
- Path []byte
- LeafKey common.Hash
- Value []byte
- Type types.NodeType
-}
-
-// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
-// Returned by IPLDPublisher
-// Passed to CIDIndexer
-type CIDPayload struct {
- HeaderCID models.HeaderModel
- UncleCIDs []models.UncleModel
- TransactionCIDs []models.TxModel
- ReceiptCIDs map[common.Hash]models.ReceiptModel
- StateNodeCIDs []models.StateNodeModel
- StateAccounts map[string]models.StateAccountModel
- StorageNodeCIDs map[string][]models.StorageNodeModel
-}
diff --git a/statediff/indexer/test_helpers/mainnet_test_helpers.go b/statediff/indexer/test_helpers/mainnet_test_helpers.go
new file mode 100644
index 000000000..141bb10fd
--- /dev/null
+++ b/statediff/indexer/test_helpers/mainnet_test_helpers.go
@@ -0,0 +1,236 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package test_helpers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "os"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+const (
+ defaultBlockFilePath = "../../../mainnet_data/block"
+ defaultReceiptsFilePath = "../../../mainnet_data/receipts"
+)
+
+const (
+ TEST_RAW_URL = "TEST_RAW_URL"
+ TEST_BLOCK_NUMBER = "TEST_BLOCK_NUMBER"
+)
+
+// ProblemBlocks list of known problem blocks, with funky edge cases
+var ProblemBlocks = []int64{
+ 12600011,
+ 12619985,
+ 12625121,
+ 12655432,
+ 12579670,
+ 12914664,
+}
+
+// TestConfig holds configuration params for mainnet tests
+type TestConfig struct {
+ RawURL string
+ BlockNumber *big.Int
+ LocalCache bool
+}
+
+// DefaultTestConfig is the default TestConfig
+var DefaultTestConfig = TestConfig{
+ RawURL: "http://127.0.0.1:8545",
+ BlockNumber: big.NewInt(12914664),
+ LocalCache: true,
+}
+
+// TestBlockAndReceiptsFromEnv retrieves the block and receipts using env variables to override default config block number
+func TestBlockAndReceiptsFromEnv(conf TestConfig) (*types.Block, types.Receipts, error) {
+ blockNumberStr := os.Getenv(TEST_BLOCK_NUMBER)
+ blockNumber, ok := new(big.Int).SetString(blockNumberStr, 10)
+ if !ok {
+ fmt.Printf("Warning: no blockNumber configured for statediffing mainnet tests, using default (%d)\r\n",
+ DefaultTestConfig.BlockNumber)
+ } else {
+ conf.BlockNumber = blockNumber
+ }
+ return TestBlockAndReceipts(conf)
+}
+
+// TestBlockAndReceipts retrieves the block and receipts for the provided test config
+// It first tries to load files from the local system before setting up and using an ethclient.Client to pull the data
+func TestBlockAndReceipts(conf TestConfig) (*types.Block, types.Receipts, error) {
+ var cli *ethclient.Client
+ var err error
+ var block *types.Block
+ var receipts types.Receipts
+ blockFilePath := fmt.Sprintf("%s_%s.rlp", defaultBlockFilePath, conf.BlockNumber.String())
+ if _, err = os.Stat(blockFilePath); !errors.Is(err, os.ErrNotExist) {
+ fmt.Printf("local file (%s) found for block %s\n", blockFilePath, conf.BlockNumber.String())
+ block, err = LoadBlockRLP(blockFilePath)
+ if err != nil {
+ fmt.Printf("loading local file (%s) failed (%s), dialing remote client at %s\n", blockFilePath, err.Error(), conf.RawURL)
+ cli, err = ethclient.Dial(conf.RawURL)
+ if err != nil {
+ return nil, nil, err
+ }
+ block, err = FetchBlock(cli, conf.BlockNumber)
+ if err != nil {
+ return nil, nil, err
+ }
+ if conf.LocalCache {
+ if err := WriteBlockRLP(blockFilePath, block); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ } else {
+ fmt.Printf("no local file found for block %s, dialing remote client at %s\n", conf.BlockNumber.String(), conf.RawURL)
+ cli, err = ethclient.Dial(conf.RawURL)
+ if err != nil {
+ return nil, nil, err
+ }
+ block, err = FetchBlock(cli, conf.BlockNumber)
+ if err != nil {
+ return nil, nil, err
+ }
+ if conf.LocalCache {
+ if err := WriteBlockRLP(blockFilePath, block); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ receiptsFilePath := fmt.Sprintf("%s_%s.rlp", defaultReceiptsFilePath, conf.BlockNumber.String())
+ if _, err = os.Stat(receiptsFilePath); !errors.Is(err, os.ErrNotExist) {
+ fmt.Printf("local file (%s) found for block %s receipts\n", receiptsFilePath, conf.BlockNumber.String())
+ receipts, err = LoadReceiptsEncoding(receiptsFilePath, len(block.Transactions()))
+ if err != nil {
+ fmt.Printf("loading local file (%s) failed (%s), dialing remote client at %s\n", receiptsFilePath, err.Error(), conf.RawURL)
+ if cli == nil {
+ cli, err = ethclient.Dial(conf.RawURL)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ receipts, err = FetchReceipts(cli, block)
+ if err != nil {
+ return nil, nil, err
+ }
+ if conf.LocalCache {
+ if err := WriteReceiptsEncoding(receiptsFilePath, block.Number(), receipts); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ } else {
+ fmt.Printf("no local file found for block %s receipts, dialing remote client at %s\n", conf.BlockNumber.String(), conf.RawURL)
+ if cli == nil {
+ cli, err = ethclient.Dial(conf.RawURL)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ receipts, err = FetchReceipts(cli, block)
+ if err != nil {
+ return nil, nil, err
+ }
+ if conf.LocalCache {
+ if err := WriteReceiptsEncoding(receiptsFilePath, block.Number(), receipts); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ return block, receipts, nil
+}
+
+// FetchBlock fetches the block at the provided height using the ethclient.Client
+func FetchBlock(cli *ethclient.Client, blockNumber *big.Int) (*types.Block, error) {
+ return cli.BlockByNumber(context.Background(), blockNumber)
+}
+
+// FetchReceipts fetches the receipts for the provided block using the ethclient.Client
+func FetchReceipts(cli *ethclient.Client, block *types.Block) (types.Receipts, error) {
+ receipts := make(types.Receipts, len(block.Transactions()))
+ for i, tx := range block.Transactions() {
+ rct, err := cli.TransactionReceipt(context.Background(), tx.Hash())
+ if err != nil {
+ return nil, err
+ }
+ receipts[i] = rct
+ }
+ return receipts, nil
+}
+
+// WriteBlockRLP writes out the RLP encoding of the block to the provided filePath
+func WriteBlockRLP(filePath string, block *types.Block) error {
+ if filePath == "" {
+ filePath = fmt.Sprintf("%s_%s.rlp", defaultBlockFilePath, block.Number().String())
+ }
+ if _, err := os.Stat(filePath); !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("cannot create file, file (%s) already exists", filePath)
+ }
+ file, err := os.Create(filePath)
+ if err != nil {
+ return fmt.Errorf("unable to create file (%s), err: %v", filePath, err)
+ }
+ fmt.Printf("writing block rlp to file at %s\r\n", filePath)
+ if err := block.EncodeRLP(file); err != nil {
+ return err
+ }
+ return file.Close()
+}
+
+// LoadBlockRLP loads block from the rlp at filePath
+func LoadBlockRLP(filePath string) (*types.Block, error) {
+ blockBytes, err := os.ReadFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+ block := new(types.Block)
+ return block, rlp.DecodeBytes(blockBytes, block)
+}
+
+// LoadReceiptsEncoding loads receipts from the encoding at filePath
+func LoadReceiptsEncoding(filePath string, cap int) (types.Receipts, error) {
+ rctsBytes, err := os.ReadFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+ receipts := new(types.Receipts)
+ return *receipts, rlp.DecodeBytes(rctsBytes, receipts)
+}
+
+// WriteReceiptsEncoding writes out the consensus encoding of the receipts to the file at the provided filePath
+func WriteReceiptsEncoding(filePath string, blockNumber *big.Int, receipts types.Receipts) error {
+ if filePath == "" {
+ filePath = fmt.Sprintf("%s_%s.rlp", defaultReceiptsFilePath, blockNumber.String())
+ }
+ if _, err := os.Stat(filePath); !errors.Is(err, os.ErrNotExist) {
+ return fmt.Errorf("cannot create file, file (%s) already exists", filePath)
+ }
+ file, err := os.Create(filePath)
+ if err != nil {
+ return fmt.Errorf("unable to create file (%s), err: %v", filePath, err)
+ }
+ defer file.Close()
+ fmt.Printf("writing receipts rlp to file at %s\r\n", filePath)
+ return rlp.Encode(file, receipts)
+}
diff --git a/statediff/indexer/shared/test_helpers.go b/statediff/indexer/test_helpers/test_helpers.go
similarity index 59%
rename from statediff/indexer/shared/test_helpers.go
rename to statediff/indexer/test_helpers/test_helpers.go
index d54998cd5..b519d80b5 100644
--- a/statediff/indexer/shared/test_helpers.go
+++ b/statediff/indexer/test_helpers/test_helpers.go
@@ -14,37 +14,20 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see .
-package shared
+package test_helpers
import (
"reflect"
"testing"
-
- "github.com/ipfs/go-cid"
- "github.com/multiformats/go-multihash"
-
- "github.com/ethereum/go-ethereum/statediff/indexer/node"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
)
+// ExpectEqual asserts the provided interfaces are deep equal
func ExpectEqual(t *testing.T, got interface{}, want interface{}) {
if !reflect.DeepEqual(got, want) {
t.Fatalf("Expected: %v\nActual: %v", want, got)
}
}
-// SetupDB is use to setup a db for watcher tests
-func SetupDB() (*postgres.DB, error) {
- uri := postgres.DbConnectionString(postgres.ConnectionParams{
- User: "vdbm",
- Password: "password",
- Hostname: "localhost",
- Name: "vulcanize_public",
- Port: 5432,
- })
- return postgres.NewDB(uri, postgres.ConnectionConfig{}, node.Info{})
-}
-
// ListContainsString used to check if a list of strings contains a particular string
func ListContainsString(sss []string, s string) bool {
for _, str := range sss {
@@ -54,15 +37,3 @@ func ListContainsString(sss []string, s string) bool {
}
return false
}
-
-// TestCID creates a basic CID for testing purposes
-func TestCID(b []byte) cid.Cid {
- pref := cid.Prefix{
- Version: 1,
- Codec: cid.Raw,
- MhType: multihash.KECCAK_256,
- MhLength: -1,
- }
- c, _ := pref.Sum(b)
- return c
-}
diff --git a/statediff/indexer/writer.go b/statediff/indexer/writer.go
deleted file mode 100644
index 62b36ca58..000000000
--- a/statediff/indexer/writer.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// VulcanizeDB
-// Copyright © 2019 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see .
-
-package indexer
-
-import (
- "fmt"
-
- "github.com/jmoiron/sqlx"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
-)
-
-var (
- nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
-)
-
-// PostgresCIDWriter handles processing and writing of indexed IPLD objects to Postgres
-type PostgresCIDWriter struct {
- db *postgres.DB
-}
-
-// NewPostgresCIDWriter creates a new pointer to a Indexer which satisfies the PostgresCIDWriter interface
-func NewPostgresCIDWriter(db *postgres.DB) *PostgresCIDWriter {
- return &PostgresCIDWriter{
- db: db,
- }
-}
-
-func (in *PostgresCIDWriter) upsertHeaderCID(tx *sqlx.Tx, header models.HeaderModel) (int64, error) {
- var headerID int64
- err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
- ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
- RETURNING id`,
- header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot,
- header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.BaseFee).Scan(&headerID)
- if err != nil {
- return 0, fmt.Errorf("error upserting header_cids entry: %v", err)
- }
- indexerMetrics.blocks.Inc(1)
- return headerID, nil
-}
-
-func (in *PostgresCIDWriter) upsertUncleCID(tx *sqlx.Tx, uncle models.UncleModel, headerID int64) error {
- _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
- ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`,
- uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
- if err != nil {
- return fmt.Errorf("error upserting uncle_cids entry: %v", err)
- }
- return nil
-}
-
-func (in *PostgresCIDWriter) upsertTransactionCID(tx *sqlx.Tx, transaction models.TxModel, headerID int64) (int64, error) {
- var txID int64
- err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
- ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9)
- RETURNING id`,
- headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID)
- if err != nil {
- return 0, fmt.Errorf("error upserting transaction_cids entry: %v", err)
- }
- indexerMetrics.transactions.Inc(1)
- return txID, nil
-}
-
-func (in *PostgresCIDWriter) upsertAccessListElement(tx *sqlx.Tx, accessListElement models.AccessListElementModel, txID int64) error {
- _, err := tx.Exec(`INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
- ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)`,
- txID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
- if err != nil {
- return fmt.Errorf("error upserting access_list_element entry: %v", err)
- }
- indexerMetrics.accessListEntries.Inc(1)
- return nil
-}
-
-func (in *PostgresCIDWriter) upsertReceiptCID(tx *sqlx.Tx, rct *models.ReceiptModel, txID int64) (int64, error) {
- var receiptID int64
- err := tx.QueryRowx(`INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
- ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8)
- RETURNING id`,
- txID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID)
- if err != nil {
- return 0, fmt.Errorf("error upserting receipt_cids entry: %w", err)
- }
- indexerMetrics.receipts.Inc(1)
- return receiptID, nil
-}
-
-func (in *PostgresCIDWriter) upsertLogCID(tx *sqlx.Tx, logs []*models.LogsModel, receiptID int64) error {
- for _, log := range logs {
- _, err := tx.Exec(`INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
- ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key ,address, topic0, topic1, topic2, topic3,log_data ) = ($1, $2, $4, $6, $7, $8, $9, $10)`,
- log.LeafCID, log.LeafMhKey, receiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data)
- if err != nil {
- return fmt.Errorf("error upserting logs entry: %w", err)
- }
- indexerMetrics.logs.Inc(1)
- }
- return nil
-}
-
-func (in *PostgresCIDWriter) upsertStateCID(tx *sqlx.Tx, stateNode models.StateNodeModel, headerID int64) (int64, error) {
- var stateID int64
- var stateKey string
- if stateNode.StateKey != nullHash.String() {
- stateKey = stateNode.StateKey
- }
- err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
- ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
- RETURNING id`,
- headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
- if err != nil {
- return 0, fmt.Errorf("error upserting state_cids entry: %v", err)
- }
- return stateID, nil
-}
-
-func (in *PostgresCIDWriter) upsertStateAccount(tx *sqlx.Tx, stateAccount models.StateAccountModel, stateID int64) error {
- _, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`,
- stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
- if err != nil {
- return fmt.Errorf("error upserting state_accounts entry: %v", err)
- }
- return nil
-}
-
-func (in *PostgresCIDWriter) upsertStorageCID(tx *sqlx.Tx, storageCID models.StorageNodeModel, stateID int64) error {
- var storageKey string
- if storageCID.StorageKey != nullHash.String() {
- storageKey = storageCID.StorageKey
- }
- _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
- ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`,
- stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
- if err != nil {
- return fmt.Errorf("error upserting storage_cids entry: %v", err)
- }
- return nil
-}
diff --git a/statediff/mainnet_tests/builder_test.go b/statediff/mainnet_tests/builder_test.go
index 859f00489..d838302e0 100644
--- a/statediff/mainnet_tests/builder_test.go
+++ b/statediff/mainnet_tests/builder_test.go
@@ -37,7 +37,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
- "github.com/ethereum/go-ethereum/statediff/testhelpers"
+ "github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
@@ -53,8 +53,8 @@ var (
block1CoinbaseAccount, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(5000000000000000000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
block1CoinbaseLeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("38251692195afc818c92b485fcb8a4691af89cbe5a2ab557b83a4261be2a9a"),
@@ -125,8 +125,8 @@ var (
block2CoinbaseAccount, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: big.NewInt(5000000000000000000),
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
block2CoinbaseLeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("20679cbcf198c1741a6f4e4473845659a30caa8b26f8d37a0be2e2bc0d8892"),
@@ -137,8 +137,8 @@ var (
block2MovedPremineAccount, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: block2MovedPremineBalance,
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
block2MovedPremineLeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("20f2e24db7943eab4415f99e109698863b0fecca1cf9ffc500f38cefbbe29e"),
@@ -231,8 +231,8 @@ var (
block3CoinbaseAccount, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: blcok3CoinbaseBalance,
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
block3CoinbaseLeafNode, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3a174f00e64521a535f35e67c1aa241951c791639b2f3d060f49c5d9fa8b9e"),
@@ -244,8 +244,8 @@ var (
block3MovedPremineAccount1, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: block3MovedPremineBalance1,
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
block3MovedPremineLeafNode1, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("3ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190"), // ce573ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190
@@ -257,8 +257,8 @@ var (
block3MovedPremineAccount2, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: 0,
Balance: block3MovedPremineBalance2,
- CodeHash: testhelpers.NullCodeHash.Bytes(),
- Root: testhelpers.EmptyContractRoot,
+ CodeHash: test_helpers.NullCodeHash.Bytes(),
+ Root: test_helpers.EmptyContractRoot,
})
block3MovedPremineLeafNode2, _ = rlp.EncodeToBytes([]interface{}{
common.Hex2Bytes("33bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012"), // ce5783bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012
@@ -480,7 +480,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
var tests = []struct {
name string
startingArguments statediff.Args
- expected *statediff.StateObject
+ expected *sdtypes.StateObject
}{
// note that block0 (genesis) has over 1000 nodes due to the pre-allocation for the crowd-sale
// it is not feasible to write a unit test of that size at this time
@@ -493,7 +493,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
},
- &statediff.StateObject{
+ &sdtypes.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
Nodes: []sdtypes.StateNode{
@@ -536,7 +536,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
},
- &statediff.StateObject{
+ &sdtypes.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
Nodes: []sdtypes.StateNode{
@@ -594,7 +594,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
},
- &statediff.StateObject{
+ &sdtypes.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
Nodes: []sdtypes.StateNode{
diff --git a/statediff/metrics.go b/statediff/metrics.go
index 7e7d6e328..afc80e40e 100644
--- a/statediff/metrics.go
+++ b/statediff/metrics.go
@@ -1,3 +1,19 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
package statediff
import (
diff --git a/statediff/payload.go b/statediff/payload.go
new file mode 100644
index 000000000..233141278
--- /dev/null
+++ b/statediff/payload.go
@@ -0,0 +1,57 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package statediff
+
+import (
+ "encoding/json"
+ "math/big"
+)
+
+// Payload packages the data to send to statediff subscriptions
+type Payload struct {
+ BlockRlp []byte `json:"blockRlp"`
+ TotalDifficulty *big.Int `json:"totalDifficulty"`
+ ReceiptsRlp []byte `json:"receiptsRlp"`
+ StateObjectRlp []byte `json:"stateObjectRlp" gencodec:"required"`
+
+ encoded []byte
+ err error
+}
+
+func (sd *Payload) ensureEncoded() {
+ if sd.encoded == nil && sd.err == nil {
+ sd.encoded, sd.err = json.Marshal(sd)
+ }
+}
+
+// Length to implement Encoder interface for Payload
+func (sd *Payload) Length() int {
+ sd.ensureEncoded()
+ return len(sd.encoded)
+}
+
+// Encode to implement Encoder interface for Payload
+func (sd *Payload) Encode() ([]byte, error) {
+ sd.ensureEncoded()
+ return sd.encoded, sd.err
+}
+
+// Subscription struct holds our subscription channels
+type Subscription struct {
+ PayloadChan chan<- Payload
+ QuitChan chan<- bool
+}
diff --git a/statediff/service.go b/statediff/service.go
index 1154e4750..5334b4b31 100644
--- a/statediff/service.go
+++ b/statediff/service.go
@@ -41,9 +41,9 @@ import (
"github.com/ethereum/go-ethereum/trie"
ind "github.com/ethereum/go-ethereum/statediff/indexer"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
- "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
- . "github.com/ethereum/go-ethereum/statediff/types"
+ types2 "github.com/ethereum/go-ethereum/statediff/types"
)
const chainEventChanSize = 20000
@@ -72,41 +72,32 @@ type blockChain interface {
// IService is the state-diffing service interface
type IService interface {
- // Start() and Stop()
+ // Lifecycle Start() and Stop() methods
node.Lifecycle
- // Method to getting API(s) for this service
+ // APIs method for getting API(s) for this service
APIs() []rpc.API
- // Main event loop for processing state diffs
+ // Loop is the main event loop for processing state diffs
Loop(chainEventCh chan core.ChainEvent)
- // Method to subscribe to receive state diff processing output
+ // Subscribe method to subscribe to receive state diff processing output
Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params Params)
- // Method to unsubscribe from state diff processing
+ // Unsubscribe method to unsubscribe from state diff processing
Unsubscribe(id rpc.ID) error
- // Method to get state diff object at specific block
+ // StateDiffAt method to get state diff object at specific block
StateDiffAt(blockNumber uint64, params Params) (*Payload, error)
- // Method to get state diff object at specific block
+ // StateDiffFor method to get state diff object at specific block
StateDiffFor(blockHash common.Hash, params Params) (*Payload, error)
- // Method to get state trie object at specific block
+ // StateTrieAt method to get state trie object at specific block
StateTrieAt(blockNumber uint64, params Params) (*Payload, error)
- // Method to stream out all code and codehash pairs
- StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- CodeAndCodeHash, quitChan chan<- bool)
- // Method to write state diff object directly to DB
+ // StreamCodeAndCodeHash method to stream out all code and codehash pairs
+ StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool)
+ // WriteStateDiffAt method to write state diff object directly to DB
WriteStateDiffAt(blockNumber uint64, params Params) error
- // Method to write state diff object directly to DB
+ // WriteStateDiffFor method to write state diff object directly to DB
WriteStateDiffFor(blockHash common.Hash, params Params) error
- // Event loop for progressively processing and writing diffs directly to DB
+ // WriteLoop event loop for progressively processing and writing diffs directly to DB
WriteLoop(chainEventCh chan core.ChainEvent)
}
-// Wraps consructor parameters
-type ServiceParams struct {
- DBParams *DBParams
- // Whether to enable writing state diffs directly to track blochain head
- EnableWriteLoop bool
- // Size of the worker pool
- NumWorkers uint
-}
-
// Service is the underlying struct for the state diffing service
type Service struct {
// Used to sync access to the Subscriptions
@@ -122,26 +113,26 @@ type Service struct {
// A mapping of subscription params rlp hash to the corresponding subscription params
SubscriptionTypes map[common.Hash]Params
// Cache the last block so that we can avoid having to lookup the next block's parent
- BlockCache blockCache
+ BlockCache BlockCache
// Whether or not we have any subscribers; only if we do, do we processes state diffs
subscribers int32
// Interface for publishing statediffs as PG-IPLD objects
- indexer ind.Indexer
+ indexer interfaces.StateDiffIndexer
// Whether to enable writing state diffs directly to track blochain head
enableWriteLoop bool
// Size of the worker pool
numWorkers uint
}
-// Wrap the cached last block for safe access from different service loops
-type blockCache struct {
+// BlockCache caches the last block for safe access from different service loops
+type BlockCache struct {
sync.Mutex
blocks map[common.Hash]*types.Block
maxSize uint
}
-func NewBlockCache(max uint) blockCache {
- return blockCache{
+func NewBlockCache(max uint) BlockCache {
+ return BlockCache{
blocks: make(map[common.Hash]*types.Block),
maxSize: max,
}
@@ -149,29 +140,23 @@ func NewBlockCache(max uint) blockCache {
// New creates a new statediff.Service
// func New(stack *node.Node, ethServ *eth.Ethereum, dbParams *DBParams, enableWriteLoop bool) error {
-func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params ServiceParams) error {
+func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params Config) error {
blockChain := ethServ.BlockChain()
- var indexer ind.Indexer
+ var indexer interfaces.StateDiffIndexer
quitCh := make(chan bool)
- if params.DBParams != nil {
+ if params.IndexerConfig != nil {
info := nodeinfo.Info{
GenesisBlock: blockChain.Genesis().Hash().Hex(),
NetworkID: strconv.FormatUint(cfg.NetworkId, 10),
ChainID: blockChain.Config().ChainID.Uint64(),
- ID: params.DBParams.ID,
- ClientName: params.DBParams.ClientName,
+ ID: params.ID,
+ ClientName: params.ClientName,
}
-
- // TODO: pass max idle, open, lifetime?
- db, err := postgres.NewDB(params.DBParams.ConnectionURL, postgres.ConnectionConfig{}, info)
+ var err error
+ indexer, err = ind.NewStateDiffIndexer(params.Context, blockChain.Config(), info, params.IndexerConfig)
if err != nil {
return err
}
- indexer, err = ind.NewStateDiffIndexer(blockChain.Config(), db)
- if err != nil {
- return err
- }
-
indexer.ReportDBMetrics(10*time.Second, quitCh)
}
workers := params.NumWorkers
@@ -214,7 +199,7 @@ func (sds *Service) APIs() []rpc.API {
// Return the parent block of currentBlock, using the cached block if available;
// and cache the passed block
-func (lbc *blockCache) getParentBlock(currentBlock *types.Block, bc blockChain) *types.Block {
+func (lbc *BlockCache) getParentBlock(currentBlock *types.Block, bc blockChain) *types.Block {
lbc.Lock()
parentHash := currentBlock.ParentHash()
var parentBlock *types.Block
@@ -233,7 +218,6 @@ func (lbc *blockCache) getParentBlock(currentBlock *types.Block, bc blockChain)
type workerParams struct {
chainEventCh <-chan core.ChainEvent
- errCh <-chan error
wg *sync.WaitGroup
id uint
}
@@ -254,14 +238,26 @@ func (sds *Service) WriteLoop(chainEventCh chan core.ChainEvent) {
statediffMetrics.lastEventHeight.Update(int64(chainEvent.Block.Number().Uint64()))
statediffMetrics.writeLoopChannelLen.Update(int64(len(chainEventCh)))
chainEventFwd <- chainEvent
+ case err := <-errCh:
+ log.Error("Error from chain event subscription", "error", err)
+ close(sds.QuitChan)
+ log.Info("Quitting the statediffing writing loop")
+ if err := sds.indexer.Close(); err != nil {
+ log.Error("Error closing indexer", "err", err)
+ }
+ return
case <-sds.QuitChan:
+ log.Info("Quitting the statediffing writing loop")
+ if err := sds.indexer.Close(); err != nil {
+ log.Error("Error closing indexer", "err", err)
+ }
return
}
}
}()
wg.Add(int(sds.numWorkers))
for worker := uint(0); worker < sds.numWorkers; worker++ {
- params := workerParams{chainEventCh: chainEventFwd, errCh: errCh, wg: &wg, id: worker}
+ params := workerParams{chainEventCh: chainEventFwd, wg: &wg, id: worker}
go sds.writeLoopWorker(params)
}
wg.Wait()
@@ -306,13 +302,8 @@ func (sds *Service) writeLoopWorker(params workerParams) {
}
// TODO: how to handle with concurrent workers
statediffMetrics.lastStatediffHeight.Update(int64(currentBlock.Number().Uint64()))
- case err := <-params.errCh:
- log.Warn("Error from chain event subscription", "error", err, "worker", params.id)
- sds.close()
- return
case <-sds.QuitChan:
log.Info("Quitting the statediff writing process", "worker", params.id)
- sds.close()
return
}
}
@@ -320,6 +311,7 @@ func (sds *Service) writeLoopWorker(params workerParams) {
// Loop is the main processing method
func (sds *Service) Loop(chainEventCh chan core.ChainEvent) {
+ log.Info("Starting statediff listening loop")
chainEventSub := sds.BlockChain.SubscribeChainEvent(chainEventCh)
defer chainEventSub.Unsubscribe()
errCh := chainEventSub.Err()
@@ -350,11 +342,13 @@ func (sds *Service) Loop(chainEventCh chan core.ChainEvent) {
sds.streamStateDiff(currentBlock, parentBlock.Root())
case err := <-errCh:
- log.Warn("Error from chain event subscription", "error", err)
+ log.Error("Error from chain event subscription", "error", err)
+ close(sds.QuitChan)
+ log.Info("Quitting the statediffing listening loop")
sds.close()
return
case <-sds.QuitChan:
- log.Info("Quitting the statediffing process")
+ log.Info("Quitting the statediffing listening loop")
sds.close()
return
}
@@ -590,7 +584,7 @@ func sendNonBlockingQuit(id rpc.ID, sub Subscription) {
}
// StreamCodeAndCodeHash subscription method for extracting all the codehash=>code mappings that exist in the trie at the provided height
-func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- CodeAndCodeHash, quitChan chan<- bool) {
+func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool) {
current := sds.BlockChain.GetBlockByNumber(blockNumber)
log.Info("sending code and codehash", "block height", blockNumber)
currentTrie, err := sds.BlockChain.StateCache().OpenTrie(current.Root())
@@ -620,7 +614,7 @@ func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- Cod
log.Error("error collecting contract code", "err", err)
return
}
- outChan <- CodeAndCodeHash{
+ outChan <- types2.CodeAndCodeHash{
Hash: codeHash,
Code: code,
}
@@ -660,7 +654,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
var totalDifficulty *big.Int
var receipts types.Receipts
var err error
- var tx *ind.BlockTx
+ var tx interfaces.Batch
if params.IncludeTD {
totalDifficulty = sds.BlockChain.GetTd(block.Hash(), block.NumberU64())
}
@@ -672,14 +666,18 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
return err
}
// defer handling of commit/rollback for any return case
- defer tx.Close(err)
- output := func(node StateNode) error {
- return sds.indexer.PushStateNode(tx, node)
+ defer func() {
+ if err := tx.Submit(err); err != nil {
+ log.Error("batch transaction submission failed", "err", err)
+ }
+ }()
+ output := func(node types2.StateNode) error {
+ return sds.indexer.PushStateNode(tx, node, block.Hash().String())
}
- codeOutput := func(c CodeAndCodeHash) error {
+ codeOutput := func(c types2.CodeAndCodeHash) error {
return sds.indexer.PushCodeAndCodeHash(tx, c)
}
- err = sds.Builder.WriteStateDiffObject(StateRoots{
+ err = sds.Builder.WriteStateDiffObject(types2.StateRoots{
NewStateRoot: block.Root(),
OldStateRoot: parentRoot,
}, params, output, codeOutput)
diff --git a/statediff/service_test.go b/statediff/service_test.go
index ca9a483a5..a17f89217 100644
--- a/statediff/service_test.go
+++ b/statediff/service_test.go
@@ -24,6 +24,8 @@ import (
"sync"
"testing"
+ types2 "github.com/ethereum/go-ethereum/statediff/types"
+
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/common"
@@ -32,7 +34,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
statediff "github.com/ethereum/go-ethereum/statediff"
- "github.com/ethereum/go-ethereum/statediff/testhelpers/mocks"
+ "github.com/ethereum/go-ethereum/statediff/test_helpers/mocks"
)
func TestServiceLoop(t *testing.T) {
@@ -218,7 +220,7 @@ func TestGetStateDiffAt(t *testing.T) {
}
func testErrorInStateDiffAt(t *testing.T) {
- mockStateDiff := statediff.StateObject{
+ mockStateDiff := types2.StateObject{
BlockNumber: testBlock1.Number(),
BlockHash: testBlock1.Hash(),
}
diff --git a/statediff/test_helpers/constant.go b/statediff/test_helpers/constant.go
new file mode 100644
index 000000000..ba591ebb4
--- /dev/null
+++ b/statediff/test_helpers/constant.go
@@ -0,0 +1,33 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package test_helpers
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/params"
+)
+
+var (
+ BalanceChange1000 = int64(1000)
+ BalanceChange10000 = int64(10000)
+ BalanceChange1Ether = int64(params.Ether)
+ Block1Account1Balance = big.NewInt(BalanceChange10000)
+ Block2Account2Balance = big.NewInt(21000000000000)
+ GasFees = int64(params.GWei) * int64(params.TxGas)
+ ContractGasLimit = uint64(1000000)
+)
diff --git a/statediff/testhelpers/helpers.go b/statediff/test_helpers/helpers.go
similarity index 99%
rename from statediff/testhelpers/helpers.go
rename to statediff/test_helpers/helpers.go
index 168d770af..8373f7537 100644
--- a/statediff/testhelpers/helpers.go
+++ b/statediff/test_helpers/helpers.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package testhelpers
+package test_helpers
import (
"math/big"
diff --git a/statediff/testhelpers/mocks/blockchain.go b/statediff/test_helpers/mocks/blockchain.go
similarity index 100%
rename from statediff/testhelpers/mocks/blockchain.go
rename to statediff/test_helpers/mocks/blockchain.go
diff --git a/statediff/testhelpers/mocks/builder.go b/statediff/test_helpers/mocks/builder.go
similarity index 80%
rename from statediff/testhelpers/mocks/builder.go
rename to statediff/test_helpers/mocks/builder.go
index ff9faf3ec..e2452301a 100644
--- a/statediff/testhelpers/mocks/builder.go
+++ b/statediff/test_helpers/mocks/builder.go
@@ -26,15 +26,15 @@ import (
type Builder struct {
Args statediff.Args
Params statediff.Params
- StateRoots statediff.StateRoots
- stateDiff statediff.StateObject
+ StateRoots sdtypes.StateRoots
+ stateDiff sdtypes.StateObject
block *types.Block
- stateTrie statediff.StateObject
+ stateTrie sdtypes.StateObject
builderError error
}
// BuildStateDiffObject mock method
-func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statediff.Params) (statediff.StateObject, error) {
+func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statediff.Params) (sdtypes.StateObject, error) {
builder.Args = args
builder.Params = params
@@ -42,7 +42,7 @@ func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statedi
}
// BuildStateDiffObject mock method
-func (builder *Builder) WriteStateDiffObject(args statediff.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
+func (builder *Builder) WriteStateDiffObject(args sdtypes.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
builder.StateRoots = args
builder.Params = params
@@ -50,14 +50,14 @@ func (builder *Builder) WriteStateDiffObject(args statediff.StateRoots, params s
}
// BuildStateTrieObject mock method
-func (builder *Builder) BuildStateTrieObject(block *types.Block) (statediff.StateObject, error) {
+func (builder *Builder) BuildStateTrieObject(block *types.Block) (sdtypes.StateObject, error) {
builder.block = block
return builder.stateTrie, builder.builderError
}
// SetStateDiffToBuild mock method
-func (builder *Builder) SetStateDiffToBuild(stateDiff statediff.StateObject) {
+func (builder *Builder) SetStateDiffToBuild(stateDiff sdtypes.StateObject) {
builder.stateDiff = stateDiff
}
diff --git a/statediff/testhelpers/mocks/service.go b/statediff/test_helpers/mocks/service.go
similarity index 100%
rename from statediff/testhelpers/mocks/service.go
rename to statediff/test_helpers/mocks/service.go
diff --git a/statediff/testhelpers/mocks/service_test.go b/statediff/test_helpers/mocks/service_test.go
similarity index 79%
rename from statediff/testhelpers/mocks/service_test.go
rename to statediff/test_helpers/mocks/service_test.go
index 8c1fd49cf..dde784316 100644
--- a/statediff/testhelpers/mocks/service_test.go
+++ b/statediff/test_helpers/mocks/service_test.go
@@ -24,20 +24,21 @@ import (
"sort"
"sync"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
- "github.com/ethereum/go-ethereum/statediff/testhelpers"
+ "github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
var (
emptyStorage = make([]sdtypes.StorageNode, 0)
block0, block1 *types.Block
- minerLeafKey = testhelpers.AddressToLeafKey(common.HexToAddress("0x0"))
+ minerLeafKey = test_helpers.AddressToLeafKey(common.HexToAddress("0x0"))
account1, _ = rlp.EncodeToBytes(types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(10000),
@@ -90,9 +91,9 @@ func TestAPI(t *testing.T) {
}
func testSubscriptionAPI(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(1, testhelpers.Genesis, testhelpers.TestChainGen)
+ blocks, chain := test_helpers.MakeChain(1, test_helpers.Genesis, test_helpers.TestChainGen)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
expectedBlockRlp, _ := rlp.EncodeToBytes(block1)
mockReceipt := &types.Receipt{
@@ -100,7 +101,7 @@ func testSubscriptionAPI(t *testing.T) {
BlockHash: block1.Hash(),
}
expectedReceiptBytes, _ := rlp.EncodeToBytes(types.Receipts{mockReceipt})
- expectedStateDiff := statediff.StateObject{
+ expectedStateDiff := sdtypes.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
Nodes: []sdtypes.StateNode{
@@ -114,14 +115,14 @@ func testSubscriptionAPI(t *testing.T) {
{
Path: []byte{'\x0e'},
NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountLeafNode,
StorageNodes: emptyStorage,
},
@@ -150,35 +151,41 @@ func testSubscriptionAPI(t *testing.T) {
id := rpc.NewID()
payloadChan := make(chan statediff.Payload)
quitChan := make(chan bool)
+	wg := new(sync.WaitGroup)
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+ sort.Slice(expectedStateDiffBytes, func(i, j int) bool { return expectedStateDiffBytes[i] < expectedStateDiffBytes[j] })
+ select {
+ case payload := <-payloadChan:
+ if !bytes.Equal(payload.BlockRlp, expectedBlockRlp) {
+ t.Errorf("payload does not have expected block\r\nactual block rlp: %v\r\nexpected block rlp: %v", payload.BlockRlp, expectedBlockRlp)
+ }
+ sort.Slice(payload.StateObjectRlp, func(i, j int) bool { return payload.StateObjectRlp[i] < payload.StateObjectRlp[j] })
+ if !bytes.Equal(payload.StateObjectRlp, expectedStateDiffBytes) {
+ t.Errorf("payload does not have expected state diff\r\nactual state diff rlp: %v\r\nexpected state diff rlp: %v", payload.StateObjectRlp, expectedStateDiffBytes)
+ }
+ if !bytes.Equal(expectedReceiptBytes, payload.ReceiptsRlp) {
+ t.Errorf("payload does not have expected receipts\r\nactual receipt rlp: %v\r\nexpected receipt rlp: %v", payload.ReceiptsRlp, expectedReceiptBytes)
+ }
+ if !bytes.Equal(payload.TotalDifficulty.Bytes(), mockTotalDifficulty.Bytes()) {
+ t.Errorf("payload does not have expected total difficulty\r\nactual td: %d\r\nexpected td: %d", payload.TotalDifficulty.Int64(), mockTotalDifficulty.Int64())
+ }
+ case <-quitChan:
+ t.Errorf("channel quit before delivering payload")
+ }
+ }()
+	time.Sleep(100 * time.Millisecond)
mockService.Subscribe(id, payloadChan, quitChan, params)
blockChan <- block1
parentBlockChain <- block0
-
- sort.Slice(expectedStateDiffBytes, func(i, j int) bool { return expectedStateDiffBytes[i] < expectedStateDiffBytes[j] })
- select {
- case payload := <-payloadChan:
- if !bytes.Equal(payload.BlockRlp, expectedBlockRlp) {
- t.Errorf("payload does not have expected block\r\nactual block rlp: %v\r\nexpected block rlp: %v", payload.BlockRlp, expectedBlockRlp)
- }
- sort.Slice(payload.StateObjectRlp, func(i, j int) bool { return payload.StateObjectRlp[i] < payload.StateObjectRlp[j] })
- if !bytes.Equal(payload.StateObjectRlp, expectedStateDiffBytes) {
- t.Errorf("payload does not have expected state diff\r\nactual state diff rlp: %v\r\nexpected state diff rlp: %v", payload.StateObjectRlp, expectedStateDiffBytes)
- }
- if !bytes.Equal(expectedReceiptBytes, payload.ReceiptsRlp) {
- t.Errorf("payload does not have expected receipts\r\nactual receipt rlp: %v\r\nexpected receipt rlp: %v", payload.ReceiptsRlp, expectedReceiptBytes)
- }
- if !bytes.Equal(payload.TotalDifficulty.Bytes(), mockTotalDifficulty.Bytes()) {
- t.Errorf("payload does not have expected total difficulty\r\nactual td: %d\r\nexpected td: %d", payload.TotalDifficulty.Int64(), mockTotalDifficulty.Int64())
- }
- case <-quitChan:
- t.Errorf("channel quit before delivering payload")
- }
+ wg.Wait()
}
func testHTTPAPI(t *testing.T) {
- blocks, chain := testhelpers.MakeChain(1, testhelpers.Genesis, testhelpers.TestChainGen)
+ blocks, chain := test_helpers.MakeChain(1, test_helpers.Genesis, test_helpers.TestChainGen)
defer chain.Stop()
- block0 = testhelpers.Genesis
+ block0 = test_helpers.Genesis
block1 = blocks[0]
expectedBlockRlp, _ := rlp.EncodeToBytes(block1)
mockReceipt := &types.Receipt{
@@ -186,7 +193,7 @@ func testHTTPAPI(t *testing.T) {
BlockHash: block1.Hash(),
}
expectedReceiptBytes, _ := rlp.EncodeToBytes(types.Receipts{mockReceipt})
- expectedStateDiff := statediff.StateObject{
+ expectedStateDiff := sdtypes.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
Nodes: []sdtypes.StateNode{
@@ -200,14 +207,14 @@ func testHTTPAPI(t *testing.T) {
{
Path: []byte{'\x0e'},
NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.Account1LeafKey,
+ LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1LeafNode,
StorageNodes: emptyStorage,
},
{
Path: []byte{'\x00'},
NodeType: sdtypes.Leaf,
- LeafKey: testhelpers.BankLeafKey,
+ LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountLeafNode,
StorageNodes: emptyStorage,
},
diff --git a/statediff/testhelpers/test_data.go b/statediff/test_helpers/test_data.go
similarity index 99%
rename from statediff/testhelpers/test_data.go
rename to statediff/test_helpers/test_data.go
index 73def50a4..e5b021364 100644
--- a/statediff/testhelpers/test_data.go
+++ b/statediff/test_helpers/test_data.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package testhelpers
+package test_helpers
import (
"math/big"
diff --git a/statediff/testhelpers/constant.go b/statediff/testhelpers/constant.go
deleted file mode 100644
index 9788549e6..000000000
--- a/statediff/testhelpers/constant.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package testhelpers
-
-import (
- "math/big"
-
- "github.com/ethereum/go-ethereum/params"
-)
-
-var (
- BalanceChange1000 = int64(1000)
- BalanceChange10000 = int64(10000)
- BalanceChange1Ether = int64(params.Ether)
- Block1Account1Balance = big.NewInt(BalanceChange10000)
- Block2Account2Balance = big.NewInt(21000000000000)
- GasFees = int64(params.GWei) * int64(params.TxGas)
- ContractGasLimit = uint64(1000000)
-)
diff --git a/statediff/trie/node.go b/statediff/trie/node.go
deleted file mode 100644
index 6ffc2538c..000000000
--- a/statediff/trie/node.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package trie
-
-import (
- "fmt"
-
- "github.com/ethereum/go-ethereum/rlp"
- sdtypes "github.com/ethereum/go-ethereum/statediff/types"
- "github.com/ethereum/go-ethereum/trie"
-)
-
-// CheckKeyType checks what type of key we have
-func CheckKeyType(elements []interface{}) (sdtypes.NodeType, error) {
- if len(elements) > 2 {
- return sdtypes.Branch, nil
- }
- if len(elements) < 2 {
- return sdtypes.Unknown, fmt.Errorf("node cannot be less than two elements in length")
- }
- switch elements[0].([]byte)[0] / 16 {
- case '\x00':
- return sdtypes.Extension, nil
- case '\x01':
- return sdtypes.Extension, nil
- case '\x02':
- return sdtypes.Leaf, nil
- case '\x03':
- return sdtypes.Leaf, nil
- default:
- return sdtypes.Unknown, fmt.Errorf("unknown hex prefix")
- }
-}
-
-// ResolveNode return the state diff node pointed by the iterator.
-func ResolveNode(it trie.NodeIterator, trieDB *trie.Database) (sdtypes.StateNode, []interface{}, error) {
- nodePath := make([]byte, len(it.Path()))
- copy(nodePath, it.Path())
- node, err := trieDB.Node(it.Hash())
- if err != nil {
- return sdtypes.StateNode{}, nil, err
- }
- var nodeElements []interface{}
- if err = rlp.DecodeBytes(node, &nodeElements); err != nil {
- return sdtypes.StateNode{}, nil, err
- }
- ty, err := CheckKeyType(nodeElements)
- if err != nil {
- return sdtypes.StateNode{}, nil, err
- }
- return sdtypes.StateNode{
- NodeType: ty,
- Path: nodePath,
- NodeValue: node,
- }, nodeElements, nil
-}
diff --git a/statediff/helpers.go b/statediff/trie_helpers/helpers.go
similarity index 53%
rename from statediff/helpers.go
rename to statediff/trie_helpers/helpers.go
index eb5060c51..ce3365f2c 100644
--- a/statediff/helpers.go
+++ b/statediff/trie_helpers/helpers.go
@@ -17,14 +17,65 @@
// Contains a batch of utility type declarations used by the tests. As the node
// operates on unique types, a lot of them are needed to check various features.
-package statediff
+package trie_helpers
import (
+ "fmt"
"sort"
"strings"
+
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/statediff/types"
+ "github.com/ethereum/go-ethereum/trie"
)
-func sortKeys(data AccountMap) []string {
+// CheckKeyType checks what type of key we have
+func CheckKeyType(elements []interface{}) (types.NodeType, error) {
+ if len(elements) > 2 {
+ return types.Branch, nil
+ }
+ if len(elements) < 2 {
+ return types.Unknown, fmt.Errorf("node cannot be less than two elements in length")
+ }
+ switch elements[0].([]byte)[0] / 16 {
+ case '\x00':
+ return types.Extension, nil
+ case '\x01':
+ return types.Extension, nil
+ case '\x02':
+ return types.Leaf, nil
+ case '\x03':
+ return types.Leaf, nil
+ default:
+ return types.Unknown, fmt.Errorf("unknown hex prefix")
+ }
+}
+
+// ResolveNode return the state diff node pointed by the iterator.
+func ResolveNode(it trie.NodeIterator, trieDB *trie.Database) (types.StateNode, []interface{}, error) {
+ nodePath := make([]byte, len(it.Path()))
+ copy(nodePath, it.Path())
+ node, err := trieDB.Node(it.Hash())
+ if err != nil {
+ return types.StateNode{}, nil, err
+ }
+ var nodeElements []interface{}
+ if err = rlp.DecodeBytes(node, &nodeElements); err != nil {
+ return types.StateNode{}, nil, err
+ }
+ ty, err := CheckKeyType(nodeElements)
+ if err != nil {
+ return types.StateNode{}, nil, err
+ }
+ return types.StateNode{
+ NodeType: ty,
+ Path: nodePath,
+ NodeValue: node,
+ }, nodeElements, nil
+}
+
+// SortKeys sorts the keys in the account map
+func SortKeys(data types.AccountMap) []string {
keys := make([]string, 0, len(data))
for key := range data {
keys = append(keys, key)
@@ -34,10 +85,10 @@ func sortKeys(data AccountMap) []string {
return keys
}
-// findIntersection finds the set of strings from both arrays that are equivalent
+// FindIntersection finds the set of strings from both arrays that are equivalent
// a and b must first be sorted
// this is used to find which keys have been both "deleted" and "created" i.e. they were updated
-func findIntersection(a, b []string) []string {
+func FindIntersection(a, b []string) []string {
lenA := len(a)
lenB := len(b)
iOfA, iOfB := 0, 0
diff --git a/statediff/types.go b/statediff/types.go
deleted file mode 100644
index ef8256041..000000000
--- a/statediff/types.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Contains a batch of utility type declarations used by the tests. As the node
-// operates on unique types, a lot of them are needed to check various features.
-
-package statediff
-
-import (
- "encoding/json"
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- ctypes "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/statediff/types"
-)
-
-// Subscription struct holds our subscription channels
-type Subscription struct {
- PayloadChan chan<- Payload
- QuitChan chan<- bool
-}
-
-// DBParams holds params for Postgres db connection
-type DBParams struct {
- ConnectionURL string
- ID string
- ClientName string
-}
-
-// Params is used to carry in parameters from subscribing/requesting clients configuration
-type Params struct {
- IntermediateStateNodes bool
- IntermediateStorageNodes bool
- IncludeBlock bool
- IncludeReceipts bool
- IncludeTD bool
- IncludeCode bool
- WatchedAddresses []common.Address
- WatchedStorageSlots []common.Hash
-}
-
-// Args bundles the arguments for the state diff builder
-type Args struct {
- OldStateRoot, NewStateRoot, BlockHash common.Hash
- BlockNumber *big.Int
-}
-
-type StateRoots struct {
- OldStateRoot, NewStateRoot common.Hash
-}
-
-// Payload packages the data to send to statediff subscriptions
-type Payload struct {
- BlockRlp []byte `json:"blockRlp"`
- TotalDifficulty *big.Int `json:"totalDifficulty"`
- ReceiptsRlp []byte `json:"receiptsRlp"`
- StateObjectRlp []byte `json:"stateObjectRlp" gencodec:"required"`
-
- encoded []byte
- err error
-}
-
-func (sd *Payload) ensureEncoded() {
- if sd.encoded == nil && sd.err == nil {
- sd.encoded, sd.err = json.Marshal(sd)
- }
-}
-
-// Length to implement Encoder interface for Payload
-func (sd *Payload) Length() int {
- sd.ensureEncoded()
- return len(sd.encoded)
-}
-
-// Encode to implement Encoder interface for Payload
-func (sd *Payload) Encode() ([]byte, error) {
- sd.ensureEncoded()
- return sd.encoded, sd.err
-}
-
-// StateObject is the final output structure from the builder
-type StateObject struct {
- BlockNumber *big.Int `json:"blockNumber" gencodec:"required"`
- BlockHash common.Hash `json:"blockHash" gencodec:"required"`
- Nodes []types.StateNode `json:"nodes" gencodec:"required"`
- CodeAndCodeHashes []types.CodeAndCodeHash `json:"codeMapping"`
-}
-
-// AccountMap is a mapping of hex encoded path => account wrapper
-type AccountMap map[string]accountWrapper
-
-// accountWrapper is used to temporary associate the unpacked node with its raw values
-type accountWrapper struct {
- Account *ctypes.StateAccount
- NodeType types.NodeType
- Path []byte
- NodeValue []byte
- LeafKey []byte
-}
diff --git a/statediff/types/types.go b/statediff/types/types.go
index 56babfb5b..36008a784 100644
--- a/statediff/types/types.go
+++ b/statediff/types/types.go
@@ -14,12 +14,39 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-// Contains a batch of utility type declarations used by the tests. As the node
-// operates on unique types, a lot of them are needed to check various features.
-
package types
-import "github.com/ethereum/go-ethereum/common"
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// StateRoots holds the state roots required for generating a state diff
+type StateRoots struct {
+ OldStateRoot, NewStateRoot common.Hash
+}
+
+// StateObject is the final output structure from the builder
+type StateObject struct {
+ BlockNumber *big.Int `json:"blockNumber" gencodec:"required"`
+ BlockHash common.Hash `json:"blockHash" gencodec:"required"`
+ Nodes []StateNode `json:"nodes" gencodec:"required"`
+ CodeAndCodeHashes []CodeAndCodeHash `json:"codeMapping"`
+}
+
+// AccountMap is a mapping of hex encoded path => account wrapper
+type AccountMap map[string]AccountWrapper
+
+// AccountWrapper is used to temporary associate the unpacked node with its raw values
+type AccountWrapper struct {
+ Account *types.StateAccount
+ NodeType NodeType
+ Path []byte
+ NodeValue []byte
+ LeafKey []byte
+}
// NodeType for explicitly setting type of node
type NodeType string