diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 1a8ee4025..cfc43d2ab 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -23,8 +23,11 @@ import ( "math/big" "os" "reflect" + "time" "unicode" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/statediff" "gopkg.in/urfave/cli.v1" @@ -182,27 +185,48 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { } if ctx.GlobalBool(utils.StateDiffFlag.Name) { - var dbParams *statediff.DBParams - if ctx.GlobalIsSet(utils.StateDiffDBFlag.Name) { - dbParams = new(statediff.DBParams) - dbParams.ConnectionURL = ctx.GlobalString(utils.StateDiffDBFlag.Name) + var dbConfig *sql.Config + if ctx.GlobalIsSet(utils.StateDiffWritingFlag.Name) { + dbConfig = new(sql.Config) + dbConfig.Hostname = ctx.GlobalString(utils.StateDiffDBHostFlag.Name) + dbConfig.Port = ctx.GlobalInt(utils.StateDiffDBPortFlag.Name) + dbConfig.DatabaseName = ctx.GlobalString(utils.StateDiffDBNameFlag.Name) + dbConfig.Username = ctx.GlobalString(utils.StateDiffDBUserFlag.Name) + dbConfig.Password = ctx.GlobalString(utils.StateDiffDBPasswordFlag.Name) + if ctx.GlobalIsSet(utils.StateDiffDBNodeIDFlag.Name) { - dbParams.ID = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name) + dbConfig.ID = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name) } else { utils.Fatalf("Must specify node ID for statediff DB output") } + if ctx.GlobalIsSet(utils.StateDiffDBClientNameFlag.Name) { - dbParams.ClientName = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name) + dbConfig.ClientName = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name) } else { utils.Fatalf("Must specify client name for statediff DB output") } - } else { - if ctx.GlobalBool(utils.StateDiffWritingFlag.Name) { - utils.Fatalf("Must pass DB parameters if enabling statediff write loop") + + if ctx.GlobalIsSet(utils.StateDiffDBMinConns.Name) { + dbConfig.MinConns = ctx.GlobalInt(utils.StateDiffDBMinConns.Name) + } + if ctx.GlobalIsSet(utils.StateDiffDBMaxConns.Name) { + dbConfig.MaxConns = ctx.GlobalInt(utils.StateDiffDBMaxConns.Name) + } + if ctx.GlobalIsSet(utils.StateDiffDBMaxIdleConns.Name) { + dbConfig.MaxIdle = ctx.GlobalInt(utils.StateDiffDBMaxIdleConns.Name) + } + if ctx.GlobalIsSet(utils.StateDiffDBMaxConnLifetime.Name) { + dbConfig.MaxConnLifetime = ctx.GlobalDuration(utils.StateDiffDBMaxConnLifetime.Name) * time.Second + } + if ctx.GlobalIsSet(utils.StateDiffDBMaxConnIdleTime.Name) { + dbConfig.MaxConnIdleTime = ctx.GlobalDuration(utils.StateDiffDBMaxConnIdleTime.Name) * time.Second + } + if ctx.GlobalIsSet(utils.StateDiffDBConnTimeout.Name) { + dbConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffDBConnTimeout.Name) * time.Second } } p := statediff.ServiceParams{ - DBParams: dbParams, + DBParams: dbConfig, EnableWriteLoop: ctx.GlobalBool(utils.StateDiffWritingFlag.Name), NumWorkers: ctx.GlobalUint(utils.StateDiffWorkersFlag.Name), } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 8432b8005..9c8dbdcfd 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -149,7 +149,17 @@ var ( utils.GpoIgnoreGasPriceFlag, utils.MinerNotifyFullFlag, utils.StateDiffFlag, - utils.StateDiffDBFlag, + utils.StateDiffDBNameFlag, + utils.StateDiffDBPasswordFlag, + utils.StateDiffDBUserFlag, + utils.StateDiffDBHostFlag, + utils.StateDiffDBPortFlag, + utils.StateDiffDBMaxConnLifetime, + utils.StateDiffDBMaxConnIdleTime, + utils.StateDiffDBMaxConns, + utils.StateDiffDBMinConns, + utils.StateDiffDBMaxIdleConns, + 
utils.StateDiffDBConnTimeout, utils.StateDiffDBNodeIDFlag, utils.StateDiffDBClientNameFlag, utils.StateDiffWritingFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index e61d2927c..1ad1b8557 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -225,7 +225,17 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Name: "STATE DIFF", Flags: []cli.Flag{ utils.StateDiffFlag, - utils.StateDiffDBFlag, + utils.StateDiffDBNameFlag, + utils.StateDiffDBPasswordFlag, + utils.StateDiffDBUserFlag, + utils.StateDiffDBHostFlag, + utils.StateDiffDBPortFlag, + utils.StateDiffDBMaxConnLifetime, + utils.StateDiffDBMaxConnIdleTime, + utils.StateDiffDBMaxConns, + utils.StateDiffDBMinConns, + utils.StateDiffDBMaxIdleConns, + utils.StateDiffDBConnTimeout, utils.StateDiffDBNodeIDFlag, utils.StateDiffDBClientNameFlag, utils.StateDiffWritingFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index d7a0b7a6a..8dfb92a7e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -786,16 +786,59 @@ var ( Name: "statediff", Usage: "Enables the processing of state diffs between each block", } - StateDiffDBFlag = cli.StringFlag{ - Name: "statediff.db", - Usage: "PostgreSQL database connection string for writing state diffs", + StateDiffDBHostFlag = cli.StringFlag{ + Name: "statediff.db.host", + Usage: "Statediff database hostname/ip", + Value: "localhost", + } + StateDiffDBPortFlag = cli.IntFlag{ + Name: "statediff.db.port", + Usage: "Statediff database port", + Value: 5432, + } + StateDiffDBNameFlag = cli.StringFlag{ + Name: "statediff.db.name", + Usage: "Statediff database name", + } + StateDiffDBPasswordFlag = cli.StringFlag{ + Name: "statediff.db.password", + Usage: "Statediff database password", + } + StateDiffDBUserFlag = cli.StringFlag{ + Name: "statediff.db.user", + Usage: "Statediff database username", + Value: "postgres", + } + StateDiffDBMaxConnLifetime = cli.DurationFlag{ + Name: "statediff.db.maxconnlifetime", + Usage: "Statediff database maximum connection lifetime (in seconds)", + } + StateDiffDBMaxConnIdleTime = cli.DurationFlag{ + Name: "statediff.db.maxconnidletime", + Usage: "Statediff database maximum connection idle time (in seconds)", + } + StateDiffDBMaxConns = cli.IntFlag{ + Name: "statediff.db.maxconns", + Usage: "Statediff database maximum connections", + } + StateDiffDBMinConns = cli.IntFlag{ + Name: "statediff.db.minconns", + Usage: "Statediff database minimum connections", + } + StateDiffDBMaxIdleConns = cli.IntFlag{ + Name: "statediff.db.maxidleconns", + Usage: "Statediff database maximum idle connections", + } + StateDiffDBConnTimeout = cli.DurationFlag{ + Name: "statediff.db.conntimeout", + Usage: "Statediff database connection timeout (in seconds)", } StateDiffDBNodeIDFlag = cli.StringFlag{ - Name: "statediff.dbnodeid", + Name: "statediff.db.nodeid", Usage: "Node ID to use when writing state diffs to database", } StateDiffDBClientNameFlag = cli.StringFlag{ - Name: "statediff.dbclientname", + Name: "statediff.db.clientname", Usage: "Client name to use when writing state diffs to database", } StateDiffWritingFlag = cli.BoolFlag{ diff --git a/go.mod b/go.mod index 0f94c2611..da726b7c4 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,7 @@ require ( github.com/fatih/color v1.7.0 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff + github.com/georgysavva/scany v0.2.9 github.com/go-ole/go-ole v1.2.1 // indirect github.com/go-stack/stack v1.8.0 github.com/golang/protobuf v1.4.3 @@ -46,6 +47,10 @@ require ( 
github.com/ipfs/go-ipfs-blockstore v1.0.1 github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipld-format v0.2.0 + github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect + github.com/jackc/pgconn v1.10.0 + github.com/jackc/pgx v3.6.2+incompatible + github.com/jackc/pgx/v4 v4.13.0 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e github.com/jmoiron/sqlx v1.2.0 @@ -55,7 +60,6 @@ require ( github.com/lib/pq v1.10.2 github.com/mattn/go-colorable v0.1.8 github.com/mattn/go-isatty v0.0.12 - github.com/mattn/go-sqlite3 v1.14.7 // indirect github.com/multiformats/go-multihash v0.0.14 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 @@ -70,7 +74,7 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a + golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 golang.org/x/text v0.3.6 diff --git a/go.sum b/go.sum index 0097b96f7..cf5e867db 100644 --- a/go.sum +++ b/go.sum @@ -42,6 +42,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -100,10 +102,17 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go/v2 v2.0.3 h1:ZA346ACHIZctef6trOTwBAEvPVm1k0uLm/bb2Atc+S8= +github.com/cockroachdb/cockroach-go/v2 v2.0.3/go.mod h1:hAuDgiVgDVkfirP9JnhXEfcXEPRKBpYdGz+l7mvYSzw= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd 
v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -116,6 +125,7 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= @@ -132,6 +142,7 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= @@ -142,6 +153,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/georgysavva/scany v0.2.9 h1:Xt6rjYpHnMClTm/g+oZTnoSxUwiln5GqMNU+QeLNHQU= +github.com/georgysavva/scany v0.2.9/go.mod h1:yeOeC1BdIdl6hOwy8uefL2WNSlseFzbhlG/frrh65SA= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -152,9 +165,11 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod 
h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -166,11 +181,15 @@ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZp github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -281,6 +300,80 @@ github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod 
h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78= +github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU= +github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= 
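The `github.com/jackc/*` and `github.com/georgysavva/scany` sums being added in this stretch of go.sum belong to the Postgres driver stack (pgconn, pgx, pgxpool, scany) that the statediff indexer now builds on. As an illustrative sketch only, not code from this change: the pool-tuning values carried by the new `--statediff.db.*` flags map naturally onto a pgx/v4 `pgxpool` configuration along the lines below; the DSN, variable names, and concrete numbers are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/jackc/pgx/v4/pgxpool"
)

func main() {
	// Hypothetical DSN assembled from host/port/name/user/password style settings.
	dsn := "postgres://postgres:secret@localhost:5432/vulcanize_testing?sslmode=disable"

	cfg, err := pgxpool.ParseConfig(dsn)
	if err != nil {
		panic(err)
	}

	// Pool tuning analogous to the new CLI flags; the values here are made up.
	cfg.MinConns = 2                                 // cf. --statediff.db.minconns
	cfg.MaxConns = 10                                // cf. --statediff.db.maxconns
	cfg.MaxConnLifetime = 300 * time.Second          // cf. --statediff.db.maxconnlifetime
	cfg.MaxConnIdleTime = 60 * time.Second           // cf. --statediff.db.maxconnidletime
	cfg.ConnConfig.ConnectTimeout = 10 * time.Second // cf. --statediff.db.conntimeout

	pool, err := pgxpool.ConnectConfig(context.Background(), cfg)
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	fmt.Println("pool ready, max conns:", cfg.MaxConns)
}
```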
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= +github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= +github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= +github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs= +github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= +github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= +github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= +github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570= +github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= @@ -288,6 +381,9 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U= 
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= @@ -313,12 +409,13 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -329,6 +426,11 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.4.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= @@ -338,6 +440,7 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -346,6 +449,7 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpu github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -355,8 +459,8 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= -github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= @@ -441,14 +545,25 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod 
h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v0.0.0-20200419222939-1884f454f8ea/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -461,6 +576,8 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57N github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -487,29 +604,45 @@ github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -555,6 +688,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -589,13 +723,16 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -618,6 +755,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -648,25 +786,31 @@ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools 
v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -725,6 +869,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
diff --git a/statediff/README.md b/statediff/README.md
index 74c82f2d2..dd2eaed7f 100644
--- a/statediff/README.md
+++ b/statediff/README.md
@@ -73,23 +73,31 @@ type Payload struct {
## Usage
This state diffing service runs as an auxiliary service concurrent to the regular syncing process of the geth node.
-
### CLI configuration
This service introduces a CLI flag namespace `statediff`
`--statediff` flag is used to turn on the service
`--statediff.writing` is used to tell the service to write state diff objects it produces from synced ChainEvents directly to a configured Postgres database
`--statediff.workers` is used to set the number of concurrent workers to process state diff objects and write them into the database
-`--statediff.db` is the connection string for the Postgres database to write to
-`--statediff.db.init` indicates whether we need to initialize a new database; set true if its the first time running the process on a given database
-`--statediff.dbnodeid` is the node id to use in the Postgres database
-`--statediff.dbclientname` is the client name to use in the Postgres database
+`--statediff.db.host` is the hostname/IP of the database to connect to
+`--statediff.db.port` is the port of the database to connect to
+`--statediff.db.name` is the name of the database to connect to
+`--statediff.db.user` is the user to connect to the database as
+`--statediff.db.password` is the password to use to connect to the database
+`--statediff.db.conntimeout` is the connection timeout (in seconds)
+`--statediff.db.maxconns` is the maximum number of database connections
+`--statediff.db.minconns` is the minimum number of database connections
+`--statediff.db.maxidleconns` is the maximum number of idle connections
+`--statediff.db.maxconnidletime` is the maximum amount of time a connection may remain idle (in seconds)
+`--statediff.db.maxconnlifetime` is the maximum lifetime for a connection (in seconds)
+`--statediff.db.nodeid` is the node id to use in the Postgres database
+`--statediff.db.clientname` is the client name to use in the Postgres database
The service can only operate in full sync mode (`--syncmode=full`), but only the historical RPC endpoints require an archive node (`--gcmode=archive`)
e.g.
`
-./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db=postgres://localhost:5432/vulcanize_testing?sslmode=disable --statediff.db.init=true --statediff.dbnodeid={nodeId} --statediff.dbclientname={dbClientName}
+./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=vulcanize_testing --statediff.db.nodeid={nodeId} --statediff.db.clientname={dbClientName}
`
### RPC endpoints
diff --git a/statediff/api.go b/statediff/api.go index 923a0073f..5c534cddb 100644 --- a/statediff/api.go +++ b/statediff/api.go @@ -19,11 +19,11 @@ package statediff import ( "context" - "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - . "github.com/ethereum/go-ethereum/statediff/types" ) // APIName is the namespace used for the state diffing service API @@ -117,7 +117,7 @@ func (api *PublicStateDiffAPI) StreamCodeAndCodeHash(ctx context.Context, blockN // create subscription and start waiting for events rpcSub := notifier.CreateSubscription() - payloadChan := make(chan CodeAndCodeHash, chainEventChanSize) + payloadChan := make(chan types.CodeAndCodeHash, chainEventChanSize) quitChan := make(chan bool) api.sds.StreamCodeAndCodeHash(blockNumber, payloadChan, quitChan) go func() { diff --git a/statediff/builder.go b/statediff/builder.go index 7befb6b3c..eacfeca15 100644 --- a/statediff/builder.go +++ b/statediff/builder.go @@ -23,14 +23,16 @@ import ( "bytes" "fmt" + "github.com/ethereum/go-ethereum/statediff/trie_helpers" + + types2 "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" - sdtrie "github.com/ethereum/go-ethereum/statediff/trie" - . 
"github.com/ethereum/go-ethereum/statediff/types" "github.com/ethereum/go-ethereum/trie" ) @@ -43,9 +45,9 @@ var ( // Builder interface exposes the method for building a state diff between two blocks type Builder interface { - BuildStateDiffObject(args Args, params Params) (StateObject, error) - BuildStateTrieObject(current *types.Block) (StateObject, error) - WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error + BuildStateDiffObject(args Args, params Params) (types2.StateObject, error) + BuildStateTrieObject(current *types.Block) (types2.StateObject, error) + WriteStateDiffObject(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error } type builder struct { @@ -53,20 +55,20 @@ type builder struct { } // convenience -func stateNodeAppender(nodes *[]StateNode) StateNodeSink { - return func(node StateNode) error { +func stateNodeAppender(nodes *[]types2.StateNode) types2.StateNodeSink { + return func(node types2.StateNode) error { *nodes = append(*nodes, node) return nil } } -func storageNodeAppender(nodes *[]StorageNode) StorageNodeSink { - return func(node StorageNode) error { +func storageNodeAppender(nodes *[]types2.StorageNode) types2.StorageNodeSink { + return func(node types2.StorageNode) error { *nodes = append(*nodes, node) return nil } } -func codeMappingAppender(codeAndCodeHashes *[]CodeAndCodeHash) CodeSink { - return func(c CodeAndCodeHash) error { +func codeMappingAppender(codeAndCodeHashes *[]types2.CodeAndCodeHash) types2.CodeSink { + return func(c types2.CodeAndCodeHash) error { *codeAndCodeHashes = append(*codeAndCodeHashes, c) return nil } @@ -80,17 +82,17 @@ func NewBuilder(stateCache state.Database) Builder { } // BuildStateTrieObject builds a state trie object from the provided block -func (sdb *builder) BuildStateTrieObject(current *types.Block) (StateObject, error) { +func (sdb *builder) BuildStateTrieObject(current *types.Block) (types2.StateObject, error) { currentTrie, err := sdb.stateCache.OpenTrie(current.Root()) if err != nil { - return StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err) + return types2.StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err) } it := currentTrie.NodeIterator([]byte{}) stateNodes, codeAndCodeHashes, err := sdb.buildStateTrie(it) if err != nil { - return StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err) + return types2.StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err) } - return StateObject{ + return types2.StateObject{ BlockNumber: current.Number(), BlockHash: current.Hash(), Nodes: stateNodes, @@ -98,20 +100,20 @@ func (sdb *builder) BuildStateTrieObject(current *types.Block) (StateObject, err }, nil } -func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAndCodeHash, error) { - stateNodes := make([]StateNode, 0) - codeAndCodeHashes := make([]CodeAndCodeHash, 0) +func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]types2.StateNode, []types2.CodeAndCodeHash, error) { + stateNodes := make([]types2.StateNode, 0) + codeAndCodeHashes := make([]types2.CodeAndCodeHash, 0) for it.Next(true) { // skip value nodes if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if 
err != nil { return nil, nil, err } switch node.NodeType { - case Leaf: + case types2.Leaf: var account types.StateAccount if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil { return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err) @@ -122,7 +124,7 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd leafKey := encodedPath[1:] node.LeafKey = leafKey if !bytes.Equal(account.CodeHash, nullCodeHash) { - var storageNodes []StorageNode + var storageNodes []types2.StorageNode err := sdb.buildStorageNodesEventual(account.Root, nil, true, storageNodeAppender(&storageNodes)) if err != nil { return nil, nil, fmt.Errorf("failed building eventual storage diffs for account %+v\r\nerror: %v", account, err) @@ -134,13 +136,13 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd if err != nil { return nil, nil, fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err) } - codeAndCodeHashes = append(codeAndCodeHashes, CodeAndCodeHash{ + codeAndCodeHashes = append(codeAndCodeHashes, types2.CodeAndCodeHash{ Hash: codeHash, Code: code, }) } stateNodes = append(stateNodes, node) - case Extension, Branch: + case types2.Extension, types2.Branch: stateNodes = append(stateNodes, node) default: return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType) @@ -150,16 +152,16 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd } // BuildStateDiffObject builds a statediff object from two blocks and the provided parameters -func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject, error) { - var stateNodes []StateNode - var codeAndCodeHashes []CodeAndCodeHash +func (sdb *builder) BuildStateDiffObject(args Args, params Params) (types2.StateObject, error) { + var stateNodes []types2.StateNode + var codeAndCodeHashes []types2.CodeAndCodeHash err := sdb.WriteStateDiffObject( - StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot}, + types2.StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot}, params, stateNodeAppender(&stateNodes), codeMappingAppender(&codeAndCodeHashes)) if err != nil { - return StateObject{}, err + return types2.StateObject{}, err } - return StateObject{ + return types2.StateObject{ BlockHash: args.BlockHash, BlockNumber: args.BlockNumber, Nodes: stateNodes, @@ -168,7 +170,7 @@ func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject, } // Writes a statediff object to output callback -func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error { +func (sdb *builder) WriteStateDiffObject(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error { if !params.IntermediateStateNodes || len(params.WatchedAddresses) > 0 { // if we are watching only specific accounts then we are only diffing leaf nodes return sdb.buildStateDiffWithoutIntermediateStateNodes(args, params, output, codeOutput) @@ -177,7 +179,7 @@ func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output } } -func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error { +func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error { // Load tries 
for old and new states oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot) if err != nil { @@ -208,14 +210,14 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, pa } // collect and sort the leafkey keys for both account mappings into a slice - createKeys := sortKeys(diffAccountsAtB) - deleteKeys := sortKeys(diffAccountsAtA) + createKeys := trie_helpers.SortKeys(diffAccountsAtB) + deleteKeys := trie_helpers.SortKeys(diffAccountsAtA) // and then find the intersection of these keys // these are the leafkeys for the accounts which exist at both A and B but are different // this also mutates the passed in createKeys and deleteKeys, removing the intersection keys // and leaving the truly created or deleted keys in place - updatedKeys := findIntersection(createKeys, deleteKeys) + updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys) // build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two err = sdb.buildAccountUpdates( @@ -232,7 +234,7 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, pa return nil } -func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error { +func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error { // Load tries for old (A) and new (B) states oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot) if err != nil { @@ -262,14 +264,14 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots, } // collect and sort the leafkeys for both account mappings into a slice - createKeys := sortKeys(diffAccountsAtB) - deleteKeys := sortKeys(diffAccountsAtA) + createKeys := trie_helpers.SortKeys(diffAccountsAtB) + deleteKeys := trie_helpers.SortKeys(diffAccountsAtA) // and then find the intersection of these keys // these are the leafkeys for the accounts which exist at both A and B but are different // this also mutates the passed in createKeys and deleteKeys, removing in intersection keys // and leaving the truly created or deleted keys in place - updatedKeys := findIntersection(createKeys, deleteKeys) + updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys) // build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two err = sdb.buildAccountUpdates( @@ -289,20 +291,20 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots, // createdAndUpdatedState returns // a mapping of their leafkeys to all the accounts that exist in a different state at B than A // and a slice of the paths for all of the nodes included in both -func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddresses []common.Address) (AccountMap, map[string]bool, error) { +func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddresses []common.Address) (types2.AccountMap, map[string]bool, error) { diffPathsAtB := make(map[string]bool) - diffAcountsAtB := make(AccountMap) + diffAcountsAtB := make(types2.AccountMap) it, _ := trie.NewDifferenceIterator(a, b) for it.Next(true) { // skip value nodes if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := 
trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if err != nil { return nil, nil, err } - if node.NodeType == Leaf { + if node.NodeType == types2.Leaf { // created vs updated is important for leaf nodes since we need to diff their storage // so we need to map all changed accounts at B to their leafkey, since account can change pathes but not leafkey var account types.StateAccount @@ -314,7 +316,7 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres encodedPath := trie.HexToCompact(valueNodePath) leafKey := encodedPath[1:] if isWatchedAddress(watchedAddresses, leafKey) { - diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{ + diffAcountsAtB[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -333,21 +335,21 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres // a slice of all the intermediate nodes that exist in a different state at B than A // a mapping of their leafkeys to all the accounts that exist in a different state at B than A // and a slice of the paths for all of the nodes included in both -func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output StateNodeSink) (AccountMap, map[string]bool, error) { +func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output types2.StateNodeSink) (types2.AccountMap, map[string]bool, error) { diffPathsAtB := make(map[string]bool) - diffAcountsAtB := make(AccountMap) + diffAcountsAtB := make(types2.AccountMap) it, _ := trie.NewDifferenceIterator(a, b) for it.Next(true) { // skip value nodes if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if err != nil { return nil, nil, err } switch node.NodeType { - case Leaf: + case types2.Leaf: // created vs updated is important for leaf nodes since we need to diff their storage // so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey var account types.StateAccount @@ -358,17 +360,17 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt valueNodePath := append(node.Path, partialPath...) 
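
The hunks above move the key sorting and intersection helpers into the trie_helpers package (SortKeys, FindIntersection). For context, the classification they support can be sketched standalone as follows: leaf keys present in both the created-at-B and deleted-at-A maps identify updated accounts, while the remaining keys are true creations or deletions. This is only an illustration with hypothetical names (sortKeys, findIntersection here are local stand-ins, not the exported trie_helpers API), and unlike the real helpers it does not mutate its input slices.

package main

import (
	"fmt"
	"sort"
)

// sortKeys returns the sorted hex leaf keys of an account map.
func sortKeys(m map[string]struct{}) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// findIntersection walks two sorted slices and returns the keys present in both:
// accounts that exist at A and B but in different states, i.e. updates.
func findIntersection(createKeys, deleteKeys []string) []string {
	var updated []string
	i, j := 0, 0
	for i < len(createKeys) && j < len(deleteKeys) {
		switch {
		case createKeys[i] == deleteKeys[j]:
			updated = append(updated, createKeys[i])
			i++
			j++
		case createKeys[i] < deleteKeys[j]:
			i++
		default:
			j++
		}
	}
	return updated
}

func main() {
	created := map[string]struct{}{"aa": {}, "bb": {}, "cc": {}}
	deleted := map[string]struct{}{"bb": {}, "dd": {}}
	fmt.Println(findIntersection(sortKeys(created), sortKeys(deleted))) // [bb]
}
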
encodedPath := trie.HexToCompact(valueNodePath) leafKey := encodedPath[1:] - diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{ + diffAcountsAtB[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, LeafKey: leafKey, Account: &account, } - case Extension, Branch: + case types2.Extension, types2.Branch: // create a diff for any intermediate node that has changed at b // created vs updated makes no difference for intermediate nodes since we do not need to diff storage - if err := output(StateNode{ + if err := output(types2.StateNode{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -386,20 +388,20 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt // deletedOrUpdatedState returns a slice of all the pathes that are emptied at B // and a mapping of their leafkeys to all the accounts that exist in a different state at A than B -func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output StateNodeSink) (AccountMap, error) { - diffAccountAtA := make(AccountMap) +func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output types2.StateNodeSink) (types2.AccountMap, error) { + diffAccountAtA := make(types2.AccountMap) it, _ := trie.NewDifferenceIterator(b, a) for it.Next(true) { // skip value nodes if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if err != nil { return nil, err } switch node.NodeType { - case Leaf: + case types2.Leaf: // map all different accounts at A to their leafkey var account types.StateAccount if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil { @@ -409,7 +411,7 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m valueNodePath := append(node.Path, partialPath...) 
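
The sink signatures being retyped throughout these hunks (StateNodeSink, StorageNodeSink, CodeSink) follow a plain callback pattern: the builder streams each node to an output function rather than accumulating slices internally, and BuildStateDiffObject wires in appenders (stateNodeAppender, storageNodeAppender) that collect into slices. A minimal sketch of that pattern with stand-in types (Node, NodeSink, appender are illustrative names, not the statediff/types definitions):

package main

import "fmt"

// Node stands in for a statediff node (state or storage).
type Node struct {
	Path  []byte
	Value []byte
}

// NodeSink is the callback the builder invokes once per emitted node.
type NodeSink func(Node) error

// appender returns a sink that accumulates nodes into the provided slice,
// which is how a slice-collecting caller can back a streaming API.
func appender(dst *[]Node) NodeSink {
	return func(n Node) error {
		*dst = append(*dst, n)
		return nil
	}
}

// walk emits a few nodes through the sink; a real builder walks a trie iterator.
func walk(output NodeSink) error {
	for i := 0; i < 3; i++ {
		if err := output(Node{Path: []byte{byte(i)}}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var nodes []Node
	if err := walk(appender(&nodes)); err != nil {
		panic(err)
	}
	fmt.Println(len(nodes)) // 3
}
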
encodedPath := trie.HexToCompact(valueNodePath) leafKey := encodedPath[1:] - diffAccountAtA[common.Bytes2Hex(leafKey)] = accountWrapper{ + diffAccountAtA[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -420,24 +422,24 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m // that means the node at this path was deleted (or moved) in B // emit an empty "removed" diff to signify as such if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok { - if err := output(StateNode{ + if err := output(types2.StateNode{ Path: node.Path, NodeValue: []byte{}, - NodeType: Removed, + NodeType: types2.Removed, LeafKey: leafKey, }); err != nil { return nil, err } } - case Extension, Branch: + case types2.Extension, types2.Branch: // if this node's path did not show up in diffPathsAtB // that means the node at this path was deleted (or moved) in B // emit an empty "removed" diff to signify as such if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok { - if err := output(StateNode{ + if err := output(types2.StateNode{ Path: node.Path, NodeValue: []byte{}, - NodeType: Removed, + NodeType: types2.Removed, }); err != nil { return nil, err } @@ -454,13 +456,13 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m // to generate the statediff node objects for all of the accounts that existed at both A and B but in different states // needs to be called before building account creations and deletions as this mutates // those account maps to remove the accounts which were updated -func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updatedKeys []string, - watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink) error { +func (sdb *builder) buildAccountUpdates(creations, deletions types2.AccountMap, updatedKeys []string, + watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output types2.StateNodeSink) error { var err error for _, key := range updatedKeys { createdAcc := creations[key] deletedAcc := deletions[key] - var storageDiffs []StorageNode + var storageDiffs []types2.StorageNode if deletedAcc.Account != nil && createdAcc.Account != nil { oldSR := deletedAcc.Account.Root newSR := createdAcc.Account.Root @@ -471,7 +473,7 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err) } } - if err = output(StateNode{ + if err = output(types2.StateNode{ NodeType: createdAcc.NodeType, Path: createdAcc.Path, NodeValue: createdAcc.NodeValue, @@ -489,9 +491,9 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated // buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A // it also returns the code and codehash for created contract accounts -func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink, codeOutput CodeSink) error { +func (sdb *builder) buildAccountCreations(accounts types2.AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output types2.StateNodeSink, codeOutput types2.CodeSink) error { for _, val := range accounts { - diff := StateNode{ + diff := types2.StateNode{ NodeType: val.NodeType, Path: val.Path, LeafKey: val.LeafKey, @@ -499,7 +501,7 @@ func (sdb *builder) 
buildAccountCreations(accounts AccountMap, watchedStorageKey } if !bytes.Equal(val.Account.CodeHash, nullCodeHash) { // For contract creations, any storage node contained is a diff - var storageDiffs []StorageNode + var storageDiffs []types2.StorageNode err := sdb.buildStorageNodesEventual(val.Account.Root, watchedStorageKeys, intermediateStorageNodes, storageNodeAppender(&storageDiffs)) if err != nil { return fmt.Errorf("failed building eventual storage diffs for node %x\r\nerror: %v", val.Path, err) @@ -511,7 +513,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey if err != nil { return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err) } - if err := codeOutput(CodeAndCodeHash{ + if err := codeOutput(types2.CodeAndCodeHash{ Hash: codeHash, Code: code, }); err != nil { @@ -528,7 +530,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey // buildStorageNodesEventual builds the storage diff node objects for a created account // i.e. it returns all the storage nodes at this state, since there is no previous state -func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error { +func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error { if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) { return nil } @@ -549,24 +551,24 @@ func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys // buildStorageNodesFromTrie returns all the storage diff node objects in the provided node interator // if any storage keys are provided it will only return those leaf nodes // including intermediate nodes can be turned on or off -func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error { +func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error { for it.Next(true) { // skip value nodes if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if err != nil { return err } switch node.NodeType { - case Leaf: + case types2.Leaf: partialPath := trie.CompactToHex(nodeElements[0].([]byte)) valueNodePath := append(node.Path, partialPath...) 
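
Both directions of the diff lean on trie.NewDifferenceIterator: iterating (A, B) visits only nodes present at B but not at A (creations and updates), while iterating (B, A) visits nodes present only at A (deletions and moves). A rough, self-contained sketch of that usage against the trie package as vendored on this branch (constructor signatures may differ in other geth versions; the helper name changedPaths is illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

// changedPaths returns the paths of trie nodes that exist in b but not in a.
// Swapping the argument order yields the nodes present only in a instead,
// which is how the builder separates created/updated nodes from deleted ones.
func changedPaths(a, b trie.NodeIterator) ([][]byte, error) {
	it, _ := trie.NewDifferenceIterator(a, b)
	var paths [][]byte
	for it.Next(true) {
		// Skip value nodes and nodes embedded in their parent (zero hash),
		// mirroring the skip condition used throughout the builder.
		if it.Leaf() || bytes.Equal(common.Hash{}.Bytes(), it.Hash().Bytes()) {
			continue
		}
		p := make([]byte, len(it.Path()))
		copy(p, it.Path())
		paths = append(paths, p)
	}
	return paths, it.Error()
}

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	trA, _ := trie.New(common.Hash{}, db)
	trB, _ := trie.New(common.Hash{}, db)
	trA.Update([]byte("acct1"), []byte{1})
	trB.Update([]byte("acct1"), []byte{1})
	trB.Update([]byte("acct2"), []byte{2}) // present only at B
	paths, err := changedPaths(trA.NodeIterator(nil), trB.NodeIterator(nil))
	if err != nil {
		panic(err)
	}
	fmt.Printf("nodes only at B: %d\n", len(paths))
}
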
encodedPath := trie.HexToCompact(valueNodePath) leafKey := encodedPath[1:] if isWatchedStorageKey(watchedStorageKeys, leafKey) { - if err := output(StorageNode{ + if err := output(types2.StorageNode{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -575,9 +577,9 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStora return err } } - case Extension, Branch: + case types2.Extension, types2.Branch: if intermediateNodes { - if err := output(StorageNode{ + if err := output(types2.StorageNode{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -593,7 +595,7 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStora } // buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A -func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error { +func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error { if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) { return nil } @@ -621,7 +623,7 @@ func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common return nil } -func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) (map[string]bool, error) { +func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) (map[string]bool, error) { diffPathsAtB := make(map[string]bool) it, _ := trie.NewDifferenceIterator(a, b) for it.Next(true) { @@ -629,18 +631,18 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if err != nil { return nil, err } switch node.NodeType { - case Leaf: + case types2.Leaf: partialPath := trie.CompactToHex(nodeElements[0].([]byte)) valueNodePath := append(node.Path, partialPath...) 
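
The isWatchedStorageKey checks used above (and the address counterpart for state leafs) behave as simple allow-list filters, as the watched-address tests later in this diff exercise: an empty watch list means every leaf is emitted, otherwise only leafs whose key matches a watched hash are. A small standalone sketch of that semantics (isWatched is an illustrative name):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// isWatched reports whether a storage leaf key should be emitted. An empty
// watch list acts as "watch everything"; otherwise the leaf key must equal
// one of the watched slot hashes.
func isWatched(watched []common.Hash, leafKey []byte) bool {
	if len(watched) == 0 {
		return true
	}
	for _, h := range watched {
		if bytes.Equal(h.Bytes(), leafKey) {
			return true
		}
	}
	return false
}

func main() {
	slot1 := common.HexToHash("1")
	slot2 := common.HexToHash("2")
	fmt.Println(isWatched(nil, slot1.Bytes()))                  // true: no filter set
	fmt.Println(isWatched([]common.Hash{slot1}, slot1.Bytes())) // true: watched slot
	fmt.Println(isWatched([]common.Hash{slot1}, slot2.Bytes())) // false: filtered out
}
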
encodedPath := trie.HexToCompact(valueNodePath) leafKey := encodedPath[1:] if isWatchedStorageKey(watchedKeys, leafKey) { - if err := output(StorageNode{ + if err := output(types2.StorageNode{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -649,9 +651,9 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys return nil, err } } - case Extension, Branch: + case types2.Extension, types2.Branch: if intermediateNodes { - if err := output(StorageNode{ + if err := output(types2.StorageNode{ NodeType: node.NodeType, Path: node.Path, NodeValue: node.NodeValue, @@ -667,14 +669,14 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys return diffPathsAtB, it.Error() } -func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error { +func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error { it, _ := trie.NewDifferenceIterator(b, a) for it.Next(true) { // skip value nodes if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) { continue } - node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB()) + node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB()) if err != nil { return err } @@ -685,14 +687,14 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB continue } switch node.NodeType { - case Leaf: + case types2.Leaf: partialPath := trie.CompactToHex(nodeElements[0].([]byte)) valueNodePath := append(node.Path, partialPath...) encodedPath := trie.HexToCompact(valueNodePath) leafKey := encodedPath[1:] if isWatchedStorageKey(watchedKeys, leafKey) { - if err := output(StorageNode{ - NodeType: Removed, + if err := output(types2.StorageNode{ + NodeType: types2.Removed, Path: node.Path, NodeValue: []byte{}, LeafKey: leafKey, @@ -700,10 +702,10 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB return err } } - case Extension, Branch: + case types2.Extension, types2.Branch: if intermediateNodes { - if err := output(StorageNode{ - NodeType: Removed, + if err := output(types2.StorageNode{ + NodeType: types2.Removed, Path: node.Path, NodeValue: []byte{}, }); err != nil { diff --git a/statediff/builder_test.go b/statediff/builder_test.go index 6a88bbba0..d4d67940e 100644 --- a/statediff/builder_test.go +++ b/statediff/builder_test.go @@ -24,23 +24,24 @@ import ( "sort" "testing" + types2 "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers" - sdtypes "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/statediff/test_helpers" ) var ( contractLeafKey []byte - emptyDiffs = make([]sdtypes.StateNode, 0) - emptyStorage = make([]sdtypes.StorageNode, 0) + emptyDiffs = make([]types2.StateNode, 0) + emptyStorage = make([]types2.StorageNode, 0) block0, block1, block2, block3, block4, block5, block6 *types.Block builder statediff.Builder minerAddress = common.HexToAddress("0x0") - minerLeafKey = testhelpers.AddressToLeafKey(minerAddress) + minerLeafKey = test_helpers.AddressToLeafKey(minerAddress) 
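
The fixture definitions that follow in the test file all build expected leaf-node values the same way: RLP-encode the types.StateAccount, then RLP-encode a two-element list of the compact-encoded partial path and that account RLP. A condensed sketch of the pattern (the balance, path hex, and empty-storage root below are illustrative values, not taken from a specific fixture):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// RLP of the account itself...
	account, err := rlp.EncodeToBytes(types.StateAccount{
		Nonce:    0,
		Balance:  big.NewInt(1000),
		CodeHash: crypto.Keccak256(nil), // keccak of empty code, i.e. a plain EOA
		Root:     common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	})
	if err != nil {
		panic(err)
	}
	// ...wrapped with a compact-encoded partial path into the leaf-node value,
	// mirroring the two-element rlp.EncodeToBytes([]interface{}{...}) fixtures below.
	leafNode, err := rlp.EncodeToBytes([]interface{}{
		common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"),
		account,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("leaf node value: %x\n", leafNode)
}
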
slot0 = common.HexToHash("0") slot1 = common.HexToHash("1") @@ -122,8 +123,8 @@ var ( minerAccountAtBlock1, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(2000002625000000000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) minerAccountAtBlock1LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"), @@ -132,8 +133,8 @@ var ( minerAccountAtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(4000111203461610525), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) minerAccountAtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"), @@ -142,9 +143,9 @@ var ( account1AtBlock1, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, - Balance: testhelpers.Block1Account1Balance, - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + Balance: test_helpers.Block1Account1Balance, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account1AtBlock1LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"), @@ -153,8 +154,8 @@ var ( account1AtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 2, Balance: big.NewInt(999555797000009000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account1AtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"), @@ -163,8 +164,8 @@ var ( account1AtBlock5, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 2, Balance: big.NewInt(2999566008847709960), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account1AtBlock5LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"), @@ -173,8 +174,8 @@ var ( account1AtBlock6, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 3, Balance: big.NewInt(2999537516847709960), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account1AtBlock6LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"), @@ -184,8 +185,8 @@ var ( account2AtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(1000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account2AtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"), @@ -194,8 +195,8 @@ var ( account2AtBlock3, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(2000013574009435976), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: 
testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account2AtBlock3LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"), @@ -204,8 +205,8 @@ var ( account2AtBlock4, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(4000048088163070348), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account2AtBlock4LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"), @@ -214,8 +215,8 @@ var ( account2AtBlock6, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(6000063293259748636), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) account2AtBlock6LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45"), @@ -224,33 +225,33 @@ var ( bankAccountAtBlock0, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, - Balance: big.NewInt(testhelpers.TestBankFunds.Int64()), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + Balance: big.NewInt(test_helpers.TestBankFunds.Int64()), + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock0LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("2000bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), bankAccountAtBlock0, }) - block1BankBalance = big.NewInt(testhelpers.TestBankFunds.Int64() - testhelpers.BalanceChange10000 - testhelpers.GasFees) + block1BankBalance = big.NewInt(test_helpers.TestBankFunds.Int64() - test_helpers.BalanceChange10000 - test_helpers.GasFees) bankAccountAtBlock1, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 1, Balance: block1BankBalance, - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock1LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), bankAccountAtBlock1, }) - block2BankBalance = block1BankBalance.Int64() - testhelpers.BalanceChange1Ether - testhelpers.GasFees + block2BankBalance = block1BankBalance.Int64() - test_helpers.BalanceChange1Ether - test_helpers.GasFees bankAccountAtBlock2, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 2, Balance: big.NewInt(block2BankBalance), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock2LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), @@ -259,8 +260,8 @@ var ( bankAccountAtBlock3, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 3, Balance: big.NewInt(999914255999990000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock3LeafNode, _ = rlp.EncodeToBytes([]interface{}{ 
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), @@ -269,8 +270,8 @@ var ( bankAccountAtBlock4, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 6, Balance: big.NewInt(999826859999990000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock4LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), @@ -279,8 +280,8 @@ var ( bankAccountAtBlock5, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 7, Balance: big.NewInt(999805027999990000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock5LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), @@ -469,10 +470,10 @@ func init() { } func TestBuilder(t *testing.T) { - blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] block2 = blocks[1] block3 = blocks[2] @@ -482,7 +483,7 @@ func TestBuilder(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ { "testEmptyDiff", @@ -492,7 +493,7 @@ func TestBuilder(t *testing.T) { BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), Nodes: emptyDiffs, @@ -502,19 +503,19 @@ func TestBuilder(t *testing.T) { "testBlock0", //10000 transferred from testBankAddress to account1Addr statediff.Args{ - OldStateRoot: testhelpers.NullHash, + OldStateRoot: test_helpers.NullHash, NewStateRoot: block0.Root(), BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock0LeafNode, StorageNodes: emptyStorage, }, @@ -530,28 +531,28 @@ func TestBuilder(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock1LeafNode, 
StorageNodes: emptyStorage, }, @@ -569,46 +570,46 @@ func TestBuilder(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock2LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot0StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, @@ -616,16 +617,16 @@ func TestBuilder(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock2LeafNode, StorageNodes: emptyStorage, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, @@ -640,26 +641,26 @@ func TestBuilder(t *testing.T) { BlockNumber: block3.Number(), BlockHash: block3.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block3.Number(), BlockHash: block3.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock3LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock3LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot3StorageKey.Bytes(), NodeValue: slot3StorageLeafNode, }, @@ -667,8 +668,8 @@ func TestBuilder(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock3LeafNode, StorageNodes: emptyStorage, }, @@ -700,10 +701,10 @@ func TestBuilder(t *testing.T) { } func TestBuilderWithIntermediateNodes(t *testing.T) { - blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = 
test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] block2 = blocks[1] block3 = blocks[2] @@ -717,7 +718,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ { "testEmptyDiff", @@ -727,7 +728,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), Nodes: emptyDiffs, @@ -737,19 +738,19 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { "testBlock0", //10000 transferred from testBankAddress to account1Addr statediff.Args{ - OldStateRoot: testhelpers.NullHash, + OldStateRoot: test_helpers.NullHash, NewStateRoot: block0.Root(), BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock0LeafNode, StorageNodes: emptyStorage, }, @@ -765,34 +766,34 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block1BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock1LeafNode, StorageNodes: emptyStorage, }, @@ -810,57 +811,57 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block2BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: 
types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock2LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block2StorageBranchRootNode, }, { Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot0StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, @@ -868,16 +869,16 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock2LeafNode, StorageNodes: emptyStorage, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, @@ -892,37 +893,37 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { BlockNumber: block3.Number(), BlockHash: block3.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block3.Number(), BlockHash: block3.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block3BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock3LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock3LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block3StorageBranchRootNode, }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot3StorageKey.Bytes(), NodeValue: slot3StorageLeafNode, }, @@ -930,8 +931,8 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock3LeafNode, StorageNodes: emptyStorage, }, @@ -977,22 +978,22 @@ func TestBuilderWithIntermediateNodes(t *testing.T) { } func TestBuilderWithWatchedAddressList(t *testing.T) { - blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] block2 = blocks[1] block3 = blocks[2] params := statediff.Params{ - WatchedAddresses: []common.Address{testhelpers.Account1Addr, testhelpers.ContractAddr}, + WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.ContractAddr}, } builder = statediff.NewBuilder(chain.StateCache()) var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected 
*types2.StateObject }{ { "testEmptyDiff", @@ -1002,7 +1003,7 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), Nodes: emptyDiffs, @@ -1012,12 +1013,12 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { "testBlock0", //10000 transferred from testBankAddress to account1Addr statediff.Args{ - OldStateRoot: testhelpers.NullHash, + OldStateRoot: test_helpers.NullHash, NewStateRoot: block0.Root(), BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), Nodes: emptyDiffs, @@ -1032,14 +1033,14 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock1LeafNode, StorageNodes: emptyStorage, }, @@ -1056,25 +1057,25 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock2LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot0StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, @@ -1082,16 +1083,16 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock2LeafNode, StorageNodes: emptyStorage, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, @@ -1106,19 +1107,19 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { BlockNumber: block3.Number(), BlockHash: block3.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block3.Number(), BlockHash: block3.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock3LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot3StorageKey.Bytes(), NodeValue: slot3StorageLeafNode, }, @@ -1152,15 +1153,15 @@ func TestBuilderWithWatchedAddressList(t *testing.T) { } func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { - blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, 
testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] block2 = blocks[1] block3 = blocks[2] params := statediff.Params{ - WatchedAddresses: []common.Address{testhelpers.Account1Addr, testhelpers.ContractAddr}, + WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.ContractAddr}, WatchedStorageSlots: []common.Hash{slot1StorageKey}, } builder = statediff.NewBuilder(chain.StateCache()) @@ -1168,7 +1169,7 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ { "testEmptyDiff", @@ -1178,7 +1179,7 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), Nodes: emptyDiffs, @@ -1188,12 +1189,12 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { "testBlock0", //10000 transferred from testBankAddress to account1Addr statediff.Args{ - OldStateRoot: testhelpers.NullHash, + OldStateRoot: test_helpers.NullHash, NewStateRoot: block0.Root(), BlockNumber: block0.Number(), BlockHash: block0.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block0.Number(), BlockHash: block0.Hash(), Nodes: emptyDiffs, @@ -1208,14 +1209,14 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock1LeafNode, StorageNodes: emptyStorage, }, @@ -1232,19 +1233,19 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock2LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, @@ -1252,16 +1253,16 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock2LeafNode, StorageNodes: emptyStorage, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, @@ -1276,13 +1277,13 @@ func 
TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { BlockNumber: block3.Number(), BlockHash: block3.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block3.Number(), BlockHash: block3.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock3LeafNode, StorageNodes: emptyStorage, @@ -1315,8 +1316,8 @@ func TestBuilderWithWatchedAddressAndStorageKeyList(t *testing.T) { } func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { - blocks, chain := testhelpers.MakeChain(6, testhelpers.Genesis, testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(6, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() block3 = blocks[2] block4 = blocks[3] @@ -1331,7 +1332,7 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ // blocks 0-3 are the same as in TestBuilderWithIntermediateNodes { @@ -1342,49 +1343,49 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { BlockNumber: block4.Number(), BlockHash: block4.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block4.Number(), BlockHash: block4.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block4BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock4LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock4LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block4StorageBranchRootNode, }, { Path: []byte{'\x04'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot2StorageKey.Bytes(), NodeValue: slot2StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot1StorageKey.Bytes(), NodeValue: []byte{}, }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot3StorageKey.Bytes(), NodeValue: []byte{}, }, @@ -1392,8 +1393,8 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock4LeafNode, StorageNodes: emptyStorage, }, @@ -1408,44 +1409,44 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { BlockNumber: block5.Number(), BlockHash: block5.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block5.Number(), BlockHash: block5.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block5BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: 
testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock5LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock5LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, NodeValue: slot0StorageLeafRootNode, LeafKey: slot0StorageKey.Bytes(), }, { Path: []byte{'\x02'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot0StorageKey.Bytes(), NodeValue: []byte{}, }, { Path: []byte{'\x04'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot2StorageKey.Bytes(), NodeValue: []byte{}, }, @@ -1453,8 +1454,8 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock5LeafNode, StorageNodes: emptyStorage, }, @@ -1469,34 +1470,34 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { BlockNumber: block6.Number(), BlockHash: block6.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block6.Number(), BlockHash: block6.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block6BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: contractLeafKey, NodeValue: []byte{}, StorageNodes: emptyStorage, }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock6LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock6LeafNode, StorageNodes: emptyStorage, }, @@ -1528,8 +1529,8 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) { } func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.T) { - blocks, chain := testhelpers.MakeChain(6, testhelpers.Genesis, testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(6, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() block3 = blocks[2] block4 = blocks[3] @@ -1544,7 +1545,7 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing. var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ // blocks 0-3 are the same as in TestBuilderWithIntermediateNodes { @@ -1555,38 +1556,38 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing. 
BlockNumber: block4.Number(), BlockHash: block4.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block4.Number(), BlockHash: block4.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock4LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock4LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x04'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot2StorageKey.Bytes(), NodeValue: slot2StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot1StorageKey.Bytes(), NodeValue: []byte{}, }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot3StorageKey.Bytes(), NodeValue: []byte{}, }, @@ -1594,8 +1595,8 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing. }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock4LeafNode, StorageNodes: emptyStorage, }, @@ -1610,38 +1611,38 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing. BlockNumber: block5.Number(), BlockHash: block5.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block5.Number(), BlockHash: block5.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock5LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock5LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot0StorageLeafRootNode, }, { Path: []byte{'\x02'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot0StorageKey.Bytes(), NodeValue: []byte{}, }, { Path: []byte{'\x04'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: slot2StorageKey.Bytes(), NodeValue: []byte{}, }, @@ -1649,8 +1650,8 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing. }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock5LeafNode, StorageNodes: emptyStorage, }, @@ -1665,27 +1666,27 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing. 
BlockNumber: block6.Number(), BlockHash: block6.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block6.Number(), BlockHash: block6.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x06'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: contractLeafKey, NodeValue: []byte{}, }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock6LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock6LeafNode, StorageNodes: emptyStorage, }, @@ -1740,8 +1741,8 @@ var ( bankAccountAtBlock01, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 1, Balance: big.NewInt(3999629697375000000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock01LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), @@ -1750,8 +1751,8 @@ var ( bankAccountAtBlock02, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 2, Balance: big.NewInt(5999607323457344852), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) bankAccountAtBlock02LeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("2000bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"), @@ -1800,10 +1801,10 @@ var ( ) func TestBuilderWithMovedAccount(t *testing.T) { - blocks, chain := testhelpers.MakeChain(2, testhelpers.Genesis, testhelpers.TestSelfDestructChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(2, test_helpers.Genesis, test_helpers.TestSelfDestructChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] block2 = blocks[1] params := statediff.Params{ @@ -1815,7 +1816,7 @@ func TestBuilderWithMovedAccount(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ { "testBlock1", @@ -1825,53 +1826,53 @@ func TestBuilderWithMovedAccount(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block01BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock01LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x01'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock01LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block01StorageBranchRootNode, }, 
{ Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot00StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, }, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, @@ -1884,27 +1885,27 @@ func TestBuilderWithMovedAccount(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock02LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x01'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: contractLeafKey, NodeValue: []byte{}, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Removed, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Removed, + LeafKey: test_helpers.BankLeafKey, NodeValue: []byte{}, }, }, @@ -1936,10 +1937,10 @@ func TestBuilderWithMovedAccount(t *testing.T) { } func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) { - blocks, chain := testhelpers.MakeChain(2, testhelpers.Genesis, testhelpers.TestSelfDestructChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(2, test_helpers.Genesis, test_helpers.TestSelfDestructChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] block2 = blocks[1] params := statediff.Params{ @@ -1951,7 +1952,7 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *types2.StateObject }{ { "testBlock1", @@ -1961,42 +1962,42 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock01LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x01'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock01LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot00StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, }, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, 
}, }, @@ -2009,27 +2010,27 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock02LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x01'}, - NodeType: sdtypes.Removed, + NodeType: types2.Removed, LeafKey: contractLeafKey, NodeValue: []byte{}, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Removed, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Removed, + LeafKey: test_helpers.BankLeafKey, NodeValue: []byte{}, }, }, @@ -2060,8 +2061,8 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) { } func TestBuildStateTrie(t *testing.T) { - blocks, chain := testhelpers.MakeChain(3, testhelpers.Genesis, testhelpers.TestChainGen) - contractLeafKey = testhelpers.AddressToLeafKey(testhelpers.ContractAddr) + blocks, chain := test_helpers.MakeChain(3, test_helpers.Genesis, test_helpers.TestChainGen) + contractLeafKey = test_helpers.AddressToLeafKey(test_helpers.ContractAddr) defer chain.Stop() block1 = blocks[0] block2 = blocks[1] @@ -2071,39 +2072,39 @@ func TestBuildStateTrie(t *testing.T) { var tests = []struct { name string block *types.Block - expected *statediff.StateObject + expected *types2.StateObject }{ { "testBlock1", block1, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block1BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock1LeafNode, StorageNodes: emptyStorage, }, @@ -2113,57 +2114,57 @@ func TestBuildStateTrie(t *testing.T) { { "testBlock2", block2, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block2BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, 
LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock2LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block2StorageBranchRootNode, }, { Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot0StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, @@ -2171,16 +2172,16 @@ func TestBuildStateTrie(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock2LeafNode, StorageNodes: emptyStorage, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, @@ -2188,63 +2189,63 @@ func TestBuildStateTrie(t *testing.T) { { "testBlock3", block3, - &statediff.StateObject{ + &types2.StateObject{ BlockNumber: block3.Number(), BlockHash: block3.Hash(), - Nodes: []sdtypes.StateNode{ + Nodes: []types2.StateNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block3BranchRootNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountAtBlock3LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x05'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: minerLeafKey, NodeValue: minerAccountAtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x0e'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1AtBlock2LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x06'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: contractLeafKey, NodeValue: contractAccountAtBlock3LeafNode, - StorageNodes: []sdtypes.StorageNode{ + StorageNodes: []types2.StorageNode{ { Path: []byte{}, - NodeType: sdtypes.Branch, + NodeType: types2.Branch, NodeValue: block3StorageBranchRootNode, }, { Path: []byte{'\x02'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot0StorageKey.Bytes(), NodeValue: slot0StorageLeafNode, }, { Path: []byte{'\x0b'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot1StorageKey.Bytes(), NodeValue: slot1StorageLeafNode, }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, + NodeType: types2.Leaf, LeafKey: slot3StorageKey.Bytes(), NodeValue: slot3StorageLeafNode, }, @@ -2252,16 +2253,16 @@ func TestBuildStateTrie(t *testing.T) { }, { Path: []byte{'\x0c'}, - NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account2LeafKey, + NodeType: types2.Leaf, + LeafKey: test_helpers.Account2LeafKey, NodeValue: account2AtBlock3LeafNode, StorageNodes: emptyStorage, }, }, - CodeAndCodeHashes: []sdtypes.CodeAndCodeHash{ + CodeAndCodeHashes: []types2.CodeAndCodeHash{ { - Hash: testhelpers.CodeHash, - Code: testhelpers.ByteCodeAfterDeployment, + Hash: test_helpers.CodeHash, + Code: test_helpers.ByteCodeAfterDeployment, }, }, }, diff --git a/statediff/config.go b/statediff/config.go new file mode 100644 index 000000000..dc9da579b --- /dev/null +++ 
b/statediff/config.go @@ -0,0 +1,58 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package statediff + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" +) + +// Config contains instantiation parameters for the state diffing service +type Config struct { + IndexerConfig interfaces.Config + // A unique ID used for this service + ID string + // Name for the client this service is running + ClientName string + // Whether to enable writing state diffs directly to track blockchain head + EnableWriteLoop bool + // Size of the worker pool + NumWorkers uint + // Context + Context context.Context +} + +// Params contains config parameters for the state diff builder +type Params struct { + IntermediateStateNodes bool + IntermediateStorageNodes bool + IncludeBlock bool + IncludeReceipts bool + IncludeTD bool + IncludeCode bool + WatchedAddresses []common.Address + WatchedStorageSlots []common.Hash +} + +// Args bundles the arguments for the state diff builder +type Args struct { + OldStateRoot, NewStateRoot, BlockHash common.Hash + BlockNumber *big.Int +} diff --git a/statediff/indexer/constructor.go b/statediff/indexer/constructor.go new file mode 100644 index 000000000..7a44638d0 --- /dev/null +++ b/statediff/indexer/constructor.go @@ -0,0 +1,66 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package indexer + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/statediff/indexer/database/dump" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/ethereum/go-ethereum/statediff/indexer/shared" +) + +// NewStateDiffIndexer creates and returns an implementation of the StateDiffIndexer interface +func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, nodeInfo node.Info, config interfaces.Config) (interfaces.StateDiffIndexer, error) { + switch config.Type() { + case shared.POSTGRES: + pgc, ok := config.(postgres.Config) + if !ok { + return nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{}) + } + var err error + var driver sql.Driver + switch pgc.Driver { + case postgres.PGX: + driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo) + if err != nil { + return nil, err + } + case postgres.SQLX: + driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unrecognized Postgres driver type: %s", pgc.Driver) + } + return sql.NewStateDiffIndexer(ctx, chainConfig, postgres.NewPostgresDB(driver)) + case shared.DUMP: + dumpc, ok := config.(dump.Config) + if !ok { + return nil, fmt.Errorf("dump config is not the correct type: got %T, expected %T", config, dump.Config{}) + } + return dump.NewStateDiffIndexer(chainConfig, dumpc), nil + default: + return nil, fmt.Errorf("unrecognized database type: %s", config.Type()) + } +} diff --git a/statediff/indexer/batch_tx.go b/statediff/indexer/database/dump/batch_tx.go similarity index 67% rename from statediff/indexer/batch_tx.go rename to statediff/indexer/database/dump/batch_tx.go index 170d6ab51..a0021baf7 100644 --- a/statediff/indexer/batch_tx.go +++ b/statediff/indexer/database/dump/batch_tx.go @@ -14,36 +14,37 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see .
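For orientation, a minimal sketch of how this new constructor might be wired up outside of cmd/geth, using the dump backend. The os.Stdout sink, the empty node.Info, and the mainnet chain config are illustrative placeholders and are not values taken from this change:

package main

import (
	"context"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/statediff/indexer"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/dump"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

func main() {
	// Route indexed statediff output to stdout via the dump backend (illustrative sink).
	cfg := dump.Config{Dump: os.Stdout}
	// node.Info{} is a placeholder; a real caller would populate node metadata here.
	ind, err := indexer.NewStateDiffIndexer(context.Background(), params.MainnetChainConfig, node.Info{}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = ind // in real usage, hand the indexer to the statediff service
}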
-package indexer +package dump import ( + "fmt" + "io" + + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + + "github.com/ethereum/go-ethereum/statediff/indexer/models" blockstore "github.com/ipfs/go-ipfs-blockstore" dshelp "github.com/ipfs/go-ipfs-ds-help" node "github.com/ipfs/go-ipld-format" - "github.com/jmoiron/sqlx" - "github.com/lib/pq" - - "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/models" ) -const ipldBatchInsertPgStr string = `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING` - -// BlockTx wraps a Postgres tx with the state necessary for building the Postgres tx concurrently during trie difference iteration -type BlockTx struct { - dbtx *sqlx.Tx - BlockNumber uint64 - headerID int64 - Close func(blockTx *BlockTx, err error) error - +// BatchTx wraps a void with the state necessary for building the tx concurrently during trie difference iteration +type BatchTx struct { + dump io.Writer quit chan struct{} iplds chan models.IPLDModel ipldCache models.IPLDBatch + + close func(blockTx *BatchTx, err error) error } -func (tx *BlockTx) flush() error { - _, err := tx.dbtx.Exec(ipldBatchInsertPgStr, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) - if err != nil { +// Submit satisfies indexer.AtomicTx +func (tx *BatchTx) Submit(err error) error { + return tx.close(tx, err) +} + +func (tx *BatchTx) flush() error { + if _, err := fmt.Fprintf(tx.dump, "%+v", tx.ipldCache); err != nil { return err } tx.ipldCache = models.IPLDBatch{} @@ -51,33 +52,34 @@ func (tx *BlockTx) flush() error { } // run in background goroutine to synchronize concurrent appends to the ipldCache -func (tx *BlockTx) cache() { +func (tx *BatchTx) cache() { for { select { case i := <-tx.iplds: tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key) tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data) case <-tx.quit: + tx.ipldCache = models.IPLDBatch{} return } } } -func (tx *BlockTx) cacheDirect(key string, value []byte) { +func (tx *BatchTx) cacheDirect(key string, value []byte) { tx.iplds <- models.IPLDModel{ Key: key, Data: value, } } -func (tx *BlockTx) cacheIPLD(i node.Node) { +func (tx *BatchTx) cacheIPLD(i node.Node) { tx.iplds <- models.IPLDModel{ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(), Data: i.RawData(), } } -func (tx *BlockTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) { +func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) { c, err := ipld.RawdataToCid(codec, raw, mh) if err != nil { return "", "", err diff --git a/statediff/indexer/database/dump/config.go b/statediff/indexer/database/dump/config.go new file mode 100644 index 000000000..fb2e6a58c --- /dev/null +++ b/statediff/indexer/database/dump/config.go @@ -0,0 +1,31 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+ +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package dump + +import ( + "io" + + "github.com/ethereum/go-ethereum/statediff/indexer/shared" +) + +type Config struct { + Dump io.WriteCloser +} + +func (c Config) Type() shared.DBType { + return shared.DUMP +} diff --git a/statediff/indexer/database/dump/indexer.go b/statediff/indexer/database/dump/indexer.go new file mode 100644 index 000000000..f815305b1 --- /dev/null +++ b/statediff/indexer/database/dump/indexer.go @@ -0,0 +1,490 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package dump + +import ( + "fmt" + "io" + "math/big" + "time" + + ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + + "github.com/ipfs/go-cid" + node "github.com/ipfs/go-ipld-format" + "github.com/multiformats/go-multihash" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/ethereum/go-ethereum/statediff/indexer/shared" + sdtypes "github.com/ethereum/go-ethereum/statediff/types" +) + +var _ interfaces.StateDiffIndexer = &StateDiffIndexer{} + +var ( + indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry) +) + +// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of a void +type StateDiffIndexer struct { + dump io.WriteCloser + chainConfig *params.ChainConfig +} + +// NewStateDiffIndexer creates a void implementation of interfaces.StateDiffIndexer +func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config) *StateDiffIndexer { + return &StateDiffIndexer{ + dump: config.Dump, + chainConfig: chainConfig, + } +} + +// ReportDBMetrics has nothing to report for dump +func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {} + +// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts) +// Returns an initiated DB transaction which must be Closed via defer to commit or rollback +func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) { + start, t := time.Now(), time.Now() + blockHash := block.Hash() + blockHashStr := blockHash.String() + height := block.NumberU64() + traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr) + transactions := block.Transactions() + // Derive any missing fields + if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); 
err != nil { + return nil, err + } + + // Generate the block iplds + headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts) + if err != nil { + return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) + } + + if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) { + return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) + } + if len(txTrieNodes) != len(rctTrieNodes) { + return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) + } + + // Calculate reward + var reward *big.Int + // in PoA networks block reward is 0 + if sdi.chainConfig.Clique != nil { + reward = big.NewInt(0) + } else { + reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts) + } + t = time.Now() + + blockTx := &BatchTx{ + dump: sdi.dump, + iplds: make(chan models.IPLDModel), + quit: make(chan struct{}), + ipldCache: models.IPLDBatch{}, + close: func(self *BatchTx, err error) error { + close(self.quit) + close(self.iplds) + tDiff := time.Since(t) + indexerMetrics.tStateStoreCodeProcessing.Update(tDiff) + traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String()) + t = time.Now() + if err := self.flush(); err != nil { + traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String()) + log.Debug(traceMsg) + return err + } + tDiff = time.Since(t) + indexerMetrics.tPostgresCommit.Update(tDiff) + traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String()) + traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String()) + log.Debug(traceMsg) + return err + }, + } + go blockTx.cache() + + tDiff := time.Since(t) + indexerMetrics.tFreePostgres.Update(tDiff) + + traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String()) + t = time.Now() + + // Publish and index header, collect headerID + var headerID int64 + headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty) + if err != nil { + return nil, err + } + tDiff = time.Since(t) + indexerMetrics.tHeaderProcessing.Update(tDiff) + traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String()) + t = time.Now() + // Publish and index uncles + err = sdi.processUncles(blockTx, headerID, height, uncleNodes) + if err != nil { + return nil, err + } + tDiff = time.Since(t) + indexerMetrics.tUncleProcessing.Update(tDiff) + traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String()) + t = time.Now() + // Publish and index receipts and txs + err = sdi.processReceiptsAndTxs(blockTx, processArgs{ + headerID: headerID, + blockNumber: block.Number(), + receipts: receipts, + txs: transactions, + rctNodes: rctNodes, + rctTrieNodes: rctTrieNodes, + txNodes: txNodes, + txTrieNodes: txTrieNodes, + logTrieNodes: logTrieNodes, + logLeafNodeCIDs: logLeafNodeCIDs, + rctLeafNodeCIDs: rctLeafNodeCIDs, + }) + if err != nil { + return nil, err + } + tDiff = time.Since(t) + indexerMetrics.tTxAndRecProcessing.Update(tDiff) + traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String()) + t = time.Now() + + return blockTx, err +} + +// processHeader publishes and indexes a header IPLD in 
Postgres +// it returns the headerID +func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) { + tx.cacheIPLD(headerNode) + + var baseFee *int64 + if header.BaseFee != nil { + baseFee = new(int64) + *baseFee = header.BaseFee.Int64() + } + + mod := models.HeaderModel{ + CID: headerNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), + ParentHash: header.ParentHash.String(), + BlockNumber: header.Number.String(), + BlockHash: header.Hash().String(), + TotalDifficulty: td.String(), + Reward: reward.String(), + Bloom: header.Bloom.Bytes(), + StateRoot: header.Root.String(), + RctRoot: header.ReceiptHash.String(), + TxRoot: header.TxHash.String(), + UncleRoot: header.UncleHash.String(), + Timestamp: header.Time, + BaseFee: baseFee, + } + _, err := fmt.Fprintf(sdi.dump, "%+v", mod) + return 0, err +} + +// processUncles publishes and indexes uncle IPLDs in Postgres +func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID int64, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error { + // publish and index uncles + for _, uncleNode := range uncleNodes { + tx.cacheIPLD(uncleNode) + var uncleReward *big.Int + // in PoA networks uncle reward is 0 + if sdi.chainConfig.Clique != nil { + uncleReward = big.NewInt(0) + } else { + uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) + } + uncle := models.UncleModel{ + CID: uncleNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), + ParentHash: uncleNode.ParentHash.String(), + BlockHash: uncleNode.Hash().String(), + Reward: uncleReward.String(), + } + if _, err := fmt.Fprintf(sdi.dump, "%+v", uncle); err != nil { + return err + } + } + return nil +} + +// processArgs bundles arguments to processReceiptsAndTxs +type processArgs struct { + headerID int64 + blockNumber *big.Int + receipts types.Receipts + txs types.Transactions + rctNodes []*ipld2.EthReceipt + rctTrieNodes []*ipld2.EthRctTrie + txNodes []*ipld2.EthTx + txTrieNodes []*ipld2.EthTxTrie + logTrieNodes [][]*ipld2.EthLogTrie + logLeafNodeCIDs [][]cid.Cid + rctLeafNodeCIDs []cid.Cid +} + +// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres +func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error { + // Process receipts and txs + signer := types.MakeSigner(sdi.chainConfig, args.blockNumber) + for i, receipt := range args.receipts { + for _, logTrieNode := range args.logTrieNodes[i] { + tx.cacheIPLD(logTrieNode) + } + txNode := args.txNodes[i] + tx.cacheIPLD(txNode) + + // Indexing + // extract topic and contract data from the receipt for indexing + mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses + logDataSet := make([]*models.LogsModel, len(receipt.Logs)) + for idx, l := range receipt.Logs { + topicSet := make([]string, 4) + for ti, topic := range l.Topics { + topicSet[ti] = topic.Hex() + } + + if !args.logLeafNodeCIDs[i][idx].Defined() { + return fmt.Errorf("invalid log cid") + } + + mappedContracts[l.Address.String()] = true + logDataSet[idx] = &models.LogsModel{ + Address: l.Address.String(), + Index: int64(l.Index), + Data: l.Data, + LeafCID: args.logLeafNodeCIDs[i][idx].String(), + LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]), + Topic0: topicSet[0], + Topic1: topicSet[1], + Topic2: topicSet[2], + Topic3: topicSet[3], + } + } + // these are the contracts seen in the logs + logContracts := make([]string, 0, 
len(mappedContracts)) + for addr := range mappedContracts { + logContracts = append(logContracts, addr) + } + // this is the contract address if this receipt is for a contract creation tx + contract := shared.HandleZeroAddr(receipt.ContractAddress) + var contractHash string + if contract != "" { + contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String() + } + // index tx first so that the receipt can reference it by FK + trx := args.txs[i] + // derive sender for the tx that corresponds with this receipt + from, err := types.Sender(signer, trx) + if err != nil { + return fmt.Errorf("error deriving tx sender: %v", err) + } + txModel := models.TxModel{ + Dst: shared.HandleZeroAddrPointer(trx.To()), + Src: shared.HandleZeroAddr(from), + TxHash: trx.Hash().String(), + Index: int64(i), + Data: trx.Data(), + CID: txNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(txNode.Cid()), + } + txType := trx.Type() + if txType != types.LegacyTxType { + txModel.Type = &txType + } + if _, err := fmt.Fprintf(sdi.dump, "%+v", txModel); err != nil { + return err + } + + // index access list if this is one + for j, accessListElement := range trx.AccessList() { + storageKeys := make([]string, len(accessListElement.StorageKeys)) + for k, storageKey := range accessListElement.StorageKeys { + storageKeys[k] = storageKey.Hex() + } + accessListElementModel := models.AccessListElementModel{ + Index: int64(j), + Address: accessListElement.Address.Hex(), + StorageKeys: storageKeys, + } + if _, err := fmt.Fprintf(sdi.dump, "%+v", accessListElementModel); err != nil { + return err + } + } + + // index the receipt + if !args.rctLeafNodeCIDs[i].Defined() { + return fmt.Errorf("invalid receipt leaf node cid") + } + + rctModel := &models.ReceiptModel{ + Contract: contract, + ContractHash: contractHash, + LeafCID: args.rctLeafNodeCIDs[i].String(), + LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]), + LogRoot: args.rctNodes[i].LogRoot.String(), + } + if len(receipt.PostState) == 0 { + rctModel.PostStatus = receipt.Status + } else { + rctModel.PostState = common.Bytes2Hex(receipt.PostState) + } + + if _, err := fmt.Fprintf(sdi.dump, "%+v", rctModel); err != nil { + return err + } + + if _, err := fmt.Fprintf(sdi.dump, "%+v", logDataSet); err != nil { + return err + } + } + + // publish trie nodes, these aren't indexed directly + for i, n := range args.txTrieNodes { + tx.cacheIPLD(n) + tx.cacheIPLD(args.rctTrieNodes[i]) + } + + return nil +} + +// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql +func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode) error { + tx, ok := batch.(*BatchTx) + if !ok { + return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch) + } + // publish the state node + if stateNode.NodeType == sdtypes.Removed { + // short circuit if it is a Removed node + // this assumes the db has been initialized and a public.blocks entry for the Removed node is present + stateModel := models.StateNodeModel{ + Path: stateNode.Path, + StateKey: common.BytesToHash(stateNode.LeafKey).String(), + CID: shared.RemovedNodeStateCID, + MhKey: shared.RemovedNodeMhKey, + NodeType: stateNode.NodeType.Int(), + } + _, err := fmt.Fprintf(sdi.dump, "%+v", stateModel) + return err + } + stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue) + if err != nil { + return fmt.Errorf("error generating and cacheing 
state node IPLD: %v", err) + } + stateModel := models.StateNodeModel{ + Path: stateNode.Path, + StateKey: common.BytesToHash(stateNode.LeafKey).String(), + CID: stateCIDStr, + MhKey: stateMhKey, + NodeType: stateNode.NodeType.Int(), + } + // index the state node, collect the stateID to reference by FK + if _, err := fmt.Fprintf(sdi.dump, "%+v", stateModel); err != nil { + return err + } + // if we have a leaf, decode and index the account data + if stateNode.NodeType == sdtypes.Leaf { + var i []interface{} + if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil { + return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error()) + } + if len(i) != 2 { + return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements") + } + var account types.StateAccount + if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil { + return fmt.Errorf("error decoding state account rlp: %s", err.Error()) + } + accountModel := models.StateAccountModel{ + Balance: account.Balance.String(), + Nonce: account.Nonce, + CodeHash: account.CodeHash, + StorageRoot: account.Root.String(), + } + if _, err := fmt.Fprintf(sdi.dump, "%+v", accountModel); err != nil { + return err + } + } + // if there are any storage nodes associated with this node, publish and index them + for _, storageNode := range stateNode.StorageNodes { + if storageNode.NodeType == sdtypes.Removed { + // short circuit if it is a Removed node + // this assumes the db has been initialized and a public.blocks entry for the Removed node is present + storageModel := models.StorageNodeModel{ + Path: storageNode.Path, + StorageKey: common.BytesToHash(storageNode.LeafKey).String(), + CID: shared.RemovedNodeStorageCID, + MhKey: shared.RemovedNodeMhKey, + NodeType: storageNode.NodeType.Int(), + } + if _, err := fmt.Fprintf(sdi.dump, "%+v", storageModel); err != nil { + return err + } + continue + } + storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue) + if err != nil { + return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err) + } + storageModel := models.StorageNodeModel{ + Path: storageNode.Path, + StorageKey: common.BytesToHash(storageNode.LeafKey).String(), + CID: storageCIDStr, + MhKey: storageMhKey, + NodeType: storageNode.NodeType.Int(), + } + if _, err := fmt.Fprintf(sdi.dump, "%+v", storageModel); err != nil { + return err + } + } + + return nil +} + +// PushCodeAndCodeHash publishes code and codehash pairs to the ipld sql +func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error { + tx, ok := batch.(*BatchTx) + if !ok { + return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch) + } + // codec doesn't matter since db key is multihash-based + mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash) + if err != nil { + return fmt.Errorf("error deriving multihash key from codehash: %v", err) + } + tx.cacheDirect(mhKey, codeAndCodeHash.Code) + return nil +} + +// Close satisfied io.Closer +func (sdi *StateDiffIndexer) Close() error { + return sdi.dump.Close() +} diff --git a/statediff/indexer/database/dump/metrics.go b/statediff/indexer/database/dump/metrics.go new file mode 100644 index 000000000..700e42dc0 --- /dev/null +++ b/statediff/indexer/database/dump/metrics.go @@ -0,0 +1,94 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it 
under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package dump + +import ( + "strings" + + "github.com/ethereum/go-ethereum/metrics" +) + +const ( + namespace = "statediff" +) + +// Build a fully qualified metric name +func metricName(subsystem, name string) string { + if name == "" { + return "" + } + parts := []string{namespace, name} + if subsystem != "" { + parts = []string{namespace, subsystem, name} + } + // Prometheus uses _ but geth metrics uses / and replaces + return strings.Join(parts, "/") +} + +type indexerMetricsHandles struct { + // The total number of processed blocks + blocks metrics.Counter + // The total number of processed transactions + transactions metrics.Counter + // The total number of processed receipts + receipts metrics.Counter + // The total number of processed logs + logs metrics.Counter + // The total number of access list entries processed + accessListEntries metrics.Counter + // Time spent waiting for free postgres tx + tFreePostgres metrics.Timer + // Postgres transaction commit duration + tPostgresCommit metrics.Timer + // Header processing time + tHeaderProcessing metrics.Timer + // Uncle processing time + tUncleProcessing metrics.Timer + // Tx and receipt processing time + tTxAndRecProcessing metrics.Timer + // State, storage, and code combined processing time + tStateStoreCodeProcessing metrics.Timer +} + +func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles { + ctx := indexerMetricsHandles{ + blocks: metrics.NewCounter(), + transactions: metrics.NewCounter(), + receipts: metrics.NewCounter(), + logs: metrics.NewCounter(), + accessListEntries: metrics.NewCounter(), + tFreePostgres: metrics.NewTimer(), + tPostgresCommit: metrics.NewTimer(), + tHeaderProcessing: metrics.NewTimer(), + tUncleProcessing: metrics.NewTimer(), + tTxAndRecProcessing: metrics.NewTimer(), + tStateStoreCodeProcessing: metrics.NewTimer(), + } + subsys := "indexer" + reg.Register(metricName(subsys, "blocks"), ctx.blocks) + reg.Register(metricName(subsys, "transactions"), ctx.transactions) + reg.Register(metricName(subsys, "receipts"), ctx.receipts) + reg.Register(metricName(subsys, "logs"), ctx.logs) + reg.Register(metricName(subsys, "access_list_entries"), ctx.accessListEntries) + reg.Register(metricName(subsys, "t_free_postgres"), ctx.tFreePostgres) + reg.Register(metricName(subsys, "t_postgres_commit"), ctx.tPostgresCommit) + reg.Register(metricName(subsys, "t_header_processing"), ctx.tHeaderProcessing) + reg.Register(metricName(subsys, "t_uncle_processing"), ctx.tUncleProcessing) + reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.tTxAndRecProcessing) + reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.tStateStoreCodeProcessing) + return ctx +} diff --git a/statediff/indexer/database/sql/batch_tx.go b/statediff/indexer/database/sql/batch_tx.go new file mode 100644 index 000000000..2041af1ed --- /dev/null +++ b/statediff/indexer/database/sql/batch_tx.go @@ -0,0 +1,106 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + 
+// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package sql + +import ( + "context" + + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + + blockstore "github.com/ipfs/go-ipfs-blockstore" + dshelp "github.com/ipfs/go-ipfs-ds-help" + node "github.com/ipfs/go-ipld-format" + "github.com/lib/pq" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/statediff/indexer/models" +) + +// BatchTx wraps a sql tx with the state necessary for building the tx concurrently during trie difference iteration +type BatchTx struct { + ctx context.Context + dbtx Tx + headerID int64 + stm string + quit chan struct{} + iplds chan models.IPLDModel + ipldCache models.IPLDBatch + + close func(blockTx *BatchTx, err error) error +} + +// Submit satisfies indexer.AtomicTx +func (tx *BatchTx) Submit(err error) error { + return tx.close(tx, err) +} + +func (tx *BatchTx) flush() error { + _, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) + if err != nil { + return err + } + tx.ipldCache = models.IPLDBatch{} + return nil +} + +// run in background goroutine to synchronize concurrent appends to the ipldCache +func (tx *BatchTx) cache() { + for { + select { + case i := <-tx.iplds: + tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key) + tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data) + case <-tx.quit: + tx.ipldCache = models.IPLDBatch{} + return + } + } +} + +func (tx *BatchTx) cacheDirect(key string, value []byte) { + tx.iplds <- models.IPLDModel{ + Key: key, + Data: value, + } +} + +func (tx *BatchTx) cacheIPLD(i node.Node) { + tx.iplds <- models.IPLDModel{ + Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(), + Data: i.RawData(), + } +} + +func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) { + c, err := ipld.RawdataToCid(codec, raw, mh) + if err != nil { + return "", "", err + } + prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String() + tx.iplds <- models.IPLDModel{ + Key: prefixedKey, + Data: raw, + } + return c.String(), prefixedKey, err +} + +// rollback sql transaction and log any error +func rollback(ctx context.Context, tx Tx) { + if err := tx.Rollback(ctx); err != nil { + log.Error(err.Error()) + } +} diff --git a/statediff/indexer/database/sql/batch_writer.go b/statediff/indexer/database/sql/batch_writer.go new file mode 100644 index 000000000..05c882259 --- /dev/null +++ b/statediff/indexer/database/sql/batch_writer.go @@ -0,0 +1,216 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package sql + +/* +import ( + "fmt" + + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" + + "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/jmoiron/sqlx" +) + +*/ +/* +// PG_MAX_PARAMS is the max number of placeholders+args a statement can support +// above this limit we need to split into a separate batch +const PG_MAX_PARAMS int = 32767 + +const ( + ipldInsertPgStr string = `INSERT INTO public.blocks (key, data) VALUES (unnest($1), unnest($2)) ON CONFLICT (key) DO NOTHING` + headerCIDsPgStr string = `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) + VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8), unnest($9), unnest($10), unnest($11), unnest($12), unnest($13), unnest($14), unnest($15), unnest($16)) + ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = (excluded.parent_hash, excluded.cid, excluded.td, excluded.node_id, excluded.reward, excluded.state_root, excluded.tx_root, excluded.receipt_root, excluded.uncle_root, excluded.bloom, excluded.timestamp, excluded.mh_key, eth.header_cids.times_validated + 1, excluded.base_fee) + RETURNING id` + unclesCIDsPgStr string = `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6)) + ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = (excluded.parent_hash, excluded.cid, excluded.reward, excluded.mh_key)` + txCIDsPgStr string = `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8), unnest($9)) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = (excluded.cid, excluded.dst, excluded.src, excluded.index, excluded.mh_key, excluded.tx_data, excluded.tx_type) + RETURNING id` + accessListPgStr string = `INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES (unnest($1), unnest($2), unnest($3), unnest($4)) + ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = (excluded.address, excluded.storage_keys)` + rctCIDsPgStr string = `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8)) + ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = (excluded.leaf_cid, excluded.contract, excluded.contract_hash, excluded.leaf_mh_key, excluded.post_state, excluded.post_status, excluded.log_root) + RETURNING id` + logCIDsPgStr string = `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, 
address, index, topic0, topic1, topic2, topic3, log_data) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8), unnest($9), unnest($10)) + ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = (excluded.leaf_cid, excluded.leaf_mh_key, excluded.address, excluded.topic0, excluded.topic1, excluded.topic2, excluded.topic3, excluded.log_data)` + stateCIDsPgStr string = `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7)) + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = (excluded.state_leaf_key, excluded.cid, excluded.node_type, excluded.diff, excluded.mh_key) + RETURNING id` + stateAccountsPgStr string = `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5)) + ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = (excluded.balance, excluded.nonce, excluded.code_hash, excluded.storage_root)` + storageCIDsPgStr string = `INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7)) + ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = (excluded.storage_leaf_key, excluded.cid, excluded.node_type, excluded.diff, excluded.mh_key)` +) + +// PostgresBatchWriter is used to write statediff data to Postgres using batch inserts/upserts +type PostgresBatchWriter struct { + db *postgres.DB + + // prepared statements (prepared inside tx) + ipldsPreparedStm *sqlx.Stmt + unclesPrepared *sqlx.Stmt + txPreparedStm *sqlx.Stmt + accessListPreparedStm *sqlx.Stmt + rctPreparedStm *sqlx.Stmt + logPreparedStm *sqlx.Stmt + statePreparedStm *sqlx.Stmt + accountPreparedStm *sqlx.Stmt + storagePreparedStm *sqlx.Stmt + + // cached arguments + queuedHeaderArgs models.HeaderModel + queuedUnclesArgs models.UncleBatch + queuedTxArgs models.TxBatch + queuedAccessListArgs models.AccessListBatch + queuedRctArgs models.ReceiptBatch + queuedLogArgs models.LogBatch + queuedStateArgs models.StateBatch + queuedAccountArgs models.AccountBatch + queuedStorageArgs models.StorageBatch +} + +// NewPostgresBatchWriter creates a new pointer to a PostgresBatchWriter +func NewPostgresBatchWriter(db *postgres.DB) *PostgresBatchWriter { + return &PostgresBatchWriter{ + db: db, + } +} + +func (pbw *PostgresBatchWriter) queueHeader(header models.HeaderModel) { + pbw.queuedHeaderArgs = header +} + +func (pbw *PostgresBatchWriter) queueUncle(uncle models.UncleModel) { + pbw.queuedUnclesArgs.BlockHashes = append(pbw.queuedUnclesArgs.BlockHashes, uncle.BlockHash) + pbw.queuedUnclesArgs.ParentHashes = append(pbw.queuedUnclesArgs.ParentHashes, uncle.ParentHash) + pbw.queuedUnclesArgs.CIDs = append(pbw.queuedUnclesArgs.CIDs, uncle.CID) + pbw.queuedUnclesArgs.MhKeys = append(pbw.queuedUnclesArgs.MhKeys, uncle.MhKey) + pbw.queuedUnclesArgs.Rewards = append(pbw.queuedUnclesArgs.Rewards, uncle.Reward) +} + +func (pbw *PostgresBatchWriter) queueTransaction(tx models.TxModel) { + pbw.queuedTxArgs.Indexes = append(pbw.queuedTxArgs.Indexes, tx.Index) + pbw.queuedTxArgs.TxHashes = append(pbw.queuedTxArgs.TxHashes, tx.TxHash) + pbw.queuedTxArgs.CIDs = 
append(pbw.queuedTxArgs.CIDs, tx.CID) + pbw.queuedTxArgs.MhKeys = append(pbw.queuedTxArgs.MhKeys, tx.MhKey) + pbw.queuedTxArgs.Dsts = append(pbw.queuedTxArgs.Dsts, tx.Dst) + pbw.queuedTxArgs.Srcs = append(pbw.queuedTxArgs.Srcs, tx.Src) + pbw.queuedTxArgs.Datas = append(pbw.queuedTxArgs.Datas, tx.Data) + pbw.queuedTxArgs.Types = append(pbw.queuedTxArgs.Types, tx.Type) +} + +func (pbw *PostgresBatchWriter) queueAccessListElement(al models.AccessListElementModel) { + +} + +func (pbw *PostgresBatchWriter) queueReceipt(rct models.ReceiptModel) { + +} + +func (pbw *PostgresBatchWriter) upsertTransactionCID(tx *sqlx.Tx, transaction models.TxModel, headerID int64) (int64, error) { + var txID int64 + err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9) + RETURNING id`, + headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID) + if err != nil { + return 0, fmt.Errorf("error upserting transaction_cids entry: %v", err) + } + indexerMetrics.transactions.Inc(1) + return txID, nil +} + +func (pbw *PostgresBatchWriter) upsertAccessListElement(tx *sqlx.Tx, accessListElement models.AccessListElementModel, txID int64) error { + _, err := tx.Exec(`INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) + ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)`, + txID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys) + if err != nil { + return fmt.Errorf("error upserting access_list_element entry: %v", err) + } + indexerMetrics.accessListEntries.Inc(1) + return nil +} + +func (pbw *PostgresBatchWriter) upsertReceiptCID(tx *sqlx.Tx, rct *models.ReceiptModel, txID int64) (int64, error) { + var receiptID int64 + err := tx.QueryRowx(`INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8) + RETURNING id`, + txID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID) + if err != nil { + return 0, fmt.Errorf("error upserting receipt_cids entry: %w", err) + } + indexerMetrics.receipts.Inc(1) + return receiptID, nil +} + +func (pbw *PostgresBatchWriter) upsertLogCID(tx *sqlx.Tx, logs []*models.LogsModel, receiptID int64) error { + for _, log := range logs { + _, err := tx.Exec(`INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) + ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = ($1, $2, $4, $6, $7, $8, $9, $10)`, + log.LeafCID, log.LeafMhKey, receiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data) + if err != nil { + return fmt.Errorf("error upserting logs entry: %w", err) + } + indexerMetrics.logs.Inc(1) + } + return nil +} + +func (pbw *PostgresBatchWriter) upsertStateCID(tx *sqlx.Tx, stateNode models.StateNodeModel, headerID int64) 
(int64, error) { + var stateID int64 + var stateKey string + if stateNode.StateKey != nullHash.String() { + stateKey = stateNode.StateKey + } + err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) + RETURNING id`, + headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID) + if err != nil { + return 0, fmt.Errorf("error upserting state_cids entry: %v", err) + } + return stateID, nil +} + +func (pbw *PostgresBatchWriter) upsertStateAccount(tx *sqlx.Tx, stateAccount models.StateAccountModel, stateID int64) error { + _, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`, + stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot) + if err != nil { + return fmt.Errorf("error upserting state_accounts entry: %v", err) + } + return nil +} + +func (pbw *PostgresBatchWriter) upsertStorageCID(tx *sqlx.Tx, storageCID models.StorageNodeModel, stateID int64) error { + var storageKey string + if storageCID.StorageKey != nullHash.String() { + storageKey = storageCID.StorageKey + } + _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`, + stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) + if err != nil { + return fmt.Errorf("error upserting storage_cids entry: %v", err) + } + return nil +} +*/ diff --git a/statediff/indexer/indexer.go b/statediff/indexer/database/sql/indexer.go similarity index 83% rename from statediff/indexer/indexer.go rename to statediff/indexer/database/sql/indexer.go index f8ffee429..6c35cccac 100644 --- a/statediff/indexer/indexer.go +++ b/statediff/indexer/database/sql/indexer.go @@ -14,15 +14,19 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
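Aside: the commented-out PostgresBatchWriter block above queues parallel slices of arguments and flushes them through the unnest-based upsert statements in a single round trip. The sketch below is hypothetical and not part of the patch; the AccountBatch field names and the explicit array casts are assumptions, and it assumes a lib/pq-backed *sqlx.Tx rather than whatever driver the real batch writer ends up using.

```go
package sketch

import (
	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
)

// AccountBatch mirrors the kind of parallel slices the batch writer queues up.
type AccountBatch struct {
	StateIDs     []int64
	Balances     []string
	Nonces       []int64
	CodeHashes   [][]byte
	StorageRoots []string
}

const stateAccountsPgStr = `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root)
	VALUES (unnest($1::BIGINT[]), unnest($2::NUMERIC[]), unnest($3::BIGINT[]), unnest($4::BYTEA[]), unnest($5::VARCHAR(66)[]))
	ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) =
	(excluded.balance, excluded.nonce, excluded.code_hash, excluded.storage_root)`

// flushAccounts writes every queued state account with one batched upsert,
// trading many per-row INSERTs for a single statement and network round trip.
func flushAccounts(tx *sqlx.Tx, batch AccountBatch) error {
	_, err := tx.Exec(stateAccountsPgStr,
		pq.Array(batch.StateIDs), pq.Array(batch.Balances), pq.Array(batch.Nonces),
		pq.Array(batch.CodeHashes), pq.Array(batch.StorageRoots))
	return err
}
```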
-// Package indexer provides an interface for pushing and indexing IPLD objects into a Postgres database +// Package sql provides an interface for pushing and indexing IPLD objects into a sql database // Metrics for reporting processing and connection stats are defined in ./metrics.go -package indexer + +package sql import ( + "context" "fmt" "math/big" "time" + ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ipfs/go-cid" node "github.com/ipfs/go-ipld-format" "github.com/multiformats/go-multihash" @@ -34,47 +38,36 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/shared" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) +var _ interfaces.StateDiffIndexer = &StateDiffIndexer{} + var ( indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry) dbMetrics = RegisterDBMetrics(metrics.DefaultRegistry) ) -const ( - RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya" - RemovedNodeStateCID = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya" - RemovedNodeMhKey = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A" -) - -// Indexer interface to allow substitution of mocks for testing -type Indexer interface { - PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error) - PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error - PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error - ReportDBMetrics(delay time.Duration, quit <-chan bool) -} - -// StateDiffIndexer satisfies the Indexer interface for ethereum statediff objects +// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of an SQL sql type StateDiffIndexer struct { + ctx context.Context chainConfig *params.ChainConfig - dbWriter *PostgresCIDWriter + dbWriter *Writer } -// NewStateDiffIndexer creates a pointer to a new PayloadConverter which satisfies the PayloadConverter interface -func NewStateDiffIndexer(chainConfig *params.ChainConfig, db *postgres.DB) (*StateDiffIndexer, error) { +// NewStateDiffIndexer creates a sql implementation of interfaces.StateDiffIndexer +func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, db Database) (*StateDiffIndexer, error) { // Write the removed node to the db on init - if err := shared.PublishDirectWithDB(db, RemovedNodeMhKey, []byte{}); err != nil { + if _, err := db.Exec(ctx, db.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil { return nil, err } return &StateDiffIndexer{ + ctx: ctx, chainConfig: chainConfig, - dbWriter: NewPostgresCIDWriter(db), + dbWriter: NewWriter(db), }, nil } @@ -97,9 +90,9 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo }() } -// PushBlock pushes and indexes block data in database, except state & storage nodes (includes header, uncles, transactions & receipts) +// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts) // Returns an initiated DB transaction which must be Closed via defer to commit or rollback -func (sdi 
*StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error) { +func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) { start, t := time.Now(), time.Now() blockHash := block.Hash() blockHashStr := blockHash.String() @@ -112,7 +105,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip } // Generate the block iplds - headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld.FromBlockAndReceipts(block, receipts) + headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts) if err != nil { return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) } @@ -130,49 +123,50 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip if sdi.chainConfig.Clique != nil { reward = big.NewInt(0) } else { - reward = CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts) + reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts) } t = time.Now() // Begin new db tx for everything - tx, err := sdi.dbWriter.db.Beginx() + tx, err := sdi.dbWriter.db.Begin(sdi.ctx) if err != nil { return nil, err } defer func() { if p := recover(); p != nil { - shared.Rollback(tx) + rollback(sdi.ctx, tx) panic(p) } else if err != nil { - shared.Rollback(tx) + rollback(sdi.ctx, tx) } }() - blockTx := &BlockTx{ + blockTx := &BatchTx{ + stm: sdi.dbWriter.db.InsertIPLDsStm(), iplds: make(chan models.IPLDModel), quit: make(chan struct{}), ipldCache: models.IPLDBatch{}, dbtx: tx, // handle transaction commit or rollback for any return case - Close: func(self *BlockTx, err error) error { + close: func(self *BatchTx, err error) error { close(self.quit) close(self.iplds) if p := recover(); p != nil { - shared.Rollback(tx) + rollback(sdi.ctx, tx) panic(p) } else if err != nil { - shared.Rollback(tx) + rollback(sdi.ctx, tx) } else { tDiff := time.Since(t) indexerMetrics.tStateStoreCodeProcessing.Update(tDiff) traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String()) t = time.Now() if err := self.flush(); err != nil { - shared.Rollback(tx) + rollback(sdi.ctx, tx) traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String()) log.Debug(traceMsg) return err } - err = tx.Commit() + err = tx.Commit(sdi.ctx) tDiff = time.Since(t) indexerMetrics.tPostgresCommit.Update(tDiff) traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String()) @@ -231,14 +225,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String()) t = time.Now() - blockTx.BlockNumber = height blockTx.headerID = headerID return blockTx, err } // processHeader publishes and indexes a header IPLD in Postgres // it returns the headerID -func (sdi *StateDiffIndexer) processHeader(tx *BlockTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) { +func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) { tx.cacheIPLD(headerNode) var baseFee *int64 @@ -267,7 +260,7 @@ func (sdi *StateDiffIndexer) processHeader(tx *BlockTx, 
header *types.Header, he } // processUncles publishes and indexes uncle IPLDs in Postgres -func (sdi *StateDiffIndexer) processUncles(tx *BlockTx, headerID int64, blockNumber uint64, uncleNodes []*ipld.EthHeader) error { +func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID int64, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error { // publish and index uncles for _, uncleNode := range uncleNodes { tx.cacheIPLD(uncleNode) @@ -276,7 +269,7 @@ func (sdi *StateDiffIndexer) processUncles(tx *BlockTx, headerID int64, blockNum if sdi.chainConfig.Clique != nil { uncleReward = big.NewInt(0) } else { - uncleReward = CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) + uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) } uncle := models.UncleModel{ CID: uncleNode.Cid().String(), @@ -298,17 +291,17 @@ type processArgs struct { blockNumber *big.Int receipts types.Receipts txs types.Transactions - rctNodes []*ipld.EthReceipt - rctTrieNodes []*ipld.EthRctTrie - txNodes []*ipld.EthTx - txTrieNodes []*ipld.EthTxTrie - logTrieNodes [][]*ipld.EthLogTrie + rctNodes []*ipld2.EthReceipt + rctTrieNodes []*ipld2.EthRctTrie + txNodes []*ipld2.EthTx + txTrieNodes []*ipld2.EthTxTrie + logTrieNodes [][]*ipld2.EthLogTrie logLeafNodeCIDs [][]cid.Cid rctLeafNodeCIDs []cid.Cid } // processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres -func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BlockTx, args processArgs) error { +func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error { // Process receipts and txs signer := types.MakeSigner(sdi.chainConfig, args.blockNumber) for i, receipt := range args.receipts { @@ -434,8 +427,12 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BlockTx, args processArgs return nil } -// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD database -func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error { +// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql +func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode) error { + tx, ok := batch.(*BatchTx) + if !ok { + return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch) + } // publish the state node if stateNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node @@ -443,14 +440,14 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN stateModel := models.StateNodeModel{ Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), - CID: RemovedNodeStateCID, - MhKey: RemovedNodeMhKey, + CID: shared.RemovedNodeStateCID, + MhKey: shared.RemovedNodeMhKey, NodeType: stateNode.NodeType.Int(), } _, err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel, tx.headerID) return err } - stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue) + stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue) if err != nil { return fmt.Errorf("error generating and cacheing state node IPLD: %v", err) } @@ -497,8 +494,8 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN storageModel := models.StorageNodeModel{ Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), - CID: 
RemovedNodeStorageCID, - MhKey: RemovedNodeMhKey, + CID: shared.RemovedNodeStorageCID, + MhKey: shared.RemovedNodeMhKey, NodeType: storageNode.NodeType.Int(), } if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel, stateID); err != nil { @@ -506,7 +503,7 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN } continue } - storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue) + storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue) if err != nil { return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err) } @@ -525,8 +522,12 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN return nil } -// PushCodeAndCodeHash publishes code and codehash pairs to the ipld database -func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error { +// PushCodeAndCodeHash publishes code and codehash pairs to the ipld sql +func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error { + tx, ok := batch.(*BatchTx) + if !ok { + return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch) + } // codec doesn't matter since db key is multihash-based mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash) if err != nil { @@ -535,3 +536,8 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sd tx.cacheDirect(mhKey, codeAndCodeHash.Code) return nil } + +// Close satisfied io.Closer +func (sdi *StateDiffIndexer) Close() error { + return sdi.dbWriter.db.Close() +} diff --git a/statediff/indexer/indexer_legacy_test.go b/statediff/indexer/database/sql/indexer_legacy_test.go similarity index 73% rename from statediff/indexer/indexer_legacy_test.go rename to statediff/indexer/database/sql/indexer_legacy_test.go index 7c01f567f..f2fdb0521 100644 --- a/statediff/indexer/indexer_legacy_test.go +++ b/statediff/indexer/database/sql/indexer_legacy_test.go @@ -14,19 +14,21 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
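Aside: the hunk above finishes the caller-facing surface of the refactored indexer (PushBlock returning interfaces.Batch, PushStateNode/PushCodeAndCodeHash taking that batch, and the new Close). The wiring sketch below is hypothetical, not part of the patch: the node.Info values and chain config are placeholders, and the commit/rollback step that finalizes the returned BatchTx is not shown in this section, so it is deliberately elided.

```go
package sketch

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

// indexOneBlock wires the pieces from this patch together: a pgx-backed Driver,
// the Postgres statement set, and the SQL StateDiffIndexer.
func indexOneBlock(ctx context.Context, info node.Info, block *types.Block, receipts types.Receipts, td *big.Int) error {
	driver, err := postgres.NewPGXDriver(ctx, postgres.DefaultConfig, info)
	if err != nil {
		return err
	}
	db := postgres.NewPostgresDB(driver)

	ind, err := sql.NewStateDiffIndexer(ctx, params.MainnetChainConfig, db)
	if err != nil {
		return err
	}
	defer ind.Close() // closes the underlying connection pool

	batch, err := ind.PushBlock(block, receipts, td)
	if err != nil {
		return err
	}
	// State/storage nodes and contract code would be pushed against `batch`
	// here via PushStateNode / PushCodeAndCodeHash; the hook that commits or
	// rolls back the BatchTx is not shown in this hunk, so it is omitted.
	_ = batch
	return nil
}
```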
-package indexer_test +package sql_test import ( + "context" "testing" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/statediff/indexer" - "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" "github.com/ipfs/go-cid" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) var ( @@ -39,12 +41,12 @@ func setupLegacy(t *testing.T) { mockLegacyBlock = legacyData.MockBlock legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) - db, err = shared.SetupDB() + db, err = test_helpers.SetupDB() require.NoError(t, err) - ind, err = indexer.NewStateDiffIndexer(legacyData.Config, db) + ind, err = sql.NewSQLIndexer(context.Background(), legacyData.Config, db) require.NoError(t, err) - var tx *indexer.BlockTx + var tx *sql.BlockTx tx, err = ind.PushBlock( mockLegacyBlock, legacyData.MockReceipts, @@ -57,7 +59,7 @@ func setupLegacy(t *testing.T) { require.NoError(t, err) } - shared.ExpectEqual(t, tx.BlockNumber, legacyData.BlockNumber.Uint64()) + test_helpers.ExpectEqual(t, tx.BlockNumber, legacyData.BlockNumber.Uint64()) } func TestPublishAndIndexerLegacy(t *testing.T) { @@ -76,12 +78,12 @@ func TestPublishAndIndexerLegacy(t *testing.T) { BaseFee *int64 `db:"base_fee"` } header := new(res) - err = db.QueryRowx(pgStr, legacyData.BlockNumber.Uint64()).StructScan(header) + err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).StructScan(header) require.NoError(t, err) - shared.ExpectEqual(t, header.CID, legacyHeaderCID.String()) - shared.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String()) - shared.ExpectEqual(t, header.Reward, "5000000000000011250") + test_helpers.ExpectEqual(t, header.CID, legacyHeaderCID.String()) + test_helpers.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String()) + test_helpers.ExpectEqual(t, header.Reward, "5000000000000011250") require.Nil(t, legacyData.MockHeader.BaseFee) require.Nil(t, header.BaseFee) }) diff --git a/statediff/indexer/indexer_test.go b/statediff/indexer/database/sql/indexer_test.go similarity index 72% rename from statediff/indexer/indexer_test.go rename to statediff/indexer/database/sql/indexer_test.go index d1962dad2..91d55f094 100644 --- a/statediff/indexer/indexer_test.go +++ b/statediff/indexer/database/sql/indexer_test.go @@ -14,35 +14,37 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
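Aside: the legacy test above illustrates the new query pattern these tests adopt, a context-aware QueryRow on the sql.Database interface followed by StructScan into a db-tagged struct. The helper below is illustrative only; the query and column names are assumptions modelled on the header checks in these tests, not code from the patch.

```go
package sql_test

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
)

// headerRes mirrors the anonymous result structs used in these tests.
type headerRes struct {
	CID string `db:"cid"`
	TD  string `db:"td"`
}

// checkHeaderCID fetches one header row by block number and compares its CID.
func checkHeaderCID(t *testing.T, db sql.Database, blockNumber uint64, wantCID string) {
	t.Helper()
	pgStr := `SELECT cid, td FROM eth.header_cids WHERE block_number = $1`
	h := new(headerRes)
	if err := db.QueryRow(context.Background(), pgStr, blockNumber).StructScan(h); err != nil {
		t.Fatal(err)
	}
	if h.CID != wantCID {
		t.Fatalf("expected header CID %s, got %s", wantCID, h.CID)
	}
}
```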
-package indexer_test +package sql_test import ( "bytes" + "context" "fmt" "os" "testing" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff/indexer" - "github.com/ethereum/go-ethereum/statediff/indexer/ipfs" - "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" dshelp "github.com/ipfs/go-ipfs-ds-help" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/statediff/indexer" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) var ( - db *postgres.DB + db sql.Database err error - ind *indexer.StateDiffIndexer + ind *interfaces.StateDiffIndexer ipfsPgGet = `SELECT data FROM public.blocks WHERE key = $1` tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte @@ -135,13 +137,13 @@ func init() { } func setup(t *testing.T) { - db, err = shared.SetupDB() + db, err = test_helpers.SetupDB() if err != nil { t.Fatal(err) } - ind, err = indexer.NewStateDiffIndexer(mocks.TestConfig, db) + ind, err = indexer.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db) require.NoError(t, err) - var tx *indexer.BlockTx + var tx *sql.BlockTx tx, err = ind.PushBlock( mockBlock, mocks.MockReceipts, @@ -157,11 +159,14 @@ func setup(t *testing.T) { } } - shared.ExpectEqual(t, tx.BlockNumber, mocks.BlockNumber.Uint64()) + test_helpers.ExpectEqual(t, tx.BlockNumber, mocks.BlockNumber.Uint64()) } func tearDown(t *testing.T) { - indexer.TearDownDB(t, db) + sql.TearDownDB(t, db) + if err := ind.Close(); err != nil { + t.Fatal(err) + } } func TestPublishAndIndexer(t *testing.T) { @@ -180,14 +185,14 @@ func TestPublishAndIndexer(t *testing.T) { BaseFee *int64 `db:"base_fee"` } header := new(res) - err = db.QueryRowx(pgStr, mocks.BlockNumber.Uint64()).StructScan(header) + err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).StructScan(header) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, header.CID, headerCID.String()) - shared.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String()) - shared.ExpectEqual(t, header.Reward, "2000000000000021250") - shared.ExpectEqual(t, *header.BaseFee, mocks.MockHeader.BaseFee.Int64()) + test_helpers.ExpectEqual(t, header.CID, headerCID.String()) + test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String()) + test_helpers.ExpectEqual(t, header.Reward, "2000000000000021250") + test_helpers.ExpectEqual(t, *header.BaseFee, mocks.MockHeader.BaseFee.Int64()) dc, err := cid.Decode(header.CID) if err != nil { t.Fatal(err) @@ -195,11 +200,11 @@ func TestPublishAndIndexer(t *testing.T) { mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + 
mhKey.String() var data []byte - err = db.Get(&data, ipfsPgGet, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, data, mocks.MockHeaderRlp) + test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp) }) t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) { @@ -209,16 +214,16 @@ func TestPublishAndIndexer(t *testing.T) { trxs := make([]string, 0) pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id) WHERE header_cids.block_number = $1` - err = db.Select(&trxs, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, len(trxs), 5) - expectTrue(t, shared.ListContainsString(trxs, trx1CID.String())) - expectTrue(t, shared.ListContainsString(trxs, trx2CID.String())) - expectTrue(t, shared.ListContainsString(trxs, trx3CID.String())) - expectTrue(t, shared.ListContainsString(trxs, trx4CID.String())) - expectTrue(t, shared.ListContainsString(trxs, trx5CID.String())) + test_helpers.ExpectEqual(t, len(trxs), 5) + expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String())) // and published for _, c := range trxs { dc, err := cid.Decode(c) @@ -228,16 +233,16 @@ func TestPublishAndIndexer(t *testing.T) { mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() var data []byte - err = db.Get(&data, ipfsPgGet, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } switch c { case trx1CID.String(): - shared.ExpectEqual(t, data, tx1) + test_helpers.ExpectEqual(t, data, tx1) var txType *uint8 pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1` - err = db.Get(&txType, pgStr, c) + err = db.Get(context.Background(), &txType, pgStr, c) if err != nil { t.Fatal(err) } @@ -245,10 +250,10 @@ func TestPublishAndIndexer(t *testing.T) { t.Fatalf("expected nil tx_type, got %d", *txType) } case trx2CID.String(): - shared.ExpectEqual(t, data, tx2) + test_helpers.ExpectEqual(t, data, tx2) var txType *uint8 pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1` - err = db.Get(&txType, pgStr, c) + err = db.Get(context.Background(), &txType, pgStr, c) if err != nil { t.Fatal(err) } @@ -256,10 +261,10 @@ func TestPublishAndIndexer(t *testing.T) { t.Fatalf("expected nil tx_type, got %d", *txType) } case trx3CID.String(): - shared.ExpectEqual(t, data, tx3) + test_helpers.ExpectEqual(t, data, tx3) var txType *uint8 pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1` - err = db.Get(&txType, pgStr, c) + err = db.Get(context.Background(), &txType, pgStr, c) if err != nil { t.Fatal(err) } @@ -267,10 +272,10 @@ func TestPublishAndIndexer(t *testing.T) { t.Fatalf("expected nil tx_type, got %d", *txType) } case trx4CID.String(): - shared.ExpectEqual(t, data, tx4) + test_helpers.ExpectEqual(t, data, tx4) var txType *uint8 pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1` - err = db.Get(&txType, pgStr, c) + err = db.Get(context.Background(), &txType, pgStr, c) if err != nil { 
t.Fatal(err) } @@ -279,7 +284,7 @@ func TestPublishAndIndexer(t *testing.T) { } accessListElementModels := make([]models.AccessListElementModel, 0) pgStr = `SELECT access_list_element.* FROM eth.access_list_element INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.id) WHERE cid = $1 ORDER BY access_list_element.index ASC` - err = db.Select(&accessListElementModels, pgStr, c) + err = db.Select(context.Background(), &accessListElementModels, pgStr, c) if err != nil { t.Fatal(err) } @@ -295,13 +300,13 @@ func TestPublishAndIndexer(t *testing.T) { Address: accessListElementModels[1].Address, StorageKeys: accessListElementModels[1].StorageKeys, } - shared.ExpectEqual(t, model1, mocks.AccessListEntry1Model) - shared.ExpectEqual(t, model2, mocks.AccessListEntry2Model) + test_helpers.ExpectEqual(t, model1, mocks.AccessListEntry1Model) + test_helpers.ExpectEqual(t, model2, mocks.AccessListEntry2Model) case trx5CID.String(): - shared.ExpectEqual(t, data, tx5) + test_helpers.ExpectEqual(t, data, tx5) var txType *uint8 pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1` - err = db.Get(&txType, pgStr, c) + err = db.Get(context.Background(), &txType, pgStr, c) if err != nil { t.Fatal(err) } @@ -322,7 +327,7 @@ func TestPublishAndIndexer(t *testing.T) { AND transaction_cids.header_id = header_cids.id AND header_cids.block_number = $1 ORDER BY transaction_cids.index` - err = db.Select(&rcts, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } @@ -340,12 +345,12 @@ func TestPublishAndIndexer(t *testing.T) { INNER JOIN eth.receipt_cids ON (log_cids.receipt_id = receipt_cids.id) INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key) WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC` - err = db.Select(&results, pgStr, rcts[i]) + err = db.Select(context.Background(), &results, pgStr, rcts[i]) require.NoError(t, err) // expecting MockLog1 and MockLog2 for mockReceipt4 expectedLogs := mocks.MockReceipts[i].Logs - shared.ExpectEqual(t, len(results), len(expectedLogs)) + test_helpers.ExpectEqual(t, len(results), len(expectedLogs)) var nodeElements []interface{} for idx, r := range results { @@ -357,7 +362,7 @@ func TestPublishAndIndexer(t *testing.T) { require.NoError(t, err) // 2nd element of the leaf node contains the encoded log data. 
- shared.ExpectEqual(t, logRaw, nodeElements[1].([]byte)) + test_helpers.ExpectEqual(t, logRaw, nodeElements[1].([]byte)) } } }) @@ -372,19 +377,19 @@ func TestPublishAndIndexer(t *testing.T) { WHERE receipt_cids.tx_id = transaction_cids.id AND transaction_cids.header_id = header_cids.id AND header_cids.block_number = $1 order by transaction_cids.id` - err = db.Select(&rcts, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, len(rcts), 5) + test_helpers.ExpectEqual(t, len(rcts), 5) for idx, rctLeafCID := range rcts { - result := make([]ipfs.BlockModel, 0) + result := make([]models.IPLDModel, 0) pgStr = `SELECT data FROM eth.receipt_cids INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) WHERE receipt_cids.leaf_cid = $1` - err = db.Select(&result, pgStr, rctLeafCID) + err = db.Select(context.Background(), &result, pgStr, rctLeafCID) if err != nil { t.Fatal(err) } @@ -397,7 +402,7 @@ func TestPublishAndIndexer(t *testing.T) { expectedRct, err := mocks.MockReceipts[idx].MarshalBinary() require.NoError(t, err) - shared.ExpectEqual(t, expectedRct, nodeElements[1].([]byte)) + test_helpers.ExpectEqual(t, expectedRct, nodeElements[1].([]byte)) } // and published @@ -409,57 +414,57 @@ func TestPublishAndIndexer(t *testing.T) { mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() var data []byte - err = db.Get(&data, ipfsPgGet, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } switch c { case rct1CID.String(): - shared.ExpectEqual(t, data, rct1) + test_helpers.ExpectEqual(t, data, rct1) var postStatus uint64 pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(&postStatus, pgStr, c) + err = db.Get(context.Background(), &postStatus, pgStr, c) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus) + test_helpers.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus) case rct2CID.String(): - shared.ExpectEqual(t, data, rct2) + test_helpers.ExpectEqual(t, data, rct2) var postState string pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(&postState, pgStr, c) + err = db.Get(context.Background(), &postState, pgStr, c) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, postState, mocks.ExpectedPostState1) + test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState1) case rct3CID.String(): - shared.ExpectEqual(t, data, rct3) + test_helpers.ExpectEqual(t, data, rct3) var postState string pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(&postState, pgStr, c) + err = db.Get(context.Background(), &postState, pgStr, c) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, postState, mocks.ExpectedPostState2) + test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState2) case rct4CID.String(): - shared.ExpectEqual(t, data, rct4) + test_helpers.ExpectEqual(t, data, rct4) var postState string pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(&postState, pgStr, c) + err = db.Get(context.Background(), &postState, pgStr, c) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, postState, mocks.ExpectedPostState3) + test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3) case rct5CID.String(): - shared.ExpectEqual(t, data, rct5) + test_helpers.ExpectEqual(t, data, 
rct5) var postState string pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(&postState, pgStr, c) + err = db.Get(context.Background(), &postState, pgStr, c) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, postState, mocks.ExpectedPostState3) + test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3) } } }) @@ -472,11 +477,11 @@ func TestPublishAndIndexer(t *testing.T) { pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id) WHERE header_cids.block_number = $1 AND node_type != 3` - err = db.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, len(stateNodes), 2) + test_helpers.ExpectEqual(t, len(stateNodes), 2) for _, stateNode := range stateNodes { var data []byte dc, err := cid.Decode(stateNode.CID) @@ -485,22 +490,22 @@ func TestPublishAndIndexer(t *testing.T) { } mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = db.Get(&data, ipfsPgGet, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1` var account models.StateAccountModel - err = db.Get(&account, pgStr, stateNode.ID) + err = db.Get(context.Background(), &account, pgStr, stateNode.ID) if err != nil { t.Fatal(err) } if stateNode.CID == state1CID.String() { - shared.ExpectEqual(t, stateNode.NodeType, 2) - shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex()) - shared.ExpectEqual(t, stateNode.Path, []byte{'\x06'}) - shared.ExpectEqual(t, data, mocks.ContractLeafNode) - shared.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, stateNode.NodeType, 2) + test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex()) + test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'}) + test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode) + test_helpers.ExpectEqual(t, account, models.StateAccountModel{ ID: account.ID, StateID: stateNode.ID, Balance: "0", @@ -510,11 +515,11 @@ func TestPublishAndIndexer(t *testing.T) { }) } if stateNode.CID == state2CID.String() { - shared.ExpectEqual(t, stateNode.NodeType, 2) - shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex()) - shared.ExpectEqual(t, stateNode.Path, []byte{'\x0c'}) - shared.ExpectEqual(t, data, mocks.AccountLeafNode) - shared.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, stateNode.NodeType, 2) + test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex()) + test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'}) + test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode) + test_helpers.ExpectEqual(t, account, models.StateAccountModel{ ID: account.ID, StateID: stateNode.ID, Balance: "1000", @@ -530,11 +535,11 @@ func TestPublishAndIndexer(t *testing.T) { pgStr = `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id) WHERE header_cids.block_number = $1 AND node_type = 3` - 
err = db.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, len(stateNodes), 1) + test_helpers.ExpectEqual(t, len(stateNodes), 1) stateNode := stateNodes[0] var data []byte dc, err := cid.Decode(stateNode.CID) @@ -543,14 +548,14 @@ func TestPublishAndIndexer(t *testing.T) { } mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - shared.ExpectEqual(t, prefixedKey, indexer.RemovedNodeMhKey) - err = db.Get(&data, ipfsPgGet, prefixedKey) + test_helpers.ExpectEqual(t, prefixedKey, sql.RemovedNodeMhKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, stateNode.CID, indexer.RemovedNodeStateCID) - shared.ExpectEqual(t, stateNode.Path, []byte{'\x02'}) - shared.ExpectEqual(t, data, []byte{}) + test_helpers.ExpectEqual(t, stateNode.CID, sql.RemovedNodeStateCID) + test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'}) + test_helpers.ExpectEqual(t, data, []byte{}) }) t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) { @@ -564,12 +569,12 @@ func TestPublishAndIndexer(t *testing.T) { AND state_cids.header_id = header_cids.id AND header_cids.block_number = $1 AND storage_cids.node_type != 3` - err = db.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, len(storageNodes), 1) - shared.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, len(storageNodes), 1) + test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ CID: storageCID.String(), NodeType: 2, StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), @@ -583,11 +588,11 @@ func TestPublishAndIndexer(t *testing.T) { } mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = db.Get(&data, ipfsPgGet, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, data, mocks.StorageLeafNode) + test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode) // check that Removed storage nodes were properly indexed storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) @@ -597,13 +602,13 @@ func TestPublishAndIndexer(t *testing.T) { AND state_cids.header_id = header_cids.id AND header_cids.block_number = $1 AND storage_cids.node_type = 3` - err = db.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64()) + err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, len(storageNodes), 1) - shared.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ - CID: indexer.RemovedNodeStorageCID, + test_helpers.ExpectEqual(t, len(storageNodes), 1) + test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + CID: sql.RemovedNodeStorageCID, NodeType: 3, StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), @@ -615,11 +620,11 @@ func TestPublishAndIndexer(t *testing.T) { } mhKey = dshelp.MultihashToDsKey(dc.Hash()) prefixedKey = blockstore.BlockPrefix.String() + mhKey.String() - shared.ExpectEqual(t, prefixedKey, 
indexer.RemovedNodeMhKey) - err = db.Get(&data, ipfsPgGet, prefixedKey) + test_helpers.ExpectEqual(t, prefixedKey, sql.RemovedNodeMhKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) if err != nil { t.Fatal(err) } - shared.ExpectEqual(t, data, []byte{}) + test_helpers.ExpectEqual(t, data, []byte{}) }) } diff --git a/statediff/indexer/database/sql/interfaces.go b/statediff/indexer/database/sql/interfaces.go new file mode 100644 index 000000000..755c4e156 --- /dev/null +++ b/statediff/indexer/database/sql/interfaces.go @@ -0,0 +1,88 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package sql + +import ( + "context" + "io" + "time" +) + +// Database interfaces required by the sql indexer +type Database interface { + Driver + Statements +} + +// Driver interface has all the methods required by a driver implementation to support the sql indexer +type Driver interface { + QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow + Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) + Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Begin(ctx context.Context) (Tx, error) + Stats() Stats + NodeID() int64 + Context() context.Context + io.Closer +} + +// Statements interface to accommodate different SQL query syntax +type Statements interface { + InsertHeaderStm() string + InsertUncleStm() string + InsertTxStm() string + InsertAccessListElementStm() string + InsertRctStm() string + InsertLogStm() string + InsertStateStm() string + InsertAccountStm() string + InsertStorageStm() string + InsertIPLDStm() string + InsertIPLDsStm() string +} + +// Tx interface to accommodate different concrete SQL transaction types +type Tx interface { + QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow + Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) + Commit(ctx context.Context) error + Rollback(ctx context.Context) error +} + +// ScannableRow interface to accommodate different concrete row types +type ScannableRow interface { + Scan(dest ...interface{}) error + StructScan(dest interface{}) error +} + +// Result interface to accommodate different concrete result types +type Result interface { + RowsAffected() (int64, error) +} + +// Stats interface to accommodate different concrete sql stats types +type Stats interface { + MaxOpen() int64 + Open() int64 + InUse() int64 + Idle() int64 + WaitCount() int64 + WaitDuration() time.Duration + MaxIdleClosed() int64 + MaxLifetimeClosed() int64 +} diff --git a/statediff/indexer/metrics.go b/statediff/indexer/database/sql/metrics.go similarity index 79% rename from statediff/indexer/metrics.go rename to statediff/indexer/database/sql/metrics.go index 2d37816f6..b0946a722 100644 --- 
a/statediff/indexer/metrics.go +++ b/statediff/indexer/database/sql/metrics.go @@ -1,7 +1,22 @@ -package indexer +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package sql import ( - "database/sql" "strings" "github.com/ethereum/go-ethereum/metrics" @@ -79,7 +94,7 @@ func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles { } type dbMetricsHandles struct { - // Maximum number of open connections to the database + // Maximum number of open connections to the sql maxOpen metrics.Gauge // The number of established connections both in use and idle open metrics.Gauge @@ -120,13 +135,13 @@ func RegisterDBMetrics(reg metrics.Registry) dbMetricsHandles { return ctx } -func (met *dbMetricsHandles) Update(stats sql.DBStats) { - met.maxOpen.Update(int64(stats.MaxOpenConnections)) - met.open.Update(int64(stats.OpenConnections)) - met.inUse.Update(int64(stats.InUse)) - met.idle.Update(int64(stats.Idle)) - met.waitedFor.Inc(stats.WaitCount) - met.blockedMilliseconds.Inc(stats.WaitDuration.Milliseconds()) - met.closedMaxIdle.Inc(stats.MaxIdleClosed) - met.closedMaxLifetime.Inc(stats.MaxLifetimeClosed) +func (met *dbMetricsHandles) Update(stats Stats) { + met.maxOpen.Update(stats.MaxOpen()) + met.open.Update(stats.Open()) + met.inUse.Update(stats.InUse()) + met.idle.Update(stats.Idle()) + met.waitedFor.Inc(stats.WaitCount()) + met.blockedMilliseconds.Inc(stats.WaitDuration().Milliseconds()) + met.closedMaxIdle.Inc(stats.MaxIdleClosed()) + met.closedMaxLifetime.Inc(stats.MaxLifetimeClosed()) } diff --git a/statediff/indexer/database/sql/postgres/config.go b/statediff/indexer/database/sql/postgres/config.go new file mode 100644 index 000000000..07e3dfe21 --- /dev/null +++ b/statediff/indexer/database/sql/postgres/config.go @@ -0,0 +1,81 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
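Aside: with dbMetricsHandles.Update now taking the driver-agnostic Stats interface instead of database/sql.DBStats, the metrics loop only needs Driver.Stats() snapshots. The helper below is a hypothetical illustration of that shape; the real loop is StateDiffIndexer.ReportDBMetrics.

```go
package sql

import "time"

// pollDBStats snapshots the driver-agnostic pool stats on every tick and
// feeds them into the registered gauges and counters.
func pollDBStats(delay time.Duration, driver Driver, met *dbMetricsHandles, quit <-chan bool) {
	ticker := time.NewTicker(delay)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			met.Update(driver.Stats()) // open/in-use/idle gauges, wait/close counters
		case <-quit:
			return
		}
	}
}
```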
+ +package postgres + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/statediff/indexer/shared" +) + +type DriverType string + +const ( + PGX DriverType = "PGX" + SQLX DriverType = "SQLX" +) + +// DefaultConfig are default parameters for connecting to a Postgres sql +var DefaultConfig = Config{ + Hostname: "localhost", + Port: 5432, + DatabaseName: "vulcanize_test", + Username: "postgres", + Password: "", +} + +// Config holds params for a Postgres db +type Config struct { + // conn string params + Hostname string + Port int + DatabaseName string + Username string + Password string + + // conn settings + MaxConns int + MaxIdle int + MinConns int + MaxConnIdleTime time.Duration + MaxConnLifetime time.Duration + ConnTimeout time.Duration + + // node info params + ID string + ClientName string + + // driver type + Driver DriverType +} + +func (c Config) Type() shared.DBType { + return shared.POSTGRES +} + +func (c Config) DbConnectionString() string { + if len(c.Username) > 0 && len(c.Password) > 0 { + return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable", + c.Username, c.Password, c.Hostname, c.Port, c.DatabaseName) + } + if len(c.Username) > 0 && len(c.Password) == 0 { + return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable", + c.Username, c.Hostname, c.Port, c.DatabaseName) + } + return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", c.Hostname, c.Port, c.DatabaseName) +} diff --git a/statediff/indexer/database/sql/postgres/database.go b/statediff/indexer/database/sql/postgres/database.go new file mode 100644 index 000000000..3fe7f652e --- /dev/null +++ b/statediff/indexer/database/sql/postgres/database.go @@ -0,0 +1,112 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
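Aside: a small runnable example of Config.DbConnectionString from the file above. The credential is a placeholder, not a default from the patch; with both username and password set the method yields a credentialed postgresql:// URL with sslmode disabled.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
)

func main() {
	c := postgres.DefaultConfig // localhost:5432/vulcanize_test, user "postgres"
	c.Password = "secret"       // placeholder credential, not a real default
	c.Driver = postgres.PGX     // select the pgx-backed driver

	// Prints: postgresql://postgres:secret@localhost:5432/vulcanize_test?sslmode=disable
	fmt.Println(c.DbConnectionString())
}
```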
+ +package postgres + +import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + +var _ sql.Database = &DB{} + +const ( + createNodeStm = `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (genesis_block, network_id, node_id, chain_id) + DO UPDATE + SET genesis_block = $1, + network_id = $2, + node_id = $3, + client_name = $4, + chain_id = $5 + RETURNING id` +) + +// NewPostgresDB returns a postgres.DB using the provided driver +func NewPostgresDB(driver sql.Driver) *DB { + return &DB{driver} +} + +// DB implements sql.Databse using a configured driver and Postgres statement syntax +type DB struct { + sql.Driver +} + +// InsertHeaderStm satisfies the sql.Statements interface +func (db *DB) InsertHeaderStm() string { + return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) + ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16) + RETURNING id` +} + +// InsertUncleStm satisfies the sql.Statements interface +func (db *DB) InsertUncleStm() string { + return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)` +} + +// InsertTxStm satisfies the sql.Statements interface +func (db *DB) InsertTxStm() string { + return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9) + RETURNING id` +} + +// InsertAccessListElementStm satisfies the sql.Statements interface +func (db *DB) InsertAccessListElementStm() string { + return `INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) + ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)` +} + +// InsertRctStm satisfies the sql.Statements interface +func (db *DB) InsertRctStm() string { + return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8) + RETURNING id` +} + +// InsertLogStm satisfies the sql.Statements interface +func (db *DB) InsertLogStm() string { + return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) + ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = ($1, $2, $4, $6, $7, $8, $9, $10)` +} + +// InsertStateStm satisfies the sql.Statements interface +func (db *DB) InsertStateStm() string { + return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES 
($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) + RETURNING id` +} + +// InsertAccountStm satisfies the sql.Statements interface +func (db *DB) InsertAccountStm() string { + return `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)` +} + +// InsertStorageStm satisfies the sql.Statements interface +func (db *DB) InsertStorageStm() string { + return `INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)` +} + +// InsertIPLDStm satisfies the sql.Statements interface +func (db *DB) InsertIPLDStm() string { + return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING` +} + +// InsertIPLDsStm satisfies the sql.Statements interface +func (db *DB) InsertIPLDsStm() string { + return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING` +} diff --git a/statediff/indexer/postgres/errors.go b/statediff/indexer/database/sql/postgres/errors.go similarity index 100% rename from statediff/indexer/postgres/errors.go rename to statediff/indexer/database/sql/postgres/errors.go diff --git a/statediff/indexer/database/sql/postgres/pgx.go b/statediff/indexer/database/sql/postgres/pgx.go new file mode 100644 index 000000000..d94c35083 --- /dev/null +++ b/statediff/indexer/database/sql/postgres/pgx.go @@ -0,0 +1,255 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
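Aside: InsertIPLDsStm above is the multi-row variant used by the BatchTx IPLD cache. The sketch below is hypothetical: the local ipldBatch struct stands in for models.IPLDBatch (whose exact field names are not shown here), and passing Go slices straight through Exec assumes a driver such as pgx that maps them to TEXT[]/BYTEA[]; a database/sql-based driver would need explicit array wrappers.

```go
package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
)

// ipldBatch stands in for the cached keys/values a BatchTx accumulates.
type ipldBatch struct {
	Keys   []string
	Values [][]byte
}

// flushIPLDs expands both slices row-by-row via unnest(); the statement's
// ON CONFLICT (key) DO NOTHING keeps the flush idempotent on retries.
func flushIPLDs(ctx context.Context, db sql.Database, cache ipldBatch) error {
	_, err := db.Exec(ctx, db.InsertIPLDsStm(), cache.Keys, cache.Values)
	return err
}
```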
+ +package postgres + +import ( + "context" + "time" + + "github.com/georgysavva/scany/pgxscan" + "github.com/jackc/pgconn" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/node" +) + +// PGXDriver driver, implements sql.Driver +type PGXDriver struct { + ctx context.Context + pool *pgxpool.Pool + nodeInfo node.Info + nodeID int64 +} + +// NewPGXDriver returns a new pgx driver +// it initializes the connection pool and creates the node info table +func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) { + pgConf, err := MakeConfig(config) + if err != nil { + return nil, err + } + dbPool, err := pgxpool.ConnectConfig(ctx, pgConf) + if err != nil { + return nil, ErrDBConnectionFailed(err) + } + pg := &PGXDriver{ctx: ctx, pool: dbPool, nodeInfo: node} + nodeErr := pg.createNode() + if nodeErr != nil { + return &PGXDriver{}, ErrUnableToSetNode(nodeErr) + } + return pg, nil +} + +// MakeConfig creates a pgxpool.Config from the provided Config +func MakeConfig(config Config) (*pgxpool.Config, error) { + conf, err := pgxpool.ParseConfig("") + if err != nil { + return nil, err + } + + //conf.ConnConfig.BuildStatementCache = nil + conf.ConnConfig.Config.Host = config.Hostname + conf.ConnConfig.Config.Port = uint16(config.Port) + conf.ConnConfig.Config.Database = config.DatabaseName + conf.ConnConfig.Config.User = config.Username + conf.ConnConfig.Config.Password = config.Password + + if config.ConnTimeout != 0 { + conf.ConnConfig.Config.ConnectTimeout = config.ConnTimeout + } + if config.MaxConns != 0 { + conf.MaxConns = int32(config.MaxConns) + } + if config.MinConns != 0 { + conf.MinConns = int32(config.MinConns) + } + if config.MaxConnLifetime != 0 { + conf.MaxConnLifetime = config.MaxConnLifetime + } + if config.MaxConnIdleTime != 0 { + conf.MaxConnIdleTime = config.MaxConnIdleTime + } + return conf, nil +} + +func (pgx *PGXDriver) createNode() error { + var nodeID int64 + err := pgx.pool.QueryRow( + pgx.ctx, + createNodeStm, + pgx.nodeInfo.GenesisBlock, pgx.nodeInfo.NetworkID, + pgx.nodeInfo.ID, pgx.nodeInfo.ClientName, + pgx.nodeInfo.ChainID).Scan(&nodeID) + if err != nil { + return ErrUnableToSetNode(err) + } + pgx.nodeID = nodeID + return nil +} + +// QueryRow satisfies sql.Database +func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow { + row := pgx.pool.QueryRow(ctx, sql, args...) + return rowWrapper{row: row} +} + +// Exec satisfies sql.Database +func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) { + res, err := pgx.pool.Exec(ctx, sql, args...) + return resultWrapper{ct: res}, err +} + +// Select satisfies sql.Database +func (pgx *PGXDriver) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return pgxscan.Select(ctx, pgx.pool, dest, query, args...) +} + +// Get satisfies sql.Database +func (pgx *PGXDriver) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return pgxscan.Get(ctx, pgx.pool, dest, query, args...) 
+} + +// Begin satisfies sql.Database +func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) { + tx, err := pgx.pool.Begin(ctx) + if err != nil { + return nil, err + } + return pgxTxWrapper{tx: tx}, nil +} + +func (pgx *PGXDriver) Stats() sql.Stats { + stats := pgx.pool.Stat() + return pgxStatsWrapper{stats: stats} +} + +// NodeInfo satisfies sql.Database +func (pgx *PGXDriver) NodeInfo() node.Info { + return pgx.nodeInfo +} + +// NodeID satisfies sql.Database +func (pgx *PGXDriver) NodeID() int64 { + return pgx.nodeID +} + +// Close satisfies sql.Database/io.Closer +func (pgx *PGXDriver) Close() error { + pgx.pool.Close() + return nil +} + +// Context satisfies sql.Database +func (pgx *PGXDriver) Context() context.Context { + return pgx.ctx +} + +type rowWrapper struct { + row pgx.Row +} + +// Scan satisfies sql.ScannableRow +func (r rowWrapper) Scan(dest ...interface{}) error { + return r.row.Scan(dest) +} + +// StructScan satisfies sql.ScannableRow +func (r rowWrapper) StructScan(dest interface{}) error { + return pgxscan.ScanRow(dest, r.row.(pgx.Rows)) +} + +type resultWrapper struct { + ct pgconn.CommandTag +} + +// RowsAffected satisfies sql.Result +func (r resultWrapper) RowsAffected() (int64, error) { + return r.ct.RowsAffected(), nil +} + +type pgxStatsWrapper struct { + stats *pgxpool.Stat +} + +// MaxOpen satisfies sql.Stats +func (s pgxStatsWrapper) MaxOpen() int64 { + return int64(s.stats.MaxConns()) +} + +// Open satisfies sql.Stats +func (s pgxStatsWrapper) Open() int64 { + return int64(s.stats.TotalConns()) +} + +// InUse satisfies sql.Stats +func (s pgxStatsWrapper) InUse() int64 { + return int64(s.stats.AcquiredConns()) +} + +// Idle satisfies sql.Stats +func (s pgxStatsWrapper) Idle() int64 { + return int64(s.stats.IdleConns()) +} + +// WaitCount satisfies sql.Stats +func (s pgxStatsWrapper) WaitCount() int64 { + return s.stats.EmptyAcquireCount() +} + +// WaitDuration satisfies sql.Stats +func (s pgxStatsWrapper) WaitDuration() time.Duration { + return s.stats.AcquireDuration() +} + +// MaxIdleClosed satisfies sql.Stats +func (s pgxStatsWrapper) MaxIdleClosed() int64 { + // this stat isn't supported by pgxpool, but we don't want to panic + return 0 +} + +// MaxLifetimeClosed satisfies sql.Stats +func (s pgxStatsWrapper) MaxLifetimeClosed() int64 { + return s.stats.CanceledAcquireCount() +} + +type pgxTxWrapper struct { + tx pgx.Tx +} + +// QueryRow satisfies sql.Tx +func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow { + row := t.tx.QueryRow(ctx, sql, args...) + return rowWrapper{row: row} +} + +// Exec satisfies sql.Tx +func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) { + res, err := t.tx.Exec(ctx, sql, args...) 
+ return resultWrapper{ct: res}, err +} + +// Commit satisfies sql.Tx +func (t pgxTxWrapper) Commit(ctx context.Context) error { + return t.tx.Commit(ctx) +} + +// Rollback satisfies sql.Tx +func (t pgxTxWrapper) Rollback(ctx context.Context) error { + return t.tx.Rollback(ctx) +} diff --git a/statediff/indexer/database/sql/postgres/pgx_test.go b/statediff/indexer/database/sql/postgres/pgx_test.go new file mode 100644 index 000000000..aadb12835 --- /dev/null +++ b/statediff/indexer/database/sql/postgres/pgx_test.go @@ -0,0 +1,122 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package postgres_test + +import ( + "context" + "fmt" + "math/big" + "strings" + "testing" + + "github.com/jackc/pgx/pgtype" + "github.com/jackc/pgx/v4/pgxpool" + + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" + "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" +) + +var ( + pgConfig, _ = postgres.MakeConfig(postgres.DefaultConfig) + ctx = context.Background() +) + +func expectContainsSubstring(t *testing.T, full string, sub string) { + if !strings.Contains(full, sub) { + t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub) + } +} + +func TestPostgresPGX(t *testing.T) { + t.Run("connects to the sql", func(t *testing.T) { + dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig) + if err != nil { + t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err) + } + defer dbPool.Close() + if dbPool == nil { + t.Fatal("DB pool is nil") + } + }) + + t.Run("serializes big.Int to db", func(t *testing.T) { + // postgres driver doesn't support go big.Int type + // various casts in golang uint64, int64, overflow for + // transaction value (in wei) even though + // postgres numeric can handle an arbitrary + // sized int, so use string representation of big.Int + // and cast on insert + + dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig) + if err != nil { + t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err) + } + defer dbPool.Close() + + bi := new(big.Int) + bi.SetString("34940183920000000000", 10) + test_helpers.ExpectEqual(t, bi.String(), "34940183920000000000") + + defer dbPool.Exec(ctx, `DROP TABLE IF EXISTS example`) + _, err = dbPool.Exec(ctx, "CREATE TABLE example ( id INTEGER, data NUMERIC )") + if err != nil { + t.Fatal(err) + } + + sqlStatement := ` + INSERT INTO example (id, data) + VALUES (1, cast($1 AS NUMERIC))` + _, err = dbPool.Exec(ctx, sqlStatement, bi.String()) + if err != nil { + t.Fatal(err) + } + + var data pgtype.Numeric + err = dbPool.QueryRow(ctx, `SELECT data FROM example WHERE id = 1`).Scan(&data) + if err != nil { + t.Fatal(err) + } + + test_helpers.ExpectEqual(t, bi.String(), data) + actual := new(big.Int) + actual.Set(data.Int) + 
test_helpers.ExpectEqual(t, actual, bi) + }) + + t.Run("throws error when can't connect to the database", func(t *testing.T) { + goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} + _, err := postgres.NewPGXDriver(ctx, postgres.Config{}, goodInfo) + if err == nil { + t.Fatal("Expected an error") + } + + expectContainsSubstring(t, err.Error(), postgres.DbConnectionFailedMsg) + }) + + t.Run("throws error when can't create node", func(t *testing.T) { + badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100)) + badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} + + _, err := postgres.NewPGXDriver(ctx, postgres.DefaultConfig, badInfo) + if err == nil { + t.Fatal("Expected an error") + } + + expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg) + }) +} diff --git a/statediff/indexer/postgres/postgres_suite_test.go b/statediff/indexer/database/sql/postgres/postgres_suite_test.go similarity index 100% rename from statediff/indexer/postgres/postgres_suite_test.go rename to statediff/indexer/database/sql/postgres/postgres_suite_test.go diff --git a/statediff/indexer/database/sql/postgres/sqlx.go b/statediff/indexer/database/sql/postgres/sqlx.go new file mode 100644 index 000000000..2abf82d89 --- /dev/null +++ b/statediff/indexer/database/sql/postgres/sqlx.go @@ -0,0 +1,197 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
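The pgx driver above and the sqlx driver that follows expose the same sql.Database surface, so callers are expected to construct a driver and wrap it rather than touch pgxpool or sqlx directly. A minimal usage sketch, assuming the Config fields consumed by MakeConfig above, the node.Info fields used by createNode, and the NewPostgresDB wrapper added in test_helpers.go later in this diff (connection values are illustrative only):

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

func main() {
	ctx := context.Background()
	cfg := postgres.Config{ // field names follow MakeConfig above; values are examples
		Hostname:     "localhost",
		Port:         5432,
		DatabaseName: "vulcanize_public",
		Username:     "vdbm",
		Password:     "password",
	}
	info := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}

	// NewPGXDriver builds the pgxpool and upserts the node row via createNode.
	driver, err := postgres.NewPGXDriver(ctx, cfg, info)
	if err != nil {
		log.Fatal(err)
	}
	// NewPostgresDB (see test_helpers.go below) wraps either driver as a sql.Database.
	var db sql.Database = postgres.NewPostgresDB(driver)
	defer db.Close()

	var one int
	if err := db.QueryRow(ctx, "SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
}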
+ +package postgres + +import ( + "context" + coresql "database/sql" + "time" + + "github.com/jmoiron/sqlx" + + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/node" +) + +// SQLXDriver driver, implements sql.Driver +type SQLXDriver struct { + ctx context.Context + db *sqlx.DB + nodeInfo node.Info + nodeID int64 +} + +// NewSQLXDriver returns a new sqlx driver for Postgres +// it initializes the connection pool and creates the node info table +func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) { + db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString()) + if err != nil { + return &SQLXDriver{}, ErrDBConnectionFailed(err) + } + if config.MaxConns > 0 { + db.SetMaxOpenConns(config.MaxConns) + } + if config.MaxIdle > 0 { + db.SetMaxIdleConns(config.MaxIdle) + } + if config.MaxConnLifetime > 0 { + lifetime := config.MaxConnLifetime + db.SetConnMaxLifetime(lifetime) + } + driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node} + if err := driver.createNode(); err != nil { + return &SQLXDriver{}, ErrUnableToSetNode(err) + } + return driver, nil +} + +func (driver *SQLXDriver) createNode() error { + var nodeID int64 + err := driver.db.QueryRowx( + createNodeStm, + driver.nodeInfo.GenesisBlock, driver.nodeInfo.NetworkID, + driver.nodeInfo.ID, driver.nodeInfo.ClientName, + driver.nodeInfo.ChainID).Scan(&nodeID) + if err != nil { + return ErrUnableToSetNode(err) + } + driver.nodeID = nodeID + return nil +} + +// QueryRow satisfies sql.Database +func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) sql.ScannableRow { + return driver.db.QueryRowx(sql, args...) +} + +// Exec satisfies sql.Database +func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (sql.Result, error) { + return driver.db.Exec(sql, args...) +} + +// Select satisfies sql.Database +func (driver *SQLXDriver) Select(_ context.Context, dest interface{}, query string, args ...interface{}) error { + return driver.db.Select(dest, query, args...) +} + +// Get satisfies sql.Database +func (driver *SQLXDriver) Get(_ context.Context, dest interface{}, query string, args ...interface{}) error { + return driver.db.Get(dest, query, args...) 
+} + +// Begin satisfies sql.Database +func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) { + tx, err := driver.db.Beginx() + if err != nil { + return nil, err + } + return sqlxTxWrapper{tx: tx}, nil +} + +func (driver *SQLXDriver) Stats() sql.Stats { + stats := driver.db.Stats() + return sqlxStatsWrapper{stats: stats} +} + +// NodeInfo satisfies sql.Database +func (driver *SQLXDriver) NodeInfo() node.Info { + return driver.nodeInfo +} + +// NodeID satisfies sql.Database +func (driver *SQLXDriver) NodeID() int64 { + return driver.nodeID +} + +// Close satisfies sql.Database/io.Closer +func (driver *SQLXDriver) Close() error { + return driver.db.Close() +} + +// Context satisfies sql.Database +func (driver *SQLXDriver) Context() context.Context { + return driver.ctx +} + +type sqlxStatsWrapper struct { + stats coresql.DBStats +} + +// MaxOpen satisfies sql.Stats +func (s sqlxStatsWrapper) MaxOpen() int64 { + return int64(s.stats.MaxOpenConnections) +} + +// Open satisfies sql.Stats +func (s sqlxStatsWrapper) Open() int64 { + return int64(s.stats.OpenConnections) +} + +// InUse satisfies sql.Stats +func (s sqlxStatsWrapper) InUse() int64 { + return int64(s.stats.InUse) +} + +// Idle satisfies sql.Stats +func (s sqlxStatsWrapper) Idle() int64 { + return int64(s.stats.Idle) +} + +// WaitCount satisfies sql.Stats +func (s sqlxStatsWrapper) WaitCount() int64 { + return s.stats.WaitCount +} + +// WaitDuration satisfies sql.Stats +func (s sqlxStatsWrapper) WaitDuration() time.Duration { + return s.stats.WaitDuration +} + +// MaxIdleClosed satisfies sql.Stats +func (s sqlxStatsWrapper) MaxIdleClosed() int64 { + return s.stats.MaxIdleClosed +} + +// MaxLifetimeClosed satisfies sql.Stats +func (s sqlxStatsWrapper) MaxLifetimeClosed() int64 { + return s.stats.MaxLifetimeClosed +} + +type sqlxTxWrapper struct { + tx *sqlx.Tx +} + +// QueryRow satisfies sql.Tx +func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow { + row := t.tx.QueryRow(sql, args...) + return rowWrapper{row: row} +} + +// Exec satisfies sql.Tx +func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) { + return t.tx.Exec(sql, args...) 
+} + +// Commit satisfies sql.Tx +func (t sqlxTxWrapper) Commit(ctx context.Context) error { + return t.tx.Commit() +} + +// Rollback satisfies sql.Tx +func (t sqlxTxWrapper) Rollback(ctx context.Context) error { + return t.tx.Rollback() +} diff --git a/statediff/indexer/postgres/postgres_test.go b/statediff/indexer/database/sql/postgres/sqlx_test.go similarity index 66% rename from statediff/indexer/postgres/postgres_test.go rename to statediff/indexer/database/sql/postgres/sqlx_test.go index f3bbdffd0..37164e0f7 100644 --- a/statediff/indexer/postgres/postgres_test.go +++ b/statediff/indexer/database/sql/postgres/sqlx_test.go @@ -25,33 +25,19 @@ import ( "github.com/jmoiron/sqlx" _ "github.com/lib/pq" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/node" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) -var DBParams = postgres.ConnectionParams{ - Name: "vulcanize_public", - Password: "password", - Port: 5432, - Hostname: "localhost", - User: "vdbm", -} - -func expectContainsSubstring(t *testing.T, full string, sub string) { - if !strings.Contains(full, sub) { - t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub) - } -} - -func TestPostgresDB(t *testing.T) { +func TestPostgresSQLX(t *testing.T) { var sqlxdb *sqlx.DB t.Run("connects to the database", func(t *testing.T) { var err error - pgConfig := postgres.DbConnectionString(DBParams) + connStr := postgres.DefaultConfig.DbConnectionString() - sqlxdb, err = sqlx.Connect("postgres", pgConfig) + sqlxdb, err = sqlx.Connect("postgres", connStr) if err != nil { t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig, err) @@ -69,8 +55,8 @@ func TestPostgresDB(t *testing.T) { // sized int, so use string representation of big.Int // and cast on insert - pgConnectString := postgres.DbConnectionString(DBParams) - db, err := sqlx.Connect("postgres", pgConnectString) + connStr := postgres.DefaultConfig.DbConnectionString() + db, err := sqlx.Connect("postgres", connStr) if err != nil { t.Fatal(err) } @@ -80,7 +66,7 @@ func TestPostgresDB(t *testing.T) { bi := new(big.Int) bi.SetString("34940183920000000000", 10) - shared.ExpectEqual(t, bi.String(), "34940183920000000000") + test_helpers.ExpectEqual(t, bi.String(), "34940183920000000000") defer db.Exec(`DROP TABLE IF EXISTS example`) _, err = db.Exec("CREATE TABLE example ( id INTEGER, data NUMERIC )") @@ -102,19 +88,15 @@ func TestPostgresDB(t *testing.T) { t.Fatal(err) } - shared.ExpectEqual(t, bi.String(), data) + test_helpers.ExpectEqual(t, bi.String(), data) actual := new(big.Int) actual.SetString(data, 10) - shared.ExpectEqual(t, actual, bi) + test_helpers.ExpectEqual(t, actual, bi) }) t.Run("throws error when can't connect to the database", func(t *testing.T) { - invalidDatabase := postgres.ConnectionParams{} - node := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} - - _, err := postgres.NewDB(postgres.DbConnectionString(invalidDatabase), - postgres.ConnectionConfig{}, node) - + goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} + _, err := postgres.NewSQLXDriver(ctx, postgres.Config{}, goodInfo) if err == nil { t.Fatal("Expected an error") } @@ -124,13 +106,13 @@ func TestPostgresDB(t *testing.T) { t.Run("throws error when can't create node", func(t 
*testing.T) { badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100)) - node := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} - - _, err := postgres.NewDB(postgres.DbConnectionString(DBParams), postgres.ConnectionConfig{}, node) + badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} + _, err := postgres.NewSQLXDriver(ctx, postgres.DefaultConfig, badInfo) if err == nil { t.Fatal("Expected an error") } + expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg) }) } diff --git a/statediff/indexer/database/sql/postgres/test_helpers.go b/statediff/indexer/database/sql/postgres/test_helpers.go new file mode 100644 index 000000000..491701c4b --- /dev/null +++ b/statediff/indexer/database/sql/postgres/test_helpers.go @@ -0,0 +1,42 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package postgres + +import ( + "context" + + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/node" +) + +// SetupSQLXDB is used to setup a sqlx db for tests +func SetupSQLXDB() (sql.Database, error) { + driver, err := NewSQLXDriver(context.Background(), DefaultConfig, node.Info{}) + if err != nil { + return nil, err + } + return NewPostgresDB(driver), nil +} + +// SetupPGXDB is used to setup a pgx db for tests +func SetupPGXDB() (sql.Database, error) { + driver, err := NewPGXDriver(context.Background(), DefaultConfig, node.Info{}) + if err != nil { + return nil, err + } + return NewPostgresDB(driver), nil +} diff --git a/statediff/indexer/test_helpers.go b/statediff/indexer/database/sql/test_helpers.go similarity index 69% rename from statediff/indexer/test_helpers.go rename to statediff/indexer/database/sql/test_helpers.go index 024bb58f0..cebddb9d1 100644 --- a/statediff/indexer/test_helpers.go +++ b/statediff/indexer/database/sql/test_helpers.go @@ -14,46 +14,46 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
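Between the SetupSQLXDB/SetupPGXDB constructors above and the relocated TearDownDB helper just below, an integration test can target either driver through the same sql.Database handle. A short sketch, assuming a reachable Postgres instance matching DefaultConfig (the test name and body are placeholders):

func TestAgainstPostgres(t *testing.T) {
	db, err := postgres.SetupSQLXDB() // or postgres.SetupPGXDB()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	defer sql.TearDownDB(t, db) // runs before Close; clears the eth.* tables and public.blocks

	// ... exercise the indexer/writer against db ...
}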
-package indexer +package sql import ( + "context" "testing" - - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" ) // TearDownDB is used to tear down the watcher dbs after tests -func TearDownDB(t *testing.T, db *postgres.DB) { - tx, err := db.Beginx() +func TearDownDB(t *testing.T, db Database) { + ctx := context.Background() + tx, err := db.Begin(ctx) if err != nil { t.Fatal(err) } - _, err = tx.Exec(`DELETE FROM eth.header_cids`) + _, err = tx.Exec(ctx, `DELETE FROM eth.header_cids`) if err != nil { t.Fatal(err) } - _, err = tx.Exec(`DELETE FROM eth.transaction_cids`) + _, err = tx.Exec(ctx, `DELETE FROM eth.transaction_cids`) if err != nil { t.Fatal(err) } - _, err = tx.Exec(`DELETE FROM eth.receipt_cids`) + _, err = tx.Exec(ctx, `DELETE FROM eth.receipt_cids`) if err != nil { t.Fatal(err) } - _, err = tx.Exec(`DELETE FROM eth.state_cids`) + _, err = tx.Exec(ctx, `DELETE FROM eth.state_cids`) if err != nil { t.Fatal(err) } - _, err = tx.Exec(`DELETE FROM eth.storage_cids`) + _, err = tx.Exec(ctx, `DELETE FROM eth.storage_cids`) if err != nil { t.Fatal(err) } - _, err = tx.Exec(`DELETE FROM blocks`) + _, err = tx.Exec(ctx, `DELETE FROM blocks`) if err != nil { t.Fatal(err) } - err = tx.Commit() + err = tx.Commit(ctx) if err != nil { t.Fatal(err) } diff --git a/statediff/indexer/database/sql/writer.go b/statediff/indexer/database/sql/writer.go new file mode 100644 index 000000000..ea276dfbf --- /dev/null +++ b/statediff/indexer/database/sql/writer.go @@ -0,0 +1,141 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
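The Tx handed back by Database.Begin follows the context-threaded pattern used by TearDownDB above: Exec and QueryRow take the caller's context, and the transaction is resolved with Commit or Rollback. A generic sketch of that pattern (the helper name is illustrative, not part of this diff):

func clearHeaders(ctx context.Context, db sql.Database) error {
	tx, err := db.Begin(ctx)
	if err != nil {
		return err
	}
	if _, err := tx.Exec(ctx, `DELETE FROM eth.header_cids`); err != nil {
		tx.Rollback(ctx) // best-effort rollback; its error is ignored in this sketch
		return err
	}
	return tx.Commit(ctx)
}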
+ +package sql + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/statediff/indexer/models" +) + +var ( + nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") +) + +// Writer handles processing and writing of indexed IPLD objects to Postgres +type Writer struct { + db Database +} + +// NewWriter creates a new pointer to a Writer +func NewWriter(db Database) *Writer { + return &Writer{ + db: db, + } +} + +func (in *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) (int64, error) { + var headerID int64 + err := tx.QueryRow(in.db.Context(), in.db.InsertHeaderStm(), + header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID(), header.Reward, header.StateRoot, header.TxRoot, + header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.BaseFee).Scan(&headerID) + if err != nil { + return 0, fmt.Errorf("error upserting header_cids entry: %v", err) + } + indexerMetrics.blocks.Inc(1) + return headerID, nil +} + +func (in *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel, headerID int64) error { + _, err := tx.Exec(in.db.Context(), in.db.InsertUncleStm(), + uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey) + if err != nil { + return fmt.Errorf("error upserting uncle_cids entry: %v", err) + } + return nil +} + +func (in *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel, headerID int64) (int64, error) { + var txID int64 + err := tx.QueryRow(in.db.Context(), in.db.InsertTxStm(), + headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID) + if err != nil { + return 0, fmt.Errorf("error upserting transaction_cids entry: %v", err) + } + indexerMetrics.transactions.Inc(1) + return txID, nil +} + +func (in *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessListElementModel, txID int64) error { + _, err := tx.Exec(in.db.Context(), in.db.InsertAccessListElementStm(), + txID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys) + if err != nil { + return fmt.Errorf("error upserting access_list_element entry: %v", err) + } + indexerMetrics.accessListEntries.Inc(1) + return nil +} + +func (in *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel, txID int64) (int64, error) { + var receiptID int64 + err := tx.QueryRow(in.db.Context(), in.db.InsertRctStm(), + txID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID) + if err != nil { + return 0, fmt.Errorf("error upserting receipt_cids entry: %w", err) + } + indexerMetrics.receipts.Inc(1) + return receiptID, nil +} + +func (in *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel, receiptID int64) error { + for _, log := range logs { + _, err := tx.Exec(in.db.Context(), in.db.InsertLogStm(), + log.LeafCID, log.LeafMhKey, receiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data) + if err != nil { + return fmt.Errorf("error upserting logs entry: %w", err) + } + indexerMetrics.logs.Inc(1) + } + return nil +} + +func (in *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel, headerID int64) (int64, error) { + var stateID int64 + var stateKey string + if stateNode.StateKey != nullHash.String() { + stateKey = stateNode.StateKey + } + err := tx.QueryRow(in.db.Context(), 
in.db.InsertStateStm(), + headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID) + if err != nil { + return 0, fmt.Errorf("error upserting state_cids entry: %v", err) + } + return stateID, nil +} + +func (in *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel, stateID int64) error { + _, err := tx.Exec(in.db.Context(), in.db.InsertAccountStm(), + stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot) + if err != nil { + return fmt.Errorf("error upserting state_accounts entry: %v", err) + } + return nil +} + +func (in *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel, stateID int64) error { + var storageKey string + if storageCID.StorageKey != nullHash.String() { + storageKey = storageCID.StorageKey + } + _, err := tx.Exec(in.db.Context(), in.db.InsertStorageStm(), + stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) + if err != nil { + return fmt.Errorf("error upserting storage_cids entry: %v", err) + } + return nil +} diff --git a/statediff/indexer/helpers.go b/statediff/indexer/helpers.go deleted file mode 100644 index 4e4f30c19..000000000 --- a/statediff/indexer/helpers.go +++ /dev/null @@ -1,45 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package indexer - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/statediff/types" -) - -// ResolveFromNodeType wrapper around NodeType.Int() so that we maintain backwards compatibility -func ResolveFromNodeType(nodeType types.NodeType) int { - return nodeType.Int() -} - -// ChainConfig returns the appropriate ethereum chain config for the provided chain id -func ChainConfig(chainID uint64) (*params.ChainConfig, error) { - switch chainID { - case 1: - return params.MainnetChainConfig, nil - case 3: - return params.RopstenChainConfig, nil - case 4: - return params.RinkebyChainConfig, nil - case 5: - return params.GoerliChainConfig, nil - default: - return nil, fmt.Errorf("chain config for chainid %d not available", chainID) - } -} diff --git a/statediff/indexer/interfaces/interfaces.go b/statediff/indexer/interfaces/interfaces.go new file mode 100644 index 000000000..d32c117eb --- /dev/null +++ b/statediff/indexer/interfaces/interfaces.go @@ -0,0 +1,46 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package interfaces + +import ( + "io" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/statediff/indexer/shared" + sdtypes "github.com/ethereum/go-ethereum/statediff/types" +) + +// StateDiffIndexer interface required to index statediff data +type StateDiffIndexer interface { + PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error) + PushStateNode(tx Batch, stateNode sdtypes.StateNode) error + PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error + ReportDBMetrics(delay time.Duration, quit <-chan bool) + io.Closer +} + +// Batch required for indexing data atomically +type Batch interface { + Submit(err error) error +} + +// Config used to configure different underlying implementations +type Config interface { + Type() shared.DBType +} diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12252078 b/statediff/indexer/ipld/eip2930_test_data/eth-block-12252078 similarity index 100% rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12252078 rename to statediff/indexer/ipld/eip2930_test_data/eth-block-12252078 diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365585 b/statediff/indexer/ipld/eip2930_test_data/eth-block-12365585 similarity index 100% rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365585 rename to statediff/indexer/ipld/eip2930_test_data/eth-block-12365585 diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365586 b/statediff/indexer/ipld/eip2930_test_data/eth-block-12365586 similarity index 100% rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-block-12365586 rename to statediff/indexer/ipld/eip2930_test_data/eth-block-12365586 diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12252078 b/statediff/indexer/ipld/eip2930_test_data/eth-receipts-12252078 similarity index 100% rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12252078 rename to statediff/indexer/ipld/eip2930_test_data/eth-receipts-12252078 diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365585 b/statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365585 similarity index 100% rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365585 rename to statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365585 diff --git a/statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365586 b/statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365586 similarity index 100% rename from statediff/indexer/ipfs/ipld/eip2930_test_data/eth-receipts-12365586 rename to statediff/indexer/ipld/eip2930_test_data/eth-receipts-12365586 diff --git a/statediff/indexer/ipfs/ipld/eth_account.go b/statediff/indexer/ipld/eth_account.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_account.go rename to statediff/indexer/ipld/eth_account.go diff --git a/statediff/indexer/ipfs/ipld/eth_account_test.go b/statediff/indexer/ipld/eth_account_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_account_test.go rename to statediff/indexer/ipld/eth_account_test.go diff --git a/statediff/indexer/ipfs/ipld/eth_header.go b/statediff/indexer/ipld/eth_header.go similarity index 100% rename from 
statediff/indexer/ipfs/ipld/eth_header.go rename to statediff/indexer/ipld/eth_header.go diff --git a/statediff/indexer/ipfs/ipld/eth_header_test.go b/statediff/indexer/ipld/eth_header_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_header_test.go rename to statediff/indexer/ipld/eth_header_test.go diff --git a/statediff/indexer/ipfs/ipld/eth_log.go b/statediff/indexer/ipld/eth_log.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_log.go rename to statediff/indexer/ipld/eth_log.go diff --git a/statediff/indexer/ipfs/ipld/eth_log_trie.go b/statediff/indexer/ipld/eth_log_trie.go similarity index 98% rename from statediff/indexer/ipfs/ipld/eth_log_trie.go rename to statediff/indexer/ipld/eth_log_trie.go index 2e36f0a68..49b7a7fc6 100644 --- a/statediff/indexer/ipfs/ipld/eth_log_trie.go +++ b/statediff/indexer/ipld/eth_log_trie.go @@ -89,7 +89,7 @@ func newLogTrie() *logTrie { } // getNodes invokes the localTrie, which computes the root hash of the -// log trie and returns its database keys, to return a slice +// log trie and returns its sql keys, to return a slice // of EthLogTrie nodes. func (rt *logTrie) getNodes() ([]*EthLogTrie, error) { keys, err := rt.getKeys() diff --git a/statediff/indexer/ipfs/ipld/eth_parser.go b/statediff/indexer/ipld/eth_parser.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_parser.go rename to statediff/indexer/ipld/eth_parser.go diff --git a/statediff/indexer/ipfs/ipld/eth_parser_test.go b/statediff/indexer/ipld/eth_parser_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_parser_test.go rename to statediff/indexer/ipld/eth_parser_test.go diff --git a/statediff/indexer/ipfs/ipld/eth_receipt.go b/statediff/indexer/ipld/eth_receipt.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_receipt.go rename to statediff/indexer/ipld/eth_receipt.go diff --git a/statediff/indexer/ipfs/ipld/eth_receipt_trie.go b/statediff/indexer/ipld/eth_receipt_trie.go similarity index 98% rename from statediff/indexer/ipfs/ipld/eth_receipt_trie.go rename to statediff/indexer/ipld/eth_receipt_trie.go index fc1480703..e187e7d9d 100644 --- a/statediff/indexer/ipfs/ipld/eth_receipt_trie.go +++ b/statediff/indexer/ipld/eth_receipt_trie.go @@ -121,7 +121,7 @@ func NewRctTrie() *rctTrie { } // GetNodes invokes the localTrie, which computes the root hash of the -// transaction trie and returns its database keys, to return a slice +// transaction trie and returns its sql keys, to return a slice // of EthRctTrie nodes. 
func (rt *rctTrie) GetNodes() ([]*EthRctTrie, error) { keys, err := rt.getKeys() diff --git a/statediff/indexer/ipfs/ipld/eth_state.go b/statediff/indexer/ipld/eth_state.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_state.go rename to statediff/indexer/ipld/eth_state.go diff --git a/statediff/indexer/ipfs/ipld/eth_state_test.go b/statediff/indexer/ipld/eth_state_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_state_test.go rename to statediff/indexer/ipld/eth_state_test.go diff --git a/statediff/indexer/ipfs/ipld/eth_storage.go b/statediff/indexer/ipld/eth_storage.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_storage.go rename to statediff/indexer/ipld/eth_storage.go diff --git a/statediff/indexer/ipfs/ipld/eth_storage_test.go b/statediff/indexer/ipld/eth_storage_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_storage_test.go rename to statediff/indexer/ipld/eth_storage_test.go diff --git a/statediff/indexer/ipfs/ipld/eth_tx.go b/statediff/indexer/ipld/eth_tx.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_tx.go rename to statediff/indexer/ipld/eth_tx.go diff --git a/statediff/indexer/ipfs/ipld/eth_tx_test.go b/statediff/indexer/ipld/eth_tx_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_tx_test.go rename to statediff/indexer/ipld/eth_tx_test.go diff --git a/statediff/indexer/ipfs/ipld/eth_tx_trie.go b/statediff/indexer/ipld/eth_tx_trie.go similarity index 98% rename from statediff/indexer/ipfs/ipld/eth_tx_trie.go rename to statediff/indexer/ipld/eth_tx_trie.go index 7e79ff164..943cf15ae 100644 --- a/statediff/indexer/ipfs/ipld/eth_tx_trie.go +++ b/statediff/indexer/ipld/eth_tx_trie.go @@ -121,7 +121,7 @@ func newTxTrie() *txTrie { } // getNodes invokes the localTrie, which computes the root hash of the -// transaction trie and returns its database keys, to return a slice +// transaction trie and returns its sql keys, to return a slice // of EthTxTrie nodes. func (tt *txTrie) getNodes() ([]*EthTxTrie, error) { keys, err := tt.getKeys() diff --git a/statediff/indexer/ipfs/ipld/eth_tx_trie_test.go b/statediff/indexer/ipld/eth_tx_trie_test.go similarity index 100% rename from statediff/indexer/ipfs/ipld/eth_tx_trie_test.go rename to statediff/indexer/ipld/eth_tx_trie_test.go diff --git a/statediff/indexer/ipfs/ipld/shared.go b/statediff/indexer/ipld/shared.go similarity index 96% rename from statediff/indexer/ipfs/ipld/shared.go rename to statediff/indexer/ipld/shared.go index 993e00b42..e5c22a3c6 100644 --- a/statediff/indexer/ipfs/ipld/shared.go +++ b/statediff/indexer/ipld/shared.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" - sdtrie "github.com/ethereum/go-ethereum/statediff/trie" + sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers" sdtypes "github.com/ethereum/go-ethereum/statediff/types" "github.com/ethereum/go-ethereum/trie" ) @@ -143,7 +143,7 @@ func (lt *localTrie) commit() error { return nil } -// getKeys returns the stored keys of the memory database +// getKeys returns the stored keys of the memory sql // of the localTrie for further processing. 
func (lt *localTrie) getKeys() ([][]byte, error) { if err := lt.commit(); err != nil { @@ -167,7 +167,7 @@ type nodeKey struct { TrieKey []byte } -// getLeafKeys returns the stored leaf keys from the memory database +// getLeafKeys returns the stored leaf keys from the memory sql // of the localTrie for further processing. func (lt *localTrie) getLeafKeys() ([]*nodeKey, error) { if err := lt.commit(); err != nil { diff --git a/statediff/indexer/ipfs/ipld/test_data/error-tx-eth-block-body-json-999999 b/statediff/indexer/ipld/test_data/error-tx-eth-block-body-json-999999 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/error-tx-eth-block-body-json-999999 rename to statediff/indexer/ipld/test_data/error-tx-eth-block-body-json-999999 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-0 b/statediff/indexer/ipld/test_data/eth-block-body-json-0 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-0 rename to statediff/indexer/ipld/test_data/eth-block-body-json-0 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-4139497 b/statediff/indexer/ipld/test_data/eth-block-body-json-4139497 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-4139497 rename to statediff/indexer/ipld/test_data/eth-block-body-json-4139497 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-997522 b/statediff/indexer/ipld/test_data/eth-block-body-json-997522 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-997522 rename to statediff/indexer/ipld/test_data/eth-block-body-json-997522 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999998 b/statediff/indexer/ipld/test_data/eth-block-body-json-999998 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999998 rename to statediff/indexer/ipld/test_data/eth-block-body-json-999998 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999999 b/statediff/indexer/ipld/test_data/eth-block-body-json-999999 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-json-999999 rename to statediff/indexer/ipld/test_data/eth-block-body-json-999999 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-997522 b/statediff/indexer/ipld/test_data/eth-block-body-rlp-997522 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-997522 rename to statediff/indexer/ipld/test_data/eth-block-body-rlp-997522 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-999999 b/statediff/indexer/ipld/test_data/eth-block-body-rlp-999999 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-body-rlp-999999 rename to statediff/indexer/ipld/test_data/eth-block-body-rlp-999999 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999996 b/statediff/indexer/ipld/test_data/eth-block-header-rlp-999996 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999996 rename to statediff/indexer/ipld/test_data/eth-block-header-rlp-999996 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999997 b/statediff/indexer/ipld/test_data/eth-block-header-rlp-999997 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999997 rename to statediff/indexer/ipld/test_data/eth-block-header-rlp-999997 diff --git 
a/statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999999 b/statediff/indexer/ipld/test_data/eth-block-header-rlp-999999 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-block-header-rlp-999999 rename to statediff/indexer/ipld/test_data/eth-block-header-rlp-999999 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-0e8b34 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-0e8b34 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-0e8b34 rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-0e8b34 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-56864f b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-56864f similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-56864f rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-56864f diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-6fc2d7 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-6fc2d7 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-6fc2d7 rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-6fc2d7 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-727994 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-727994 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-727994 rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-727994 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-c9070d b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-c9070d similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-c9070d rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-c9070d diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d5be90 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-d5be90 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d5be90 rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-d5be90 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d7f897 b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-d7f897 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-d7f897 rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-d7f897 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-eb2f5f b/statediff/indexer/ipld/test_data/eth-state-trie-rlp-eb2f5f similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-state-trie-rlp-eb2f5f rename to statediff/indexer/ipld/test_data/eth-state-trie-rlp-eb2f5f diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-000dd0 b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-000dd0 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-000dd0 rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-000dd0 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-113049 b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-113049 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-113049 rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-113049 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-9d1860 b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-9d1860 similarity index 100% rename from 
statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-9d1860 rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-9d1860 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffbcad b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffbcad similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffbcad rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffbcad diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffc25c b/statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffc25c similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-storage-trie-rlp-ffc25c rename to statediff/indexer/ipld/test_data/eth-storage-trie-rlp-ffc25c diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-0 b/statediff/indexer/ipld/test_data/eth-uncle-json-997522-0 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-0 rename to statediff/indexer/ipld/test_data/eth-uncle-json-997522-0 diff --git a/statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-1 b/statediff/indexer/ipld/test_data/eth-uncle-json-997522-1 similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/eth-uncle-json-997522-1 rename to statediff/indexer/ipld/test_data/eth-uncle-json-997522-1 diff --git a/statediff/indexer/ipfs/ipld/test_data/tx_data b/statediff/indexer/ipld/test_data/tx_data similarity index 100% rename from statediff/indexer/ipfs/ipld/test_data/tx_data rename to statediff/indexer/ipld/test_data/tx_data diff --git a/statediff/indexer/ipfs/ipld/trie_node.go b/statediff/indexer/ipld/trie_node.go similarity index 100% rename from statediff/indexer/ipfs/ipld/trie_node.go rename to statediff/indexer/ipld/trie_node.go diff --git a/statediff/indexer/mocks/test_data.go b/statediff/indexer/mocks/test_data.go index 2d544b6ea..f437dc8e4 100644 --- a/statediff/indexer/mocks/test_data.go +++ b/statediff/indexer/mocks/test_data.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff/testhelpers" + "github.com/ethereum/go-ethereum/statediff/test_helpers" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) @@ -111,7 +111,7 @@ var ( nonce1 = uint64(1) ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0" ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea") - ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress) + ContractLeafKey = test_helpers.AddressToLeafKey(ContractAddress) ContractAccount, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: nonce1, Balance: big.NewInt(0), @@ -127,8 +127,8 @@ var ( nonce0 = uint64(0) AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") - AccountLeafKey = testhelpers.Account2LeafKey - RemovedLeafKey = testhelpers.Account1LeafKey + AccountLeafKey = test_helpers.Account2LeafKey + RemovedLeafKey = test_helpers.Account1LeafKey Account, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: nonce0, Balance: big.NewInt(1000), diff --git a/statediff/indexer/models/batch.go b/statediff/indexer/models/batch.go index f780a9b0d..48b2944e0 100644 --- a/statediff/indexer/models/batch.go +++ b/statediff/indexer/models/batch.go @@ -1,3 +1,19 @@ +// VulcanizeDB +// 
Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + package models import "github.com/lib/pq" diff --git a/statediff/indexer/models/models.go b/statediff/indexer/models/models.go index 5837488f8..72efe2383 100644 --- a/statediff/indexer/models/models.go +++ b/statediff/indexer/models/models.go @@ -20,8 +20,8 @@ import "github.com/lib/pq" // IPLDModel is the db model for public.blocks type IPLDModel struct { - Key string - Data []byte + Key string `db:"key"` + Data []byte `db:"data"` } // HeaderModel is the db model for eth.header_cids diff --git a/statediff/indexer/postgres/config.go b/statediff/indexer/postgres/config.go deleted file mode 100644 index c2de0a6bf..000000000 --- a/statediff/indexer/postgres/config.go +++ /dev/null @@ -1,59 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package postgres - -import ( - "fmt" -) - -// Env variables -const ( - DATABASE_NAME = "DATABASE_NAME" - DATABASE_HOSTNAME = "DATABASE_HOSTNAME" - DATABASE_PORT = "DATABASE_PORT" - DATABASE_USER = "DATABASE_USER" - DATABASE_PASSWORD = "DATABASE_PASSWORD" - DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS" - DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS" - DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME" -) - -type ConnectionParams struct { - Hostname string - Name string - User string - Password string - Port int -} - -type ConnectionConfig struct { - MaxIdle int - MaxOpen int - MaxLifetime int -} - -func DbConnectionString(params ConnectionParams) string { - if len(params.User) > 0 && len(params.Password) > 0 { - return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable", - params.User, params.Password, params.Hostname, params.Port, params.Name) - } - if len(params.User) > 0 && len(params.Password) == 0 { - return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable", - params.User, params.Hostname, params.Port, params.Name) - } - return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", params.Hostname, params.Port, params.Name) -} diff --git a/statediff/indexer/postgres/postgres.go b/statediff/indexer/postgres/postgres.go deleted file mode 100644 index 455dac306..000000000 --- a/statediff/indexer/postgres/postgres.go +++ /dev/null @@ -1,76 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
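The connection-string construction deleted here survives as the DbConnectionString method on the new Config type; it is what NewSQLXDriver dials with earlier in this diff. Assuming that method keeps the formatting rules of the removed function, a config equivalent to the old test parameters would produce:

cfg := postgres.Config{
	Hostname:     "localhost",
	Port:         5432,
	DatabaseName: "vulcanize_public",
	Username:     "vdbm",
	Password:     "password",
}
connStr := cfg.DbConnectionString()
// expected form: postgresql://vdbm:password@localhost:5432/vulcanize_public?sslmode=disable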
- -package postgres - -import ( - "time" - - "github.com/jmoiron/sqlx" - _ "github.com/lib/pq" //postgres driver - - "github.com/ethereum/go-ethereum/statediff/indexer/node" -) - -type DB struct { - *sqlx.DB - Node node.Info - NodeID int64 -} - -func NewDB(connectString string, config ConnectionConfig, node node.Info) (*DB, error) { - db, connectErr := sqlx.Connect("postgres", connectString) - if connectErr != nil { - return &DB{}, ErrDBConnectionFailed(connectErr) - } - if config.MaxOpen > 0 { - db.SetMaxOpenConns(config.MaxOpen) - } - if config.MaxIdle > 0 { - db.SetMaxIdleConns(config.MaxIdle) - } - if config.MaxLifetime > 0 { - lifetime := time.Duration(config.MaxLifetime) * time.Second - db.SetConnMaxLifetime(lifetime) - } - pg := DB{DB: db, Node: node} - nodeErr := pg.CreateNode(&node) - if nodeErr != nil { - return &DB{}, ErrUnableToSetNode(nodeErr) - } - return &pg, nil -} - -func (db *DB) CreateNode(node *node.Info) error { - var nodeID int64 - err := db.QueryRow( - `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (genesis_block, network_id, node_id, chain_id) - DO UPDATE - SET genesis_block = $1, - network_id = $2, - node_id = $3, - client_name = $4, - chain_id = $5 - RETURNING id`, - node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID).Scan(&nodeID) - if err != nil { - return ErrUnableToSetNode(err) - } - db.NodeID = nodeID - return nil -} diff --git a/statediff/indexer/shared/chain_type.go b/statediff/indexer/shared/chain_type.go deleted file mode 100644 index c3dedfe38..000000000 --- a/statediff/indexer/shared/chain_type.go +++ /dev/null @@ -1,78 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
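The node-upsert removed here is now performed by each driver's createNode method through the shared createNodeStm statement, which is not shown in this diff. Presumably it carries the same ON CONFLICT upsert as the deleted CreateNode, along the lines of:

const createNodeStm = `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id)
	VALUES ($1, $2, $3, $4, $5)
	ON CONFLICT (genesis_block, network_id, node_id, chain_id)
	DO UPDATE
	SET genesis_block = $1,
	    network_id = $2,
	    node_id = $3,
	    client_name = $4,
	    chain_id = $5
	RETURNING id`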
- -package shared - -import ( - "errors" - "strings" -) - -// ChainType enum for specifying blockchain -type ChainType int - -const ( - UnknownChain ChainType = iota - Ethereum - Bitcoin - Omni - EthereumClassic -) - -func (c ChainType) String() string { - switch c { - case Ethereum: - return "Ethereum" - case Bitcoin: - return "Bitcoin" - case Omni: - return "Omni" - case EthereumClassic: - return "EthereumClassic" - default: - return "" - } -} - -func (c ChainType) API() string { - switch c { - case Ethereum: - return "eth" - case Bitcoin: - return "btc" - case Omni: - return "omni" - case EthereumClassic: - return "etc" - default: - return "" - } -} - -func NewChainType(name string) (ChainType, error) { - switch strings.ToLower(name) { - case "ethereum", "eth": - return Ethereum, nil - case "bitcoin", "btc", "xbt": - return Bitcoin, nil - case "omni": - return Omni, nil - case "classic", "etc": - return EthereumClassic, nil - default: - return UnknownChain, errors.New("invalid name for chain") - } -} diff --git a/statediff/indexer/shared/constants.go b/statediff/indexer/shared/constants.go index 3dc2994c4..6d1e298ad 100644 --- a/statediff/indexer/shared/constants.go +++ b/statediff/indexer/shared/constants.go @@ -1,5 +1,5 @@ // VulcanizeDB -// Copyright © 2019 Vulcanize +// Copyright © 2021 Vulcanize // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as published by @@ -17,6 +17,7 @@ package shared const ( - DefaultMaxBatchSize uint64 = 100 - DefaultMaxBatchNumber int64 = 50 + RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya" + RemovedNodeStateCID = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya" + RemovedNodeMhKey = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A" ) diff --git a/statediff/indexer/shared/data_type.go b/statediff/indexer/shared/data_type.go deleted file mode 100644 index ccab92c1e..000000000 --- a/statediff/indexer/shared/data_type.go +++ /dev/null @@ -1,102 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package shared - -import ( - "fmt" - "strings" -) - -// DataType is an enum to loosely represent type of chain data -type DataType int - -const ( - UnknownDataType DataType = iota - 1 - Full - Headers - Uncles - Transactions - Receipts - State - Storage -) - -// String() method to resolve ReSyncType enum -func (r DataType) String() string { - switch r { - case Full: - return "full" - case Headers: - return "headers" - case Uncles: - return "uncles" - case Transactions: - return "transactions" - case Receipts: - return "receipts" - case State: - return "state" - case Storage: - return "storage" - default: - return "unknown" - } -} - -// GenerateDataTypeFromString returns a DataType from a provided string -func GenerateDataTypeFromString(str string) (DataType, error) { - switch strings.ToLower(str) { - case "full", "f": - return Full, nil - case "headers", "header", "h": - return Headers, nil - case "uncles", "u": - return Uncles, nil - case "transactions", "transaction", "trxs", "txs", "trx", "tx", "t": - return Transactions, nil - case "receipts", "receipt", "rcts", "rct", "r": - return Receipts, nil - case "state": - return State, nil - case "storage": - return Storage, nil - default: - return UnknownDataType, fmt.Errorf("unrecognized resync type: %s", str) - } -} - -// SupportedDataType returns whether a DataType is supported -func SupportedDataType(d DataType) (bool, error) { - switch d { - case Full: - return true, nil - case Headers: - return true, nil - case Uncles: - return true, nil - case Transactions: - return true, nil - case Receipts: - return true, nil - case State: - return true, nil - case Storage: - return true, nil - default: - return true, nil - } -} diff --git a/statediff/indexer/ipfs/models.go b/statediff/indexer/shared/db_kind.go similarity index 83% rename from statediff/indexer/ipfs/models.go rename to statediff/indexer/shared/db_kind.go index eb0312beb..711f9d050 100644 --- a/statediff/indexer/ipfs/models.go +++ b/statediff/indexer/shared/db_kind.go @@ -1,5 +1,5 @@ // VulcanizeDB -// Copyright © 2019 Vulcanize +// Copyright © 2021 Vulcanize // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as published by @@ -14,9 +14,11 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package ipfs +package shared -type BlockModel struct { - CID string `db:"key"` - Data []byte `db:"data"` -} +type DBType string + +const ( + POSTGRES DBType = "Postgres" + DUMP DBType = "Dump" +) diff --git a/statediff/indexer/shared/functions.go b/statediff/indexer/shared/functions.go index 7823d8f78..8b0acbb54 100644 --- a/statediff/indexer/shared/functions.go +++ b/statediff/indexer/shared/functions.go @@ -18,19 +18,12 @@ package shared import ( "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" - "github.com/ipfs/go-cid" blockstore "github.com/ipfs/go-ipfs-blockstore" dshelp "github.com/ipfs/go-ipfs-ds-help" - "github.com/jmoiron/sqlx" "github.com/multiformats/go-multihash" ) -// IPLDInsertPgStr is the postgres statement string for IPLDs inserting into public.blocks -const IPLDInsertPgStr = `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING` - // HandleZeroAddrPointer will return an empty string for a nil address pointer func HandleZeroAddrPointer(to *common.Address) string { if to == nil { @@ -47,13 +40,6 @@ func HandleZeroAddr(to common.Address) string { return to.Hex() } -// Rollback sql transaction and log any error -func Rollback(tx *sqlx.Tx) { - if err := tx.Rollback(); err != nil { - log.Error(err.Error()) - } -} - // MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string func MultihashKeyFromCID(c cid.Cid) string { dbKey := dshelp.MultihashToDsKey(c.Hash()) @@ -69,9 +55,3 @@ func MultihashKeyFromKeccak256(hash common.Hash) (string, error) { dbKey := dshelp.MultihashToDsKey(mh) return blockstore.BlockPrefix.String() + dbKey.String(), nil } - -// PublishDirectWithDB diretly writes a previously derived mhkey => value pair to the ipld database -func PublishDirectWithDB(db *postgres.DB, key string, value []byte) error { - _, err := db.Exec(IPLDInsertPgStr, key, value) - return err -} diff --git a/statediff/indexer/reward.go b/statediff/indexer/shared/reward.go similarity index 99% rename from statediff/indexer/reward.go rename to statediff/indexer/shared/reward.go index 47e3f17b9..3d5752e25 100644 --- a/statediff/indexer/reward.go +++ b/statediff/indexer/shared/reward.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package indexer +package shared import ( "math/big" diff --git a/statediff/indexer/shared/types.go b/statediff/indexer/shared/types.go deleted file mode 100644 index 1337ba68a..000000000 --- a/statediff/indexer/shared/types.go +++ /dev/null @@ -1,44 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
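For orientation, a minimal usage sketch of the shared helpers retained above; the import path and signature are as shown in this diff, and the hash value is an arbitrary example:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/statediff/indexer/shared"
    )

    func main() {
        // Derive the blockstore-prefixed multihash key under which an IPLD block
        // keyed by this keccak-256 hash would be stored.
        h := common.HexToHash("0x0f770d4f6d0869cbbf1a89f3b4bcc826e9c121c42d7b9b4ee75bf9a1d08af3cd")
        key, err := shared.MultihashKeyFromKeccak256(h)
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println(key) // prints "/blocks/" followed by the encoded multihash
    }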
- -package shared - -import ( - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/types" -) - -// TrieNode struct used to flag node as leaf or not -type TrieNode struct { - Path []byte - LeafKey common.Hash - Value []byte - Type types.NodeType -} - -// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres -// Returned by IPLDPublisher -// Passed to CIDIndexer -type CIDPayload struct { - HeaderCID models.HeaderModel - UncleCIDs []models.UncleModel - TransactionCIDs []models.TxModel - ReceiptCIDs map[common.Hash]models.ReceiptModel - StateNodeCIDs []models.StateNodeModel - StateAccounts map[string]models.StateAccountModel - StorageNodeCIDs map[string][]models.StorageNodeModel -} diff --git a/statediff/indexer/shared/test_helpers.go b/statediff/indexer/test_helpers/test_helpers.go similarity index 59% rename from statediff/indexer/shared/test_helpers.go rename to statediff/indexer/test_helpers/test_helpers.go index d54998cd5..b519d80b5 100644 --- a/statediff/indexer/shared/test_helpers.go +++ b/statediff/indexer/test_helpers/test_helpers.go @@ -14,37 +14,20 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package shared +package test_helpers import ( "reflect" "testing" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - - "github.com/ethereum/go-ethereum/statediff/indexer/node" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" ) +// ExpectEqual asserts the provided interfaces are deep equal func ExpectEqual(t *testing.T, got interface{}, want interface{}) { if !reflect.DeepEqual(got, want) { t.Fatalf("Expected: %v\nActual: %v", want, got) } } -// SetupDB is use to setup a db for watcher tests -func SetupDB() (*postgres.DB, error) { - uri := postgres.DbConnectionString(postgres.ConnectionParams{ - User: "vdbm", - Password: "password", - Hostname: "localhost", - Name: "vulcanize_public", - Port: 5432, - }) - return postgres.NewDB(uri, postgres.ConnectionConfig{}, node.Info{}) -} - // ListContainsString used to check if a list of strings contains a particular string func ListContainsString(sss []string, s string) bool { for _, str := range sss { @@ -54,15 +37,3 @@ func ListContainsString(sss []string, s string) bool { } return false } - -// TestCID creates a basic CID for testing purposes -func TestCID(b []byte) cid.Cid { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: multihash.KECCAK_256, - MhLength: -1, - } - c, _ := pref.Sum(b) - return c -} diff --git a/statediff/indexer/writer.go b/statediff/indexer/writer.go deleted file mode 100644 index cbc058896..000000000 --- a/statediff/indexer/writer.go +++ /dev/null @@ -1,158 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package indexer - -import ( - "fmt" - - "github.com/jmoiron/sqlx" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" -) - -var ( - nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") -) - -// PostgresCIDWriter handles processing and writing of indexed IPLD objects to Postgres -type PostgresCIDWriter struct { - db *postgres.DB -} - -// NewPostgresCIDWriter creates a new pointer to a PostgresCIDWriter -func NewPostgresCIDWriter(db *postgres.DB) *PostgresCIDWriter { - return &PostgresCIDWriter{ - db: db, - } -} - -func (in *PostgresCIDWriter) upsertHeaderCID(tx *sqlx.Tx, header models.HeaderModel) (int64, error) { - var headerID int64 - err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) - ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16) - RETURNING id`, - header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot, - header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.BaseFee).Scan(&headerID) - if err != nil { - return 0, fmt.Errorf("error upserting header_cids entry: %v", err) - } - indexerMetrics.blocks.Inc(1) - return headerID, nil -} - -func (in *PostgresCIDWriter) upsertUncleCID(tx *sqlx.Tx, uncle models.UncleModel, headerID int64) error { - _, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`, - uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey) - if err != nil { - return fmt.Errorf("error upserting uncle_cids entry: %v", err) - } - return nil -} - -func (in *PostgresCIDWriter) upsertTransactionCID(tx *sqlx.Tx, transaction models.TxModel, headerID int64) (int64, error) { - var txID int64 - err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) - ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9) - RETURNING id`, - headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID) - if err != nil { - return 0, fmt.Errorf("error upserting transaction_cids entry: %v", err) - } - indexerMetrics.transactions.Inc(1) - return txID, nil -} - -func (in *PostgresCIDWriter) upsertAccessListElement(tx *sqlx.Tx, accessListElement models.AccessListElementModel, txID int64) error { - _, err := tx.Exec(`INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) - ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)`, - txID, accessListElement.Index, 
accessListElement.Address, accessListElement.StorageKeys) - if err != nil { - return fmt.Errorf("error upserting access_list_element entry: %v", err) - } - indexerMetrics.accessListEntries.Inc(1) - return nil -} - -func (in *PostgresCIDWriter) upsertReceiptCID(tx *sqlx.Tx, rct *models.ReceiptModel, txID int64) (int64, error) { - var receiptID int64 - err := tx.QueryRowx(`INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8) - RETURNING id`, - txID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID) - if err != nil { - return 0, fmt.Errorf("error upserting receipt_cids entry: %w", err) - } - indexerMetrics.receipts.Inc(1) - return receiptID, nil -} - -func (in *PostgresCIDWriter) upsertLogCID(tx *sqlx.Tx, logs []*models.LogsModel, receiptID int64) error { - for _, log := range logs { - _, err := tx.Exec(`INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) - ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = ($1, $2, $4, $6, $7, $8, $9, $10)`, - log.LeafCID, log.LeafMhKey, receiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data) - if err != nil { - return fmt.Errorf("error upserting logs entry: %w", err) - } - indexerMetrics.logs.Inc(1) - } - return nil -} - -func (in *PostgresCIDWriter) upsertStateCID(tx *sqlx.Tx, stateNode models.StateNodeModel, headerID int64) (int64, error) { - var stateID int64 - var stateKey string - if stateNode.StateKey != nullHash.String() { - stateKey = stateNode.StateKey - } - err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) - ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) - RETURNING id`, - headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID) - if err != nil { - return 0, fmt.Errorf("error upserting state_cids entry: %v", err) - } - return stateID, nil -} - -func (in *PostgresCIDWriter) upsertStateAccount(tx *sqlx.Tx, stateAccount models.StateAccountModel, stateID int64) error { - _, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`, - stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot) - if err != nil { - return fmt.Errorf("error upserting state_accounts entry: %v", err) - } - return nil -} - -func (in *PostgresCIDWriter) upsertStorageCID(tx *sqlx.Tx, storageCID models.StorageNodeModel, stateID int64) error { - var storageKey string - if storageCID.StorageKey != nullHash.String() { - storageKey = storageCID.StorageKey - } - _, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) - ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, 
mh_key) = ($2, $3, $5, $6, $7)`, - stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) - if err != nil { - return fmt.Errorf("error upserting storage_cids entry: %v", err) - } - return nil -} diff --git a/statediff/mainnet_tests/builder_test.go b/statediff/mainnet_tests/builder_test.go index 859f00489..d838302e0 100644 --- a/statediff/mainnet_tests/builder_test.go +++ b/statediff/mainnet_tests/builder_test.go @@ -37,7 +37,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers" + "github.com/ethereum/go-ethereum/statediff/test_helpers" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) @@ -53,8 +53,8 @@ var ( block1CoinbaseAccount, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(5000000000000000000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) block1CoinbaseLeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("38251692195afc818c92b485fcb8a4691af89cbe5a2ab557b83a4261be2a9a"), @@ -125,8 +125,8 @@ var ( block2CoinbaseAccount, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: big.NewInt(5000000000000000000), - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) block2CoinbaseLeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("20679cbcf198c1741a6f4e4473845659a30caa8b26f8d37a0be2e2bc0d8892"), @@ -137,8 +137,8 @@ var ( block2MovedPremineAccount, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: block2MovedPremineBalance, - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) block2MovedPremineLeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("20f2e24db7943eab4415f99e109698863b0fecca1cf9ffc500f38cefbbe29e"), @@ -231,8 +231,8 @@ var ( block3CoinbaseAccount, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: blcok3CoinbaseBalance, - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) block3CoinbaseLeafNode, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3a174f00e64521a535f35e67c1aa241951c791639b2f3d060f49c5d9fa8b9e"), @@ -244,8 +244,8 @@ var ( block3MovedPremineAccount1, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: block3MovedPremineBalance1, - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) block3MovedPremineLeafNode1, _ = rlp.EncodeToBytes([]interface{}{ common.Hex2Bytes("3ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190"), // ce573ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190 @@ -257,8 +257,8 @@ var ( block3MovedPremineAccount2, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: 0, Balance: block3MovedPremineBalance2, - CodeHash: testhelpers.NullCodeHash.Bytes(), - Root: testhelpers.EmptyContractRoot, + CodeHash: test_helpers.NullCodeHash.Bytes(), + Root: test_helpers.EmptyContractRoot, }) block3MovedPremineLeafNode2, _ = rlp.EncodeToBytes([]interface{}{ 
common.Hex2Bytes("33bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012"), // ce5783bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012 @@ -480,7 +480,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) { var tests = []struct { name string startingArguments statediff.Args - expected *statediff.StateObject + expected *sdtypes.StateObject }{ // note that block0 (genesis) has over 1000 nodes due to the pre-allocation for the crowd-sale // it is not feasible to write a unit test of that size at this time @@ -493,7 +493,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) { BlockNumber: block1.Number(), BlockHash: block1.Hash(), }, - &statediff.StateObject{ + &sdtypes.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), Nodes: []sdtypes.StateNode{ @@ -536,7 +536,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) { BlockNumber: block2.Number(), BlockHash: block2.Hash(), }, - &statediff.StateObject{ + &sdtypes.StateObject{ BlockNumber: block2.Number(), BlockHash: block2.Hash(), Nodes: []sdtypes.StateNode{ @@ -594,7 +594,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) { BlockNumber: block3.Number(), BlockHash: block3.Hash(), }, - &statediff.StateObject{ + &sdtypes.StateObject{ BlockNumber: block3.Number(), BlockHash: block3.Hash(), Nodes: []sdtypes.StateNode{ diff --git a/statediff/metrics.go b/statediff/metrics.go index 7e7d6e328..afc80e40e 100644 --- a/statediff/metrics.go +++ b/statediff/metrics.go @@ -1,3 +1,19 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + package statediff import ( diff --git a/statediff/payload.go b/statediff/payload.go new file mode 100644 index 000000000..233141278 --- /dev/null +++ b/statediff/payload.go @@ -0,0 +1,57 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package statediff + +import ( + "encoding/json" + "math/big" +) + +// Payload packages the data to send to statediff subscriptions +type Payload struct { + BlockRlp []byte `json:"blockRlp"` + TotalDifficulty *big.Int `json:"totalDifficulty"` + ReceiptsRlp []byte `json:"receiptsRlp"` + StateObjectRlp []byte `json:"stateObjectRlp" gencodec:"required"` + + encoded []byte + err error +} + +func (sd *Payload) ensureEncoded() { + if sd.encoded == nil && sd.err == nil { + sd.encoded, sd.err = json.Marshal(sd) + } +} + +// Length to implement Encoder interface for Payload +func (sd *Payload) Length() int { + sd.ensureEncoded() + return len(sd.encoded) +} + +// Encode to implement Encoder interface for Payload +func (sd *Payload) Encode() ([]byte, error) { + sd.ensureEncoded() + return sd.encoded, sd.err +} + +// Subscription struct holds our subscription channels +type Subscription struct { + PayloadChan chan<- Payload + QuitChan chan<- bool +} diff --git a/statediff/service.go b/statediff/service.go index de6e84a65..ae2e34c6c 100644 --- a/statediff/service.go +++ b/statediff/service.go @@ -41,9 +41,9 @@ import ( "github.com/ethereum/go-ethereum/trie" ind "github.com/ethereum/go-ethereum/statediff/indexer" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node" - "github.com/ethereum/go-ethereum/statediff/indexer/postgres" - . "github.com/ethereum/go-ethereum/statediff/types" + types2 "github.com/ethereum/go-ethereum/statediff/types" ) const chainEventChanSize = 20000 @@ -72,41 +72,32 @@ type blockChain interface { // IService is the state-diffing service interface type IService interface { - // Start() and Stop() + // Lifecycle Start() and Stop() methods node.Lifecycle - // Method to getting API(s) for this service + // APIs method for getting API(s) for this service APIs() []rpc.API - // Main event loop for processing state diffs + // Loop is the main event loop for processing state diffs Loop(chainEventCh chan core.ChainEvent) - // Method to subscribe to receive state diff processing output + // Subscribe method to subscribe to receive state diff processing output` Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params Params) - // Method to unsubscribe from state diff processing + // Unsubscribe method to unsubscribe from state diff processing Unsubscribe(id rpc.ID) error - // Method to get state diff object at specific block + // StateDiffAt method to get state diff object at specific block StateDiffAt(blockNumber uint64, params Params) (*Payload, error) - // Method to get state diff object at specific block + // StateDiffFor method to get state diff object at specific block StateDiffFor(blockHash common.Hash, params Params) (*Payload, error) - // Method to get state trie object at specific block + // StateTrieAt method to get state trie object at specific block StateTrieAt(blockNumber uint64, params Params) (*Payload, error) - // Method to stream out all code and codehash pairs - StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- CodeAndCodeHash, quitChan chan<- bool) - // Method to write state diff object directly to DB + // StreamCodeAndCodeHash method to stream out all code and codehash pairs + StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool) + // WriteStateDiffAt method to write state diff object directly to DB WriteStateDiffAt(blockNumber uint64, params Params) error - // Method to write state diff object directly to DB + // 
WriteStateDiffFor method to write state diff object directly to DB WriteStateDiffFor(blockHash common.Hash, params Params) error - // Event loop for progressively processing and writing diffs directly to DB + // WriteLoop event loop for progressively processing and writing diffs directly to DB WriteLoop(chainEventCh chan core.ChainEvent) } -// Wraps consructor parameters -type ServiceParams struct { - DBParams *DBParams - // Whether to enable writing state diffs directly to track blochain head - EnableWriteLoop bool - // Size of the worker pool - NumWorkers uint -} - // Service is the underlying struct for the state diffing service type Service struct { // Used to sync access to the Subscriptions @@ -122,26 +113,26 @@ type Service struct { // A mapping of subscription params rlp hash to the corresponding subscription params SubscriptionTypes map[common.Hash]Params // Cache the last block so that we can avoid having to lookup the next block's parent - BlockCache blockCache + BlockCache BlockCache // Whether or not we have any subscribers; only if we do, do we processes state diffs subscribers int32 // Interface for publishing statediffs as PG-IPLD objects - indexer ind.Indexer + indexer interfaces.StateDiffIndexer // Whether to enable writing state diffs directly to track blochain head enableWriteLoop bool // Size of the worker pool numWorkers uint } -// Wrap the cached last block for safe access from different service loops -type blockCache struct { +// BlockCache caches the last block for safe access from different service loops +type BlockCache struct { sync.Mutex blocks map[common.Hash]*types.Block maxSize uint } -func NewBlockCache(max uint) blockCache { - return blockCache{ +func NewBlockCache(max uint) BlockCache { + return BlockCache{ blocks: make(map[common.Hash]*types.Block), maxSize: max, } @@ -149,29 +140,23 @@ func NewBlockCache(max uint) blockCache { // New creates a new statediff.Service // func New(stack *node.Node, ethServ *eth.Ethereum, dbParams *DBParams, enableWriteLoop bool) error { -func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params ServiceParams) error { +func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params Config) error { blockChain := ethServ.BlockChain() - var indexer ind.Indexer + var indexer interfaces.StateDiffIndexer quitCh := make(chan bool) - if params.DBParams != nil { + if params.IndexerConfig != nil { info := nodeinfo.Info{ GenesisBlock: blockChain.Genesis().Hash().Hex(), NetworkID: strconv.FormatUint(cfg.NetworkId, 10), ChainID: blockChain.Config().ChainID.Uint64(), - ID: params.DBParams.ID, - ClientName: params.DBParams.ClientName, + ID: params.ID, + ClientName: params.ClientName, } - - // TODO: pass max idle, open, lifetime? 
- db, err := postgres.NewDB(params.DBParams.ConnectionURL, postgres.ConnectionConfig{}, info) + var err error + indexer, err = ind.NewStateDiffIndexer(params.Context, blockChain.Config(), info, params.IndexerConfig) if err != nil { return err } - indexer, err = ind.NewStateDiffIndexer(blockChain.Config(), db) - if err != nil { - return err - } - indexer.ReportDBMetrics(10*time.Second, quitCh) } workers := params.NumWorkers @@ -214,7 +199,7 @@ func (sds *Service) APIs() []rpc.API { // Return the parent block of currentBlock, using the cached block if available; // and cache the passed block -func (lbc *blockCache) getParentBlock(currentBlock *types.Block, bc blockChain) *types.Block { +func (lbc *BlockCache) getParentBlock(currentBlock *types.Block, bc blockChain) *types.Block { lbc.Lock() parentHash := currentBlock.ParentHash() var parentBlock *types.Block @@ -590,7 +575,7 @@ func sendNonBlockingQuit(id rpc.ID, sub Subscription) { } // StreamCodeAndCodeHash subscription method for extracting all the codehash=>code mappings that exist in the trie at the provided height -func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- CodeAndCodeHash, quitChan chan<- bool) { +func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool) { current := sds.BlockChain.GetBlockByNumber(blockNumber) log.Info("sending code and codehash", "block height", blockNumber) currentTrie, err := sds.BlockChain.StateCache().OpenTrie(current.Root()) @@ -620,7 +605,7 @@ func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- Cod log.Error("error collecting contract code", "err", err) return } - outChan <- CodeAndCodeHash{ + outChan <- types2.CodeAndCodeHash{ Hash: codeHash, Code: code, } @@ -660,7 +645,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p var totalDifficulty *big.Int var receipts types.Receipts var err error - var tx *ind.BlockTx + var tx interfaces.Batch if params.IncludeTD { totalDifficulty = sds.BlockChain.GetTd(block.Hash(), block.NumberU64()) } @@ -672,14 +657,18 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p return err } // defer handling of commit/rollback for any return case - defer tx.Close(tx, err) - output := func(node StateNode) error { + defer func() { + if err := tx.Submit(err); err != nil { + log.Error("batch transaction submission failed", "err", err) + } + }() + output := func(node types2.StateNode) error { return sds.indexer.PushStateNode(tx, node) } - codeOutput := func(c CodeAndCodeHash) error { + codeOutput := func(c types2.CodeAndCodeHash) error { return sds.indexer.PushCodeAndCodeHash(tx, c) } - err = sds.Builder.WriteStateDiffObject(StateRoots{ + err = sds.Builder.WriteStateDiffObject(types2.StateRoots{ NewStateRoot: block.Root(), OldStateRoot: parentRoot, }, params, output, codeOutput) diff --git a/statediff/service_test.go b/statediff/service_test.go index ca9a483a5..a17f89217 100644 --- a/statediff/service_test.go +++ b/statediff/service_test.go @@ -24,6 +24,8 @@ import ( "sync" "testing" + types2 "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/common" @@ -32,7 +34,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" statediff "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers/mocks" + "github.com/ethereum/go-ethereum/statediff/test_helpers/mocks" 
) func TestServiceLoop(t *testing.T) { @@ -218,7 +220,7 @@ func TestGetStateDiffAt(t *testing.T) { } func testErrorInStateDiffAt(t *testing.T) { - mockStateDiff := statediff.StateObject{ + mockStateDiff := types2.StateObject{ BlockNumber: testBlock1.Number(), BlockHash: testBlock1.Hash(), } diff --git a/statediff/test_helpers/constant.go b/statediff/test_helpers/constant.go new file mode 100644 index 000000000..ba591ebb4 --- /dev/null +++ b/statediff/test_helpers/constant.go @@ -0,0 +1,33 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package test_helpers + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/params" +) + +var ( + BalanceChange1000 = int64(1000) + BalanceChange10000 = int64(10000) + BalanceChange1Ether = int64(params.Ether) + Block1Account1Balance = big.NewInt(BalanceChange10000) + Block2Account2Balance = big.NewInt(21000000000000) + GasFees = int64(params.GWei) * int64(params.TxGas) + ContractGasLimit = uint64(1000000) +) diff --git a/statediff/testhelpers/helpers.go b/statediff/test_helpers/helpers.go similarity index 99% rename from statediff/testhelpers/helpers.go rename to statediff/test_helpers/helpers.go index 168d770af..8373f7537 100644 --- a/statediff/testhelpers/helpers.go +++ b/statediff/test_helpers/helpers.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package testhelpers +package test_helpers import ( "math/big" diff --git a/statediff/testhelpers/mocks/blockchain.go b/statediff/test_helpers/mocks/blockchain.go similarity index 100% rename from statediff/testhelpers/mocks/blockchain.go rename to statediff/test_helpers/mocks/blockchain.go diff --git a/statediff/testhelpers/mocks/builder.go b/statediff/test_helpers/mocks/builder.go similarity index 80% rename from statediff/testhelpers/mocks/builder.go rename to statediff/test_helpers/mocks/builder.go index ff9faf3ec..e2452301a 100644 --- a/statediff/testhelpers/mocks/builder.go +++ b/statediff/test_helpers/mocks/builder.go @@ -26,15 +26,15 @@ import ( type Builder struct { Args statediff.Args Params statediff.Params - StateRoots statediff.StateRoots - stateDiff statediff.StateObject + StateRoots sdtypes.StateRoots + stateDiff sdtypes.StateObject block *types.Block - stateTrie statediff.StateObject + stateTrie sdtypes.StateObject builderError error } // BuildStateDiffObject mock method -func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statediff.Params) (statediff.StateObject, error) { +func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statediff.Params) (sdtypes.StateObject, error) { builder.Args = args builder.Params = params @@ -42,7 +42,7 @@ func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statedi } // BuildStateDiffObject mock method -func (builder *Builder) WriteStateDiffObject(args statediff.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error { +func (builder *Builder) WriteStateDiffObject(args sdtypes.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error { builder.StateRoots = args builder.Params = params @@ -50,14 +50,14 @@ func (builder *Builder) WriteStateDiffObject(args statediff.StateRoots, params s } // BuildStateTrieObject mock method -func (builder *Builder) BuildStateTrieObject(block *types.Block) (statediff.StateObject, error) { +func (builder *Builder) BuildStateTrieObject(block *types.Block) (sdtypes.StateObject, error) { builder.block = block return builder.stateTrie, builder.builderError } // SetStateDiffToBuild mock method -func (builder *Builder) SetStateDiffToBuild(stateDiff statediff.StateObject) { +func (builder *Builder) SetStateDiffToBuild(stateDiff sdtypes.StateObject) { builder.stateDiff = stateDiff } diff --git a/statediff/testhelpers/mocks/service.go b/statediff/test_helpers/mocks/service.go similarity index 100% rename from statediff/testhelpers/mocks/service.go rename to statediff/test_helpers/mocks/service.go diff --git a/statediff/testhelpers/mocks/service_test.go b/statediff/test_helpers/mocks/service_test.go similarity index 93% rename from statediff/testhelpers/mocks/service_test.go rename to statediff/test_helpers/mocks/service_test.go index 8c1fd49cf..b3b77d4bf 100644 --- a/statediff/testhelpers/mocks/service_test.go +++ b/statediff/test_helpers/mocks/service_test.go @@ -30,14 +30,14 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/statediff" - "github.com/ethereum/go-ethereum/statediff/testhelpers" + "github.com/ethereum/go-ethereum/statediff/test_helpers" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) var ( emptyStorage = make([]sdtypes.StorageNode, 0) block0, block1 *types.Block - minerLeafKey = testhelpers.AddressToLeafKey(common.HexToAddress("0x0")) + minerLeafKey = 
test_helpers.AddressToLeafKey(common.HexToAddress("0x0")) account1, _ = rlp.EncodeToBytes(types.StateAccount{ Nonce: uint64(0), Balance: big.NewInt(10000), @@ -90,9 +90,9 @@ func TestAPI(t *testing.T) { } func testSubscriptionAPI(t *testing.T) { - blocks, chain := testhelpers.MakeChain(1, testhelpers.Genesis, testhelpers.TestChainGen) + blocks, chain := test_helpers.MakeChain(1, test_helpers.Genesis, test_helpers.TestChainGen) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] expectedBlockRlp, _ := rlp.EncodeToBytes(block1) mockReceipt := &types.Receipt{ @@ -100,7 +100,7 @@ func testSubscriptionAPI(t *testing.T) { BlockHash: block1.Hash(), } expectedReceiptBytes, _ := rlp.EncodeToBytes(types.Receipts{mockReceipt}) - expectedStateDiff := statediff.StateObject{ + expectedStateDiff := sdtypes.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), Nodes: []sdtypes.StateNode{ @@ -114,14 +114,14 @@ func testSubscriptionAPI(t *testing.T) { { Path: []byte{'\x0e'}, NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountLeafNode, StorageNodes: emptyStorage, }, @@ -176,9 +176,9 @@ func testSubscriptionAPI(t *testing.T) { } func testHTTPAPI(t *testing.T) { - blocks, chain := testhelpers.MakeChain(1, testhelpers.Genesis, testhelpers.TestChainGen) + blocks, chain := test_helpers.MakeChain(1, test_helpers.Genesis, test_helpers.TestChainGen) defer chain.Stop() - block0 = testhelpers.Genesis + block0 = test_helpers.Genesis block1 = blocks[0] expectedBlockRlp, _ := rlp.EncodeToBytes(block1) mockReceipt := &types.Receipt{ @@ -186,7 +186,7 @@ func testHTTPAPI(t *testing.T) { BlockHash: block1.Hash(), } expectedReceiptBytes, _ := rlp.EncodeToBytes(types.Receipts{mockReceipt}) - expectedStateDiff := statediff.StateObject{ + expectedStateDiff := sdtypes.StateObject{ BlockNumber: block1.Number(), BlockHash: block1.Hash(), Nodes: []sdtypes.StateNode{ @@ -200,14 +200,14 @@ func testHTTPAPI(t *testing.T) { { Path: []byte{'\x0e'}, NodeType: sdtypes.Leaf, - LeafKey: testhelpers.Account1LeafKey, + LeafKey: test_helpers.Account1LeafKey, NodeValue: account1LeafNode, StorageNodes: emptyStorage, }, { Path: []byte{'\x00'}, NodeType: sdtypes.Leaf, - LeafKey: testhelpers.BankLeafKey, + LeafKey: test_helpers.BankLeafKey, NodeValue: bankAccountLeafNode, StorageNodes: emptyStorage, }, diff --git a/statediff/testhelpers/test_data.go b/statediff/test_helpers/test_data.go similarity index 99% rename from statediff/testhelpers/test_data.go rename to statediff/test_helpers/test_data.go index 73def50a4..e5b021364 100644 --- a/statediff/testhelpers/test_data.go +++ b/statediff/test_helpers/test_data.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package testhelpers +package test_helpers import ( "math/big" diff --git a/statediff/testhelpers/constant.go b/statediff/testhelpers/constant.go deleted file mode 100644 index 9788549e6..000000000 --- a/statediff/testhelpers/constant.go +++ /dev/null @@ -1,17 +0,0 @@ -package testhelpers - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/params" -) - -var ( - BalanceChange1000 = int64(1000) - BalanceChange10000 = int64(10000) - BalanceChange1Ether = int64(params.Ether) - Block1Account1Balance = big.NewInt(BalanceChange10000) - Block2Account2Balance = big.NewInt(21000000000000) - GasFees = int64(params.GWei) * int64(params.TxGas) - ContractGasLimit = uint64(1000000) -) diff --git a/statediff/trie/node.go b/statediff/trie/node.go deleted file mode 100644 index 6ffc2538c..000000000 --- a/statediff/trie/node.go +++ /dev/null @@ -1,54 +0,0 @@ -package trie - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/rlp" - sdtypes "github.com/ethereum/go-ethereum/statediff/types" - "github.com/ethereum/go-ethereum/trie" -) - -// CheckKeyType checks what type of key we have -func CheckKeyType(elements []interface{}) (sdtypes.NodeType, error) { - if len(elements) > 2 { - return sdtypes.Branch, nil - } - if len(elements) < 2 { - return sdtypes.Unknown, fmt.Errorf("node cannot be less than two elements in length") - } - switch elements[0].([]byte)[0] / 16 { - case '\x00': - return sdtypes.Extension, nil - case '\x01': - return sdtypes.Extension, nil - case '\x02': - return sdtypes.Leaf, nil - case '\x03': - return sdtypes.Leaf, nil - default: - return sdtypes.Unknown, fmt.Errorf("unknown hex prefix") - } -} - -// ResolveNode return the state diff node pointed by the iterator. -func ResolveNode(it trie.NodeIterator, trieDB *trie.Database) (sdtypes.StateNode, []interface{}, error) { - nodePath := make([]byte, len(it.Path())) - copy(nodePath, it.Path()) - node, err := trieDB.Node(it.Hash()) - if err != nil { - return sdtypes.StateNode{}, nil, err - } - var nodeElements []interface{} - if err = rlp.DecodeBytes(node, &nodeElements); err != nil { - return sdtypes.StateNode{}, nil, err - } - ty, err := CheckKeyType(nodeElements) - if err != nil { - return sdtypes.StateNode{}, nil, err - } - return sdtypes.StateNode{ - NodeType: ty, - Path: nodePath, - NodeValue: node, - }, nodeElements, nil -} diff --git a/statediff/helpers.go b/statediff/trie_helpers/helpers.go similarity index 53% rename from statediff/helpers.go rename to statediff/trie_helpers/helpers.go index eb5060c51..ce3365f2c 100644 --- a/statediff/helpers.go +++ b/statediff/trie_helpers/helpers.go @@ -17,14 +17,65 @@ // Contains a batch of utility type declarations used by the tests. As the node // operates on unique types, a lot of them are needed to check various features. 
-package statediff +package trie_helpers import ( + "fmt" "sort" "strings" + + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/trie" ) -func sortKeys(data AccountMap) []string { +// CheckKeyType checks what type of key we have +func CheckKeyType(elements []interface{}) (types.NodeType, error) { + if len(elements) > 2 { + return types.Branch, nil + } + if len(elements) < 2 { + return types.Unknown, fmt.Errorf("node cannot be less than two elements in length") + } + switch elements[0].([]byte)[0] / 16 { + case '\x00': + return types.Extension, nil + case '\x01': + return types.Extension, nil + case '\x02': + return types.Leaf, nil + case '\x03': + return types.Leaf, nil + default: + return types.Unknown, fmt.Errorf("unknown hex prefix") + } +} + +// ResolveNode return the state diff node pointed by the iterator. +func ResolveNode(it trie.NodeIterator, trieDB *trie.Database) (types.StateNode, []interface{}, error) { + nodePath := make([]byte, len(it.Path())) + copy(nodePath, it.Path()) + node, err := trieDB.Node(it.Hash()) + if err != nil { + return types.StateNode{}, nil, err + } + var nodeElements []interface{} + if err = rlp.DecodeBytes(node, &nodeElements); err != nil { + return types.StateNode{}, nil, err + } + ty, err := CheckKeyType(nodeElements) + if err != nil { + return types.StateNode{}, nil, err + } + return types.StateNode{ + NodeType: ty, + Path: nodePath, + NodeValue: node, + }, nodeElements, nil +} + +// SortKeys sorts the keys in the account map +func SortKeys(data types.AccountMap) []string { keys := make([]string, 0, len(data)) for key := range data { keys = append(keys, key) @@ -34,10 +85,10 @@ func sortKeys(data AccountMap) []string { return keys } -// findIntersection finds the set of strings from both arrays that are equivalent +// FindIntersection finds the set of strings from both arrays that are equivalent // a and b must first be sorted // this is used to find which keys have been both "deleted" and "created" i.e. they were updated -func findIntersection(a, b []string) []string { +func FindIntersection(a, b []string) []string { lenA := len(a) lenB := len(b) iOfA, iOfB := 0, 0 diff --git a/statediff/types.go b/statediff/types.go deleted file mode 100644 index ef8256041..000000000 --- a/statediff/types.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Contains a batch of utility type declarations used by the tests. As the node -// operates on unique types, a lot of them are needed to check various features. 
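As a small illustration of the relocated helpers, a sketch of finding "updated" account paths via SortKeys and FindIntersection; the package paths are the ones introduced by this diff and the map keys are made-up hex path strings:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/statediff/trie_helpers"
        sdtypes "github.com/ethereum/go-ethereum/statediff/types"
    )

    func main() {
        deleted := sdtypes.AccountMap{"0a": {}, "0c": {}}
        created := sdtypes.AccountMap{"0b": {}, "0c": {}}
        // FindIntersection expects sorted inputs, so sort the keys first.
        updated := trie_helpers.FindIntersection(trie_helpers.SortKeys(deleted), trie_helpers.SortKeys(created))
        fmt.Println(updated) // expected: [0c], the path present in both sets
    }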
- -package statediff - -import ( - "encoding/json" - "math/big" - - "github.com/ethereum/go-ethereum/common" - ctypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/statediff/types" -) - -// Subscription struct holds our subscription channels -type Subscription struct { - PayloadChan chan<- Payload - QuitChan chan<- bool -} - -// DBParams holds params for Postgres db connection -type DBParams struct { - ConnectionURL string - ID string - ClientName string -} - -// Params is used to carry in parameters from subscribing/requesting clients configuration -type Params struct { - IntermediateStateNodes bool - IntermediateStorageNodes bool - IncludeBlock bool - IncludeReceipts bool - IncludeTD bool - IncludeCode bool - WatchedAddresses []common.Address - WatchedStorageSlots []common.Hash -} - -// Args bundles the arguments for the state diff builder -type Args struct { - OldStateRoot, NewStateRoot, BlockHash common.Hash - BlockNumber *big.Int -} - -type StateRoots struct { - OldStateRoot, NewStateRoot common.Hash -} - -// Payload packages the data to send to statediff subscriptions -type Payload struct { - BlockRlp []byte `json:"blockRlp"` - TotalDifficulty *big.Int `json:"totalDifficulty"` - ReceiptsRlp []byte `json:"receiptsRlp"` - StateObjectRlp []byte `json:"stateObjectRlp" gencodec:"required"` - - encoded []byte - err error -} - -func (sd *Payload) ensureEncoded() { - if sd.encoded == nil && sd.err == nil { - sd.encoded, sd.err = json.Marshal(sd) - } -} - -// Length to implement Encoder interface for Payload -func (sd *Payload) Length() int { - sd.ensureEncoded() - return len(sd.encoded) -} - -// Encode to implement Encoder interface for Payload -func (sd *Payload) Encode() ([]byte, error) { - sd.ensureEncoded() - return sd.encoded, sd.err -} - -// StateObject is the final output structure from the builder -type StateObject struct { - BlockNumber *big.Int `json:"blockNumber" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Nodes []types.StateNode `json:"nodes" gencodec:"required"` - CodeAndCodeHashes []types.CodeAndCodeHash `json:"codeMapping"` -} - -// AccountMap is a mapping of hex encoded path => account wrapper -type AccountMap map[string]accountWrapper - -// accountWrapper is used to temporary associate the unpacked node with its raw values -type accountWrapper struct { - Account *ctypes.StateAccount - NodeType types.NodeType - Path []byte - NodeValue []byte - LeafKey []byte -} diff --git a/statediff/types/types.go b/statediff/types/types.go index 56babfb5b..36008a784 100644 --- a/statediff/types/types.go +++ b/statediff/types/types.go @@ -14,12 +14,39 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Contains a batch of utility type declarations used by the tests. As the node -// operates on unique types, a lot of them are needed to check various features. 
- package types -import "github.com/ethereum/go-ethereum/common" +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// StateRoots holds the state roots required for generating a state diff +type StateRoots struct { + OldStateRoot, NewStateRoot common.Hash +} + +// StateObject is the final output structure from the builder +type StateObject struct { + BlockNumber *big.Int `json:"blockNumber" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Nodes []StateNode `json:"nodes" gencodec:"required"` + CodeAndCodeHashes []CodeAndCodeHash `json:"codeMapping"` +} + +// AccountMap is a mapping of hex encoded path => account wrapper +type AccountMap map[string]AccountWrapper + +// AccountWrapper is used to temporarily associate the unpacked node with its raw values +type AccountWrapper struct { + Account *types.StateAccount + NodeType NodeType + Path []byte + NodeValue []byte + LeafKey []byte +} // NodeType for explicitly setting type of node type NodeType string
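Finally, a minimal sketch of constructing the relocated output types; the fields are exactly as declared above and the block values are placeholders:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        sdtypes "github.com/ethereum/go-ethereum/statediff/types"
    )

    func main() {
        roots := sdtypes.StateRoots{
            OldStateRoot: common.HexToHash("0x01"),
            NewStateRoot: common.HexToHash("0x02"),
        }
        obj := sdtypes.StateObject{
            BlockNumber: big.NewInt(1),
            BlockHash:   common.HexToHash("0x03"),
            Nodes:       []sdtypes.StateNode{}, // populated by the builder in practice
        }
        fmt.Println(roots.OldStateRoot != roots.NewStateRoot, obj.BlockNumber, len(obj.Nodes))
    }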