major refactor

parent dcaaa40067
commit b3add308ee
@@ -23,8 +23,11 @@ import (
 	"math/big"
 	"os"
 	"reflect"
+	"time"
 	"unicode"
 
+	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/statediff"
 	"gopkg.in/urfave/cli.v1"
@@ -182,27 +185,48 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	}
 
 	if ctx.GlobalBool(utils.StateDiffFlag.Name) {
-		var dbParams *statediff.DBParams
-		if ctx.GlobalIsSet(utils.StateDiffDBFlag.Name) {
-			dbParams = new(statediff.DBParams)
-			dbParams.ConnectionURL = ctx.GlobalString(utils.StateDiffDBFlag.Name)
+		var dbConfig *sql.Config
+		if ctx.GlobalIsSet(utils.StateDiffWritingFlag.Name) {
+			dbConfig = new(sql.Config)
+			dbConfig.Hostname = ctx.GlobalString(utils.StateDiffDBHostFlag.Name)
+			dbConfig.Port = ctx.GlobalInt(utils.StateDiffDBPortFlag.Name)
+			dbConfig.DatabaseName = ctx.GlobalString(utils.StateDiffDBNameFlag.Name)
+			dbConfig.Username = ctx.GlobalString(utils.StateDiffDBUserFlag.Name)
+			dbConfig.Password = ctx.GlobalString(utils.StateDiffDBPasswordFlag.Name)
 
 			if ctx.GlobalIsSet(utils.StateDiffDBNodeIDFlag.Name) {
-				dbParams.ID = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name)
+				dbConfig.ID = ctx.GlobalString(utils.StateDiffDBNodeIDFlag.Name)
 			} else {
 				utils.Fatalf("Must specify node ID for statediff DB output")
 			}
 
 			if ctx.GlobalIsSet(utils.StateDiffDBClientNameFlag.Name) {
-				dbParams.ClientName = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name)
+				dbConfig.ClientName = ctx.GlobalString(utils.StateDiffDBClientNameFlag.Name)
 			} else {
 				utils.Fatalf("Must specify client name for statediff DB output")
 			}
-		} else {
-			if ctx.GlobalBool(utils.StateDiffWritingFlag.Name) {
-				utils.Fatalf("Must pass DB parameters if enabling statediff write loop")
+
+			if ctx.GlobalIsSet(utils.StateDiffDBMinConns.Name) {
+				dbConfig.MinConns = ctx.GlobalInt(utils.StateDiffDBMinConns.Name)
+			}
+			if ctx.GlobalIsSet(utils.StateDiffDBMaxConns.Name) {
+				dbConfig.MaxConns = ctx.GlobalInt(utils.StateDiffDBMaxConns.Name)
+			}
+			if ctx.GlobalIsSet(utils.StateDiffDBMaxIdleConns.Name) {
+				dbConfig.MaxIdle = ctx.GlobalInt(utils.StateDiffDBMaxIdleConns.Name)
+			}
+			if ctx.GlobalIsSet(utils.StateDiffDBMaxConnLifetime.Name) {
+				dbConfig.MaxConnLifetime = ctx.GlobalDuration(utils.StateDiffDBMaxConnLifetime.Name) * time.Second
+			}
+			if ctx.GlobalIsSet(utils.StateDiffDBMaxConnIdleTime.Name) {
+				dbConfig.MaxConnIdleTime = ctx.GlobalDuration(utils.StateDiffDBMaxConnIdleTime.Name) * time.Second
+			}
+			if ctx.GlobalIsSet(utils.StateDiffDBConnTimeout.Name) {
+				dbConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffDBConnTimeout.Name) * time.Second
 			}
 		}
 		p := statediff.ServiceParams{
-			DBParams:        dbParams,
+			DBParams:        dbConfig,
 			EnableWriteLoop: ctx.GlobalBool(utils.StateDiffWritingFlag.Name),
 			NumWorkers:      ctx.GlobalUint(utils.StateDiffWorkersFlag.Name),
 		}
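Note: the sql.Config type assigned above is not included in this diff. Judging only from the fields set in makeFullNode, it presumably looks roughly like the sketch below (field names taken from the hunk, types inferred; this is an illustration, not the actual statediff/indexer/database/sql source).

// Sketch only: inferred from the assignments in makeFullNode above.
// The real statediff/indexer/database/sql.Config may differ.
package sql

import "time"

type Config struct {
	Hostname        string
	Port            int
	DatabaseName    string
	Username        string
	Password        string
	ID              string // node ID recorded alongside written state diffs
	ClientName      string
	MinConns        int
	MaxConns        int
	MaxIdle         int
	MaxConnLifetime time.Duration
	MaxConnIdleTime time.Duration
	ConnTimeout     time.Duration
}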
@@ -149,7 +149,17 @@ var (
 		utils.GpoIgnoreGasPriceFlag,
 		utils.MinerNotifyFullFlag,
 		utils.StateDiffFlag,
-		utils.StateDiffDBFlag,
+		utils.StateDiffDBNameFlag,
+		utils.StateDiffDBPasswordFlag,
+		utils.StateDiffDBUserFlag,
+		utils.StateDiffDBHostFlag,
+		utils.StateDiffDBPortFlag,
+		utils.StateDiffDBMaxConnLifetime,
+		utils.StateDiffDBMaxConnIdleTime,
+		utils.StateDiffDBMaxConns,
+		utils.StateDiffDBMinConns,
+		utils.StateDiffDBMaxIdleConns,
+		utils.StateDiffDBConnTimeout,
 		utils.StateDiffDBNodeIDFlag,
 		utils.StateDiffDBClientNameFlag,
 		utils.StateDiffWritingFlag,
@@ -225,7 +225,17 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 		Name: "STATE DIFF",
 		Flags: []cli.Flag{
 			utils.StateDiffFlag,
-			utils.StateDiffDBFlag,
+			utils.StateDiffDBNameFlag,
+			utils.StateDiffDBPasswordFlag,
+			utils.StateDiffDBUserFlag,
+			utils.StateDiffDBHostFlag,
+			utils.StateDiffDBPortFlag,
+			utils.StateDiffDBMaxConnLifetime,
+			utils.StateDiffDBMaxConnIdleTime,
+			utils.StateDiffDBMaxConns,
+			utils.StateDiffDBMinConns,
+			utils.StateDiffDBMaxIdleConns,
+			utils.StateDiffDBConnTimeout,
 			utils.StateDiffDBNodeIDFlag,
 			utils.StateDiffDBClientNameFlag,
 			utils.StateDiffWritingFlag,
@@ -786,16 +786,59 @@ var (
 		Name:  "statediff",
 		Usage: "Enables the processing of state diffs between each block",
 	}
-	StateDiffDBFlag = cli.StringFlag{
-		Name:  "statediff.db",
-		Usage: "PostgreSQL database connection string for writing state diffs",
+	StateDiffDBHostFlag = cli.StringFlag{
+		Name:  "statediff.db.host",
+		Usage: "Statediff database hostname/ip",
+		Value: "localhost",
+	}
+	StateDiffDBPortFlag = cli.IntFlag{
+		Name:  "statediff.db.port",
+		Usage: "Statediff database port",
+		Value: 5432,
+	}
+	StateDiffDBNameFlag = cli.StringFlag{
+		Name:  "statediff.db.name",
+		Usage: "Statediff database name",
+	}
+	StateDiffDBPasswordFlag = cli.StringFlag{
+		Name:  "statediff.db.password",
+		Usage: "Statediff database password",
+	}
+	StateDiffDBUserFlag = cli.StringFlag{
+		Name:  "statediff.db.user",
+		Usage: "Statediff database username",
+		Value: "postgres",
+	}
+	StateDiffDBMaxConnLifetime = cli.DurationFlag{
+		Name:  "statediff.db.maxconnlifetime",
+		Usage: "Statediff database maximum connection lifetime (in seconds)",
+	}
+	StateDiffDBMaxConnIdleTime = cli.DurationFlag{
+		Name:  "statediff.db.maxconnidletime",
+		Usage: "Statediff database maximum connection idle time (in seconds)",
+	}
+	StateDiffDBMaxConns = cli.IntFlag{
+		Name:  "statediff.db.maxconns",
+		Usage: "Statediff database maximum connections",
+	}
+	StateDiffDBMinConns = cli.IntFlag{
+		Name:  "statediff.db.minconns",
+		Usage: "Statediff database minimum connections",
+	}
+	StateDiffDBMaxIdleConns = cli.IntFlag{
+		Name:  "statediff.db.maxidleconns",
+		Usage: "Statediff database maximum idle connections",
+	}
+	StateDiffDBConnTimeout = cli.DurationFlag{
+		Name:  "statediff.db.conntimeout",
+		Usage: "Statediff database connection timeout (in seconds)",
 	}
 	StateDiffDBNodeIDFlag = cli.StringFlag{
-		Name:  "statediff.dbnodeid",
+		Name:  "statediff.db.nodeid",
 		Usage: "Node ID to use when writing state diffs to database",
 	}
 	StateDiffDBClientNameFlag = cli.StringFlag{
-		Name:  "statediff.dbclientname",
+		Name:  "statediff.db.clientname",
 		Usage: "Client name to use when writing state diffs to database",
 	}
 	StateDiffWritingFlag = cli.BoolFlag{
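Note: the flags above are defined with gopkg.in/urfave/cli.v1, the same package the hunks already import. The standalone snippet below is only an illustration of how such flags are registered and read back with GlobalString/GlobalInt/GlobalIsSet; the flag names mirror the commit, everything else (program, output) is made up.

// Illustration only; not part of this commit.
package main

import (
	"fmt"
	"log"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

var (
	hostFlag = cli.StringFlag{
		Name:  "statediff.db.host",
		Usage: "Statediff database hostname/ip",
		Value: "localhost",
	}
	portFlag = cli.IntFlag{
		Name:  "statediff.db.port",
		Usage: "Statediff database port",
		Value: 5432,
	}
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{hostFlag, portFlag}
	app.Action = func(ctx *cli.Context) error {
		// GlobalIsSet reports whether the user passed the flag explicitly,
		// which is how makeFullNode decides whether to populate sql.Config.
		fmt.Printf("host=%s port=%d host-set=%v\n",
			ctx.GlobalString(hostFlag.Name),
			ctx.GlobalInt(portFlag.Name),
			ctx.GlobalIsSet(hostFlag.Name))
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}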
go.mod (8 lines changed)

@@ -25,6 +25,7 @@ require (
 	github.com/fatih/color v1.7.0
 	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
+	github.com/georgysavva/scany v0.2.9
 	github.com/go-ole/go-ole v1.2.1 // indirect
 	github.com/go-stack/stack v1.8.0
 	github.com/golang/protobuf v1.4.3
@@ -46,6 +47,10 @@ require (
 	github.com/ipfs/go-ipfs-blockstore v1.0.1
 	github.com/ipfs/go-ipfs-ds-help v1.0.0
 	github.com/ipfs/go-ipld-format v0.2.0
+	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
+	github.com/jackc/pgconn v1.10.0
+	github.com/jackc/pgx v3.6.2+incompatible
+	github.com/jackc/pgx/v4 v4.13.0
 	github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
 	github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
 	github.com/jmoiron/sqlx v1.2.0
@@ -55,7 +60,6 @@ require (
 	github.com/lib/pq v1.10.2
 	github.com/mattn/go-colorable v0.1.8
 	github.com/mattn/go-isatty v0.0.12
-	github.com/mattn/go-sqlite3 v1.14.7 // indirect
 	github.com/multiformats/go-multihash v0.0.14
 	github.com/naoina/go-stringutil v0.1.0 // indirect
 	github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
@@ -70,7 +74,7 @@ require (
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
 	github.com/tklauser/go-sysconf v0.3.5 // indirect
 	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
-	golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
+	golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912
 	golang.org/x/text v0.3.6
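Note: go.mod adds the jackc/pgx v4 stack (and scany) that backs the new Postgres-based statediff indexer. A hedged, standalone sketch of how the host/port/name/user/password values collected by the new flags could feed a pgx v4 pool is below; the connection-string format and pgxpool API are standard pgx v4, while the credentials and database name are placeholders, not values from this commit.

// Illustration only; not part of this commit.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v4/pgxpool"
)

func main() {
	// Placeholder credentials and database name for illustration only.
	connStr := fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
		"postgres", "password", "localhost", 5432, "statediff_db")
	pool, err := pgxpool.Connect(context.Background(), connStr)
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer pool.Close()

	// Simple liveness check against the database.
	var one int
	if err := pool.QueryRow(context.Background(), "SELECT 1").Scan(&one); err != nil {
		log.Fatalf("ping query failed: %v", err)
	}
	fmt.Println("connected, SELECT 1 =", one)
}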
go.sum (157 lines changed)
@@ -42,6 +42,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
 github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
@@ -100,10 +102,17 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
 github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.0.3 h1:ZA346ACHIZctef6trOTwBAEvPVm1k0uLm/bb2Atc+S8=
+github.com/cockroachdb/cockroach-go/v2 v2.0.3/go.mod h1:hAuDgiVgDVkfirP9JnhXEfcXEPRKBpYdGz+l7mvYSzw=
 github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
 github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8=
 github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
 github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -116,6 +125,7 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs
 github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
 github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
 github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
+github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
@@ -132,6 +142,7 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
@@ -142,6 +153,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/georgysavva/scany v0.2.9 h1:Xt6rjYpHnMClTm/g+oZTnoSxUwiln5GqMNU+QeLNHQU=
+github.com/georgysavva/scany v0.2.9/go.mod h1:yeOeC1BdIdl6hOwy8uefL2WNSlseFzbhlG/frrh65SA=
 github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
 github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -152,9 +165,11 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
 github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -166,11 +181,15 @@ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZp
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -281,6 +300,80 @@ github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
 github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
 github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
 github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
+github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
+github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU=
+github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs=
+github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
+github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570=
+github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
 github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw=
|
|||||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
|
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
|
||||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
||||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
|
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
|
||||||
|
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||||
|
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
||||||
@ -313,12 +409,13 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52
|
|||||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
@ -329,6 +426,11 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
|
|||||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.4.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
|
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
|
||||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||||
@ -338,6 +440,7 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG
|
|||||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||||
|
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
@ -346,6 +449,7 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpu
|
|||||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||||
@ -355,8 +459,8 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
|
|||||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA=
|
github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
|
||||||
github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
|
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
|
||||||
@ -441,14 +545,25 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
|
|||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||||
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
|
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||||
|
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
|
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
|
||||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20200419222939-1884f454f8ea/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
|
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
||||||
|
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
@ -461,6 +576,8 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57N
|
|||||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||||
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
@ -487,29 +604,45 @@ github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS
|
|||||||
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
|
||||||
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
|
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||||
|
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||||
|
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
|
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc=
|
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
|
||||||
|
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
@ -555,6 +688,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
|
|||||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
@ -589,13 +723,16 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||||||
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -618,6 +755,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
|
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
|
||||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
@ -648,25 +786,31 @@ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3
|
|||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
|
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
|
||||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -725,6 +869,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
|
|||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
|
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
|
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
|
||||||
|
@@ -73,23 +73,31 @@ type Payload struct {
 ## Usage

 This state diffing service runs as an auxiliary service concurrent to the regular syncing process of the geth node.

 ### CLI configuration

 This service introduces a CLI flag namespace `statediff`

 `--statediff` flag is used to turn on the service
 `--statediff.writing` is used to tell the service to write state diff objects it produces from synced ChainEvents directly to a configured Postgres database
 `--statediff.workers` is used to set the number of concurrent workers to process state diff objects and write them into the database
-`--statediff.db` is the connection string for the Postgres database to write to
-`--statediff.db.init` indicates whether we need to initialize a new database; set true if its the first time running the process on a given database
-`--statediff.dbnodeid` is the node id to use in the Postgres database
-`--statediff.dbclientname` is the client name to use in the Postgres database
+`--statediff.db.host` is the hostname/ip to dial to connect to the database
+`--statediff.db.port` is the port to dial to connect to the database
+`--statediff.db.name` is the name of the database to connect to
+`--statediff.db.user` is the user to connect to the database as
+`--statediff.db.password` is the password to use to connect to the database
+`--statediff.db.conntimeout` is the connection timeout (in seconds)
+`--statediff.db.maxconns` is the maximum number of database connections
+`--statediff.db.minconns` is the minimum number of database connections
+`--statediff.db.maxidleconns` is the maximum number of idle connections
+`--statediff.db.maxconnidletime` is the maximum amount of time a connection may remain idle (in seconds)
+`--statediff.db.maxconnlifetime` is the maximum lifetime for a connection (in seconds)
+`--statediff.db.nodeid` is the node id to use in the Postgres database
+`--statediff.db.clientname` is the client name to use in the Postgres database

 The service can only operate in full sync mode (`--syncmode=full`), but only the historical RPC endpoints require an archive node (`--gcmode=archive`)

 e.g.
 `
-./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db=postgres://localhost:5432/vulcanize_testing?sslmode=disable --statediff.db.init=true --statediff.dbnodeid={nodeId} --statediff.dbclientname={dbClientName}
+./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db=postgres://localhost:5432/vulcanize_testing?sslmode=disable --statediff.dbnodeid={nodeId} --statediff.dbclientname={dbClientName}
 `

 ### RPC endpoints
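The example above still uses the pre-refactor `--statediff.db`/`--statediff.dbnodeid` spelling; with the new flag namespace documented in this hunk, an equivalent invocation would look roughly like the following (the hostname, port, database name, and user are placeholders chosen for illustration, not defaults taken from the code):
`
./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=vulcanize_testing --statediff.db.user=postgres --statediff.db.password={password} --statediff.db.nodeid={nodeId} --statediff.db.clientname={dbClientName}
`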
@@ -19,11 +19,11 @@ package statediff
 import (
 	"context"

-	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/statediff/types"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rpc"
-	. "github.com/ethereum/go-ethereum/statediff/types"
 )

 // APIName is the namespace used for the state diffing service API
@@ -117,7 +117,7 @@ func (api *PublicStateDiffAPI) StreamCodeAndCodeHash(ctx context.Context, blockN

 	// create subscription and start waiting for events
 	rpcSub := notifier.CreateSubscription()
-	payloadChan := make(chan CodeAndCodeHash, chainEventChanSize)
+	payloadChan := make(chan types.CodeAndCodeHash, chainEventChanSize)
 	quitChan := make(chan bool)
 	api.sds.StreamCodeAndCodeHash(blockNumber, payloadChan, quitChan)
 	go func() {
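Downstream of this change, a consumer subscribes to this stream over the `statediff` RPC namespace. The sketch below is illustrative only: the WebSocket URL, the example block number, and the lowercased subscription name `streamCodeAndCodeHash` are assumptions about the node's configuration and geth's usual subscription-name derivation, not values taken from this diff.

```go
package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

func main() {
	// Assumes a geth node started with --statediff and a WS endpoint enabled.
	client, err := rpc.Dial("ws://127.0.0.1:8546")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	payloadChan := make(chan sdtypes.CodeAndCodeHash)
	// Subscription name assumed to be the lowercased RPC method name.
	sub, err := client.Subscribe(context.Background(), "statediff", payloadChan,
		"streamCodeAndCodeHash", uint64(1_000_000))
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	for {
		select {
		case payload := <-payloadChan:
			log.Printf("code hash %s (%d bytes of code)", payload.Hash.Hex(), len(payload.Code))
		case err := <-sub.Err():
			log.Fatal(err)
		}
	}
}
```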
@@ -23,14 +23,16 @@ import (
 	"bytes"
 	"fmt"

+	"github.com/ethereum/go-ethereum/statediff/trie_helpers"
+
+	types2 "github.com/ethereum/go-ethereum/statediff/types"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
-	sdtrie "github.com/ethereum/go-ethereum/statediff/trie"
-	. "github.com/ethereum/go-ethereum/statediff/types"
 	"github.com/ethereum/go-ethereum/trie"
 )
@@ -43,9 +45,9 @@ var (

 // Builder interface exposes the method for building a state diff between two blocks
 type Builder interface {
-	BuildStateDiffObject(args Args, params Params) (StateObject, error)
-	BuildStateTrieObject(current *types.Block) (StateObject, error)
-	WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error
+	BuildStateDiffObject(args Args, params Params) (types2.StateObject, error)
+	BuildStateTrieObject(current *types.Block) (types2.StateObject, error)
+	WriteStateDiffObject(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error
 }

 type builder struct {
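To make the refactored signatures concrete, here is a minimal sketch of how a caller might drive the Builder after this change. The package name and the way the `state.Database` handle is obtained are assumptions for illustration; the `Args`, `Params`, and `types2.StateObject` names come from this diff.

```go
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/statediff"
	types2 "github.com/ethereum/go-ethereum/statediff/types"
)

// buildDiff shows the refactored call shape; how stateCache is obtained from
// the node (e.g. from the blockchain's state cache) is elided.
func buildDiff(stateCache state.Database, oldRoot, newRoot, blockHash common.Hash, number *big.Int) (types2.StateObject, error) {
	b := statediff.NewBuilder(stateCache)
	args := statediff.Args{
		OldStateRoot: oldRoot,
		NewStateRoot: newRoot,
		BlockHash:    blockHash,
		BlockNumber:  number,
	}
	// Include intermediate (branch/extension) nodes as well as leaves.
	params := statediff.Params{
		IntermediateStateNodes: true,
	}
	return b.BuildStateDiffObject(args, params)
}
```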
@@ -53,20 +55,20 @@ type builder struct {
 }

 // convenience
-func stateNodeAppender(nodes *[]StateNode) StateNodeSink {
-	return func(node StateNode) error {
+func stateNodeAppender(nodes *[]types2.StateNode) types2.StateNodeSink {
+	return func(node types2.StateNode) error {
 		*nodes = append(*nodes, node)
 		return nil
 	}
 }
-func storageNodeAppender(nodes *[]StorageNode) StorageNodeSink {
-	return func(node StorageNode) error {
+func storageNodeAppender(nodes *[]types2.StorageNode) types2.StorageNodeSink {
+	return func(node types2.StorageNode) error {
 		*nodes = append(*nodes, node)
 		return nil
 	}
 }
-func codeMappingAppender(codeAndCodeHashes *[]CodeAndCodeHash) CodeSink {
-	return func(c CodeAndCodeHash) error {
+func codeMappingAppender(codeAndCodeHashes *[]types2.CodeAndCodeHash) types2.CodeSink {
+	return func(c types2.CodeAndCodeHash) error {
 		*codeAndCodeHashes = append(*codeAndCodeHashes, c)
 		return nil
 	}
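These appenders are the in-memory default sinks: each closes over a slice and appends whatever the builder emits. Since WriteStateDiffObject accepts any sink, a caller could stream nodes instead of buffering them. A small illustrative alternative, not part of this commit, assuming the same `types2` alias used in builder.go:

```go
// channelSink forwards each emitted state node to a channel instead of
// buffering it in a slice; usable wherever a types2.StateNodeSink is expected.
func channelSink(out chan<- types2.StateNode) types2.StateNodeSink {
	return func(node types2.StateNode) error {
		out <- node
		return nil
	}
}
```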
@@ -80,17 +82,17 @@ func NewBuilder(stateCache state.Database) Builder {
 }

 // BuildStateTrieObject builds a state trie object from the provided block
-func (sdb *builder) BuildStateTrieObject(current *types.Block) (StateObject, error) {
+func (sdb *builder) BuildStateTrieObject(current *types.Block) (types2.StateObject, error) {
 	currentTrie, err := sdb.stateCache.OpenTrie(current.Root())
 	if err != nil {
-		return StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err)
+		return types2.StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err)
 	}
 	it := currentTrie.NodeIterator([]byte{})
 	stateNodes, codeAndCodeHashes, err := sdb.buildStateTrie(it)
 	if err != nil {
-		return StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err)
+		return types2.StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err)
 	}
-	return StateObject{
+	return types2.StateObject{
 		BlockNumber: current.Number(),
 		BlockHash:   current.Hash(),
 		Nodes:       stateNodes,
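Unlike the diff path, BuildStateTrieObject walks every node under a single block's state root. A hedged fragment showing the call shape; imports and the way `block` and `stateCache` are obtained follow builder.go's conventions and are omitted here:

```go
// Illustrative fragment, not part of this commit: types is core/types and
// types2 is statediff/types, matching builder.go's imports.
func dumpStateAt(stateCache state.Database, block *types.Block) (types2.StateObject, error) {
	b := statediff.NewBuilder(stateCache)
	// Walks the whole trie at block.Root() instead of diffing two roots.
	return b.BuildStateTrieObject(block)
}
```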
@ -98,20 +100,20 @@ func (sdb *builder) BuildStateTrieObject(current *types.Block) (StateObject, err
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAndCodeHash, error) {
|
func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]types2.StateNode, []types2.CodeAndCodeHash, error) {
|
||||||
stateNodes := make([]StateNode, 0)
|
stateNodes := make([]types2.StateNode, 0)
|
||||||
codeAndCodeHashes := make([]CodeAndCodeHash, 0)
|
codeAndCodeHashes := make([]types2.CodeAndCodeHash, 0)
|
||||||
for it.Next(true) {
|
for it.Next(true) {
|
||||||
// skip value nodes
|
// skip value nodes
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
|
node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
switch node.NodeType {
|
switch node.NodeType {
|
||||||
case Leaf:
|
case types2.Leaf:
|
||||||
var account types.StateAccount
|
var account types.StateAccount
|
||||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||||
@ -122,7 +124,7 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
|
|||||||
leafKey := encodedPath[1:]
|
leafKey := encodedPath[1:]
|
||||||
node.LeafKey = leafKey
|
node.LeafKey = leafKey
|
||||||
if !bytes.Equal(account.CodeHash, nullCodeHash) {
|
if !bytes.Equal(account.CodeHash, nullCodeHash) {
|
||||||
var storageNodes []StorageNode
|
var storageNodes []types2.StorageNode
|
||||||
err := sdb.buildStorageNodesEventual(account.Root, nil, true, storageNodeAppender(&storageNodes))
|
err := sdb.buildStorageNodesEventual(account.Root, nil, true, storageNodeAppender(&storageNodes))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("failed building eventual storage diffs for account %+v\r\nerror: %v", account, err)
|
return nil, nil, fmt.Errorf("failed building eventual storage diffs for account %+v\r\nerror: %v", account, err)
|
||||||
@ -134,13 +136,13 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
|
return nil, nil, fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
|
||||||
}
|
}
|
||||||
codeAndCodeHashes = append(codeAndCodeHashes, CodeAndCodeHash{
|
codeAndCodeHashes = append(codeAndCodeHashes, types2.CodeAndCodeHash{
|
||||||
Hash: codeHash,
|
Hash: codeHash,
|
||||||
Code: code,
|
Code: code,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
stateNodes = append(stateNodes, node)
|
stateNodes = append(stateNodes, node)
|
||||||
case Extension, Branch:
|
case types2.Extension, types2.Branch:
|
||||||
stateNodes = append(stateNodes, node)
|
stateNodes = append(stateNodes, node)
|
||||||
default:
|
default:
|
||||||
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
||||||
@@ -150,16 +152,16 @@ func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]StateNode, []CodeAnd
 }

 // BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
-func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject, error) {
-	var stateNodes []StateNode
-	var codeAndCodeHashes []CodeAndCodeHash
+func (sdb *builder) BuildStateDiffObject(args Args, params Params) (types2.StateObject, error) {
+	var stateNodes []types2.StateNode
+	var codeAndCodeHashes []types2.CodeAndCodeHash
 	err := sdb.WriteStateDiffObject(
-		StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot},
+		types2.StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot},
 		params, stateNodeAppender(&stateNodes), codeMappingAppender(&codeAndCodeHashes))
 	if err != nil {
-		return StateObject{}, err
+		return types2.StateObject{}, err
 	}
-	return StateObject{
+	return types2.StateObject{
 		BlockHash:   args.BlockHash,
 		BlockNumber: args.BlockNumber,
 		Nodes:       stateNodes,
@@ -168,7 +170,7 @@ func (sdb *builder) BuildStateDiffObject(args Args, params Params) (StateObject,
 }

 // Writes a statediff object to output callback
-func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
+func (sdb *builder) WriteStateDiffObject(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
 	if !params.IntermediateStateNodes || len(params.WatchedAddresses) > 0 {
 		// if we are watching only specific accounts then we are only diffing leaf nodes
 		return sdb.buildStateDiffWithoutIntermediateStateNodes(args, params, output, codeOutput)
@ -177,7 +179,7 @@ func (sdb *builder) WriteStateDiffObject(args StateRoots, params Params, output
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
|
func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
|
||||||
// Load tries for old and new states
|
// Load tries for old and new states
|
||||||
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
|
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -208,14 +210,14 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, pa
 	}

 	// collect and sort the leafkey keys for both account mappings into a slice
-	createKeys := sortKeys(diffAccountsAtB)
-	deleteKeys := sortKeys(diffAccountsAtA)
+	createKeys := trie_helpers.SortKeys(diffAccountsAtB)
+	deleteKeys := trie_helpers.SortKeys(diffAccountsAtA)

 	// and then find the intersection of these keys
 	// these are the leafkeys for the accounts which exist at both A and B but are different
 	// this also mutates the passed in createKeys and deleteKeys, removing the intersection keys
 	// and leaving the truly created or deleted keys in place
-	updatedKeys := findIntersection(createKeys, deleteKeys)
+	updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys)

 	// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
 	err = sdb.buildAccountUpdates(
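The created/deleted/updated split above is the heart of the account diff: keys only in B are creations, keys only in A are deletions, and keys present in both (the intersection) are updates whose storage must be re-diffed. Purely as an illustration of that classification over two sorted key slices; this is not the trie_helpers.FindIntersection implementation, which additionally removes the intersection keys from its inputs:

```go
// intersectSorted returns the keys present in both sorted slices, i.e. the
// accounts that exist at both A and B but in different states ("updated").
func intersectSorted(createKeys, deleteKeys []string) []string {
	var updated []string
	i, j := 0, 0
	for i < len(createKeys) && j < len(deleteKeys) {
		switch {
		case createKeys[i] == deleteKeys[j]:
			updated = append(updated, createKeys[i]) // in both -> updated
			i++
			j++
		case createKeys[i] < deleteKeys[j]:
			i++ // only in B -> truly created
		default:
			j++ // only in A -> truly deleted
		}
	}
	return updated
}
```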
@ -232,7 +234,7 @@ func (sdb *builder) buildStateDiffWithIntermediateStateNodes(args StateRoots, pa
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots, params Params, output StateNodeSink, codeOutput CodeSink) error {
|
func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args types2.StateRoots, params Params, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
|
||||||
// Load tries for old (A) and new (B) states
|
// Load tries for old (A) and new (B) states
|
||||||
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
|
oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -262,14 +264,14 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// collect and sort the leafkeys for both account mappings into a slice
|
// collect and sort the leafkeys for both account mappings into a slice
|
||||||
createKeys := sortKeys(diffAccountsAtB)
|
createKeys := trie_helpers.SortKeys(diffAccountsAtB)
|
||||||
deleteKeys := sortKeys(diffAccountsAtA)
|
deleteKeys := trie_helpers.SortKeys(diffAccountsAtA)
|
||||||
|
|
||||||
// and then find the intersection of these keys
|
// and then find the intersection of these keys
|
||||||
// these are the leafkeys for the accounts which exist at both A and B but are different
|
// these are the leafkeys for the accounts which exist at both A and B but are different
|
||||||
// this also mutates the passed in createKeys and deleteKeys, removing in intersection keys
|
// this also mutates the passed in createKeys and deleteKeys, removing in intersection keys
|
||||||
// and leaving the truly created or deleted keys in place
|
// and leaving the truly created or deleted keys in place
|
||||||
updatedKeys := findIntersection(createKeys, deleteKeys)
|
updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys)
|
||||||
|
|
||||||
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
|
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
|
||||||
err = sdb.buildAccountUpdates(
|
err = sdb.buildAccountUpdates(
|
||||||
@ -289,20 +291,20 @@ func (sdb *builder) buildStateDiffWithoutIntermediateStateNodes(args StateRoots,
|
|||||||
// createdAndUpdatedState returns
|
// createdAndUpdatedState returns
|
||||||
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
||||||
// and a slice of the paths for all of the nodes included in both
|
// and a slice of the paths for all of the nodes included in both
|
||||||
func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddresses []common.Address) (AccountMap, map[string]bool, error) {
|
func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddresses []common.Address) (types2.AccountMap, map[string]bool, error) {
|
||||||
diffPathsAtB := make(map[string]bool)
|
diffPathsAtB := make(map[string]bool)
|
||||||
diffAcountsAtB := make(AccountMap)
|
diffAcountsAtB := make(types2.AccountMap)
|
||||||
it, _ := trie.NewDifferenceIterator(a, b)
|
it, _ := trie.NewDifferenceIterator(a, b)
|
||||||
for it.Next(true) {
|
for it.Next(true) {
|
||||||
// skip value nodes
|
// skip value nodes
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
|
node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if node.NodeType == Leaf {
|
if node.NodeType == types2.Leaf {
|
||||||
// created vs updated is important for leaf nodes since we need to diff their storage
|
// created vs updated is important for leaf nodes since we need to diff their storage
|
||||||
// so we need to map all changed accounts at B to their leafkey, since account can change pathes but not leafkey
|
// so we need to map all changed accounts at B to their leafkey, since account can change pathes but not leafkey
|
||||||
var account types.StateAccount
|
var account types.StateAccount
|
||||||
@ -314,7 +316,7 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres
|
|||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
encodedPath := trie.HexToCompact(valueNodePath)
|
||||||
leafKey := encodedPath[1:]
|
leafKey := encodedPath[1:]
|
||||||
if isWatchedAddress(watchedAddresses, leafKey) {
|
if isWatchedAddress(watchedAddresses, leafKey) {
|
||||||
diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
|
diffAcountsAtB[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{
|
||||||
NodeType: node.NodeType,
|
NodeType: node.NodeType,
|
||||||
Path: node.Path,
|
Path: node.Path,
|
||||||
NodeValue: node.NodeValue,
|
NodeValue: node.NodeValue,
|
||||||
@ -333,21 +335,21 @@ func (sdb *builder) createdAndUpdatedState(a, b trie.NodeIterator, watchedAddres
|
|||||||
// a slice of all the intermediate nodes that exist in a different state at B than A
|
// a slice of all the intermediate nodes that exist in a different state at B than A
|
||||||
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
||||||
// and a slice of the paths for all of the nodes included in both
|
// and a slice of the paths for all of the nodes included in both
|
||||||
func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output StateNodeSink) (AccountMap, map[string]bool, error) {
|
func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIterator, output types2.StateNodeSink) (types2.AccountMap, map[string]bool, error) {
|
||||||
diffPathsAtB := make(map[string]bool)
|
diffPathsAtB := make(map[string]bool)
|
||||||
diffAcountsAtB := make(AccountMap)
|
diffAcountsAtB := make(types2.AccountMap)
|
||||||
it, _ := trie.NewDifferenceIterator(a, b)
|
it, _ := trie.NewDifferenceIterator(a, b)
|
||||||
for it.Next(true) {
|
for it.Next(true) {
|
||||||
// skip value nodes
|
// skip value nodes
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
|
node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
switch node.NodeType {
|
switch node.NodeType {
|
||||||
case Leaf:
|
case types2.Leaf:
|
||||||
// created vs updated is important for leaf nodes since we need to diff their storage
|
// created vs updated is important for leaf nodes since we need to diff their storage
|
||||||
// so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey
|
// so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey
|
||||||
var account types.StateAccount
|
var account types.StateAccount
|
||||||
@ -358,17 +360,17 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt
|
|||||||
valueNodePath := append(node.Path, partialPath...)
|
valueNodePath := append(node.Path, partialPath...)
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
encodedPath := trie.HexToCompact(valueNodePath)
|
||||||
leafKey := encodedPath[1:]
|
leafKey := encodedPath[1:]
|
||||||
diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
|
diffAcountsAtB[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{
|
||||||
NodeType: node.NodeType,
|
NodeType: node.NodeType,
|
||||||
Path: node.Path,
|
Path: node.Path,
|
||||||
NodeValue: node.NodeValue,
|
NodeValue: node.NodeValue,
|
||||||
LeafKey: leafKey,
|
LeafKey: leafKey,
|
||||||
Account: &account,
|
Account: &account,
|
||||||
}
|
}
|
||||||
case Extension, Branch:
|
case types2.Extension, types2.Branch:
|
||||||
// create a diff for any intermediate node that has changed at b
|
// create a diff for any intermediate node that has changed at b
|
||||||
// created vs updated makes no difference for intermediate nodes since we do not need to diff storage
|
// created vs updated makes no difference for intermediate nodes since we do not need to diff storage
|
||||||
if err := output(StateNode{
|
if err := output(types2.StateNode{
|
||||||
NodeType: node.NodeType,
|
NodeType: node.NodeType,
|
||||||
Path: node.Path,
|
Path: node.Path,
|
||||||
NodeValue: node.NodeValue,
|
NodeValue: node.NodeValue,
|
||||||
@ -386,20 +388,20 @@ func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(a, b trie.NodeIt
|
|||||||
|
|
||||||
// deletedOrUpdatedState returns a slice of all the pathes that are emptied at B
|
// deletedOrUpdatedState returns a slice of all the pathes that are emptied at B
|
||||||
// and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
|
// and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
|
||||||
func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output StateNodeSink) (AccountMap, error) {
|
func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB map[string]bool, output types2.StateNodeSink) (types2.AccountMap, error) {
|
||||||
diffAccountAtA := make(AccountMap)
|
diffAccountAtA := make(types2.AccountMap)
|
||||||
it, _ := trie.NewDifferenceIterator(b, a)
|
it, _ := trie.NewDifferenceIterator(b, a)
|
||||||
for it.Next(true) {
|
for it.Next(true) {
|
||||||
// skip value nodes
|
// skip value nodes
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
|
node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
switch node.NodeType {
|
switch node.NodeType {
|
||||||
case Leaf:
|
case types2.Leaf:
|
||||||
// map all different accounts at A to their leafkey
|
// map all different accounts at A to their leafkey
|
||||||
var account types.StateAccount
|
var account types.StateAccount
|
||||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||||
@ -409,7 +411,7 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
|
|||||||
valueNodePath := append(node.Path, partialPath...)
|
valueNodePath := append(node.Path, partialPath...)
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
encodedPath := trie.HexToCompact(valueNodePath)
|
||||||
leafKey := encodedPath[1:]
|
leafKey := encodedPath[1:]
|
||||||
diffAccountAtA[common.Bytes2Hex(leafKey)] = accountWrapper{
|
diffAccountAtA[common.Bytes2Hex(leafKey)] = types2.AccountWrapper{
|
||||||
NodeType: node.NodeType,
|
NodeType: node.NodeType,
|
||||||
Path: node.Path,
|
Path: node.Path,
|
||||||
NodeValue: node.NodeValue,
|
NodeValue: node.NodeValue,
|
||||||
@ -420,24 +422,24 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
|
|||||||
// that means the node at this path was deleted (or moved) in B
|
// that means the node at this path was deleted (or moved) in B
|
||||||
// emit an empty "removed" diff to signify as such
|
// emit an empty "removed" diff to signify as such
|
||||||
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
||||||
if err := output(StateNode{
|
if err := output(types2.StateNode{
|
||||||
Path: node.Path,
|
Path: node.Path,
|
||||||
NodeValue: []byte{},
|
NodeValue: []byte{},
|
||||||
NodeType: Removed,
|
NodeType: types2.Removed,
|
||||||
LeafKey: leafKey,
|
LeafKey: leafKey,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case Extension, Branch:
|
case types2.Extension, types2.Branch:
|
||||||
// if this node's path did not show up in diffPathsAtB
|
// if this node's path did not show up in diffPathsAtB
|
||||||
// that means the node at this path was deleted (or moved) in B
|
// that means the node at this path was deleted (or moved) in B
|
||||||
// emit an empty "removed" diff to signify as such
|
// emit an empty "removed" diff to signify as such
|
||||||
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
||||||
if err := output(StateNode{
|
if err := output(types2.StateNode{
|
||||||
Path: node.Path,
|
Path: node.Path,
|
||||||
NodeValue: []byte{},
|
NodeValue: []byte{},
|
||||||
NodeType: Removed,
|
NodeType: types2.Removed,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -454,13 +456,13 @@ func (sdb *builder) deletedOrUpdatedState(a, b trie.NodeIterator, diffPathsAtB m
|
|||||||
// to generate the statediff node objects for all of the accounts that existed at both A and B but in different states
|
// to generate the statediff node objects for all of the accounts that existed at both A and B but in different states
|
||||||
// needs to be called before building account creations and deletions as this mutates
|
// needs to be called before building account creations and deletions as this mutates
|
||||||
// those account maps to remove the accounts which were updated
|
// those account maps to remove the accounts which were updated
|
||||||
func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updatedKeys []string,
|
func (sdb *builder) buildAccountUpdates(creations, deletions types2.AccountMap, updatedKeys []string,
|
||||||
watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink) error {
|
watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output types2.StateNodeSink) error {
|
||||||
var err error
|
var err error
|
||||||
for _, key := range updatedKeys {
|
for _, key := range updatedKeys {
|
||||||
createdAcc := creations[key]
|
createdAcc := creations[key]
|
||||||
deletedAcc := deletions[key]
|
deletedAcc := deletions[key]
|
||||||
var storageDiffs []StorageNode
|
var storageDiffs []types2.StorageNode
|
||||||
if deletedAcc.Account != nil && createdAcc.Account != nil {
|
if deletedAcc.Account != nil && createdAcc.Account != nil {
|
||||||
oldSR := deletedAcc.Account.Root
|
oldSR := deletedAcc.Account.Root
|
||||||
newSR := createdAcc.Account.Root
|
newSR := createdAcc.Account.Root
|
||||||
@ -471,7 +473,7 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated
|
|||||||
return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
|
return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err = output(StateNode{
|
if err = output(types2.StateNode{
|
||||||
NodeType: createdAcc.NodeType,
|
NodeType: createdAcc.NodeType,
|
||||||
Path: createdAcc.Path,
|
Path: createdAcc.Path,
|
||||||
NodeValue: createdAcc.NodeValue,
|
NodeValue: createdAcc.NodeValue,
|
||||||
@ -489,9 +491,9 @@ func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updated
|
|||||||
|
|
||||||
// buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A
|
// buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A
|
||||||
// it also returns the code and codehash for created contract accounts
|
// it also returns the code and codehash for created contract accounts
|
||||||
func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output StateNodeSink, codeOutput CodeSink) error {
|
func (sdb *builder) buildAccountCreations(accounts types2.AccountMap, watchedStorageKeys []common.Hash, intermediateStorageNodes bool, output types2.StateNodeSink, codeOutput types2.CodeSink) error {
|
||||||
for _, val := range accounts {
|
for _, val := range accounts {
|
||||||
diff := StateNode{
|
diff := types2.StateNode{
|
||||||
NodeType: val.NodeType,
|
NodeType: val.NodeType,
|
||||||
Path: val.Path,
|
Path: val.Path,
|
||||||
LeafKey: val.LeafKey,
|
LeafKey: val.LeafKey,
|
||||||
@@ -499,7 +501,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
 		}
 		if !bytes.Equal(val.Account.CodeHash, nullCodeHash) {
 			// For contract creations, any storage node contained is a diff
-			var storageDiffs []StorageNode
+			var storageDiffs []types2.StorageNode
 			err := sdb.buildStorageNodesEventual(val.Account.Root, watchedStorageKeys, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
 			if err != nil {
 				return fmt.Errorf("failed building eventual storage diffs for node %x\r\nerror: %v", val.Path, err)
@@ -511,7 +513,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
 			if err != nil {
 				return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
 			}
-			if err := codeOutput(CodeAndCodeHash{
+			if err := codeOutput(types2.CodeAndCodeHash{
 				Hash: codeHash,
 				Code: code,
 			}); err != nil {
@@ -528,7 +530,7 @@ func (sdb *builder) buildAccountCreations(accounts AccountMap, watchedStorageKey
 
 // buildStorageNodesEventual builds the storage diff node objects for a created account
 // i.e. it returns all the storage nodes at this state, since there is no previous state
-func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
 	if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
 		return nil
 	}
@@ -549,24 +551,24 @@ func (sdb *builder) buildStorageNodesEventual(sr common.Hash, watchedStorageKeys
 // buildStorageNodesFromTrie returns all the storage diff node objects in the provided node iterator
 // if any storage keys are provided it will only return those leaf nodes
 // including intermediate nodes can be turned on or off
-func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
 	for it.Next(true) {
 		// skip value nodes
 		if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
 			continue
 		}
-		node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+		node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
 		if err != nil {
 			return err
 		}
 		switch node.NodeType {
-		case Leaf:
+		case types2.Leaf:
 			partialPath := trie.CompactToHex(nodeElements[0].([]byte))
 			valueNodePath := append(node.Path, partialPath...)
 			encodedPath := trie.HexToCompact(valueNodePath)
 			leafKey := encodedPath[1:]
 			if isWatchedStorageKey(watchedStorageKeys, leafKey) {
-				if err := output(StorageNode{
+				if err := output(types2.StorageNode{
 					NodeType:  node.NodeType,
 					Path:      node.Path,
 					NodeValue: node.NodeValue,
@@ -575,9 +577,9 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStora
 					return err
 				}
 			}
-		case Extension, Branch:
+		case types2.Extension, types2.Branch:
 			if intermediateNodes {
-				if err := output(StorageNode{
+				if err := output(types2.StorageNode{
 					NodeType:  node.NodeType,
 					Path:      node.Path,
 					NodeValue: node.NodeValue,
@@ -593,7 +595,7 @@ func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, watchedStora
 }
 
 // buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
-func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, watchedStorageKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
 	if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
 		return nil
 	}
@@ -621,7 +623,7 @@ func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common
 	return nil
 }
 
-func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) (map[string]bool, error) {
+func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) (map[string]bool, error) {
 	diffPathsAtB := make(map[string]bool)
 	it, _ := trie.NewDifferenceIterator(a, b)
 	for it.Next(true) {
@@ -629,18 +631,18 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys
 		if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
 			continue
 		}
-		node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+		node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
 		if err != nil {
 			return nil, err
 		}
 		switch node.NodeType {
-		case Leaf:
+		case types2.Leaf:
 			partialPath := trie.CompactToHex(nodeElements[0].([]byte))
 			valueNodePath := append(node.Path, partialPath...)
 			encodedPath := trie.HexToCompact(valueNodePath)
 			leafKey := encodedPath[1:]
 			if isWatchedStorageKey(watchedKeys, leafKey) {
-				if err := output(StorageNode{
+				if err := output(types2.StorageNode{
 					NodeType:  node.NodeType,
 					Path:      node.Path,
 					NodeValue: node.NodeValue,
@@ -649,9 +651,9 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys
 					return nil, err
 				}
 			}
-		case Extension, Branch:
+		case types2.Extension, types2.Branch:
 			if intermediateNodes {
-				if err := output(StorageNode{
+				if err := output(types2.StorageNode{
 					NodeType:  node.NodeType,
 					Path:      node.Path,
 					NodeValue: node.NodeValue,
@@ -667,14 +669,14 @@ func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, watchedKeys
 	return diffPathsAtB, it.Error()
 }
 
-func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output StorageNodeSink) error {
+func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB map[string]bool, watchedKeys []common.Hash, intermediateNodes bool, output types2.StorageNodeSink) error {
 	it, _ := trie.NewDifferenceIterator(b, a)
 	for it.Next(true) {
 		// skip value nodes
 		if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
 			continue
 		}
-		node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
+		node, nodeElements, err := trie_helpers.ResolveNode(it, sdb.stateCache.TrieDB())
 		if err != nil {
 			return err
 		}
@@ -685,14 +687,14 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB
 			continue
 		}
 		switch node.NodeType {
-		case Leaf:
+		case types2.Leaf:
 			partialPath := trie.CompactToHex(nodeElements[0].([]byte))
 			valueNodePath := append(node.Path, partialPath...)
 			encodedPath := trie.HexToCompact(valueNodePath)
 			leafKey := encodedPath[1:]
 			if isWatchedStorageKey(watchedKeys, leafKey) {
-				if err := output(StorageNode{
-					NodeType:  Removed,
+				if err := output(types2.StorageNode{
+					NodeType:  types2.Removed,
 					Path:      node.Path,
 					NodeValue: []byte{},
 					LeafKey:   leafKey,
@@ -700,10 +702,10 @@ func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffPathsAtB
 				return err
 			}
 		}
-		case Extension, Branch:
+		case types2.Extension, types2.Branch:
 			if intermediateNodes {
-				if err := output(StorageNode{
-					NodeType:  Removed,
+				if err := output(types2.StorageNode{
+					NodeType:  types2.Removed,
 					Path:      node.Path,
 					NodeValue: []byte{},
 				}); err != nil {
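The storageNodeAppender helper referenced in the hunk above is not shown in this diff. A minimal sketch of the sink pattern it relies on, assuming types2.StorageNodeSink is func(types2.StorageNode) error, as its call sites here suggest:

func storageNodeAppender(nodes *[]types2.StorageNode) types2.StorageNodeSink {
	return func(node types2.StorageNode) error {
		// collect each emitted storage node into the caller's slice
		*nodes = append(*nodes, node)
		return nil
	}
}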
File diff suppressed because it is too large
statediff/config.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package statediff

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
)

// Config contains instantiation parameters for the state diffing service
type Config struct {
	IndexerConfig interfaces.Config
	// A unique ID used for this service
	ID string
	// Name for the client this service is running
	ClientName string
	// Whether to enable writing state diffs directly to track blockchain head
	EnableWriteLoop bool
	// Size of the worker pool
	NumWorkers uint
	// Context
	Context context.Context
}

// Params contains config parameters for the state diff builder
type Params struct {
	IntermediateStateNodes   bool
	IntermediateStorageNodes bool
	IncludeBlock             bool
	IncludeReceipts          bool
	IncludeTD                bool
	IncludeCode              bool
	WatchedAddresses         []common.Address
	WatchedStorageSlots      []common.Hash
}

// Args bundles the arguments for the state diff builder
type Args struct {
	OldStateRoot, NewStateRoot, BlockHash common.Hash
	BlockNumber                           *big.Int
}
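For orientation, a hedged example of how a caller might populate these structs; the dump indexer config used here for IndexerConfig is defined later in this commit, and the ID and worker count are placeholder values:

cfg := statediff.Config{
	IndexerConfig:   dump.Config{Dump: os.Stdout}, // dump backend from this commit
	ID:              "node-1234",                  // placeholder node ID
	ClientName:      "go-ethereum",
	EnableWriteLoop: true,
	NumWorkers:      4,
	Context:         context.Background(),
}
params := statediff.Params{
	IntermediateStateNodes: true,
	IncludeBlock:           true,
	IncludeReceipts:        true,
	IncludeTD:              true,
	IncludeCode:            true,
}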
statediff/indexer/constructor.go (new file, 66 lines)
@@ -0,0 +1,66 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// (AGPLv3 license header, as in statediff/config.go above)

package indexer

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/dump"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

// NewStateDiffIndexer creates and returns an implementation of the StateDiffIndexer interface
func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, nodeInfo node.Info, config interfaces.Config) (interfaces.StateDiffIndexer, error) {
	switch config.Type() {
	case shared.POSTGRES:
		pgc, ok := config.(postgres.Config)
		if !ok {
			return nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{})
		}
		var err error
		var driver sql.Driver
		switch pgc.Driver {
		case postgres.PGX:
			driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo)
			if err != nil {
				return nil, err
			}
		case postgres.SQLX:
			driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo)
			if err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("unrecognized Postgres driver type: %s", pgc.Driver)
		}
		return sql.NewStateDiffIndexer(ctx, chainConfig, postgres.NewPostgresDB(driver))
	case shared.DUMP:
		dumpc, ok := config.(dump.Config)
		if !ok {
			return nil, fmt.Errorf("dump config is not the correct type: got %T, expected %T", config, dump.Config{})
		}
		return dump.NewStateDiffIndexer(chainConfig, dumpc), nil
	default:
		return nil, fmt.Errorf("unrecognized database type: %s", config.Type())
	}
}
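A sketch of invoking the constructor with the dump backend added in this commit; params.MainnetChainConfig is go-ethereum's mainnet chain config, and nodeInfo stands in for whatever node.Info value the caller already holds:

ind, err := indexer.NewStateDiffIndexer(context.Background(), params.MainnetChainConfig, nodeInfo, dump.Config{Dump: os.Stdout})
if err != nil {
	return err
}
defer ind.Close()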
@@ -14,36 +14,37 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package indexer
+package dump
 
 import (
+	"fmt"
+	"io"
+
+	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+
+	"github.com/ethereum/go-ethereum/statediff/indexer/models"
+
 	blockstore "github.com/ipfs/go-ipfs-blockstore"
 	dshelp "github.com/ipfs/go-ipfs-ds-help"
 	node "github.com/ipfs/go-ipld-format"
-	"github.com/jmoiron/sqlx"
-	"github.com/lib/pq"
 
-	"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
-	"github.com/ethereum/go-ethereum/statediff/indexer/models"
 )
 
-const ipldBatchInsertPgStr string = `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING`
-
-// BlockTx wraps a Postgres tx with the state necessary for building the Postgres tx concurrently during trie difference iteration
-type BlockTx struct {
-	dbtx        *sqlx.Tx
-	BlockNumber uint64
-	headerID    int64
-	Close       func(blockTx *BlockTx, err error) error
+// BatchTx wraps a void with the state necessary for building the tx concurrently during trie difference iteration
+type BatchTx struct {
+	dump io.Writer
 
 	quit      chan struct{}
 	iplds     chan models.IPLDModel
 	ipldCache models.IPLDBatch
 
+	close func(blockTx *BatchTx, err error) error
 }
 
-func (tx *BlockTx) flush() error {
-	_, err := tx.dbtx.Exec(ipldBatchInsertPgStr, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values))
-	if err != nil {
+// Submit satisfies indexer.AtomicTx
+func (tx *BatchTx) Submit(err error) error {
+	return tx.close(tx, err)
+}
+
+func (tx *BatchTx) flush() error {
+	if _, err := fmt.Fprintf(tx.dump, "%+v", tx.ipldCache); err != nil {
 		return err
 	}
 	tx.ipldCache = models.IPLDBatch{}
@@ -51,33 +52,34 @@ func (tx *BlockTx) flush() error {
 }
 
 // run in background goroutine to synchronize concurrent appends to the ipldCache
-func (tx *BlockTx) cache() {
+func (tx *BatchTx) cache() {
 	for {
 		select {
 		case i := <-tx.iplds:
 			tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
 			tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
 		case <-tx.quit:
+			tx.ipldCache = models.IPLDBatch{}
 			return
 		}
 	}
 }
 
-func (tx *BlockTx) cacheDirect(key string, value []byte) {
+func (tx *BatchTx) cacheDirect(key string, value []byte) {
 	tx.iplds <- models.IPLDModel{
 		Key:  key,
 		Data: value,
 	}
 }
 
-func (tx *BlockTx) cacheIPLD(i node.Node) {
+func (tx *BatchTx) cacheIPLD(i node.Node) {
 	tx.iplds <- models.IPLDModel{
 		Key:  blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
 		Data: i.RawData(),
 	}
 }
 
-func (tx *BlockTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
+func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
 	c, err := ipld.RawdataToCid(codec, raw, mh)
 	if err != nil {
 		return "", "", err
statediff/indexer/database/dump/config.go (new file, 31 lines)
@@ -0,0 +1,31 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// (AGPLv3 license header, as in statediff/config.go above)

package dump

import (
	"io"

	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

type Config struct {
	Dump io.WriteCloser
}

func (c Config) Type() shared.DBType {
	return shared.DUMP
}
statediff/indexer/database/dump/indexer.go (new file, 490 lines)
@@ -0,0 +1,490 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// (AGPLv3 license header, as in statediff/config.go above)

package dump

import (
	"fmt"
	"io"
	"math/big"
	"time"

	ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"

	"github.com/ipfs/go-cid"
	node "github.com/ipfs/go-ipld-format"
	"github.com/multiformats/go-multihash"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	"github.com/ethereum/go-ethereum/statediff/indexer/models"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}

var (
	indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry)
)

// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of a void
type StateDiffIndexer struct {
	dump        io.WriteCloser
	chainConfig *params.ChainConfig
}

// NewStateDiffIndexer creates a void implementation of interfaces.StateDiffIndexer
func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config) *StateDiffIndexer {
	return &StateDiffIndexer{
		dump:        config.Dump,
		chainConfig: chainConfig,
	}
}

// ReportDBMetrics has nothing to report for dump
func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {}

// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
	start, t := time.Now(), time.Now()
	blockHash := block.Hash()
	blockHashStr := blockHash.String()
	height := block.NumberU64()
	traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr)
	transactions := block.Transactions()
	// Derive any missing fields
	if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
		return nil, err
	}

	// Generate the block iplds
	headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
	if err != nil {
		return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
	}

	if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
		return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
	}
	if len(txTrieNodes) != len(rctTrieNodes) {
		return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
	}

	// Calculate reward
	var reward *big.Int
	// in PoA networks block reward is 0
	if sdi.chainConfig.Clique != nil {
		reward = big.NewInt(0)
	} else {
		reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
	}
	t = time.Now()

	blockTx := &BatchTx{
		dump:      sdi.dump,
		iplds:     make(chan models.IPLDModel),
		quit:      make(chan struct{}),
		ipldCache: models.IPLDBatch{},
		close: func(self *BatchTx, err error) error {
			close(self.quit)
			close(self.iplds)
			tDiff := time.Since(t)
			indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
			traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
			t = time.Now()
			if err := self.flush(); err != nil {
				traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
				log.Debug(traceMsg)
				return err
			}
			tDiff = time.Since(t)
			indexerMetrics.tPostgresCommit.Update(tDiff)
			traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
			traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
			log.Debug(traceMsg)
			return err
		},
	}
	go blockTx.cache()

	tDiff := time.Since(t)
	indexerMetrics.tFreePostgres.Update(tDiff)

	traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String())
	t = time.Now()

	// Publish and index header, collect headerID
	var headerID int64
	headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty)
	if err != nil {
		return nil, err
	}
	tDiff = time.Since(t)
	indexerMetrics.tHeaderProcessing.Update(tDiff)
	traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
	t = time.Now()
	// Publish and index uncles
	err = sdi.processUncles(blockTx, headerID, height, uncleNodes)
	if err != nil {
		return nil, err
	}
	tDiff = time.Since(t)
	indexerMetrics.tUncleProcessing.Update(tDiff)
	traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
	t = time.Now()
	// Publish and index receipts and txs
	err = sdi.processReceiptsAndTxs(blockTx, processArgs{
		headerID:        headerID,
		blockNumber:     block.Number(),
		receipts:        receipts,
		txs:             transactions,
		rctNodes:        rctNodes,
		rctTrieNodes:    rctTrieNodes,
		txNodes:         txNodes,
		txTrieNodes:     txTrieNodes,
		logTrieNodes:    logTrieNodes,
		logLeafNodeCIDs: logLeafNodeCIDs,
		rctLeafNodeCIDs: rctLeafNodeCIDs,
	})
	if err != nil {
		return nil, err
	}
	tDiff = time.Since(t)
	indexerMetrics.tTxAndRecProcessing.Update(tDiff)
	traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
	t = time.Now()

	return blockTx, err
}

// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) {
	tx.cacheIPLD(headerNode)

	var baseFee *int64
	if header.BaseFee != nil {
		baseFee = new(int64)
		*baseFee = header.BaseFee.Int64()
	}

	mod := models.HeaderModel{
		CID:             headerNode.Cid().String(),
		MhKey:           shared.MultihashKeyFromCID(headerNode.Cid()),
		ParentHash:      header.ParentHash.String(),
		BlockNumber:     header.Number.String(),
		BlockHash:       header.Hash().String(),
		TotalDifficulty: td.String(),
		Reward:          reward.String(),
		Bloom:           header.Bloom.Bytes(),
		StateRoot:       header.Root.String(),
		RctRoot:         header.ReceiptHash.String(),
		TxRoot:          header.TxHash.String(),
		UncleRoot:       header.UncleHash.String(),
		Timestamp:       header.Time,
		BaseFee:         baseFee,
	}
	_, err := fmt.Fprintf(sdi.dump, "%+v", mod)
	return 0, err
}

// processUncles publishes and indexes uncle IPLDs in Postgres
func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID int64, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error {
	// publish and index uncles
	for _, uncleNode := range uncleNodes {
		tx.cacheIPLD(uncleNode)
		var uncleReward *big.Int
		// in PoA networks uncle reward is 0
		if sdi.chainConfig.Clique != nil {
			uncleReward = big.NewInt(0)
		} else {
			uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
		}
		uncle := models.UncleModel{
			CID:        uncleNode.Cid().String(),
			MhKey:      shared.MultihashKeyFromCID(uncleNode.Cid()),
			ParentHash: uncleNode.ParentHash.String(),
			BlockHash:  uncleNode.Hash().String(),
			Reward:     uncleReward.String(),
		}
		if _, err := fmt.Fprintf(sdi.dump, "%+v", uncle); err != nil {
			return err
		}
	}
	return nil
}

// processArgs bundles arguments to processReceiptsAndTxs
type processArgs struct {
	headerID        int64
	blockNumber     *big.Int
	receipts        types.Receipts
	txs             types.Transactions
	rctNodes        []*ipld2.EthReceipt
	rctTrieNodes    []*ipld2.EthRctTrie
	txNodes         []*ipld2.EthTx
	txTrieNodes     []*ipld2.EthTxTrie
	logTrieNodes    [][]*ipld2.EthLogTrie
	logLeafNodeCIDs [][]cid.Cid
	rctLeafNodeCIDs []cid.Cid
}

// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error {
	// Process receipts and txs
	signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
	for i, receipt := range args.receipts {
		for _, logTrieNode := range args.logTrieNodes[i] {
			tx.cacheIPLD(logTrieNode)
		}
		txNode := args.txNodes[i]
		tx.cacheIPLD(txNode)

		// Indexing
		// extract topic and contract data from the receipt for indexing
		mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses
		logDataSet := make([]*models.LogsModel, len(receipt.Logs))
		for idx, l := range receipt.Logs {
			topicSet := make([]string, 4)
			for ti, topic := range l.Topics {
				topicSet[ti] = topic.Hex()
			}

			if !args.logLeafNodeCIDs[i][idx].Defined() {
				return fmt.Errorf("invalid log cid")
			}

			mappedContracts[l.Address.String()] = true
			logDataSet[idx] = &models.LogsModel{
				Address:   l.Address.String(),
				Index:     int64(l.Index),
				Data:      l.Data,
				LeafCID:   args.logLeafNodeCIDs[i][idx].String(),
				LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
				Topic0:    topicSet[0],
				Topic1:    topicSet[1],
				Topic2:    topicSet[2],
				Topic3:    topicSet[3],
			}
		}
		// these are the contracts seen in the logs
		logContracts := make([]string, 0, len(mappedContracts))
		for addr := range mappedContracts {
			logContracts = append(logContracts, addr)
		}
		// this is the contract address if this receipt is for a contract creation tx
		contract := shared.HandleZeroAddr(receipt.ContractAddress)
		var contractHash string
		if contract != "" {
			contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
		}
		// index tx first so that the receipt can reference it by FK
		trx := args.txs[i]
		// derive sender for the tx that corresponds with this receipt
		from, err := types.Sender(signer, trx)
		if err != nil {
			return fmt.Errorf("error deriving tx sender: %v", err)
		}
		txModel := models.TxModel{
			Dst:    shared.HandleZeroAddrPointer(trx.To()),
			Src:    shared.HandleZeroAddr(from),
			TxHash: trx.Hash().String(),
			Index:  int64(i),
			Data:   trx.Data(),
			CID:    txNode.Cid().String(),
			MhKey:  shared.MultihashKeyFromCID(txNode.Cid()),
		}
		txType := trx.Type()
		if txType != types.LegacyTxType {
			txModel.Type = &txType
		}
		if _, err := fmt.Fprintf(sdi.dump, "%+v", txModel); err != nil {
			return err
		}

		// index access list if this is one
		for j, accessListElement := range trx.AccessList() {
			storageKeys := make([]string, len(accessListElement.StorageKeys))
			for k, storageKey := range accessListElement.StorageKeys {
				storageKeys[k] = storageKey.Hex()
			}
			accessListElementModel := models.AccessListElementModel{
				Index:       int64(j),
				Address:     accessListElement.Address.Hex(),
				StorageKeys: storageKeys,
			}
			if _, err := fmt.Fprintf(sdi.dump, "%+v", accessListElementModel); err != nil {
				return err
			}
		}

		// index the receipt
		if !args.rctLeafNodeCIDs[i].Defined() {
			return fmt.Errorf("invalid receipt leaf node cid")
		}

		rctModel := &models.ReceiptModel{
			Contract:     contract,
			ContractHash: contractHash,
			LeafCID:      args.rctLeafNodeCIDs[i].String(),
			LeafMhKey:    shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
			LogRoot:      args.rctNodes[i].LogRoot.String(),
		}
		if len(receipt.PostState) == 0 {
			rctModel.PostStatus = receipt.Status
		} else {
			rctModel.PostState = common.Bytes2Hex(receipt.PostState)
		}

		if _, err := fmt.Fprintf(sdi.dump, "%+v", rctModel); err != nil {
			return err
		}

		if _, err := fmt.Fprintf(sdi.dump, "%+v", logDataSet); err != nil {
			return err
		}
	}

	// publish trie nodes, these aren't indexed directly
	for i, n := range args.txTrieNodes {
		tx.cacheIPLD(n)
		tx.cacheIPLD(args.rctTrieNodes[i])
	}

	return nil
}

// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode) error {
	tx, ok := batch.(*BatchTx)
	if !ok {
		return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
	}
	// publish the state node
	if stateNode.NodeType == sdtypes.Removed {
		// short circuit if it is a Removed node
		// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
		stateModel := models.StateNodeModel{
			Path:     stateNode.Path,
			StateKey: common.BytesToHash(stateNode.LeafKey).String(),
			CID:      shared.RemovedNodeStateCID,
			MhKey:    shared.RemovedNodeMhKey,
			NodeType: stateNode.NodeType.Int(),
		}
		_, err := fmt.Fprintf(sdi.dump, "%+v", stateModel)
		return err
	}
	stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
	if err != nil {
		return fmt.Errorf("error generating and caching state node IPLD: %v", err)
	}
	stateModel := models.StateNodeModel{
		Path:     stateNode.Path,
		StateKey: common.BytesToHash(stateNode.LeafKey).String(),
		CID:      stateCIDStr,
		MhKey:    stateMhKey,
		NodeType: stateNode.NodeType.Int(),
	}
	// index the state node, collect the stateID to reference by FK
	if _, err := fmt.Fprintf(sdi.dump, "%+v", stateModel); err != nil {
		return err
	}
	// if we have a leaf, decode and index the account data
	if stateNode.NodeType == sdtypes.Leaf {
		var i []interface{}
		if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
			return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
		}
		if len(i) != 2 {
			return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
		}
		var account types.StateAccount
		if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
			return fmt.Errorf("error decoding state account rlp: %s", err.Error())
		}
		accountModel := models.StateAccountModel{
			Balance:     account.Balance.String(),
			Nonce:       account.Nonce,
			CodeHash:    account.CodeHash,
			StorageRoot: account.Root.String(),
		}
		if _, err := fmt.Fprintf(sdi.dump, "%+v", accountModel); err != nil {
			return err
		}
	}
	// if there are any storage nodes associated with this node, publish and index them
	for _, storageNode := range stateNode.StorageNodes {
		if storageNode.NodeType == sdtypes.Removed {
			// short circuit if it is a Removed node
			// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
			storageModel := models.StorageNodeModel{
				Path:       storageNode.Path,
				StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
				CID:        shared.RemovedNodeStorageCID,
				MhKey:      shared.RemovedNodeMhKey,
				NodeType:   storageNode.NodeType.Int(),
			}
			if _, err := fmt.Fprintf(sdi.dump, "%+v", storageModel); err != nil {
				return err
			}
			continue
		}
		storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
		if err != nil {
			return fmt.Errorf("error generating and caching storage node IPLD: %v", err)
		}
		storageModel := models.StorageNodeModel{
			Path:       storageNode.Path,
			StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
			CID:        storageCIDStr,
			MhKey:      storageMhKey,
			NodeType:   storageNode.NodeType.Int(),
		}
		if _, err := fmt.Fprintf(sdi.dump, "%+v", storageModel); err != nil {
			return err
		}
	}

	return nil
}

// PushCodeAndCodeHash publishes code and codehash pairs to the ipld sql
func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
	tx, ok := batch.(*BatchTx)
	if !ok {
		return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
	}
	// codec doesn't matter since db key is multihash-based
	mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
	if err != nil {
		return fmt.Errorf("error deriving multihash key from codehash: %v", err)
	}
	tx.cacheDirect(mhKey, codeAndCodeHash.Code)
	return nil
}

// Close satisfies io.Closer
func (sdi *StateDiffIndexer) Close() error {
	return sdi.dump.Close()
}
statediff/indexer/database/dump/metrics.go (new file, 94 lines)
@@ -0,0 +1,94 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// (AGPLv3 license header, as in statediff/config.go above)

package dump

import (
	"strings"

	"github.com/ethereum/go-ethereum/metrics"
)

const (
	namespace = "statediff"
)

// Build a fully qualified metric name
func metricName(subsystem, name string) string {
	if name == "" {
		return ""
	}
	parts := []string{namespace, name}
	if subsystem != "" {
		parts = []string{namespace, subsystem, name}
	}
	// Prometheus uses _ but geth metrics uses / and replaces
	return strings.Join(parts, "/")
}

type indexerMetricsHandles struct {
	// The total number of processed blocks
	blocks metrics.Counter
	// The total number of processed transactions
	transactions metrics.Counter
	// The total number of processed receipts
	receipts metrics.Counter
	// The total number of processed logs
	logs metrics.Counter
	// The total number of access list entries processed
	accessListEntries metrics.Counter
	// Time spent waiting for free postgres tx
	tFreePostgres metrics.Timer
	// Postgres transaction commit duration
	tPostgresCommit metrics.Timer
	// Header processing time
	tHeaderProcessing metrics.Timer
	// Uncle processing time
	tUncleProcessing metrics.Timer
	// Tx and receipt processing time
	tTxAndRecProcessing metrics.Timer
	// State, storage, and code combined processing time
	tStateStoreCodeProcessing metrics.Timer
}

func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles {
	ctx := indexerMetricsHandles{
		blocks:                    metrics.NewCounter(),
		transactions:              metrics.NewCounter(),
		receipts:                  metrics.NewCounter(),
		logs:                      metrics.NewCounter(),
		accessListEntries:         metrics.NewCounter(),
		tFreePostgres:             metrics.NewTimer(),
		tPostgresCommit:           metrics.NewTimer(),
		tHeaderProcessing:         metrics.NewTimer(),
		tUncleProcessing:          metrics.NewTimer(),
		tTxAndRecProcessing:       metrics.NewTimer(),
		tStateStoreCodeProcessing: metrics.NewTimer(),
	}
	subsys := "indexer"
	reg.Register(metricName(subsys, "blocks"), ctx.blocks)
	reg.Register(metricName(subsys, "transactions"), ctx.transactions)
	reg.Register(metricName(subsys, "receipts"), ctx.receipts)
	reg.Register(metricName(subsys, "logs"), ctx.logs)
	reg.Register(metricName(subsys, "access_list_entries"), ctx.accessListEntries)
	reg.Register(metricName(subsys, "t_free_postgres"), ctx.tFreePostgres)
	reg.Register(metricName(subsys, "t_postgres_commit"), ctx.tPostgresCommit)
	reg.Register(metricName(subsys, "t_header_processing"), ctx.tHeaderProcessing)
	reg.Register(metricName(subsys, "t_uncle_processing"), ctx.tUncleProcessing)
	reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.tTxAndRecProcessing)
	reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.tStateStoreCodeProcessing)
	return ctx
}
statediff/indexer/database/sql/batch_tx.go (new file, 106 lines)
@@ -0,0 +1,106 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// (AGPLv3 license header, as in statediff/config.go above)

package sql

import (
	"context"

	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"

	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	node "github.com/ipfs/go-ipld-format"
	"github.com/lib/pq"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/statediff/indexer/models"
)

// BatchTx wraps a sql tx with the state necessary for building the tx concurrently during trie difference iteration
type BatchTx struct {
	ctx       context.Context
	dbtx      Tx
	headerID  int64
	stm       string
	quit      chan struct{}
	iplds     chan models.IPLDModel
	ipldCache models.IPLDBatch

	close func(blockTx *BatchTx, err error) error
}

// Submit satisfies indexer.AtomicTx
func (tx *BatchTx) Submit(err error) error {
	return tx.close(tx, err)
}

func (tx *BatchTx) flush() error {
	_, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values))
	if err != nil {
		return err
	}
	tx.ipldCache = models.IPLDBatch{}
	return nil
}

// run in background goroutine to synchronize concurrent appends to the ipldCache
func (tx *BatchTx) cache() {
	for {
		select {
		case i := <-tx.iplds:
			tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
			tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
		case <-tx.quit:
			tx.ipldCache = models.IPLDBatch{}
			return
		}
	}
}

func (tx *BatchTx) cacheDirect(key string, value []byte) {
	tx.iplds <- models.IPLDModel{
		Key:  key,
		Data: value,
	}
}

func (tx *BatchTx) cacheIPLD(i node.Node) {
	tx.iplds <- models.IPLDModel{
		Key:  blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
		Data: i.RawData(),
	}
}

func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
	c, err := ipld.RawdataToCid(codec, raw, mh)
	if err != nil {
		return "", "", err
	}
	prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
	tx.iplds <- models.IPLDModel{
		Key:  prefixedKey,
		Data: raw,
	}
	return c.String(), prefixedKey, err
}

// rollback sql transaction and log any error
func rollback(ctx context.Context, tx Tx) {
	if err := tx.Rollback(ctx); err != nil {
		log.Error(err.Error())
	}
}
statediff/indexer/database/sql/batch_writer.go (new file, 216 lines)
@ -0,0 +1,216 @@
|
|||||||
|
// VulcanizeDB
|
||||||
|
// Copyright © 2021 Vulcanize
|
||||||
|
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package sql
|
||||||
|
|
||||||
|
/*
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||||
|
"github.com/jmoiron/sqlx"
|
||||||
|
)
|
||||||
|
|
||||||
|
*/
|
||||||
|
/*
|
||||||
|
// PG_MAX_PARAMS is the max number of placeholders+args a statement can support
|
||||||
|
// above this limit we need to split into a separate batch
|
||||||
|
const PG_MAX_PARAMS int = 32767
|
||||||
|
|
||||||
|
const (
|
||||||
|
ipldInsertPgStr string = `INSERT INTO public.blocks (key, data) VALUES (unnest($1), unnest($2)) ON CONFLICT (key) DO NOTHING`
|
||||||
|
headerCIDsPgStr string = `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee)
|
||||||
|
VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8), unnest($9), unnest($10), unnest($11), unnest($12), unnest($13), unnest($14), unnest($15), unnest($16))
|
||||||
|
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = (excluded.parent_hash, excluded.cid, excluded.td, excluded.node_id, excluded.reward, excluded.state_root, excluded.tx_root, excluded.receipt_root, excluded.uncle_root, excluded.bloom, excluded.timestamp, excluded.mh_key, eth.header_cids.times_validated + 1, excluded.base_fee)
|
||||||
|
RETURNING id`
|
||||||
|
unclesCIDsPgStr string = `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6))
|
||||||
|
ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = (excluded.parent_hash, excluded.cid, excluded.reward, excluded.mh_key)`
|
||||||
|
txCIDsPgStr string = `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8), unnest($9))
|
||||||
|
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = (excluded.cid, excluded.dst, excluded.src, excluded.index, excluded.mh_key, excluded.tx_data, excluded.tx_type)
|
||||||
|
RETURNING id`
|
||||||
|
accessListPgStr string = `INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES (unnest($1), unnest($2), unnest($3), unnest($4))
|
||||||
|
ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = (excluded.address, excluded.storage_keys)`
|
||||||
|
rctCIDsPgStr string = `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8))
|
||||||
|
ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = (excluded.leaf_cid, excluded.contract, excluded.contract_hash, excluded.leaf_mh_key, excluded.post_state, excluded.post_status, excluded.log_root)
|
||||||
|
RETURNING id`
|
||||||
|
logCIDsPgStr string = `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7), unnest($8), unnest($9), unnest($10))
|
||||||
|
ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = (excluded.leaf_cid, excluded.leaf_mh_key, excluded.address, excluded.topic0, excluded.topic1, excluded.topic2, excluded.topic3, excluded.log_data)`
|
||||||
|
stateCIDsPgStr string = `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7))
|
||||||
|
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = (excluded.state_leaf_key, excluded.cid, excluded.node_type, excluded.diff, excluded.mh_key)
|
||||||
|
RETURNING id`
|
||||||
|
stateAccountsPgStr string = `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5))
|
||||||
|
ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = (excluded.balance, excluded.nonce, excluded.code_hash, excluded.storage_root)`
|
||||||
|
storageCIDsPgStr string = `INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES (unnest($1), unnest($2), unnest($3), unnest($4), unnest($5), unnest($6), unnest($7))
|
||||||
|
ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = (excluded.storage_leaf_key, excluded.cid, excluded.node_type, excluded.diff, excluded.mh_key)`
|
||||||
|
)

// PostgresBatchWriter is used to write statediff data to Postgres using batch inserts/upserts
type PostgresBatchWriter struct {
	db *postgres.DB

	// prepared statements (prepared inside tx)
	ipldsPreparedStm      *sqlx.Stmt
	unclesPrepared        *sqlx.Stmt
	txPreparedStm         *sqlx.Stmt
	accessListPreparedStm *sqlx.Stmt
	rctPreparedStm        *sqlx.Stmt
	logPreparedStm        *sqlx.Stmt
	statePreparedStm      *sqlx.Stmt
	accountPreparedStm    *sqlx.Stmt
	storagePreparedStm    *sqlx.Stmt

	// cached arguments
	queuedHeaderArgs     models.HeaderModel
	queuedUnclesArgs     models.UncleBatch
	queuedTxArgs         models.TxBatch
	queuedAccessListArgs models.AccessListBatch
	queuedRctArgs        models.ReceiptBatch
	queuedLogArgs        models.LogBatch
	queuedStateArgs      models.StateBatch
	queuedAccountArgs    models.AccountBatch
	queuedStorageArgs    models.StorageBatch
}
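The prepared-statement fields above are only meaningful once a transaction is open. As a hedged sketch of one way to populate them, using sqlx's Tx.Preparex (the method name prepareStatements is hypothetical and only two of the nine statements are shown):

func (pbw *PostgresBatchWriter) prepareStatements(tx *sqlx.Tx) error {
	var err error
	// Preparex returns an *sqlx.Stmt bound to this transaction
	if pbw.txPreparedStm, err = tx.Preparex(txCIDsPgStr); err != nil {
		return err
	}
	if pbw.rctPreparedStm, err = tx.Preparex(rctCIDsPgStr); err != nil {
		return err
	}
	return nil
}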

// NewPostgresBatchWriter creates a new pointer to a PostgresBatchWriter
func NewPostgresBatchWriter(db *postgres.DB) *PostgresBatchWriter {
	return &PostgresBatchWriter{
		db: db,
	}
}

func (pbw *PostgresBatchWriter) queueHeader(header models.HeaderModel) {
	pbw.queuedHeaderArgs = header
}

func (pbw *PostgresBatchWriter) queueUncle(uncle models.UncleModel) {
	pbw.queuedUnclesArgs.BlockHashes = append(pbw.queuedUnclesArgs.BlockHashes, uncle.BlockHash)
	pbw.queuedUnclesArgs.ParentHashes = append(pbw.queuedUnclesArgs.ParentHashes, uncle.ParentHash)
	pbw.queuedUnclesArgs.CIDs = append(pbw.queuedUnclesArgs.CIDs, uncle.CID)
	pbw.queuedUnclesArgs.MhKeys = append(pbw.queuedUnclesArgs.MhKeys, uncle.MhKey)
	pbw.queuedUnclesArgs.Rewards = append(pbw.queuedUnclesArgs.Rewards, uncle.Reward)
}

func (pbw *PostgresBatchWriter) queueTransaction(tx models.TxModel) {
	pbw.queuedTxArgs.Indexes = append(pbw.queuedTxArgs.Indexes, tx.Index)
	pbw.queuedTxArgs.TxHashes = append(pbw.queuedTxArgs.TxHashes, tx.TxHash)
	pbw.queuedTxArgs.CIDs = append(pbw.queuedTxArgs.CIDs, tx.CID)
	pbw.queuedTxArgs.MhKeys = append(pbw.queuedTxArgs.MhKeys, tx.MhKey)
	pbw.queuedTxArgs.Dsts = append(pbw.queuedTxArgs.Dsts, tx.Dst)
	pbw.queuedTxArgs.Srcs = append(pbw.queuedTxArgs.Srcs, tx.Src)
	pbw.queuedTxArgs.Datas = append(pbw.queuedTxArgs.Datas, tx.Data)
	pbw.queuedTxArgs.Types = append(pbw.queuedTxArgs.Types, tx.Type)
}

func (pbw *PostgresBatchWriter) queueAccessListElement(al models.AccessListElementModel) {

}

func (pbw *PostgresBatchWriter) queueReceipt(rct models.ReceiptModel) {

}

func (pbw *PostgresBatchWriter) upsertTransactionCID(tx *sqlx.Tx, transaction models.TxModel, headerID int64) (int64, error) {
	var txID int64
	err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
		ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9)
		RETURNING id`,
		headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID)
	if err != nil {
		return 0, fmt.Errorf("error upserting transaction_cids entry: %v", err)
	}
	indexerMetrics.transactions.Inc(1)
	return txID, nil
}

func (pbw *PostgresBatchWriter) upsertAccessListElement(tx *sqlx.Tx, accessListElement models.AccessListElementModel, txID int64) error {
	_, err := tx.Exec(`INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
		ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)`,
		txID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
	if err != nil {
		return fmt.Errorf("error upserting access_list_element entry: %v", err)
	}
	indexerMetrics.accessListEntries.Inc(1)
	return nil
}

func (pbw *PostgresBatchWriter) upsertReceiptCID(tx *sqlx.Tx, rct *models.ReceiptModel, txID int64) (int64, error) {
	var receiptID int64
	err := tx.QueryRowx(`INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
		ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8)
		RETURNING id`,
		txID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID)
	if err != nil {
		return 0, fmt.Errorf("error upserting receipt_cids entry: %w", err)
	}
	indexerMetrics.receipts.Inc(1)
	return receiptID, nil
}

func (pbw *PostgresBatchWriter) upsertLogCID(tx *sqlx.Tx, logs []*models.LogsModel, receiptID int64) error {
	for _, log := range logs {
		_, err := tx.Exec(`INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
			ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = ($1, $2, $4, $6, $7, $8, $9, $10)`,
			log.LeafCID, log.LeafMhKey, receiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data)
		if err != nil {
			return fmt.Errorf("error upserting logs entry: %w", err)
		}
		indexerMetrics.logs.Inc(1)
	}
	return nil
}

func (pbw *PostgresBatchWriter) upsertStateCID(tx *sqlx.Tx, stateNode models.StateNodeModel, headerID int64) (int64, error) {
	var stateID int64
	var stateKey string
	if stateNode.StateKey != nullHash.String() {
		stateKey = stateNode.StateKey
	}
	err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
		ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
		RETURNING id`,
		headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
	if err != nil {
		return 0, fmt.Errorf("error upserting state_cids entry: %v", err)
	}
	return stateID, nil
}

func (pbw *PostgresBatchWriter) upsertStateAccount(tx *sqlx.Tx, stateAccount models.StateAccountModel, stateID int64) error {
	_, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
		ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`,
		stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
	if err != nil {
		return fmt.Errorf("error upserting state_accounts entry: %v", err)
	}
	return nil
}

func (pbw *PostgresBatchWriter) upsertStorageCID(tx *sqlx.Tx, storageCID models.StorageNodeModel, stateID int64) error {
	var storageKey string
	if storageCID.StorageKey != nullHash.String() {
		storageKey = storageCID.StorageKey
	}
	_, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
		ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`,
		stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
	if err != nil {
		return fmt.Errorf("error upserting storage_cids entry: %v", err)
	}
	return nil
}
*/
@@ -14,15 +14,19 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-// Package indexer provides an interface for pushing and indexing IPLD objects into a Postgres database
+// Package sql provides an interface for pushing and indexing IPLD objects into a sql database
 // Metrics for reporting processing and connection stats are defined in ./metrics.go
-package indexer
+package sql

 import (
+	"context"
 	"fmt"
 	"math/big"
 	"time"

+	ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+
 	"github.com/ipfs/go-cid"
 	node "github.com/ipfs/go-ipld-format"
 	"github.com/multiformats/go-multihash"
@@ -34,47 +38,36 @@ import (
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
+	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
 	"github.com/ethereum/go-ethereum/statediff/indexer/models"
-	"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
 	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
 	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
 )

+var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
+
 var (
 	indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry)
 	dbMetrics      = RegisterDBMetrics(metrics.DefaultRegistry)
 )

-const (
-	RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
-	RemovedNodeStateCID   = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
-	RemovedNodeMhKey      = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A"
-)
-
+// StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of an SQL sql
-// Indexer interface to allow substitution of mocks for testing
-type Indexer interface {
-	PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error)
-	PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error
-	PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error
-	ReportDBMetrics(delay time.Duration, quit <-chan bool)
-}
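For context, a hedged sketch (not part of this commit) of how a caller might drive the removed Indexer interface for one block. The helper name indexDiff and its arguments are placeholders, error handling is condensed, and the Close call shape follows the BlockTx struct literal shown further down.

func indexDiff(ind Indexer, block *types.Block, receipts types.Receipts, td *big.Int, stateNodes []sdtypes.StateNode) error {
	tx, err := ind.PushBlock(block, receipts, td)
	if err != nil {
		return err
	}
	// the returned tx must be closed to commit (err == nil) or roll back (err != nil)
	defer func() {
		err = tx.Close(tx, err)
	}()
	for _, node := range stateNodes {
		if err = ind.PushStateNode(tx, node); err != nil {
			return err
		}
	}
	return err
}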

-// StateDiffIndexer satisfies the Indexer interface for ethereum statediff objects
 type StateDiffIndexer struct {
+	ctx         context.Context
 	chainConfig *params.ChainConfig
-	dbWriter    *PostgresCIDWriter
+	dbWriter    *Writer
 }

-// NewStateDiffIndexer creates a pointer to a new PayloadConverter which satisfies the PayloadConverter interface
-func NewStateDiffIndexer(chainConfig *params.ChainConfig, db *postgres.DB) (*StateDiffIndexer, error) {
+// NewStateDiffIndexer creates a sql implementation of interfaces.StateDiffIndexer
+func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, db Database) (*StateDiffIndexer, error) {
 	// Write the removed node to the db on init
-	if err := shared.PublishDirectWithDB(db, RemovedNodeMhKey, []byte{}); err != nil {
+	if _, err := db.Exec(ctx, db.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil {
 		return nil, err
 	}
 	return &StateDiffIndexer{
+		ctx:         ctx,
 		chainConfig: chainConfig,
-		dbWriter:    NewPostgresCIDWriter(db),
+		dbWriter:    NewWriter(db),
 	}, nil
 }
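A minimal usage sketch under the new constructor signature; the helper name runIndexer and the ten-second delay are illustrative choices, not part of this commit.

func runIndexer(ctx context.Context, chainConfig *params.ChainConfig, db Database) (*StateDiffIndexer, chan bool, error) {
	ind, err := NewStateDiffIndexer(ctx, chainConfig, db)
	if err != nil {
		return nil, nil, err
	}
	// report connection and processing metrics every 10s until quit is closed
	quit := make(chan bool)
	ind.ReportDBMetrics(10*time.Second, quit)
	return ind, quit, nil
}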
|
||||||
|
|
||||||
@ -97,9 +90,9 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// PushBlock pushes and indexes block data in database, except state & storage nodes (includes header, uncles, transactions & receipts)
|
// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
|
||||||
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
|
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
|
||||||
func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (*BlockTx, error) {
|
func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
|
||||||
start, t := time.Now(), time.Now()
|
start, t := time.Now(), time.Now()
|
||||||
blockHash := block.Hash()
|
blockHash := block.Hash()
|
||||||
blockHashStr := blockHash.String()
|
blockHashStr := blockHash.String()
|
||||||
@ -112,7 +105,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Generate the block iplds
|
// Generate the block iplds
|
||||||
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld.FromBlockAndReceipts(block, receipts)
|
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
|
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
|
||||||
}
|
}
|
||||||
@ -130,49 +123,50 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
|||||||
if sdi.chainConfig.Clique != nil {
|
if sdi.chainConfig.Clique != nil {
|
||||||
reward = big.NewInt(0)
|
reward = big.NewInt(0)
|
||||||
} else {
|
} else {
|
||||||
reward = CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
|
reward = shared.CalcEthBlockReward(block.Header(), block.Uncles(), block.Transactions(), receipts)
|
||||||
}
|
}
|
||||||
t = time.Now()
|
t = time.Now()
|
||||||
|
|
||||||
// Begin new db tx for everything
|
// Begin new db tx for everything
|
||||||
tx, err := sdi.dbWriter.db.Beginx()
|
tx, err := sdi.dbWriter.db.Begin(sdi.ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if p := recover(); p != nil {
|
if p := recover(); p != nil {
|
||||||
shared.Rollback(tx)
|
rollback(sdi.ctx, tx)
|
||||||
panic(p)
|
panic(p)
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
shared.Rollback(tx)
|
rollback(sdi.ctx, tx)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
blockTx := &BlockTx{
|
blockTx := &BatchTx{
|
||||||
|
stm: sdi.dbWriter.db.InsertIPLDsStm(),
|
||||||
iplds: make(chan models.IPLDModel),
|
iplds: make(chan models.IPLDModel),
|
||||||
quit: make(chan struct{}),
|
quit: make(chan struct{}),
|
||||||
ipldCache: models.IPLDBatch{},
|
ipldCache: models.IPLDBatch{},
|
||||||
dbtx: tx,
|
dbtx: tx,
|
||||||
// handle transaction commit or rollback for any return case
|
// handle transaction commit or rollback for any return case
|
||||||
Close: func(self *BlockTx, err error) error {
|
close: func(self *BatchTx, err error) error {
|
||||||
close(self.quit)
|
close(self.quit)
|
||||||
close(self.iplds)
|
close(self.iplds)
|
||||||
if p := recover(); p != nil {
|
if p := recover(); p != nil {
|
||||||
shared.Rollback(tx)
|
rollback(sdi.ctx, tx)
|
||||||
panic(p)
|
panic(p)
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
shared.Rollback(tx)
|
rollback(sdi.ctx, tx)
|
||||||
} else {
|
} else {
|
||||||
tDiff := time.Since(t)
|
tDiff := time.Since(t)
|
||||||
indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
|
indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
|
||||||
traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
|
traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
|
||||||
t = time.Now()
|
t = time.Now()
|
||||||
if err := self.flush(); err != nil {
|
if err := self.flush(); err != nil {
|
||||||
shared.Rollback(tx)
|
rollback(sdi.ctx, tx)
|
||||||
traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
|
traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
|
||||||
log.Debug(traceMsg)
|
log.Debug(traceMsg)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = tx.Commit()
|
err = tx.Commit(sdi.ctx)
|
||||||
tDiff = time.Since(t)
|
tDiff = time.Since(t)
|
||||||
indexerMetrics.tPostgresCommit.Update(tDiff)
|
indexerMetrics.tPostgresCommit.Update(tDiff)
|
||||||
traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
|
traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
|
||||||
@ -231,14 +225,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
|||||||
traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
|
traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
|
||||||
t = time.Now()
|
t = time.Now()
|
||||||
|
|
||||||
blockTx.BlockNumber = height
|
|
||||||
blockTx.headerID = headerID
|
blockTx.headerID = headerID
|
||||||
return blockTx, err
|
return blockTx, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// processHeader publishes and indexes a header IPLD in Postgres
|
// processHeader publishes and indexes a header IPLD in Postgres
|
||||||
// it returns the headerID
|
// it returns the headerID
|
||||||
func (sdi *StateDiffIndexer) processHeader(tx *BlockTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) {
|
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) {
|
||||||
tx.cacheIPLD(headerNode)
|
tx.cacheIPLD(headerNode)
|
||||||
|
|
||||||
var baseFee *int64
|
var baseFee *int64
|
||||||
@ -267,7 +260,7 @@ func (sdi *StateDiffIndexer) processHeader(tx *BlockTx, header *types.Header, he
|
|||||||
}
|
}
|
||||||
|
|
||||||
// processUncles publishes and indexes uncle IPLDs in Postgres
|
// processUncles publishes and indexes uncle IPLDs in Postgres
|
||||||
func (sdi *StateDiffIndexer) processUncles(tx *BlockTx, headerID int64, blockNumber uint64, uncleNodes []*ipld.EthHeader) error {
|
func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID int64, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error {
|
||||||
// publish and index uncles
|
// publish and index uncles
|
||||||
for _, uncleNode := range uncleNodes {
|
for _, uncleNode := range uncleNodes {
|
||||||
tx.cacheIPLD(uncleNode)
|
tx.cacheIPLD(uncleNode)
|
||||||
@ -276,7 +269,7 @@ func (sdi *StateDiffIndexer) processUncles(tx *BlockTx, headerID int64, blockNum
|
|||||||
if sdi.chainConfig.Clique != nil {
|
if sdi.chainConfig.Clique != nil {
|
||||||
uncleReward = big.NewInt(0)
|
uncleReward = big.NewInt(0)
|
||||||
} else {
|
} else {
|
||||||
uncleReward = CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
|
uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
|
||||||
}
|
}
|
||||||
uncle := models.UncleModel{
|
uncle := models.UncleModel{
|
||||||
CID: uncleNode.Cid().String(),
|
CID: uncleNode.Cid().String(),
|
||||||
@ -298,17 +291,17 @@ type processArgs struct {
|
|||||||
blockNumber *big.Int
|
blockNumber *big.Int
|
||||||
receipts types.Receipts
|
receipts types.Receipts
|
||||||
txs types.Transactions
|
txs types.Transactions
|
||||||
rctNodes []*ipld.EthReceipt
|
rctNodes []*ipld2.EthReceipt
|
||||||
rctTrieNodes []*ipld.EthRctTrie
|
rctTrieNodes []*ipld2.EthRctTrie
|
||||||
txNodes []*ipld.EthTx
|
txNodes []*ipld2.EthTx
|
||||||
txTrieNodes []*ipld.EthTxTrie
|
txTrieNodes []*ipld2.EthTxTrie
|
||||||
logTrieNodes [][]*ipld.EthLogTrie
|
logTrieNodes [][]*ipld2.EthLogTrie
|
||||||
logLeafNodeCIDs [][]cid.Cid
|
logLeafNodeCIDs [][]cid.Cid
|
||||||
rctLeafNodeCIDs []cid.Cid
|
rctLeafNodeCIDs []cid.Cid
|
||||||
}
|
}
|
||||||
|
|
||||||
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
|
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
|
||||||
func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BlockTx, args processArgs) error {
|
func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error {
|
||||||
// Process receipts and txs
|
// Process receipts and txs
|
||||||
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
|
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
|
||||||
for i, receipt := range args.receipts {
|
for i, receipt := range args.receipts {
|
||||||
@ -434,8 +427,12 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BlockTx, args processArgs
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD database
|
// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql
|
||||||
func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateNode) error {
|
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode) error {
|
||||||
|
tx, ok := batch.(*BatchTx)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
|
||||||
|
}
|
||||||
// publish the state node
|
// publish the state node
|
||||||
if stateNode.NodeType == sdtypes.Removed {
|
if stateNode.NodeType == sdtypes.Removed {
|
||||||
// short circuit if it is a Removed node
|
// short circuit if it is a Removed node
|
||||||
@ -443,14 +440,14 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
|
|||||||
stateModel := models.StateNodeModel{
|
stateModel := models.StateNodeModel{
|
||||||
Path: stateNode.Path,
|
Path: stateNode.Path,
|
||||||
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
|
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
|
||||||
CID: RemovedNodeStateCID,
|
CID: shared.RemovedNodeStateCID,
|
||||||
MhKey: RemovedNodeMhKey,
|
MhKey: shared.RemovedNodeMhKey,
|
||||||
NodeType: stateNode.NodeType.Int(),
|
NodeType: stateNode.NodeType.Int(),
|
||||||
}
|
}
|
||||||
_, err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel, tx.headerID)
|
_, err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel, tx.headerID)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
|
stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
|
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
|
||||||
}
|
}
|
||||||
@ -497,8 +494,8 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
|
|||||||
storageModel := models.StorageNodeModel{
|
storageModel := models.StorageNodeModel{
|
||||||
Path: storageNode.Path,
|
Path: storageNode.Path,
|
||||||
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
|
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
|
||||||
CID: RemovedNodeStorageCID,
|
CID: shared.RemovedNodeStorageCID,
|
||||||
MhKey: RemovedNodeMhKey,
|
MhKey: shared.RemovedNodeMhKey,
|
||||||
NodeType: storageNode.NodeType.Int(),
|
NodeType: storageNode.NodeType.Int(),
|
||||||
}
|
}
|
||||||
if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel, stateID); err != nil {
|
if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel, stateID); err != nil {
|
||||||
@ -506,7 +503,7 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
|
|||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
|
storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
|
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
|
||||||
}
|
}
|
||||||
@ -525,8 +522,12 @@ func (sdi *StateDiffIndexer) PushStateNode(tx *BlockTx, stateNode sdtypes.StateN
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
-// PushCodeAndCodeHash publishes code and codehash pairs to the ipld database
-func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
+// PushCodeAndCodeHash publishes code and codehash pairs to the ipld sql
+func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
+	tx, ok := batch.(*BatchTx)
+	if !ok {
+		return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
+	}
 	// codec doesn't matter since db key is multihash-based
 	mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
 	if err != nil {
@@ -535,3 +536,8 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx *BlockTx, codeAndCodeHash sd
 	tx.cacheDirect(mhKey, codeAndCodeHash.Code)
 	return nil
 }
+
+// Close satisfied io.Closer
+func (sdi *StateDiffIndexer) Close() error {
+	return sdi.dbWriter.db.Close()
+}
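A short usage sketch for the refactored entry points; the helper name pushAllCode and the origin of the batch value are assumptions, with the batch expected to be the value returned by PushBlock.

func pushAllCode(sdi *StateDiffIndexer, batch interfaces.Batch, codeMappings []sdtypes.CodeAndCodeHash) error {
	for _, c := range codeMappings {
		// PushCodeAndCodeHash type-asserts the batch back to *BatchTx internally
		if err := sdi.PushCodeAndCodeHash(batch, c); err != nil {
			return err
		}
	}
	// whoever constructed the indexer is expected to call (or defer) sdi.Close()
	// once indexing is finished, releasing the underlying sql Database
	return nil
}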
|
@ -14,19 +14,21 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package indexer_test
|
package sql_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/multiformats/go-multihash"
|
"github.com/multiformats/go-multihash"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -39,12 +41,12 @@ func setupLegacy(t *testing.T) {
|
|||||||
mockLegacyBlock = legacyData.MockBlock
|
mockLegacyBlock = legacyData.MockBlock
|
||||||
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
|
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
|
||||||
|
|
||||||
db, err = shared.SetupDB()
|
db, err = test_helpers.SetupDB()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
ind, err = indexer.NewStateDiffIndexer(legacyData.Config, db)
|
ind, err = sql.NewSQLIndexer(context.Background(), legacyData.Config, db)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var tx *indexer.BlockTx
|
var tx *sql.BlockTx
|
||||||
tx, err = ind.PushBlock(
|
tx, err = ind.PushBlock(
|
||||||
mockLegacyBlock,
|
mockLegacyBlock,
|
||||||
legacyData.MockReceipts,
|
legacyData.MockReceipts,
|
||||||
@ -57,7 +59,7 @@ func setupLegacy(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
shared.ExpectEqual(t, tx.BlockNumber, legacyData.BlockNumber.Uint64())
|
test_helpers.ExpectEqual(t, tx.BlockNumber, legacyData.BlockNumber.Uint64())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexerLegacy(t *testing.T) {
|
func TestPublishAndIndexerLegacy(t *testing.T) {
|
||||||
@ -76,12 +78,12 @@ func TestPublishAndIndexerLegacy(t *testing.T) {
|
|||||||
BaseFee *int64 `db:"base_fee"`
|
BaseFee *int64 `db:"base_fee"`
|
||||||
}
|
}
|
||||||
header := new(res)
|
header := new(res)
|
||||||
err = db.QueryRowx(pgStr, legacyData.BlockNumber.Uint64()).StructScan(header)
|
err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).StructScan(header)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
shared.ExpectEqual(t, header.CID, legacyHeaderCID.String())
|
test_helpers.ExpectEqual(t, header.CID, legacyHeaderCID.String())
|
||||||
shared.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String())
|
test_helpers.ExpectEqual(t, header.TD, legacyData.MockBlock.Difficulty().String())
|
||||||
shared.ExpectEqual(t, header.Reward, "5000000000000011250")
|
test_helpers.ExpectEqual(t, header.Reward, "5000000000000011250")
|
||||||
require.Nil(t, legacyData.MockHeader.BaseFee)
|
require.Nil(t, legacyData.MockHeader.BaseFee)
|
||||||
require.Nil(t, header.BaseFee)
|
require.Nil(t, header.BaseFee)
|
||||||
})
|
})
|
@ -14,35 +14,37 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package indexer_test
|
package sql_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
||||||
dshelp "github.com/ipfs/go-ipfs-ds-help"
|
dshelp "github.com/ipfs/go-ipfs-ds-help"
|
||||||
"github.com/multiformats/go-multihash"
|
"github.com/multiformats/go-multihash"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
|
||||||
)
|
)
|
||||||
|
|
||||||
 var (
-	db  *postgres.DB
+	db  sql.Database
 	err error
-	ind *indexer.StateDiffIndexer
+	ind *interfaces.StateDiffIndexer
 	ipfsPgGet = `SELECT data FROM public.blocks
 				WHERE key = $1`
 	tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
@@ -135,13 +137,13 @@ func init() {
 }

 func setup(t *testing.T) {
-	db, err = shared.SetupDB()
+	db, err = test_helpers.SetupDB()
 	if err != nil {
 		t.Fatal(err)
 	}
-	ind, err = indexer.NewStateDiffIndexer(mocks.TestConfig, db)
+	ind, err = indexer.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
 	require.NoError(t, err)
-	var tx *indexer.BlockTx
+	var tx *sql.BlockTx
 	tx, err = ind.PushBlock(
 		mockBlock,
 		mocks.MockReceipts,
@@ -157,11 +159,14 @@ func setup(t *testing.T) {
 		}
 	}

-	shared.ExpectEqual(t, tx.BlockNumber, mocks.BlockNumber.Uint64())
+	test_helpers.ExpectEqual(t, tx.BlockNumber, mocks.BlockNumber.Uint64())
 }

 func tearDown(t *testing.T) {
-	indexer.TearDownDB(t, db)
+	sql.TearDownDB(t, db)
+	if err := ind.Close(); err != nil {
+		t.Fatal(err)
+	}
 }
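A sketch of how these helpers are typically wired into a test; TestExample and the header-count query are illustrative only and not part of this commit.

func TestExample(t *testing.T) {
	setup(t)
	defer tearDown(t)

	// the refactored Database interface threads a context through every call
	var count int
	pgStr := `SELECT COUNT(*) FROM eth.header_cids`
	if err := db.Get(context.Background(), &count, pgStr); err != nil {
		t.Fatal(err)
	}
	if count == 0 {
		t.Fatalf("expected at least one indexed header, got %d", count)
	}
}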
|
||||||
|
|
||||||
func TestPublishAndIndexer(t *testing.T) {
|
func TestPublishAndIndexer(t *testing.T) {
|
||||||
@ -180,14 +185,14 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
BaseFee *int64 `db:"base_fee"`
|
BaseFee *int64 `db:"base_fee"`
|
||||||
}
|
}
|
||||||
header := new(res)
|
header := new(res)
|
||||||
err = db.QueryRowx(pgStr, mocks.BlockNumber.Uint64()).StructScan(header)
|
err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).StructScan(header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, header.CID, headerCID.String())
|
test_helpers.ExpectEqual(t, header.CID, headerCID.String())
|
||||||
shared.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
|
test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String())
|
||||||
shared.ExpectEqual(t, header.Reward, "2000000000000021250")
|
test_helpers.ExpectEqual(t, header.Reward, "2000000000000021250")
|
||||||
shared.ExpectEqual(t, *header.BaseFee, mocks.MockHeader.BaseFee.Int64())
|
test_helpers.ExpectEqual(t, *header.BaseFee, mocks.MockHeader.BaseFee.Int64())
|
||||||
dc, err := cid.Decode(header.CID)
|
dc, err := cid.Decode(header.CID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -195,11 +200,11 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||||
var data []byte
|
var data []byte
|
||||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, data, mocks.MockHeaderRlp)
|
test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -209,16 +214,16 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
trxs := make([]string, 0)
|
trxs := make([]string, 0)
|
||||||
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
|
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
|
||||||
WHERE header_cids.block_number = $1`
|
WHERE header_cids.block_number = $1`
|
||||||
err = db.Select(&trxs, pgStr, mocks.BlockNumber.Uint64())
|
err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, len(trxs), 5)
|
test_helpers.ExpectEqual(t, len(trxs), 5)
|
||||||
expectTrue(t, shared.ListContainsString(trxs, trx1CID.String()))
|
expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String()))
|
||||||
expectTrue(t, shared.ListContainsString(trxs, trx2CID.String()))
|
expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String()))
|
||||||
expectTrue(t, shared.ListContainsString(trxs, trx3CID.String()))
|
expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String()))
|
||||||
expectTrue(t, shared.ListContainsString(trxs, trx4CID.String()))
|
expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String()))
|
||||||
expectTrue(t, shared.ListContainsString(trxs, trx5CID.String()))
|
expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String()))
|
||||||
// and published
|
// and published
|
||||||
for _, c := range trxs {
|
for _, c := range trxs {
|
||||||
dc, err := cid.Decode(c)
|
dc, err := cid.Decode(c)
|
||||||
@ -228,16 +233,16 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||||
var data []byte
|
var data []byte
|
||||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
switch c {
|
switch c {
|
||||||
case trx1CID.String():
|
case trx1CID.String():
|
||||||
shared.ExpectEqual(t, data, tx1)
|
test_helpers.ExpectEqual(t, data, tx1)
|
||||||
var txType *uint8
|
var txType *uint8
|
||||||
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
||||||
err = db.Get(&txType, pgStr, c)
|
err = db.Get(context.Background(), &txType, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -245,10 +250,10 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
t.Fatalf("expected nil tx_type, got %d", *txType)
|
t.Fatalf("expected nil tx_type, got %d", *txType)
|
||||||
}
|
}
|
||||||
case trx2CID.String():
|
case trx2CID.String():
|
||||||
shared.ExpectEqual(t, data, tx2)
|
test_helpers.ExpectEqual(t, data, tx2)
|
||||||
var txType *uint8
|
var txType *uint8
|
||||||
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
||||||
err = db.Get(&txType, pgStr, c)
|
err = db.Get(context.Background(), &txType, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -256,10 +261,10 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
t.Fatalf("expected nil tx_type, got %d", *txType)
|
t.Fatalf("expected nil tx_type, got %d", *txType)
|
||||||
}
|
}
|
||||||
case trx3CID.String():
|
case trx3CID.String():
|
||||||
shared.ExpectEqual(t, data, tx3)
|
test_helpers.ExpectEqual(t, data, tx3)
|
||||||
var txType *uint8
|
var txType *uint8
|
||||||
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
||||||
err = db.Get(&txType, pgStr, c)
|
err = db.Get(context.Background(), &txType, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -267,10 +272,10 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
t.Fatalf("expected nil tx_type, got %d", *txType)
|
t.Fatalf("expected nil tx_type, got %d", *txType)
|
||||||
}
|
}
|
||||||
case trx4CID.String():
|
case trx4CID.String():
|
||||||
shared.ExpectEqual(t, data, tx4)
|
test_helpers.ExpectEqual(t, data, tx4)
|
||||||
var txType *uint8
|
var txType *uint8
|
||||||
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
||||||
err = db.Get(&txType, pgStr, c)
|
err = db.Get(context.Background(), &txType, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -279,7 +284,7 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
accessListElementModels := make([]models.AccessListElementModel, 0)
|
accessListElementModels := make([]models.AccessListElementModel, 0)
|
||||||
pgStr = `SELECT access_list_element.* FROM eth.access_list_element INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.id) WHERE cid = $1 ORDER BY access_list_element.index ASC`
|
pgStr = `SELECT access_list_element.* FROM eth.access_list_element INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.id) WHERE cid = $1 ORDER BY access_list_element.index ASC`
|
||||||
err = db.Select(&accessListElementModels, pgStr, c)
|
err = db.Select(context.Background(), &accessListElementModels, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -295,13 +300,13 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
Address: accessListElementModels[1].Address,
|
Address: accessListElementModels[1].Address,
|
||||||
StorageKeys: accessListElementModels[1].StorageKeys,
|
StorageKeys: accessListElementModels[1].StorageKeys,
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, model1, mocks.AccessListEntry1Model)
|
test_helpers.ExpectEqual(t, model1, mocks.AccessListEntry1Model)
|
||||||
shared.ExpectEqual(t, model2, mocks.AccessListEntry2Model)
|
test_helpers.ExpectEqual(t, model2, mocks.AccessListEntry2Model)
|
||||||
case trx5CID.String():
|
case trx5CID.String():
|
||||||
shared.ExpectEqual(t, data, tx5)
|
test_helpers.ExpectEqual(t, data, tx5)
|
||||||
var txType *uint8
|
var txType *uint8
|
||||||
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
pgStr = `SELECT tx_type FROM eth.transaction_cids WHERE cid = $1`
|
||||||
err = db.Get(&txType, pgStr, c)
|
err = db.Get(context.Background(), &txType, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -322,7 +327,7 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
AND transaction_cids.header_id = header_cids.id
|
AND transaction_cids.header_id = header_cids.id
|
||||||
AND header_cids.block_number = $1
|
AND header_cids.block_number = $1
|
||||||
ORDER BY transaction_cids.index`
|
ORDER BY transaction_cids.index`
|
||||||
err = db.Select(&rcts, pgStr, mocks.BlockNumber.Uint64())
|
err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -340,12 +345,12 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
INNER JOIN eth.receipt_cids ON (log_cids.receipt_id = receipt_cids.id)
|
INNER JOIN eth.receipt_cids ON (log_cids.receipt_id = receipt_cids.id)
|
||||||
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
|
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
|
||||||
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
|
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
|
||||||
err = db.Select(&results, pgStr, rcts[i])
|
err = db.Select(context.Background(), &results, pgStr, rcts[i])
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// expecting MockLog1 and MockLog2 for mockReceipt4
|
// expecting MockLog1 and MockLog2 for mockReceipt4
|
||||||
expectedLogs := mocks.MockReceipts[i].Logs
|
expectedLogs := mocks.MockReceipts[i].Logs
|
||||||
shared.ExpectEqual(t, len(results), len(expectedLogs))
|
test_helpers.ExpectEqual(t, len(results), len(expectedLogs))
|
||||||
|
|
||||||
var nodeElements []interface{}
|
var nodeElements []interface{}
|
||||||
for idx, r := range results {
|
for idx, r := range results {
|
||||||
@ -357,7 +362,7 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// 2nd element of the leaf node contains the encoded log data.
|
// 2nd element of the leaf node contains the encoded log data.
|
||||||
shared.ExpectEqual(t, logRaw, nodeElements[1].([]byte))
|
test_helpers.ExpectEqual(t, logRaw, nodeElements[1].([]byte))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -372,19 +377,19 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
WHERE receipt_cids.tx_id = transaction_cids.id
|
WHERE receipt_cids.tx_id = transaction_cids.id
|
||||||
AND transaction_cids.header_id = header_cids.id
|
AND transaction_cids.header_id = header_cids.id
|
||||||
AND header_cids.block_number = $1 order by transaction_cids.id`
|
AND header_cids.block_number = $1 order by transaction_cids.id`
|
||||||
err = db.Select(&rcts, pgStr, mocks.BlockNumber.Uint64())
|
err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, len(rcts), 5)
|
test_helpers.ExpectEqual(t, len(rcts), 5)
|
||||||
|
|
||||||
for idx, rctLeafCID := range rcts {
|
for idx, rctLeafCID := range rcts {
|
||||||
result := make([]ipfs.BlockModel, 0)
|
result := make([]models.IPLDModel, 0)
|
||||||
pgStr = `SELECT data
|
pgStr = `SELECT data
|
||||||
FROM eth.receipt_cids
|
FROM eth.receipt_cids
|
||||||
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
|
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
|
||||||
WHERE receipt_cids.leaf_cid = $1`
|
WHERE receipt_cids.leaf_cid = $1`
|
||||||
err = db.Select(&result, pgStr, rctLeafCID)
|
err = db.Select(context.Background(), &result, pgStr, rctLeafCID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -397,7 +402,7 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
expectedRct, err := mocks.MockReceipts[idx].MarshalBinary()
|
expectedRct, err := mocks.MockReceipts[idx].MarshalBinary()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
shared.ExpectEqual(t, expectedRct, nodeElements[1].([]byte))
|
test_helpers.ExpectEqual(t, expectedRct, nodeElements[1].([]byte))
|
||||||
}
|
}
|
||||||
|
|
||||||
// and published
|
// and published
|
||||||
@ -409,57 +414,57 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
mhKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||||
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
|
||||||
var data []byte
|
var data []byte
|
||||||
err = db.Get(&data, ipfsPgGet, prefixedKey)
|
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch c {
|
switch c {
|
||||||
case rct1CID.String():
|
case rct1CID.String():
|
||||||
shared.ExpectEqual(t, data, rct1)
|
test_helpers.ExpectEqual(t, data, rct1)
|
||||||
var postStatus uint64
|
var postStatus uint64
|
||||||
pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1`
|
pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1`
|
||||||
err = db.Get(&postStatus, pgStr, c)
|
err = db.Get(context.Background(), &postStatus, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus)
|
test_helpers.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus)
|
||||||
case rct2CID.String():
|
case rct2CID.String():
|
||||||
shared.ExpectEqual(t, data, rct2)
|
test_helpers.ExpectEqual(t, data, rct2)
|
||||||
var postState string
|
var postState string
|
||||||
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
||||||
err = db.Get(&postState, pgStr, c)
|
err = db.Get(context.Background(), &postState, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, postState, mocks.ExpectedPostState1)
|
test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState1)
|
||||||
case rct3CID.String():
|
case rct3CID.String():
|
||||||
shared.ExpectEqual(t, data, rct3)
|
test_helpers.ExpectEqual(t, data, rct3)
|
||||||
var postState string
|
var postState string
|
||||||
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
||||||
err = db.Get(&postState, pgStr, c)
|
err = db.Get(context.Background(), &postState, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, postState, mocks.ExpectedPostState2)
|
test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState2)
|
||||||
case rct4CID.String():
|
case rct4CID.String():
|
||||||
shared.ExpectEqual(t, data, rct4)
|
test_helpers.ExpectEqual(t, data, rct4)
|
||||||
var postState string
|
var postState string
|
||||||
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
||||||
err = db.Get(&postState, pgStr, c)
|
err = db.Get(context.Background(), &postState, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, postState, mocks.ExpectedPostState3)
|
test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
|
||||||
case rct5CID.String():
|
case rct5CID.String():
|
||||||
shared.ExpectEqual(t, data, rct5)
|
test_helpers.ExpectEqual(t, data, rct5)
|
||||||
var postState string
|
var postState string
|
||||||
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
pgStr = `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
|
||||||
err = db.Get(&postState, pgStr, c)
|
err = db.Get(context.Background(), &postState, pgStr, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
shared.ExpectEqual(t, postState, mocks.ExpectedPostState3)
|
test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -472,11 +477,11 @@ func TestPublishAndIndexer(t *testing.T) {
|
|||||||
pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1 AND node_type != 3`
err = db.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64())
err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, len(stateNodes), 2)
test_helpers.ExpectEqual(t, len(stateNodes), 2)
for _, stateNode := range stateNodes {
var data []byte
dc, err := cid.Decode(stateNode.CID)
@ -485,22 +490,22 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(&data, ipfsPgGet, prefixedKey)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
var account models.StateAccountModel
err = db.Get(&account, pgStr, stateNode.ID)
err = db.Get(context.Background(), &account, pgStr, stateNode.ID)
if err != nil {
t.Fatal(err)
}
if stateNode.CID == state1CID.String() {
shared.ExpectEqual(t, stateNode.NodeType, 2)
test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
shared.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
shared.ExpectEqual(t, data, mocks.ContractLeafNode)
test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
shared.ExpectEqual(t, account, models.StateAccountModel{
test_helpers.ExpectEqual(t, account, models.StateAccountModel{
ID: account.ID,
StateID: stateNode.ID,
Balance: "0",
@ -510,11 +515,11 @@ func TestPublishAndIndexer(t *testing.T) {
})
}
if stateNode.CID == state2CID.String() {
shared.ExpectEqual(t, stateNode.NodeType, 2)
test_helpers.ExpectEqual(t, stateNode.NodeType, 2)
shared.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
shared.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
shared.ExpectEqual(t, data, mocks.AccountLeafNode)
test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
shared.ExpectEqual(t, account, models.StateAccountModel{
test_helpers.ExpectEqual(t, account, models.StateAccountModel{
ID: account.ID,
StateID: stateNode.ID,
Balance: "1000",
@ -530,11 +535,11 @@ func TestPublishAndIndexer(t *testing.T) {
pgStr = `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1 AND node_type = 3`
err = db.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64())
err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, len(stateNodes), 1)
test_helpers.ExpectEqual(t, len(stateNodes), 1)
stateNode := stateNodes[0]
var data []byte
dc, err := cid.Decode(stateNode.CID)
@ -543,14 +548,14 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
shared.ExpectEqual(t, prefixedKey, indexer.RemovedNodeMhKey)
test_helpers.ExpectEqual(t, prefixedKey, sql.RemovedNodeMhKey)
err = db.Get(&data, ipfsPgGet, prefixedKey)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, stateNode.CID, indexer.RemovedNodeStateCID)
test_helpers.ExpectEqual(t, stateNode.CID, sql.RemovedNodeStateCID)
shared.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'})
shared.ExpectEqual(t, data, []byte{})
test_helpers.ExpectEqual(t, data, []byte{})
})

t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
@ -564,12 +569,12 @@ func TestPublishAndIndexer(t *testing.T) {
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1
AND storage_cids.node_type != 3`
err = db.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64())
err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, len(storageNodes), 1)
test_helpers.ExpectEqual(t, len(storageNodes), 1)
shared.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
CID: storageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
@ -583,11 +588,11 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(&data, ipfsPgGet, prefixedKey)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, data, mocks.StorageLeafNode)
test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)

// check that Removed storage nodes were properly indexed
storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
@ -597,13 +602,13 @@ func TestPublishAndIndexer(t *testing.T) {
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1
AND storage_cids.node_type = 3`
err = db.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64())
err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, len(storageNodes), 1)
test_helpers.ExpectEqual(t, len(storageNodes), 1)
shared.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
CID: indexer.RemovedNodeStorageCID,
CID: sql.RemovedNodeStorageCID,
NodeType: 3,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
@ -615,11 +620,11 @@ func TestPublishAndIndexer(t *testing.T) {
}
mhKey = dshelp.MultihashToDsKey(dc.Hash())
prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
shared.ExpectEqual(t, prefixedKey, indexer.RemovedNodeMhKey)
test_helpers.ExpectEqual(t, prefixedKey, sql.RemovedNodeMhKey)
err = db.Get(&data, ipfsPgGet, prefixedKey)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey)
if err != nil {
t.Fatal(err)
}
shared.ExpectEqual(t, data, []byte{})
test_helpers.ExpectEqual(t, data, []byte{})
})
}
88
statediff/indexer/database/sql/interfaces.go
Normal file
@ -0,0 +1,88 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package sql

import (
"context"
"io"
"time"
)

// Database interfaces required by the sql indexer
type Database interface {
Driver
Statements
}

// Driver interface has all the methods required by a driver implementation to support the sql indexer
type Driver interface {
QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error
Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error
Begin(ctx context.Context) (Tx, error)
Stats() Stats
NodeID() int64
Context() context.Context
io.Closer
}

// Statements interface to accommodate different SQL query syntax
type Statements interface {
InsertHeaderStm() string
InsertUncleStm() string
InsertTxStm() string
InsertAccessListElementStm() string
InsertRctStm() string
InsertLogStm() string
InsertStateStm() string
InsertAccountStm() string
InsertStorageStm() string
InsertIPLDStm() string
InsertIPLDsStm() string
}

// Tx interface to accommodate different concrete SQL transaction types
type Tx interface {
QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
Commit(ctx context.Context) error
Rollback(ctx context.Context) error
}

// ScannableRow interface to accommodate different concrete row types
type ScannableRow interface {
Scan(dest ...interface{}) error
StructScan(dest interface{}) error
}

// Result interface to accommodate different concrete result types
type Result interface {
RowsAffected() (int64, error)
}

// Stats interface to accommodate different concrete sql stats types
type Stats interface {
MaxOpen() int64
Open() int64
InUse() int64
Idle() int64
WaitCount() int64
WaitDuration() time.Duration
MaxIdleClosed() int64
MaxLifetimeClosed() int64
}
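For orientation, a brief hedged sketch (not part of this commit) of how indexer code can stay driver-agnostic by programming against these interfaces; the helper name and query below are illustrative assumptions only:

package sql

import "context"

// countHeaders is a hypothetical helper: it only depends on the Database
// interface above, so it works unchanged over either the pgx or the sqlx driver.
func countHeaders(ctx context.Context, db Database) (int64, error) {
	var count int64
	// QueryRow and Scan come from the Driver and ScannableRow interfaces.
	err := db.QueryRow(ctx, `SELECT COUNT(*) FROM eth.header_cids`).Scan(&count)
	return count, err
}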
@ -1,7 +1,22 @@
package indexer
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package sql

import (
"database/sql"
"strings"

"github.com/ethereum/go-ethereum/metrics"
@ -79,7 +94,7 @@ func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles {
}

type dbMetricsHandles struct {
// Maximum number of open connections to the database
// Maximum number of open connections to the sql
maxOpen metrics.Gauge
// The number of established connections both in use and idle
open metrics.Gauge
@ -120,13 +135,13 @@ func RegisterDBMetrics(reg metrics.Registry) dbMetricsHandles {
return ctx
}

func (met *dbMetricsHandles) Update(stats sql.DBStats) {
func (met *dbMetricsHandles) Update(stats Stats) {
met.maxOpen.Update(int64(stats.MaxOpenConnections))
met.maxOpen.Update(stats.MaxOpen())
met.open.Update(int64(stats.OpenConnections))
met.open.Update(stats.Open())
met.inUse.Update(int64(stats.InUse))
met.inUse.Update(stats.InUse())
met.idle.Update(int64(stats.Idle))
met.idle.Update(stats.Idle())
met.waitedFor.Inc(stats.WaitCount)
met.waitedFor.Inc(stats.WaitCount())
met.blockedMilliseconds.Inc(stats.WaitDuration.Milliseconds())
met.blockedMilliseconds.Inc(stats.WaitDuration().Milliseconds())
met.closedMaxIdle.Inc(stats.MaxIdleClosed)
met.closedMaxIdle.Inc(stats.MaxIdleClosed())
met.closedMaxLifetime.Inc(stats.MaxLifetimeClosed)
met.closedMaxLifetime.Inc(stats.MaxLifetimeClosed())
}
81
statediff/indexer/database/sql/postgres/config.go
Normal file
@ -0,0 +1,81 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import (
"fmt"
"time"

"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

type DriverType string

const (
PGX DriverType = "PGX"
SQLX DriverType = "SQLX"
)

// DefaultConfig are default parameters for connecting to a Postgres database
var DefaultConfig = Config{
Hostname: "localhost",
Port: 5432,
DatabaseName: "vulcanize_test",
Username: "postgres",
Password: "",
}

// Config holds params for a Postgres db
type Config struct {
// conn string params
Hostname string
Port int
DatabaseName string
Username string
Password string

// conn settings
MaxConns int
MaxIdle int
MinConns int
MaxConnIdleTime time.Duration
MaxConnLifetime time.Duration
ConnTimeout time.Duration

// node info params
ID string
ClientName string

// driver type
Driver DriverType
}

func (c Config) Type() shared.DBType {
return shared.POSTGRES
}

func (c Config) DbConnectionString() string {
if len(c.Username) > 0 && len(c.Password) > 0 {
return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
c.Username, c.Password, c.Hostname, c.Port, c.DatabaseName)
}
if len(c.Username) > 0 && len(c.Password) == 0 {
return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
c.Username, c.Hostname, c.Port, c.DatabaseName)
}
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", c.Hostname, c.Port, c.DatabaseName)
}
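A quick hedged illustration of the connection string this produces; the password override is hypothetical, everything else comes from DefaultConfig above:

package postgres_example // illustrative only, not part of this commit

import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"

func exampleConnString() string {
	cfg := postgres.DefaultConfig
	cfg.Password = "secret" // hypothetical override
	// Yields: postgresql://postgres:secret@localhost:5432/vulcanize_test?sslmode=disable
	return cfg.DbConnectionString()
}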
112
statediff/indexer/database/sql/postgres/database.go
Normal file
@ -0,0 +1,112 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"

var _ sql.Database = &DB{}

const (
createNodeStm = `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (genesis_block, network_id, node_id, chain_id)
DO UPDATE
SET genesis_block = $1,
network_id = $2,
node_id = $3,
client_name = $4,
chain_id = $5
RETURNING id`
)

// NewPostgresDB returns a postgres.DB using the provided driver
func NewPostgresDB(driver sql.Driver) *DB {
return &DB{driver}
}

// DB implements sql.Database using a configured driver and Postgres statement syntax
type DB struct {
sql.Driver
}

// InsertHeaderStm satisfies the sql.Statements interface
func (db *DB) InsertHeaderStm() string {
return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
RETURNING id`
}

// InsertUncleStm satisfies the sql.Statements interface
func (db *DB) InsertUncleStm() string {
return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`
}

// InsertTxStm satisfies the sql.Statements interface
func (db *DB) InsertTxStm() string {
return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9)
RETURNING id`
}

// InsertAccessListElementStm satisfies the sql.Statements interface
func (db *DB) InsertAccessListElementStm() string {
return `INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)`
}

// InsertRctStm satisfies the sql.Statements interface
func (db *DB) InsertRctStm() string {
return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8)
RETURNING id`
}

// InsertLogStm satisfies the sql.Statements interface
func (db *DB) InsertLogStm() string {
return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = ($1, $2, $4, $6, $7, $8, $9, $10)`
}

// InsertStateStm satisfies the sql.Statements interface
func (db *DB) InsertStateStm() string {
return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
RETURNING id`
}

// InsertAccountStm satisfies the sql.Statements interface
func (db *DB) InsertAccountStm() string {
return `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`
}

// InsertStorageStm satisfies the sql.Statements interface
func (db *DB) InsertStorageStm() string {
return `INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`
}

// InsertIPLDStm satisfies the sql.Statements interface
func (db *DB) InsertIPLDStm() string {
return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
}

// InsertIPLDsStm satisfies the sql.Statements interface
func (db *DB) InsertIPLDsStm() string {
return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING`
}
255
statediff/indexer/database/sql/postgres/pgx.go
Normal file
@ -0,0 +1,255 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import (
"context"
"time"

"github.com/georgysavva/scany/pgxscan"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"

"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

// PGXDriver driver, implements sql.Driver
type PGXDriver struct {
ctx context.Context
pool *pgxpool.Pool
nodeInfo node.Info
nodeID int64
}

// NewPGXDriver returns a new pgx driver
// it initializes the connection pool and creates the node info table
func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
pgConf, err := MakeConfig(config)
if err != nil {
return nil, err
}
dbPool, err := pgxpool.ConnectConfig(ctx, pgConf)
if err != nil {
return nil, ErrDBConnectionFailed(err)
}
pg := &PGXDriver{ctx: ctx, pool: dbPool, nodeInfo: node}
nodeErr := pg.createNode()
if nodeErr != nil {
return &PGXDriver{}, ErrUnableToSetNode(nodeErr)
}
return pg, nil
}

// MakeConfig creates a pgxpool.Config from the provided Config
func MakeConfig(config Config) (*pgxpool.Config, error) {
conf, err := pgxpool.ParseConfig("")
if err != nil {
return nil, err
}

//conf.ConnConfig.BuildStatementCache = nil
conf.ConnConfig.Config.Host = config.Hostname
conf.ConnConfig.Config.Port = uint16(config.Port)
conf.ConnConfig.Config.Database = config.DatabaseName
conf.ConnConfig.Config.User = config.Username
conf.ConnConfig.Config.Password = config.Password

if config.ConnTimeout != 0 {
conf.ConnConfig.Config.ConnectTimeout = config.ConnTimeout
}
if config.MaxConns != 0 {
conf.MaxConns = int32(config.MaxConns)
}
if config.MinConns != 0 {
conf.MinConns = int32(config.MinConns)
}
if config.MaxConnLifetime != 0 {
conf.MaxConnLifetime = config.MaxConnLifetime
}
if config.MaxConnIdleTime != 0 {
conf.MaxConnIdleTime = config.MaxConnIdleTime
}
return conf, nil
}

func (pgx *PGXDriver) createNode() error {
var nodeID int64
err := pgx.pool.QueryRow(
pgx.ctx,
createNodeStm,
pgx.nodeInfo.GenesisBlock, pgx.nodeInfo.NetworkID,
pgx.nodeInfo.ID, pgx.nodeInfo.ClientName,
pgx.nodeInfo.ChainID).Scan(&nodeID)
if err != nil {
return ErrUnableToSetNode(err)
}
pgx.nodeID = nodeID
return nil
}

// QueryRow satisfies sql.Database
func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
row := pgx.pool.QueryRow(ctx, sql, args...)
return rowWrapper{row: row}
}

// Exec satisfies sql.Database
func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
res, err := pgx.pool.Exec(ctx, sql, args...)
return resultWrapper{ct: res}, err
}

// Select satisfies sql.Database
func (pgx *PGXDriver) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return pgxscan.Select(ctx, pgx.pool, dest, query, args...)
}

// Get satisfies sql.Database
func (pgx *PGXDriver) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
return pgxscan.Get(ctx, pgx.pool, dest, query, args...)
}

// Begin satisfies sql.Database
func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) {
tx, err := pgx.pool.Begin(ctx)
if err != nil {
return nil, err
}
return pgxTxWrapper{tx: tx}, nil
}

func (pgx *PGXDriver) Stats() sql.Stats {
stats := pgx.pool.Stat()
return pgxStatsWrapper{stats: stats}
}

// NodeInfo satisfies sql.Database
func (pgx *PGXDriver) NodeInfo() node.Info {
return pgx.nodeInfo
}

// NodeID satisfies sql.Database
func (pgx *PGXDriver) NodeID() int64 {
return pgx.nodeID
}

// Close satisfies sql.Database/io.Closer
func (pgx *PGXDriver) Close() error {
pgx.pool.Close()
return nil
}

// Context satisfies sql.Database
func (pgx *PGXDriver) Context() context.Context {
return pgx.ctx
}

type rowWrapper struct {
row pgx.Row
}

// Scan satisfies sql.ScannableRow
func (r rowWrapper) Scan(dest ...interface{}) error {
return r.row.Scan(dest...)
}

// StructScan satisfies sql.ScannableRow
func (r rowWrapper) StructScan(dest interface{}) error {
return pgxscan.ScanRow(dest, r.row.(pgx.Rows))
}

type resultWrapper struct {
ct pgconn.CommandTag
}

// RowsAffected satisfies sql.Result
func (r resultWrapper) RowsAffected() (int64, error) {
return r.ct.RowsAffected(), nil
}

type pgxStatsWrapper struct {
stats *pgxpool.Stat
}

// MaxOpen satisfies sql.Stats
func (s pgxStatsWrapper) MaxOpen() int64 {
return int64(s.stats.MaxConns())
}

// Open satisfies sql.Stats
func (s pgxStatsWrapper) Open() int64 {
return int64(s.stats.TotalConns())
}

// InUse satisfies sql.Stats
func (s pgxStatsWrapper) InUse() int64 {
return int64(s.stats.AcquiredConns())
}

// Idle satisfies sql.Stats
func (s pgxStatsWrapper) Idle() int64 {
return int64(s.stats.IdleConns())
}

// WaitCount satisfies sql.Stats
func (s pgxStatsWrapper) WaitCount() int64 {
return s.stats.EmptyAcquireCount()
}

// WaitDuration satisfies sql.Stats
func (s pgxStatsWrapper) WaitDuration() time.Duration {
return s.stats.AcquireDuration()
}

// MaxIdleClosed satisfies sql.Stats
func (s pgxStatsWrapper) MaxIdleClosed() int64 {
// this stat isn't supported by pgxpool, but we don't want to panic
return 0
}

// MaxLifetimeClosed satisfies sql.Stats
func (s pgxStatsWrapper) MaxLifetimeClosed() int64 {
return s.stats.CanceledAcquireCount()
}

type pgxTxWrapper struct {
tx pgx.Tx
}

// QueryRow satisfies sql.Tx
func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
row := t.tx.QueryRow(ctx, sql, args...)
return rowWrapper{row: row}
}

// Exec satisfies sql.Tx
func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
res, err := t.tx.Exec(ctx, sql, args...)
return resultWrapper{ct: res}, err
}

// Commit satisfies sql.Tx
func (t pgxTxWrapper) Commit(ctx context.Context) error {
return t.tx.Commit(ctx)
}

// Rollback satisfies sql.Tx
func (t pgxTxWrapper) Rollback(ctx context.Context) error {
return t.tx.Rollback(ctx)
}
122
statediff/indexer/database/sql/postgres/pgx_test.go
Normal file
@ -0,0 +1,122 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres_test

import (
"context"
"fmt"
"math/big"
"strings"
"testing"

"github.com/jackc/pgx/pgtype"
"github.com/jackc/pgx/v4/pgxpool"

"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)

var (
pgConfig, _ = postgres.MakeConfig(postgres.DefaultConfig)
ctx = context.Background()
)

func expectContainsSubstring(t *testing.T, full string, sub string) {
if !strings.Contains(full, sub) {
t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub)
}
}

func TestPostgresPGX(t *testing.T) {
t.Run("connects to the sql", func(t *testing.T) {
dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig)
if err != nil {
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err)
}
defer dbPool.Close()
if dbPool == nil {
t.Fatal("DB pool is nil")
}
})

t.Run("serializes big.Int to db", func(t *testing.T) {
// postgres driver doesn't support go big.Int type
// various casts in golang uint64, int64, overflow for
// transaction value (in wei) even though
// postgres numeric can handle an arbitrary
// sized int, so use string representation of big.Int
// and cast on insert

dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig)
if err != nil {
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err)
}
defer dbPool.Close()

bi := new(big.Int)
bi.SetString("34940183920000000000", 10)
test_helpers.ExpectEqual(t, bi.String(), "34940183920000000000")

defer dbPool.Exec(ctx, `DROP TABLE IF EXISTS example`)
_, err = dbPool.Exec(ctx, "CREATE TABLE example ( id INTEGER, data NUMERIC )")
if err != nil {
t.Fatal(err)
}

sqlStatement := `
INSERT INTO example (id, data)
VALUES (1, cast($1 AS NUMERIC))`
_, err = dbPool.Exec(ctx, sqlStatement, bi.String())
if err != nil {
t.Fatal(err)
}

var data pgtype.Numeric
err = dbPool.QueryRow(ctx, `SELECT data FROM example WHERE id = 1`).Scan(&data)
if err != nil {
t.Fatal(err)
}

test_helpers.ExpectEqual(t, bi.String(), data)
actual := new(big.Int)
actual.Set(data.Int)
test_helpers.ExpectEqual(t, actual, bi)
})

t.Run("throws error when can't connect to the database", func(t *testing.T) {
goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
_, err := postgres.NewPGXDriver(ctx, postgres.Config{}, goodInfo)
if err == nil {
t.Fatal("Expected an error")
}

expectContainsSubstring(t, err.Error(), postgres.DbConnectionFailedMsg)
})

t.Run("throws error when can't create node", func(t *testing.T) {
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}

_, err := postgres.NewPGXDriver(ctx, postgres.DefaultConfig, badInfo)
if err == nil {
t.Fatal("Expected an error")
}

expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg)
})
}
197
statediff/indexer/database/sql/postgres/sqlx.go
Normal file
@ -0,0 +1,197 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import (
"context"
coresql "database/sql"
"time"

"github.com/jmoiron/sqlx"

"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

// SQLXDriver driver, implements sql.Driver
type SQLXDriver struct {
ctx context.Context
db *sqlx.DB
nodeInfo node.Info
nodeID int64
}

// NewSQLXDriver returns a new sqlx driver for Postgres
// it initializes the connection pool and creates the node info table
func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString())
if err != nil {
return &SQLXDriver{}, ErrDBConnectionFailed(err)
}
if config.MaxConns > 0 {
db.SetMaxOpenConns(config.MaxConns)
}
if config.MaxIdle > 0 {
db.SetMaxIdleConns(config.MaxIdle)
}
if config.MaxConnLifetime > 0 {
lifetime := config.MaxConnLifetime
db.SetConnMaxLifetime(lifetime)
}
driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node}
if err := driver.createNode(); err != nil {
return &SQLXDriver{}, ErrUnableToSetNode(err)
}
return driver, nil
}

func (driver *SQLXDriver) createNode() error {
var nodeID int64
err := driver.db.QueryRowx(
createNodeStm,
driver.nodeInfo.GenesisBlock, driver.nodeInfo.NetworkID,
driver.nodeInfo.ID, driver.nodeInfo.ClientName,
driver.nodeInfo.ChainID).Scan(&nodeID)
if err != nil {
return ErrUnableToSetNode(err)
}
driver.nodeID = nodeID
return nil
}

// QueryRow satisfies sql.Database
func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) sql.ScannableRow {
return driver.db.QueryRowx(sql, args...)
}

// Exec satisfies sql.Database
func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (sql.Result, error) {
return driver.db.Exec(sql, args...)
}

// Select satisfies sql.Database
func (driver *SQLXDriver) Select(_ context.Context, dest interface{}, query string, args ...interface{}) error {
return driver.db.Select(dest, query, args...)
}

// Get satisfies sql.Database
func (driver *SQLXDriver) Get(_ context.Context, dest interface{}, query string, args ...interface{}) error {
return driver.db.Get(dest, query, args...)
}

// Begin satisfies sql.Database
func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) {
tx, err := driver.db.Beginx()
if err != nil {
return nil, err
}
return sqlxTxWrapper{tx: tx}, nil
}

func (driver *SQLXDriver) Stats() sql.Stats {
stats := driver.db.Stats()
return sqlxStatsWrapper{stats: stats}
}

// NodeInfo satisfies sql.Database
func (driver *SQLXDriver) NodeInfo() node.Info {
return driver.nodeInfo
}

// NodeID satisfies sql.Database
func (driver *SQLXDriver) NodeID() int64 {
return driver.nodeID
}

// Close satisfies sql.Database/io.Closer
func (driver *SQLXDriver) Close() error {
return driver.db.Close()
}

// Context satisfies sql.Database
func (driver *SQLXDriver) Context() context.Context {
return driver.ctx
}

type sqlxStatsWrapper struct {
stats coresql.DBStats
}

// MaxOpen satisfies sql.Stats
func (s sqlxStatsWrapper) MaxOpen() int64 {
return int64(s.stats.MaxOpenConnections)
}

// Open satisfies sql.Stats
func (s sqlxStatsWrapper) Open() int64 {
return int64(s.stats.OpenConnections)
}

// InUse satisfies sql.Stats
func (s sqlxStatsWrapper) InUse() int64 {
return int64(s.stats.InUse)
}

// Idle satisfies sql.Stats
func (s sqlxStatsWrapper) Idle() int64 {
return int64(s.stats.Idle)
}

// WaitCount satisfies sql.Stats
func (s sqlxStatsWrapper) WaitCount() int64 {
return s.stats.WaitCount
}

// WaitDuration satisfies sql.Stats
func (s sqlxStatsWrapper) WaitDuration() time.Duration {
return s.stats.WaitDuration
}

// MaxIdleClosed satisfies sql.Stats
func (s sqlxStatsWrapper) MaxIdleClosed() int64 {
return s.stats.MaxIdleClosed
}

// MaxLifetimeClosed satisfies sql.Stats
func (s sqlxStatsWrapper) MaxLifetimeClosed() int64 {
return s.stats.MaxLifetimeClosed
}

type sqlxTxWrapper struct {
tx *sqlx.Tx
}

// QueryRow satisfies sql.Tx
func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
row := t.tx.QueryRow(sql, args...)
return rowWrapper{row: row}
}

// Exec satisfies sql.Tx
func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
return t.tx.Exec(sql, args...)
}

// Commit satisfies sql.Tx
func (t sqlxTxWrapper) Commit(ctx context.Context) error {
return t.tx.Commit()
}

// Rollback satisfies sql.Tx
func (t sqlxTxWrapper) Rollback(ctx context.Context) error {
return t.tx.Rollback()
}
@ -25,33 +25,19 @@ import (
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"

"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

var DBParams = postgres.ConnectionParams{
func TestPostgresSQLX(t *testing.T) {
Name: "vulcanize_public",
Password: "password",
Port: 5432,
Hostname: "localhost",
User: "vdbm",
}

func expectContainsSubstring(t *testing.T, full string, sub string) {
if !strings.Contains(full, sub) {
t.Fatalf("Expected \"%v\" to contain substring \"%v\"\n", full, sub)
}
}

func TestPostgresDB(t *testing.T) {
var sqlxdb *sqlx.DB

t.Run("connects to the database", func(t *testing.T) {
var err error
pgConfig := postgres.DbConnectionString(DBParams)
connStr := postgres.DefaultConfig.DbConnectionString()

sqlxdb, err = sqlx.Connect("postgres", pgConfig)
sqlxdb, err = sqlx.Connect("postgres", connStr)

if err != nil {
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig, err)
@ -69,8 +55,8 @@ func TestPostgresDB(t *testing.T) {
// sized int, so use string representation of big.Int
// and cast on insert

pgConnectString := postgres.DbConnectionString(DBParams)
connStr := postgres.DefaultConfig.DbConnectionString()
db, err := sqlx.Connect("postgres", pgConnectString)
db, err := sqlx.Connect("postgres", connStr)
if err != nil {
t.Fatal(err)
}
@ -80,7 +66,7 @@ func TestPostgresDB(t *testing.T) {

bi := new(big.Int)
bi.SetString("34940183920000000000", 10)
shared.ExpectEqual(t, bi.String(), "34940183920000000000")
test_helpers.ExpectEqual(t, bi.String(), "34940183920000000000")

defer db.Exec(`DROP TABLE IF EXISTS example`)
_, err = db.Exec("CREATE TABLE example ( id INTEGER, data NUMERIC )")
@ -102,19 +88,15 @@ func TestPostgresDB(t *testing.T) {
t.Fatal(err)
}

shared.ExpectEqual(t, bi.String(), data)
test_helpers.ExpectEqual(t, bi.String(), data)
actual := new(big.Int)
actual.SetString(data, 10)
shared.ExpectEqual(t, actual, bi)
test_helpers.ExpectEqual(t, actual, bi)
})

t.Run("throws error when can't connect to the database", func(t *testing.T) {
invalidDatabase := postgres.ConnectionParams{}
goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
node := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
_, err := postgres.NewSQLXDriver(ctx, postgres.Config{}, goodInfo)

_, err := postgres.NewDB(postgres.DbConnectionString(invalidDatabase),
postgres.ConnectionConfig{}, node)

if err == nil {
t.Fatal("Expected an error")
}
@ -124,13 +106,13 @@ func TestPostgresDB(t *testing.T) {

t.Run("throws error when can't create node", func(t *testing.T) {
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
node := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}

_, err := postgres.NewDB(postgres.DbConnectionString(DBParams), postgres.ConnectionConfig{}, node)

_, err := postgres.NewSQLXDriver(ctx, postgres.DefaultConfig, badInfo)
if err == nil {
t.Fatal("Expected an error")
}

expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg)
})
}
42
statediff/indexer/database/sql/postgres/test_helpers.go
Normal file
@ -0,0 +1,42 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import (
"context"

"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

// SetupSQLXDB is used to setup a sqlx db for tests
func SetupSQLXDB() (sql.Database, error) {
driver, err := NewSQLXDriver(context.Background(), DefaultConfig, node.Info{})
if err != nil {
return nil, err
}
return NewPostgresDB(driver), nil
}

// SetupPGXDB is used to setup a pgx db for tests
func SetupPGXDB() (sql.Database, error) {
driver, err := NewPGXDriver(context.Background(), DefaultConfig, node.Info{})
if err != nil {
return nil, err
}
return NewPostgresDB(driver), nil
}
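A hedged sketch of how a test might combine these setup helpers with the TearDownDB helper from the sql package below; the test name and flow are assumptions, not part of this commit:

package postgres_test // illustrative only

import (
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
)

func TestWithFreshDB(t *testing.T) {
	db, err := postgres.SetupSQLXDB()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	defer sql.TearDownDB(t, db) // runs before Close; truncates the eth.* and blocks tables

	// ... exercise the indexer against db here ...
}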
@ -14,46 +14,46 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer
package sql

import (
"context"
"testing"

"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
)

// TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(t *testing.T, db *postgres.DB) {
func TearDownDB(t *testing.T, db Database) {
tx, err := db.Beginx()
ctx := context.Background()
tx, err := db.Begin(ctx)
if err != nil {
t.Fatal(err)
}

_, err = tx.Exec(`DELETE FROM eth.header_cids`)
_, err = tx.Exec(ctx, `DELETE FROM eth.header_cids`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
_, err = tx.Exec(ctx, `DELETE FROM eth.transaction_cids`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
_, err = tx.Exec(ctx, `DELETE FROM eth.receipt_cids`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(`DELETE FROM eth.state_cids`)
_, err = tx.Exec(ctx, `DELETE FROM eth.state_cids`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
_, err = tx.Exec(ctx, `DELETE FROM eth.storage_cids`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(`DELETE FROM blocks`)
_, err = tx.Exec(ctx, `DELETE FROM blocks`)
if err != nil {
t.Fatal(err)
}
err = tx.Commit()
err = tx.Commit(ctx)
if err != nil {
t.Fatal(err)
}
statediff/indexer/database/sql/writer.go (new file, 141 lines)
@@ -0,0 +1,141 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package sql

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/statediff/indexer/models"
)

var (
    nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
)

// Writer handles processing and writing of indexed IPLD objects to Postgres
type Writer struct {
    db Database
}

// NewWriter creates a new pointer to a Writer
func NewWriter(db Database) *Writer {
    return &Writer{
        db: db,
    }
}

func (in *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) (int64, error) {
    var headerID int64
    err := tx.QueryRow(in.db.Context(), in.db.InsertHeaderStm(),
        header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID(), header.Reward, header.StateRoot, header.TxRoot,
        header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.BaseFee).Scan(&headerID)
    if err != nil {
        return 0, fmt.Errorf("error upserting header_cids entry: %v", err)
    }
    indexerMetrics.blocks.Inc(1)
    return headerID, nil
}

func (in *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel, headerID int64) error {
    _, err := tx.Exec(in.db.Context(), in.db.InsertUncleStm(),
        uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
    if err != nil {
        return fmt.Errorf("error upserting uncle_cids entry: %v", err)
    }
    return nil
}

func (in *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel, headerID int64) (int64, error) {
    var txID int64
    err := tx.QueryRow(in.db.Context(), in.db.InsertTxStm(),
        headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID)
    if err != nil {
        return 0, fmt.Errorf("error upserting transaction_cids entry: %v", err)
    }
    indexerMetrics.transactions.Inc(1)
    return txID, nil
}

func (in *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessListElementModel, txID int64) error {
    _, err := tx.Exec(in.db.Context(), in.db.InsertAccessListElementStm(),
        txID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
    if err != nil {
        return fmt.Errorf("error upserting access_list_element entry: %v", err)
    }
    indexerMetrics.accessListEntries.Inc(1)
    return nil
}

func (in *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel, txID int64) (int64, error) {
    var receiptID int64
    err := tx.QueryRow(in.db.Context(), in.db.InsertRctStm(),
        txID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID)
    if err != nil {
        return 0, fmt.Errorf("error upserting receipt_cids entry: %w", err)
    }
    indexerMetrics.receipts.Inc(1)
    return receiptID, nil
}

func (in *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel, receiptID int64) error {
    for _, log := range logs {
        _, err := tx.Exec(in.db.Context(), in.db.InsertLogStm(),
            log.LeafCID, log.LeafMhKey, receiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data)
        if err != nil {
            return fmt.Errorf("error upserting logs entry: %w", err)
        }
        indexerMetrics.logs.Inc(1)
    }
    return nil
}

func (in *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel, headerID int64) (int64, error) {
    var stateID int64
    var stateKey string
    if stateNode.StateKey != nullHash.String() {
        stateKey = stateNode.StateKey
    }
    err := tx.QueryRow(in.db.Context(), in.db.InsertStateStm(),
        headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
    if err != nil {
        return 0, fmt.Errorf("error upserting state_cids entry: %v", err)
    }
    return stateID, nil
}

func (in *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel, stateID int64) error {
    _, err := tx.Exec(in.db.Context(), in.db.InsertAccountStm(),
        stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
    if err != nil {
        return fmt.Errorf("error upserting state_accounts entry: %v", err)
    }
    return nil
}

func (in *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel, stateID int64) error {
    var storageKey string
    if storageCID.StorageKey != nullHash.String() {
        storageKey = storageCID.StorageKey
    }
    _, err := tx.Exec(in.db.Context(), in.db.InsertStorageStm(),
        stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
    if err != nil {
        return fmt.Errorf("error upserting storage_cids entry: %v", err)
    }
    return nil
}
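A rough sketch (not part of this commit; transaction handling and rollback are owned by the surrounding indexer code and omitted here) of how the unexported upsert helpers are driven through the Database and Tx abstractions shown above:

    // hypothetical orchestration in package sql
    func writeHeader(db Database, header models.HeaderModel) (int64, error) {
        w := NewWriter(db)
        tx, err := db.Begin(db.Context())
        if err != nil {
            return 0, err
        }
        // the prepared header statement is assumed to return the new row id, which Scan captures
        headerID, err := w.upsertHeaderCID(tx, header)
        if err != nil {
            return 0, err
        }
        return headerID, tx.Commit(db.Context())
    }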
@@ -1,45 +0,0 @@ (file removed)
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package indexer

import (
    "fmt"

    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/statediff/types"
)

// ResolveFromNodeType wrapper around NodeType.Int() so that we maintain backwards compatibility
func ResolveFromNodeType(nodeType types.NodeType) int {
    return nodeType.Int()
}

// ChainConfig returns the appropriate ethereum chain config for the provided chain id
func ChainConfig(chainID uint64) (*params.ChainConfig, error) {
    switch chainID {
    case 1:
        return params.MainnetChainConfig, nil
    case 3:
        return params.RopstenChainConfig, nil
    case 4:
        return params.RinkebyChainConfig, nil
    case 5:
        return params.GoerliChainConfig, nil
    default:
        return nil, fmt.Errorf("chain config for chainid %d not available", chainID)
    }
}
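For reference, the removed ChainConfig helper only resolved a fixed set of chain IDs; an illustrative call (not part of the diff):

    cfg, err := ChainConfig(1) // params.MainnetChainConfig, nil
    if err != nil {
        // only chain IDs 1, 3, 4 and 5 were recognized
    }
    _ = cfg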
statediff/indexer/interfaces/interfaces.go (new file, 46 lines)
@@ -0,0 +1,46 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package interfaces

import (
    "io"
    "math/big"
    "time"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

// StateDiffIndexer interface required to index statediff data
type StateDiffIndexer interface {
    PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error)
    PushStateNode(tx Batch, stateNode sdtypes.StateNode) error
    PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error
    ReportDBMetrics(delay time.Duration, quit <-chan bool)
    io.Closer
}

// Batch required for indexing data atomically
type Batch interface {
    Submit(err error) error
}

// Config used to configure different underlying implementations
type Config interface {
    Type() shared.DBType
}
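A minimal caller sketch (illustrative only; the real call sites live in the statediff service) showing the intended flow through the new interface: PushBlock opens a Batch, state nodes are pushed against it, and Submit finalizes it with whatever error was seen:

    // package example — hypothetical consumer of the interfaces package
    func index(sdi interfaces.StateDiffIndexer, block *types.Block, rcts types.Receipts, td *big.Int, nodes []sdtypes.StateNode) (err error) {
        var batch interfaces.Batch
        batch, err = sdi.PushBlock(block, rcts, td)
        if err != nil {
            return err
        }
        defer func() {
            err = batch.Submit(err) // finalize the batch, handing it any accumulated error
        }()
        for _, node := range nodes {
            if err = sdi.PushStateNode(batch, node); err != nil {
                return err
            }
        }
        return err
    }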
@@ -89,7 +89,7 @@ func newLogTrie() *logTrie {
 }

 // getNodes invokes the localTrie, which computes the root hash of the
-// log trie and returns its database keys, to return a slice
+// log trie and returns its sql keys, to return a slice
 // of EthLogTrie nodes.
 func (rt *logTrie) getNodes() ([]*EthLogTrie, error) {
     keys, err := rt.getKeys()
@@ -121,7 +121,7 @@ func NewRctTrie() *rctTrie {
 }

 // GetNodes invokes the localTrie, which computes the root hash of the
-// transaction trie and returns its database keys, to return a slice
+// transaction trie and returns its sql keys, to return a slice
 // of EthRctTrie nodes.
 func (rt *rctTrie) GetNodes() ([]*EthRctTrie, error) {
     keys, err := rt.getKeys()
@@ -121,7 +121,7 @@ func newTxTrie() *txTrie {
 }

 // getNodes invokes the localTrie, which computes the root hash of the
-// transaction trie and returns its database keys, to return a slice
+// transaction trie and returns its sql keys, to return a slice
 // of EthTxTrie nodes.
 func (tt *txTrie) getNodes() ([]*EthTxTrie, error) {
     keys, err := tt.getKeys()
@@ -27,7 +27,7 @@ import (
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/rlp"
-    sdtrie "github.com/ethereum/go-ethereum/statediff/trie"
+    sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers"
     sdtypes "github.com/ethereum/go-ethereum/statediff/types"
     "github.com/ethereum/go-ethereum/trie"
 )
@@ -143,7 +143,7 @@ func (lt *localTrie) commit() error {
     return nil
 }

-// getKeys returns the stored keys of the memory database
+// getKeys returns the stored keys of the memory sql
 // of the localTrie for further processing.
 func (lt *localTrie) getKeys() ([][]byte, error) {
     if err := lt.commit(); err != nil {
@@ -167,7 +167,7 @@ type nodeKey struct {
     TrieKey []byte
 }

-// getLeafKeys returns the stored leaf keys from the memory database
+// getLeafKeys returns the stored leaf keys from the memory sql
 // of the localTrie for further processing.
 func (lt *localTrie) getLeafKeys() ([]*nodeKey, error) {
     if err := lt.commit(); err != nil {
@@ -32,7 +32,7 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/statediff/testhelpers"
+    "github.com/ethereum/go-ethereum/statediff/test_helpers"
     sdtypes "github.com/ethereum/go-ethereum/statediff/types"
 )
@@ -111,7 +111,7 @@ var (
     nonce1           = uint64(1)
     ContractRoot     = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
     ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
-    ContractLeafKey  = testhelpers.AddressToLeafKey(ContractAddress)
+    ContractLeafKey  = test_helpers.AddressToLeafKey(ContractAddress)
     ContractAccount, _ = rlp.EncodeToBytes(types.StateAccount{
         Nonce:   nonce1,
         Balance: big.NewInt(0),
@@ -127,8 +127,8 @@ var (
     nonce0          = uint64(0)
     AccountRoot     = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
     AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
-    AccountLeafKey  = testhelpers.Account2LeafKey
-    RemovedLeafKey  = testhelpers.Account1LeafKey
+    AccountLeafKey  = test_helpers.Account2LeafKey
+    RemovedLeafKey  = test_helpers.Account1LeafKey
     Account, _ = rlp.EncodeToBytes(types.StateAccount{
         Nonce:   nonce0,
         Balance: big.NewInt(1000),
@@ -1,3 +1,19 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
 package models

 import "github.com/lib/pq"
@@ -20,8 +20,8 @@ import "github.com/lib/pq"

 // IPLDModel is the db model for public.blocks
 type IPLDModel struct {
-    Key  string
-    Data []byte
+    Key  string `db:"key"`
+    Data []byte `db:"data"`
 }

 // HeaderModel is the db model for eth.header_cids
@@ -1,59 +0,0 @@ (file removed)
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import (
    "fmt"
)

// Env variables
const (
    DATABASE_NAME                 = "DATABASE_NAME"
    DATABASE_HOSTNAME             = "DATABASE_HOSTNAME"
    DATABASE_PORT                 = "DATABASE_PORT"
    DATABASE_USER                 = "DATABASE_USER"
    DATABASE_PASSWORD             = "DATABASE_PASSWORD"
    DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
    DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
    DATABASE_MAX_CONN_LIFETIME    = "DATABASE_MAX_CONN_LIFETIME"
)

type ConnectionParams struct {
    Hostname string
    Name     string
    User     string
    Password string
    Port     int
}

type ConnectionConfig struct {
    MaxIdle     int
    MaxOpen     int
    MaxLifetime int
}

func DbConnectionString(params ConnectionParams) string {
    if len(params.User) > 0 && len(params.Password) > 0 {
        return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
            params.User, params.Password, params.Hostname, params.Port, params.Name)
    }
    if len(params.User) > 0 && len(params.Password) == 0 {
        return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
            params.User, params.Hostname, params.Port, params.Name)
    }
    return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", params.Hostname, params.Port, params.Name)
}
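For reference, the removed DbConnectionString built libpq-style URLs; with illustrative values (database name and credentials are made up) it produced:

    params := ConnectionParams{Hostname: "localhost", Port: 5432, Name: "vulcanize_public", User: "postgres", Password: "pw"}
    fmt.Println(DbConnectionString(params))
    // postgresql://postgres:pw@localhost:5432/vulcanize_public?sslmode=disable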
@@ -1,76 +0,0 @@ (file removed)
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package postgres

import (
    "time"

    "github.com/jmoiron/sqlx"
    _ "github.com/lib/pq" //postgres driver

    "github.com/ethereum/go-ethereum/statediff/indexer/node"
)

type DB struct {
    *sqlx.DB
    Node   node.Info
    NodeID int64
}

func NewDB(connectString string, config ConnectionConfig, node node.Info) (*DB, error) {
    db, connectErr := sqlx.Connect("postgres", connectString)
    if connectErr != nil {
        return &DB{}, ErrDBConnectionFailed(connectErr)
    }
    if config.MaxOpen > 0 {
        db.SetMaxOpenConns(config.MaxOpen)
    }
    if config.MaxIdle > 0 {
        db.SetMaxIdleConns(config.MaxIdle)
    }
    if config.MaxLifetime > 0 {
        lifetime := time.Duration(config.MaxLifetime) * time.Second
        db.SetConnMaxLifetime(lifetime)
    }
    pg := DB{DB: db, Node: node}
    nodeErr := pg.CreateNode(&node)
    if nodeErr != nil {
        return &DB{}, ErrUnableToSetNode(nodeErr)
    }
    return &pg, nil
}

func (db *DB) CreateNode(node *node.Info) error {
    var nodeID int64
    err := db.QueryRow(
        `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id)
         VALUES ($1, $2, $3, $4, $5)
         ON CONFLICT (genesis_block, network_id, node_id, chain_id)
         DO UPDATE
         SET genesis_block = $1,
             network_id = $2,
             node_id = $3,
             client_name = $4,
             chain_id = $5
         RETURNING id`,
        node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID).Scan(&nodeID)
    if err != nil {
        return ErrUnableToSetNode(err)
    }
    db.NodeID = nodeID
    return nil
}
@@ -1,78 +0,0 @@ (file removed)
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
    "errors"
    "strings"
)

// ChainType enum for specifying blockchain
type ChainType int

const (
    UnknownChain ChainType = iota
    Ethereum
    Bitcoin
    Omni
    EthereumClassic
)

func (c ChainType) String() string {
    switch c {
    case Ethereum:
        return "Ethereum"
    case Bitcoin:
        return "Bitcoin"
    case Omni:
        return "Omni"
    case EthereumClassic:
        return "EthereumClassic"
    default:
        return ""
    }
}

func (c ChainType) API() string {
    switch c {
    case Ethereum:
        return "eth"
    case Bitcoin:
        return "btc"
    case Omni:
        return "omni"
    case EthereumClassic:
        return "etc"
    default:
        return ""
    }
}

func NewChainType(name string) (ChainType, error) {
    switch strings.ToLower(name) {
    case "ethereum", "eth":
        return Ethereum, nil
    case "bitcoin", "btc", "xbt":
        return Bitcoin, nil
    case "omni":
        return Omni, nil
    case "classic", "etc":
        return EthereumClassic, nil
    default:
        return UnknownChain, errors.New("invalid name for chain")
    }
}
@@ -1,5 +1,5 @@
 // VulcanizeDB
-// Copyright © 2019 Vulcanize
+// Copyright © 2021 Vulcanize

 // This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU Affero General Public License as published by
@@ -17,6 +17,7 @@
 package shared

 const (
-    DefaultMaxBatchSize   uint64 = 100
-    DefaultMaxBatchNumber int64  = 50
+    RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
+    RemovedNodeStateCID   = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
+    RemovedNodeMhKey      = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A"
 )
@@ -1,102 +0,0 @@ (file removed)
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
    "fmt"
    "strings"
)

// DataType is an enum to loosely represent type of chain data
type DataType int

const (
    UnknownDataType DataType = iota - 1
    Full
    Headers
    Uncles
    Transactions
    Receipts
    State
    Storage
)

// String() method to resolve ReSyncType enum
func (r DataType) String() string {
    switch r {
    case Full:
        return "full"
    case Headers:
        return "headers"
    case Uncles:
        return "uncles"
    case Transactions:
        return "transactions"
    case Receipts:
        return "receipts"
    case State:
        return "state"
    case Storage:
        return "storage"
    default:
        return "unknown"
    }
}

// GenerateDataTypeFromString returns a DataType from a provided string
func GenerateDataTypeFromString(str string) (DataType, error) {
    switch strings.ToLower(str) {
    case "full", "f":
        return Full, nil
    case "headers", "header", "h":
        return Headers, nil
    case "uncles", "u":
        return Uncles, nil
    case "transactions", "transaction", "trxs", "txs", "trx", "tx", "t":
        return Transactions, nil
    case "receipts", "receipt", "rcts", "rct", "r":
        return Receipts, nil
    case "state":
        return State, nil
    case "storage":
        return Storage, nil
    default:
        return UnknownDataType, fmt.Errorf("unrecognized resync type: %s", str)
    }
}

// SupportedDataType returns whether a DataType is supported
func SupportedDataType(d DataType) (bool, error) {
    switch d {
    case Full:
        return true, nil
    case Headers:
        return true, nil
    case Uncles:
        return true, nil
    case Transactions:
        return true, nil
    case Receipts:
        return true, nil
    case State:
        return true, nil
    case Storage:
        return true, nil
    default:
        return true, nil
    }
}
@@ -1,5 +1,5 @@
 // VulcanizeDB
-// Copyright © 2019 Vulcanize
+// Copyright © 2021 Vulcanize

 // This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU Affero General Public License as published by
@@ -14,9 +14,11 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-package ipfs
+package shared

-type BlockModel struct {
-    CID  string `db:"key"`
-    Data []byte `db:"data"`
-}
+type DBType string
+
+const (
+    POSTGRES DBType = "Postgres"
+    DUMP     DBType = "Dump"
+)
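A small sketch (hypothetical type name, not part of the diff) of how a concrete configuration can satisfy interfaces.Config using the new enum, so callers can switch on the backing store:

    type pgConfig struct{} // illustrative stand-in for the real Postgres config type

    func (pgConfig) Type() shared.DBType { return shared.POSTGRES }

    var _ interfaces.Config = pgConfig{} // compile-time assertion that the interface is satisfied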
@@ -18,19 +18,12 @@ package shared

 import (
     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/log"
-    "github.com/ethereum/go-ethereum/statediff/indexer/postgres"

     "github.com/ipfs/go-cid"
     blockstore "github.com/ipfs/go-ipfs-blockstore"
     dshelp "github.com/ipfs/go-ipfs-ds-help"
-    "github.com/jmoiron/sqlx"
     "github.com/multiformats/go-multihash"
 )

-// IPLDInsertPgStr is the postgres statement string for IPLDs inserting into public.blocks
-const IPLDInsertPgStr = `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
-
 // HandleZeroAddrPointer will return an empty string for a nil address pointer
 func HandleZeroAddrPointer(to *common.Address) string {
     if to == nil {
@@ -47,13 +40,6 @@ func HandleZeroAddr(to common.Address) string {
     return to.Hex()
 }

-// Rollback sql transaction and log any error
-func Rollback(tx *sqlx.Tx) {
-    if err := tx.Rollback(); err != nil {
-        log.Error(err.Error())
-    }
-}
-
 // MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
 func MultihashKeyFromCID(c cid.Cid) string {
     dbKey := dshelp.MultihashToDsKey(c.Hash())
@@ -69,9 +55,3 @@ func MultihashKeyFromKeccak256(hash common.Hash) (string, error) {
     dbKey := dshelp.MultihashToDsKey(mh)
     return blockstore.BlockPrefix.String() + dbKey.String(), nil
 }
-
-// PublishDirectWithDB diretly writes a previously derived mhkey => value pair to the ipld database
-func PublishDirectWithDB(db *postgres.DB, key string, value []byte) error {
-    _, err := db.Exec(IPLDInsertPgStr, key, value)
-    return err
-}
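The multihash key helpers kept in this file are unchanged by the refactor; an illustrative call (hash value arbitrary, reused from the test fixtures above):

    key, err := MultihashKeyFromKeccak256(common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea"))
    if err != nil {
        // handle error
    }
    _ = key // blockstore-prefixed key of the form "/blocks/<multihash>"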
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-package indexer
+package shared

 import (
     "math/big"
Some files were not shown because too many files have changed in this diff.