v5 functional updates #339

Merged
telackey merged 64 commits from ian/v5_dev into v1.10.26-statediff-v5 2023-03-23 18:25:21 +00:00
100 changed files with 2852 additions and 8802 deletions


@ -4,8 +4,8 @@ on:
workflow_call:
env:
stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || 'f2fd766f5400fcb9eb47b50675d2e3b1f2753702'}}
ipld-eth-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || 'be345e0733d2c025e4082c5154e441317ae94cf7' }}
stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || 'e62830c982d4dfc5f3c1c2b12c1754a7e9b538f1'}}
ipld-eth-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || '167cfbfb202d387aed2c9950e18c45a66f87821d' }}
GOPATH: /tmp/go
jobs:
@ -28,7 +28,7 @@ jobs:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "1.19"
check-latest: true
- name: Checkout code
@ -47,7 +47,7 @@ jobs:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "1.19"
check-latest: true
- name: Checkout code
@ -56,7 +56,6 @@ jobs:
- name: Run docker compose
run: |
docker-compose up -d
- name: Give the migration a few seconds
run: sleep 30;
@ -72,7 +71,7 @@ jobs:
- uses: actions/setup-go@v3
with:
go-version: "1.18"
go-version: "1.19"
check-latest: true
- name: Checkout code
@ -84,7 +83,7 @@ jobs:
with:
ref: ${{ env.stack-orchestrator-ref }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
repository: cerc-io/mshaw_stack_hack
fetch-depth: 0
- uses: actions/checkout@v3
@ -101,13 +100,11 @@ jobs:
echo db_write=true >> $GITHUB_WORKSPACE/config.sh
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> $GITHUB_WORKSPACE/config.sh
cat $GITHUB_WORKSPACE/config.sh
- name: Compile Geth
run: |
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
./compile-geth.sh -e docker -p $GITHUB_WORKSPACE/config.sh
cd -
- name: Run docker compose
run: |
docker-compose \
@ -115,24 +112,24 @@ jobs:
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
--env-file $GITHUB_WORKSPACE/config.sh \
up -d --build
- name: Make sure the /root/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS exists within a certain time frame.
shell: bash
run: |
COUNT=0
ATTEMPTS=15
docker ps
docker logs local_go-ethereum_1
docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec go-ethereum ps aux
until $(docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" cp go-ethereum:/root/transaction_info/STATEFUL_TEST_DEPLOYED_ADDRESS ./STATEFUL_TEST_DEPLOYED_ADDRESS) || [[ $COUNT -eq $ATTEMPTS ]]; do echo -e "$(( COUNT++ ))... \c"; sleep 10; done
[[ $COUNT -eq $ATTEMPTS ]] && echo "Could not find the successful contract deployment" && (exit 1)
cat ./STATEFUL_TEST_DEPLOYED_ADDRESS
echo "Address length: `wc ./STATEFUL_TEST_DEPLOYED_ADDRESS`"
sleep 15;
- name: Create a new transaction.
shell: bash
run: |
docker logs local_go-ethereum_1
docker compose -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" exec go-ethereum /bin/bash /root/transaction_info/NEW_TRANSACTION
echo $?
- name: Make sure we see entries in the header table
shell: bash
run: |


@ -26,7 +26,7 @@ PASSWORD = password
export PGPASSWORD=$(PASSWORD)
#Test
TEST_DB = vulcanize_public
TEST_DB = cerc_testing
TEST_CONNECT_STRING = postgresql://$(USER):$(PASSWORD)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable
geth:


@ -69,7 +69,6 @@ type Receipt struct {
BlockHash common.Hash `json:"blockHash,omitempty"`
BlockNumber *big.Int `json:"blockNumber,omitempty"`
TransactionIndex uint `json:"transactionIndex"`
LogRoot common.Hash `json:"logRoot"`
}
type receiptMarshaling struct {


@ -5,10 +5,10 @@ services:
restart: on-failure
depends_on:
- ipld-eth-db
image: vulcanize/ipld-eth-db:v4.2.1-alpha
image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.0.1-alpha
environment:
DATABASE_USER: "vdbm"
DATABASE_NAME: "vulcanize_testing"
DATABASE_NAME: "cerc_testing"
DATABASE_PASSWORD: "password"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
@ -19,7 +19,7 @@ services:
command: ["postgres", "-c", "log_statement=all"]
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_testing"
POSTGRES_DB: "cerc_testing"
POSTGRES_PASSWORD: "password"
ports:
- "127.0.0.1:8077:5432"

go.mod

@ -40,11 +40,7 @@ require (
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/ipfs/go-block-format v0.0.3
github.com/ipfs/go-cid v0.2.0
github.com/ipfs/go-ipfs-blockstore v1.2.0
github.com/ipfs/go-ipfs-ds-help v1.1.0
github.com/ipfs/go-ipld-format v0.4.0
github.com/jackc/pgconn v1.10.0
github.com/jackc/pgx/v4 v4.13.0
github.com/jackpal/go-nat-pmp v1.0.2
@ -67,7 +63,7 @@ require (
github.com/rs/cors v1.7.0
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.8.0
github.com/supranational/blst v0.3.8
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/thoas/go-funk v0.9.2
@ -101,12 +97,6 @@ require (
github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-datastore v0.5.0 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
github.com/ipfs/go-log v0.0.1 // indirect
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
@ -114,7 +104,6 @@ require (
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.8.1 // indirect
github.com/jackc/puddle v1.1.3 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
@ -131,12 +120,10 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/objx v0.4.0 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.uber.org/atomic v1.6.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect

go.sum

@ -175,8 +175,6 @@ github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@ -226,7 +224,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@ -237,13 +234,10 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
@ -270,36 +264,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=
github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0=
github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro=
github.com/ipfs/go-datastore v0.5.0 h1:rQicVCEacWyk4JZ6G5bD9TKR7lZEG1MWcG7UdWYrFAU=
github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw=
github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q=
github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ=
github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
@ -374,9 +340,6 @@ github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8=
github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU=
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
@ -397,7 +360,6 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
@ -412,7 +374,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@ -464,8 +425,6 @@ github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4f
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
@ -476,7 +435,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
@ -484,16 +442,11 @@ github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA=
github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@ -583,15 +536,18 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57N
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/supranational/blst v0.3.8 h1:glwLF4oBRSJOTr05lRBgNwGQST0ndP2wg29fSeTRKCY=
github.com/supranational/blst v0.3.8/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@ -611,8 +567,6 @@ github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhA
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
@ -628,7 +582,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
@ -638,13 +591,11 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -683,7 +634,6 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
@ -702,7 +652,6 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -746,7 +695,6 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -804,7 +752,6 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -897,7 +844,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=


@ -8,7 +8,7 @@ mkdir -p out
rm -rf out/docker-tsdb/
# Copy over files to setup TimescaleDB
ID=$(docker create vulcanize/ipld-eth-db:v4.1.1-alpha)
ID=$(docker create cerc-io/ipld-eth-db:v5.0.1-alpha)
docker cp $ID:/app/docker-tsdb out/docker-tsdb/
docker rm -v $ID


@ -127,7 +127,7 @@ This service introduces a CLI flag namespace `statediff`
The service can only operate in full sync mode (`--syncmode=full`), but only the historical RPC endpoints require an archive node (`--gcmode=archive`)
e.g.
`./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.driver=sqlx --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=vulcanize_test --statediff.db.user=postgres --statediff.db.nodeid=nodeid --statediff.db.clientname=clientname`
`./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.driver=sqlx --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=cerc_testing --statediff.db.user=postgres --statediff.db.nodeid=nodeid --statediff.db.clientname=clientname`
When operating in `--statediff.db.type=file` mode, the service will write SQL statements out to the file designated by
`--statediff.file.path`. Please note that it writes out SQL statements with all `ON CONFLICT` constraint checks dropped.
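
For a concrete sense of the difference, the sketch below contrasts the kind of plain `INSERT` emitted in file mode with the upsert form a direct database writer can use; it is a minimal illustration assuming the `ipld.blocks` layout described in the schema overview below, not verbatim output of the service.

```sql
-- Sketch only: file mode emits plain INSERTs with no conflict handling...
INSERT INTO ipld.blocks (block_number, key, data)
VALUES (1000000, '<cid-key>', '\x...');

-- ...whereas a direct writer can guard against duplicate rows, e.g.:
INSERT INTO ipld.blocks (block_number, key, data)
VALUES (1000000, '<cid-key>', '\x...')
ON CONFLICT (key, block_number) DO NOTHING;  -- conflict target is illustrative
```
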
@ -239,7 +239,7 @@ This will only work on a version 12.4 Postgres database.
#### Schema overview
Our Postgres schemas are built around a single IPFS backing Postgres IPLD blockstore table (`public.blocks`) that conforms with [go-ds-sql](https://github.com/ipfs/go-ds-sql/blob/master/postgres/postgres.go).
Our Postgres schemas are built around a single IPFS backing Postgres IPLD blockstore table (`ipld.blocks`) that conforms with [go-ds-sql](https://github.com/ipfs/go-ds-sql/blob/master/postgres/postgres.go).
All IPLD objects are stored in this table, where `key` is the blockstore-prefixed multihash key for the IPLD object and `data` contains
the bytes for the IPLD block (in the case of all Ethereum IPLDs, this is the RLP byte encoding of the Ethereum object).
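
As a minimal sketch of that layout (column types and the primary key are assumptions inferred from the description above, not the authoritative `ipld-eth-db` DDL), the blockstore table can be pictured as:

```sql
-- Illustrative only; see ipld-eth-db for the real schema.
CREATE TABLE ipld.blocks (
    block_number BIGINT NOT NULL,    -- height at which the IPLD was indexed
    key          TEXT   NOT NULL,    -- blockstore key derived from the object's multihash/CID
    data         BYTEA  NOT NULL,    -- raw IPLD block, e.g. the RLP encoding of an Ethereum object
    PRIMARY KEY (key, block_number)  -- assumed; the actual constraint may differ
);
```
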
@ -250,7 +250,7 @@ we create an Ethereum [advanced data layout](https://github.com/ipld/specs#schem
indexes on top of the raw IPLDs in other Postgres tables.
These secondary index tables fall under the `eth` schema and follow an `{objectType}_cids` naming convention.
These tables provide a view into individual fields of the underlying Ethereum IPLD objects, allowing lookups on these fields, and reference the raw IPLD objects stored in `public.blocks`
These tables provide a view into individual fields of the underlying Ethereum IPLD objects, allowing lookups on these fields, and reference the raw IPLD objects stored in `ipld.blocks`
by foreign keys to their multihash keys.
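
For example, fetching a header row together with its raw IPLD bytes might look like the hedged sketch below (the `eth.header_cids` column names are assumptions based on the naming conventions described here, not the exact schema):

```sql
-- Illustrative join from a secondary index table back to the raw IPLD data.
SELECT h.block_number, h.block_hash, b.data
FROM eth.header_cids AS h
JOIN ipld.blocks AS b
  ON b.key = h.cid                    -- reference to the blockstore key
 AND b.block_number = h.block_number
WHERE h.block_hash = '0xabc...';      -- hypothetical block hash
```
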
Additionally, these tables maintain the hash-linked nature of Ethereum objects to one another. E.g. a storage trie node entry in the `storage_cids`
table contains a `state_id` foreign key which references the `id` for the `state_cids` entry that contains the state leaf node for the contract that storage node belongs to,
@ -280,7 +280,7 @@ the full incremental history.
Example: `v1.10.16-statediff-3.0.2`
- The first section, `v1.10.16`, corresponds to the release of the root branch this version is rebased onto (e.g., [](https://github.com/ethereum/go-ethereum/releases/tag/v1.10.16)[https://github.com/ethereum/go-ethereum/releases/tag/v1.10.16](https://github.com/ethereum/go-ethereum/releases/tag/v1.10.16))
- The second section, `3.0.2`, corresponds to the version of our statediffing code. The major version here (3) should always correspond with the major version of the `ipld-eth-db` schema version it works with (e.g., [](https://github.com/vulcanize/ipld-eth-db/releases/tag/v3.0.6)[https://github.com/vulcanize/ipld-eth-db/releases/tag/v3.0.6](https://github.com/vulcanize/ipld-eth-db/releases/tag/v3.0.6)); it is only bumped when we bump the major version of the schema.
- The second section, `3.0.2`, corresponds to the version of our statediffing code. The major version here (3) should always correspond with the major version of the `ipld-eth-db` schema version it works with (e.g., [](https://github.com/cerc-io/ipld-eth-db/releases/tag/v3.0.6)[https://github.com/vulcanize/ipld-eth-db/releases/tag/v3.0.6](https://github.com/vulcanize/ipld-eth-db/releases/tag/v3.0.6)); it is only bumped when we bump the major version of the schema.
- The major version of the schema is only bumped when a breaking change is made to the schema.
- The minor version is bumped when a new feature is added, or a fix is performed that breaks or updates the statediffing API or CLI in some way.
- The patch version is bumped whenever minor fixes/patches/features are done that don't change/break API/CLI compatibility.


@ -102,11 +102,6 @@ func (api *PublicStateDiffAPI) StateDiffFor(ctx context.Context, blockHash commo
return api.sds.StateDiffFor(blockHash, params)
}
// StateTrieAt returns a state trie payload at the specific blockheight
func (api *PublicStateDiffAPI) StateTrieAt(ctx context.Context, blockNumber uint64, params Params) (*Payload, error) {
return api.sds.StateTrieAt(blockNumber, params)
}
// StreamCodeAndCodeHash writes all of the codehash=>code pairs out to a websocket channel
func (api *PublicStateDiffAPI) StreamCodeAndCodeHash(ctx context.Context, blockNumber uint64) (*rpc.Subscription, error) {
// ensure that the RPC connection supports subscriptions

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -46,8 +46,6 @@ type Config struct {
// Params contains config parameters for the state diff builder
type Params struct {
IntermediateStateNodes bool
IntermediateStorageNodes bool
IncludeBlock bool
IncludeReceipts bool
IncludeTD bool


@ -23,9 +23,6 @@ import (
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
)
// BatchTx wraps a void with the state necessary for building the tx concurrently during trie difference iteration
@ -74,24 +71,10 @@ func (tx *BatchTx) cacheDirect(key string, value []byte) {
}
}
func (tx *BatchTx) cacheIPLD(i node.Node) {
func (tx *BatchTx) cacheIPLD(i ipld.IPLD) {
tx.iplds <- models.IPLDModel{
BlockNumber: tx.BlockNumber,
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Key: i.Cid().String(),
Data: i.RawData(),
}
}
func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
c, err := ipld.RawdataToCid(codec, raw, mh)
if err != nil {
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
tx.iplds <- models.IPLDModel{
BlockNumber: tx.BlockNumber,
Key: prefixedKey,
Data: raw,
}
return c.String(), prefixedKey, err
}


@ -17,15 +17,12 @@
package dump
import (
"bytes"
"fmt"
"io"
"math/big"
"time"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common"
@ -36,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
@ -79,16 +77,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
}
// Generate the block iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
headerNode, txNodes, rctNodes, logNodes, err := ipld.FromBlockAndReceipts(block, receipts)
if err != nil {
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
}
if len(txTrieNodes) != len(rctTrieNodes) {
return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
if len(txNodes) != len(rctNodes) {
return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d)", len(txNodes), len(rctNodes))
}
// Calculate reward
@ -146,7 +141,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
t = time.Now()
// Publish and index uncles
err = sdi.processUncles(blockTx, headerID, block.Number(), uncleNodes)
err = sdi.processUncles(blockTx, headerID, block.Number(), block.UncleHash(), block.Uncles())
if err != nil {
return nil, err
}
@ -161,12 +156,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
receipts: receipts,
txs: transactions,
rctNodes: rctNodes,
rctTrieNodes: rctTrieNodes,
txNodes: txNodes,
txTrieNodes: txTrieNodes,
logTrieNodes: logTrieNodes,
logLeafNodeCIDs: logLeafNodeCIDs,
rctLeafNodeCIDs: rctLeafNodeCIDs,
logNodes: logNodes,
})
if err != nil {
return nil, err
@ -181,13 +172,12 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (string, error) {
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode ipld.IPLD, reward, td *big.Int) (string, error) {
tx.cacheIPLD(headerNode)
headerID := header.Hash().String()
mod := models.HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: header.ParentHash.String(),
BlockNumber: header.Number.String(),
BlockHash: headerID,
@ -197,7 +187,7 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
StateRoot: header.Root.String(),
RctRoot: header.ReceiptHash.String(),
TxRoot: header.TxHash.String(),
UncleRoot: header.UncleHash.String(),
UnclesHash: header.UncleHash.String(),
Timestamp: header.Time,
Coinbase: header.Coinbase.String(),
}
@ -206,25 +196,37 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
}
// processUncles publishes and indexes uncle IPLDs in Postgres
func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, uncleNodes []*ipld2.EthHeader) error {
func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, unclesHash common.Hash, uncles []*types.Header) error {
// publish and index uncles
for _, uncleNode := range uncleNodes {
tx.cacheIPLD(uncleNode)
uncleEncoding, err := rlp.EncodeToBytes(uncles)
if err != nil {
return err
}
preparedHash := crypto.Keccak256Hash(uncleEncoding)
if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
}
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
if err != nil {
return err
}
tx.cacheDirect(unclesCID.String(), uncleEncoding)
for i, uncle := range uncles {
var uncleReward *big.Int
// in PoA networks uncle reward is 0
if sdi.chainConfig.Clique != nil {
uncleReward = big.NewInt(0)
} else {
uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncleNode.Number.Uint64())
uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncle.Number.Uint64())
}
uncle := models.UncleModel{
BlockNumber: blockNumber.String(),
HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
CID: unclesCID.String(),
ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(),
Reward: uncleReward.String(),
Index: int64(i),
}
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", uncle); err != nil {
return err
@ -239,13 +241,9 @@ type processArgs struct {
blockNumber *big.Int
receipts types.Receipts
txs types.Transactions
rctNodes []*ipld2.EthReceipt
rctTrieNodes []*ipld2.EthRctTrie
txNodes []*ipld2.EthTx
txTrieNodes []*ipld2.EthTxTrie
logTrieNodes [][]node.Node
logLeafNodeCIDs [][]cid.Cid
rctLeafNodeCIDs []cid.Cid
rctNodes []*ipld.EthReceipt
txNodes []*ipld.EthTx
logNodes [][]*ipld.EthLog
}
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
@ -253,9 +251,6 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
// Process receipts and txs
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
for i, receipt := range args.receipts {
for _, logTrieNode := range args.logTrieNodes[i] {
tx.cacheIPLD(logTrieNode)
}
txNode := args.txNodes[i]
tx.cacheIPLD(txNode)
@ -281,9 +276,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
Src: shared.HandleZeroAddr(from),
TxHash: trxID,
Index: int64(i),
Data: trx.Data(),
CID: txNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
Type: trx.Type(),
Value: val,
}
@ -291,45 +284,16 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return err
}
// index access list if this is one
for j, accessListElement := range trx.AccessList() {
storageKeys := make([]string, len(accessListElement.StorageKeys))
for k, storageKey := range accessListElement.StorageKeys {
storageKeys[k] = storageKey.Hex()
}
accessListElementModel := models.AccessListElementModel{
BlockNumber: args.blockNumber.String(),
TxID: trxID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
StorageKeys: storageKeys,
}
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", accessListElementModel); err != nil {
return err
}
}
// this is the contract address if this receipt is for a contract creation tx
contract := shared.HandleZeroAddr(receipt.ContractAddress)
var contractHash string
if contract != "" {
contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
// index the receipt
if !args.rctLeafNodeCIDs[i].Defined() {
return fmt.Errorf("invalid receipt leaf node cid")
}
rctModel := &models.ReceiptModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
TxID: trxID,
Contract: contract,
ContractHash: contractHash,
LeafCID: args.rctLeafNodeCIDs[i].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
LogRoot: args.rctNodes[i].LogRoot.String(),
CID: args.rctNodes[i].Cid().String(),
}
if len(receipt.PostState) == 0 {
rctModel.PostStatus = receipt.Status
@ -348,19 +312,13 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
topicSet[ti] = topic.Hex()
}
if !args.logLeafNodeCIDs[i][idx].Defined() {
return fmt.Errorf("invalid log cid")
}
logDataSet[idx] = &models.LogsModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
ReceiptID: trxID,
Address: l.Address.String(),
Index: int64(l.Index),
Data: l.Data,
LeafCID: args.logLeafNodeCIDs[i][idx].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
CID: args.logNodes[i][idx].Cid().String(),
Topic0: topicSet[0],
Topic1: topicSet[1],
Topic2: topicSet[2],
@ -373,48 +331,38 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
}
}
// publish trie nodes, these aren't indexed directly
for i, n := range args.txTrieNodes {
tx.cacheIPLD(n)
tx.cacheIPLD(args.rctTrieNodes[i])
}
return nil
}
// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateLeafNode, headerID string) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("dump: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
}
// publish the state node
var stateModel models.StateNodeModel
if stateNode.NodeType == sdtypes.Removed {
if stateNode.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
// this assumes the db has been initialized and a ipld.blocks entry for the Removed node is present
stateModel = models.StateNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
CID: shared.RemovedNodeStateCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: stateNode.NodeType.Int(),
Removed: true,
}
} else {
stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
}
stateModel = models.StateNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
MhKey: stateMhKey,
NodeType: stateNode.NodeType.Int(),
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
CID: stateNode.AccountWrapper.CID,
Removed: false,
Balance: stateNode.AccountWrapper.Account.Balance.String(),
Nonce: stateNode.AccountWrapper.Account.Nonce,
CodeHash: common.BytesToHash(stateNode.AccountWrapper.Account.CodeHash).String(),
StorageRoot: stateNode.AccountWrapper.Account.Root.String(),
}
}
@ -423,66 +371,32 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
return err
}
// if we have a leaf, decode and index the account data
if stateNode.NodeType == sdtypes.Leaf {
var i []interface{}
if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
}
if len(i) != 2 {
return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
}
var account types.StateAccount
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
accountModel := models.StateAccountModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
}
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", accountModel); err != nil {
return err
}
}
// if there are any storage nodes associated with this node, publish and index them
for _, storageNode := range stateNode.StorageNodes {
if storageNode.NodeType == sdtypes.Removed {
for _, storageNode := range stateNode.StorageDiff {
if storageNode.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
// this assumes the db has been initialized and a ipld.blocks entry for the Removed node is present
storageModel := models.StorageNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: shared.RemovedNodeStorageCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: storageNode.NodeType.Int(),
Removed: true,
}
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", storageModel); err != nil {
return err
}
continue
}
storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
}
storageModel := models.StorageNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: storageCIDStr,
MhKey: storageMhKey,
NodeType: storageNode.NodeType.Int(),
CID: storageNode.CID,
Removed: false,
Value: storageNode.Value,
}
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", storageModel); err != nil {
return err
@ -492,18 +406,13 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
return nil
}
// PushCodeAndCodeHash publishes code and codehash pairs to the ipld sql
func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
// PushIPLD publishes iplds to ipld.blocks
func (sdi *StateDiffIndexer) PushIPLD(batch interfaces.Batch, ipld sdtypes.IPLD) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("dump: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
}
// codec doesn't matter since db key is multihash-based
mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
if err != nil {
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
}
tx.cacheDirect(mhKey, codeAndCodeHash.Code)
tx.cacheDirect(ipld.CID, ipld.Content)
return nil
}


@ -28,8 +28,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file/types"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
"github.com/ethereum/go-ethereum/statediff/indexer/test"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
@ -90,7 +90,7 @@ func resetAndDumpWatchedAddressesCSVFileData(t *testing.T) {
test_helpers.TearDownDB(t, db)
outputFilePath := filepath.Join(dbDirectory, file.CSVTestConfig.WatchedAddressesFilePath)
stmt := fmt.Sprintf(pgCopyStatement, types.TableWatchedAddresses.Name, outputFilePath)
stmt := fmt.Sprintf(pgCopyStatement, schema.TableWatchedAddresses.Name, outputFilePath)
_, err = db.Exec(context.Background(), stmt)
require.NoError(t, err)


@ -25,37 +25,32 @@ import (
"path/filepath"
"strconv"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
"github.com/thoas/go-funk"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file/types"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
var (
Tables = []*types.Table{
&types.TableIPLDBlock,
&types.TableNodeInfo,
&types.TableHeader,
&types.TableStateNode,
&types.TableStorageNode,
&types.TableUncle,
&types.TableTransaction,
&types.TableAccessListElement,
&types.TableReceipt,
&types.TableLog,
&types.TableStateAccount,
Tables = []*schema.Table{
&schema.TableIPLDBlock,
&schema.TableNodeInfo,
&schema.TableHeader,
&schema.TableStateNode,
&schema.TableStorageNode,
&schema.TableUncle,
&schema.TableTransaction,
&schema.TableReceipt,
&schema.TableLog,
}
)
type tableRow struct {
table types.Table
table schema.Table
values []interface{}
}
@ -95,7 +90,7 @@ func newFileWriter(path string) (ret fileWriter, err error) {
return
}
func makeFileWriters(dir string, tables []*types.Table) (fileWriters, error) {
func makeFileWriters(dir string, tables []*schema.Table) (fileWriters, error) {
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
}
@ -110,7 +105,7 @@ func makeFileWriters(dir string, tables []*types.Table) (fileWriters, error) {
return writers, nil
}
func (tx fileWriters) write(tbl *types.Table, args ...interface{}) error {
func (tx fileWriters) write(tbl *schema.Table, args ...interface{}) error {
row := tbl.ToCsvRow(args...)
return tx[tbl.Name].Write(row)
}
@ -209,13 +204,13 @@ func (csw *CSVWriter) Close() error {
func (csw *CSVWriter) upsertNode(node nodeinfo.Info) {
var values []interface{}
values = append(values, node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID)
csw.rows <- tableRow{types.TableNodeInfo, values}
csw.rows <- tableRow{schema.TableNodeInfo, values}
}
func (csw *CSVWriter) upsertIPLD(ipld models.IPLDModel) {
var values []interface{}
values = append(values, ipld.BlockNumber, ipld.Key, ipld.Data)
csw.rows <- tableRow{types.TableIPLDBlock, values}
csw.rows <- tableRow{schema.TableIPLDBlock, values}
}
func (csw *CSVWriter) upsertIPLDDirect(blockNumber, key string, value []byte) {
@ -226,94 +221,66 @@ func (csw *CSVWriter) upsertIPLDDirect(blockNumber, key string, value []byte) {
})
}
func (csw *CSVWriter) upsertIPLDNode(blockNumber string, i node.Node) {
func (csw *CSVWriter) upsertIPLDNode(blockNumber string, i ipld.IPLD) {
csw.upsertIPLD(models.IPLDModel{
BlockNumber: blockNumber,
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Key: i.Cid().String(),
Data: i.RawData(),
})
}
func (csw *CSVWriter) upsertIPLDRaw(blockNumber string, codec, mh uint64, raw []byte) (string, string, error) {
c, err := ipld.RawdataToCid(codec, raw, mh)
if err != nil {
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
csw.upsertIPLD(models.IPLDModel{
BlockNumber: blockNumber,
Key: prefixedKey,
Data: raw,
})
return c.String(), prefixedKey, err
}
func (csw *CSVWriter) upsertHeaderCID(header models.HeaderModel) {
var values []interface{}
values = append(values, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UncleRoot, header.Bloom, strconv.FormatUint(header.Timestamp, 10), header.MhKey, 1, header.Coinbase)
csw.rows <- tableRow{types.TableHeader, values}
header.TotalDifficulty, header.NodeIDs, header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UnclesHash, header.Bloom, strconv.FormatUint(header.Timestamp, 10), header.Coinbase)
csw.rows <- tableRow{schema.TableHeader, values}
indexerMetrics.blocks.Inc(1)
}
func (csw *CSVWriter) upsertUncleCID(uncle models.UncleModel) {
var values []interface{}
values = append(values, uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
uncle.Reward, uncle.MhKey)
csw.rows <- tableRow{types.TableUncle, values}
uncle.Reward, uncle.Index)
csw.rows <- tableRow{schema.TableUncle, values}
}
func (csw *CSVWriter) upsertTransactionCID(transaction models.TxModel) {
var values []interface{}
values = append(values, transaction.BlockNumber, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst,
transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value)
csw.rows <- tableRow{types.TableTransaction, values}
transaction.Src, transaction.Index, transaction.Type, transaction.Value)
csw.rows <- tableRow{schema.TableTransaction, values}
indexerMetrics.transactions.Inc(1)
}
func (csw *CSVWriter) upsertAccessListElement(accessListElement models.AccessListElementModel) {
var values []interface{}
values = append(values, accessListElement.BlockNumber, accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
csw.rows <- tableRow{types.TableAccessListElement, values}
indexerMetrics.accessListEntries.Inc(1)
}
func (csw *CSVWriter) upsertReceiptCID(rct *models.ReceiptModel) {
var values []interface{}
values = append(values, rct.BlockNumber, rct.HeaderID, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey,
rct.PostState, rct.PostStatus, rct.LogRoot)
csw.rows <- tableRow{types.TableReceipt, values}
values = append(values, rct.BlockNumber, rct.HeaderID, rct.TxID, rct.CID, rct.Contract,
rct.PostState, rct.PostStatus)
csw.rows <- tableRow{schema.TableReceipt, values}
indexerMetrics.receipts.Inc(1)
}
func (csw *CSVWriter) upsertLogCID(logs []*models.LogsModel) {
for _, l := range logs {
var values []interface{}
values = append(values, l.BlockNumber, l.HeaderID, l.LeafCID, l.LeafMhKey, l.ReceiptID, l.Address, l.Index, l.Topic0,
l.Topic1, l.Topic2, l.Topic3, l.Data)
csw.rows <- tableRow{types.TableLog, values}
values = append(values, l.BlockNumber, l.HeaderID, l.CID, l.ReceiptID, l.Address, l.Index, l.Topic0,
l.Topic1, l.Topic2, l.Topic3)
csw.rows <- tableRow{schema.TableLog, values}
indexerMetrics.logs.Inc(1)
}
}
func (csw *CSVWriter) upsertStateCID(stateNode models.StateNodeModel) {
var stateKey string
if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey
balance := stateNode.Balance
if stateNode.Removed {
balance = "0"
}
var values []interface{}
values = append(values, stateNode.BlockNumber, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path,
stateNode.NodeType, true, stateNode.MhKey)
csw.rows <- tableRow{types.TableStateNode, values}
}
func (csw *CSVWriter) upsertStateAccount(stateAccount models.StateAccountModel) {
var values []interface{}
values = append(values, stateAccount.BlockNumber, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
strconv.FormatUint(stateAccount.Nonce, 10), stateAccount.CodeHash, stateAccount.StorageRoot)
csw.rows <- tableRow{types.TableStateAccount, values}
values = append(values, stateNode.BlockNumber, stateNode.HeaderID, stateNode.StateKey, stateNode.CID,
true, balance, strconv.FormatUint(stateNode.Nonce, 10), stateNode.CodeHash, stateNode.StorageRoot, stateNode.Removed)
csw.rows <- tableRow{schema.TableStateNode, values}
}
func (csw *CSVWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
@ -323,9 +290,9 @@ func (csw *CSVWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
}
var values []interface{}
values = append(values, storageCID.BlockNumber, storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID,
storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
csw.rows <- tableRow{types.TableStorageNode, values}
values = append(values, storageCID.BlockNumber, storageCID.HeaderID, storageCID.StateKey, storageKey, storageCID.CID,
true, storageCID.Value, storageCID.Removed)
csw.rows <- tableRow{schema.TableStorageNode, values}
}
// LoadWatchedAddresses loads watched addresses from a file
@ -365,7 +332,7 @@ func (csw *CSVWriter) insertWatchedAddresses(args []sdtypes.WatchAddressArg, cur
var values []interface{}
values = append(values, arg.Address, strconv.FormatUint(arg.CreatedAt, 10), currentBlockNumber.String(), "0")
row := types.TableWatchedAddresses.ToCsvRow(values...)
row := schema.TableWatchedAddresses.ToCsvRow(values...)
// writing directly instead of using rows channel as it needs to be flushed immediately
err = csw.watchedAddressesWriter.Write(row)
@ -408,7 +375,7 @@ func (csw *CSVWriter) removeWatchedAddresses(args []sdtypes.WatchAddressArg) err
func (csw *CSVWriter) setWatchedAddresses(args []sdtypes.WatchAddressArg, currentBlockNumber *big.Int) error {
var rows [][]string
for _, arg := range args {
row := types.TableWatchedAddresses.ToCsvRow(arg.Address, strconv.FormatUint(arg.CreatedAt, 10), currentBlockNumber.String(), "0")
row := schema.TableWatchedAddresses.ToCsvRow(arg.Address, strconv.FormatUint(arg.CreatedAt, 10), currentBlockNumber.String(), "0")
rows = append(rows, row)
}

View File

@ -17,6 +17,7 @@
package file
import (
"bytes"
"context"
"errors"
"fmt"
@ -26,8 +27,7 @@ import (
"sync/atomic"
"time"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/lib/pq"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common"
@ -38,7 +38,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
@ -149,16 +149,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
}
// Generate the block iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
headerNode, txNodes, rctNodes, logNodes, err := ipld.FromBlockAndReceipts(block, receipts)
if err != nil {
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
}
if len(txTrieNodes) != len(rctTrieNodes) {
return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
if len(txNodes) != len(rctNodes) {
return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d)", len(txNodes), len(rctNodes))
}
// Calculate reward
@ -200,7 +197,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
t = time.Now()
// write uncles
sdi.processUncles(headerID, block.Number(), uncleNodes)
sdi.processUncles(headerID, block.Number(), block.UncleHash(), block.Uncles())
tDiff = time.Since(t)
indexerMetrics.tUncleProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
@ -213,12 +210,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
receipts: receipts,
txs: transactions,
rctNodes: rctNodes,
rctTrieNodes: rctTrieNodes,
txNodes: txNodes,
txTrieNodes: txTrieNodes,
logTrieNodes: logTrieNodes,
logLeafNodeCIDs: logLeafNodeCIDs,
rctLeafNodeCIDs: rctLeafNodeCIDs,
logNodes: logNodes,
})
if err != nil {
return nil, err
@ -233,7 +226,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
// processHeader writes a header IPLD insert SQL stmt to a file
// it returns the headerID
func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node.Node, reward, td *big.Int) string {
func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode ipld.IPLD, reward, td *big.Int) string {
sdi.fileWriter.upsertIPLDNode(header.Number.String(), headerNode)
var baseFee *string
@ -243,9 +236,8 @@ func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node
}
headerID := header.Hash().String()
sdi.fileWriter.upsertHeaderCID(models.HeaderModel{
NodeID: sdi.nodeID,
NodeIDs: pq.StringArray([]string{sdi.nodeID}),
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: header.ParentHash.String(),
BlockNumber: header.Number.String(),
BlockHash: headerID,
@ -255,35 +247,48 @@ func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node
StateRoot: header.Root.String(),
RctRoot: header.ReceiptHash.String(),
TxRoot: header.TxHash.String(),
UncleRoot: header.UncleHash.String(),
UnclesHash: header.UncleHash.String(),
Timestamp: header.Time,
Coinbase: header.Coinbase.String(),
})
return headerID
}
// processUncles writes uncle IPLD insert SQL stmts to a file
func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber *big.Int, uncleNodes []*ipld2.EthHeader) {
// processUncles derives the uncles IPLD and writes uncle insert SQL stmts to a file
func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber *big.Int, unclesHash common.Hash, uncles []*types.Header) error {
// publish and index uncles
for _, uncleNode := range uncleNodes {
sdi.fileWriter.upsertIPLDNode(blockNumber.String(), uncleNode)
uncleEncoding, err := rlp.EncodeToBytes(uncles)
if err != nil {
return err
}
preparedHash := crypto.Keccak256Hash(uncleEncoding)
if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
}
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
if err != nil {
return err
}
sdi.fileWriter.upsertIPLDDirect(blockNumber.String(), unclesCID.String(), uncleEncoding)
for i, uncle := range uncles {
var uncleReward *big.Int
// in PoA networks uncle reward is 0
if sdi.chainConfig.Clique != nil {
uncleReward = big.NewInt(0)
} else {
uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncleNode.Number.Uint64())
uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncle.Number.Uint64())
}
sdi.fileWriter.upsertUncleCID(models.UncleModel{
BlockNumber: blockNumber.String(),
HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
CID: unclesCID.String(),
ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(),
Reward: uncleReward.String(),
Index: int64(i),
})
}
return nil
}
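
Both indexers now re-derive the uncles hash and the uncles CID from the raw uncle headers before publishing them as a single IPLD. A standalone sketch of the verification step used above:

package example

import (
    "bytes"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/rlp"
)

// checkUnclesHash RLP-encodes the uncle headers, verifies that the keccak256 of
// that encoding matches the uncles hash committed to in the block header, and
// returns the encoding for reuse as the uncles IPLD payload.
func checkUnclesHash(unclesHash common.Hash, uncles []*types.Header) ([]byte, error) {
    enc, err := rlp.EncodeToBytes(uncles)
    if err != nil {
        return nil, err
    }
    derived := crypto.Keccak256Hash(enc)
    if !bytes.Equal(derived.Bytes(), unclesHash.Bytes()) {
        return nil, fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", derived.Hex(), unclesHash.Hex())
    }
    return enc, nil
}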
// processArgs bundles arguments to processReceiptsAndTxs
@ -292,13 +297,9 @@ type processArgs struct {
blockNumber *big.Int
receipts types.Receipts
txs types.Transactions
rctNodes []*ipld2.EthReceipt
rctTrieNodes []*ipld2.EthRctTrie
txNodes []*ipld2.EthTx
txTrieNodes []*ipld2.EthTxTrie
logTrieNodes [][]node.Node
logLeafNodeCIDs [][]cid.Cid
rctLeafNodeCIDs []cid.Cid
rctNodes []*ipld.EthReceipt
txNodes []*ipld.EthTx
logNodes [][]*ipld.EthLog
}
// processReceiptsAndTxs writes receipt and tx IPLD insert SQL stmts to a file
@ -306,11 +307,9 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
// Process receipts and txs
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
for i, receipt := range args.receipts {
for _, logTrieNode := range args.logTrieNodes[i] {
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), logTrieNode)
}
txNode := args.txNodes[i]
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), txNode)
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), args.rctNodes[i])
// index tx
trx := args.txs[i]
@ -333,80 +332,46 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
Src: shared.HandleZeroAddr(from),
TxHash: txID,
Index: int64(i),
Data: trx.Data(),
CID: txNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
Type: trx.Type(),
Value: val,
}
sdi.fileWriter.upsertTransactionCID(txModel)
// index access list if this is one
for j, accessListElement := range trx.AccessList() {
storageKeys := make([]string, len(accessListElement.StorageKeys))
for k, storageKey := range accessListElement.StorageKeys {
storageKeys[k] = storageKey.Hex()
}
accessListElementModel := models.AccessListElementModel{
BlockNumber: args.blockNumber.String(),
TxID: txID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
StorageKeys: storageKeys,
}
sdi.fileWriter.upsertAccessListElement(accessListElementModel)
}
// this is the contract address if this receipt is for a contract creation tx
contract := shared.HandleZeroAddr(receipt.ContractAddress)
var contractHash string
if contract != "" {
contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
// index receipt
if !args.rctLeafNodeCIDs[i].Defined() {
return fmt.Errorf("invalid receipt leaf node cid")
}
rctModel := &models.ReceiptModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
TxID: txID,
Contract: contract,
ContractHash: contractHash,
LeafCID: args.rctLeafNodeCIDs[i].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
LogRoot: args.rctNodes[i].LogRoot.String(),
CID: args.rctNodes[i].Cid().String(),
}
if len(receipt.PostState) == 0 {
rctModel.PostStatus = receipt.Status
} else {
rctModel.PostState = common.Bytes2Hex(receipt.PostState)
rctModel.PostState = common.BytesToHash(receipt.PostState).String()
}
sdi.fileWriter.upsertReceiptCID(rctModel)
// index logs
logDataSet := make([]*models.LogsModel, len(receipt.Logs))
for idx, l := range receipt.Logs {
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), args.logNodes[i][idx])
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
topicSet[ti] = topic.Hex()
}
if !args.logLeafNodeCIDs[i][idx].Defined() {
return fmt.Errorf("invalid log cid")
}
logDataSet[idx] = &models.LogsModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
ReceiptID: txID,
Address: l.Address.String(),
Index: int64(l.Index),
Data: l.Data,
LeafCID: args.logLeafNodeCIDs[i][idx].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
CID: args.logNodes[i][idx].Cid().String(),
Topic0: topicSet[0],
Topic1: topicSet[1],
Topic2: topicSet[2],
@ -416,114 +381,73 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
sdi.fileWriter.upsertLogCID(logDataSet)
}
// publish trie nodes, these aren't indexed directly
for i, n := range args.txTrieNodes {
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), n)
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), args.rctTrieNodes[i])
}
return nil
}
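
For context on the PostState/PostStatus branch above: pre-Byzantium receipts carry a 32-byte post-state root while later receipts carry a status code, so only one of the two model fields is populated. A minimal helper-style sketch (the helper name is ours):

package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/statediff/indexer/models"
)

// setPostFields mirrors the branch in processReceiptsAndTxs: receipts with an
// empty PostState (Byzantium and later) record the status code; earlier
// receipts record the post-state root as a hex string.
func setPostFields(rctModel *models.ReceiptModel, receipt *types.Receipt) {
    if len(receipt.PostState) == 0 {
        rctModel.PostStatus = receipt.Status
    } else {
        rctModel.PostState = common.BytesToHash(receipt.PostState).String()
    }
}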
// PushStateNode writes IPLD insert SQL stmts for a state diff node object (including any child storage nodes) to a file
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateLeafNode, headerID string) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("file: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
}
// publish the state node
var stateModel models.StateNodeModel
if stateNode.NodeType == sdtypes.Removed {
if stateNode.Removed {
if atomic.LoadUint32(sdi.removedCacheFlag) == 0 {
atomic.StoreUint32(sdi.removedCacheFlag, 1)
sdi.fileWriter.upsertIPLDDirect(tx.BlockNumber, shared.RemovedNodeMhKey, []byte{})
sdi.fileWriter.upsertIPLDDirect(tx.BlockNumber, shared.RemovedNodeStateCID, []byte{})
}
stateModel = models.StateNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
CID: shared.RemovedNodeStateCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: stateNode.NodeType.Int(),
Removed: true,
}
} else {
stateCIDStr, stateMhKey, err := sdi.fileWriter.upsertIPLDRaw(tx.BlockNumber, ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
}
stateModel = models.StateNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
MhKey: stateMhKey,
NodeType: stateNode.NodeType.Int(),
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
CID: stateNode.AccountWrapper.CID,
Removed: false,
Balance: stateNode.AccountWrapper.Account.Balance.String(),
Nonce: stateNode.AccountWrapper.Account.Nonce,
CodeHash: common.BytesToHash(stateNode.AccountWrapper.Account.CodeHash).String(),
StorageRoot: stateNode.AccountWrapper.Account.Root.String(),
}
}
// index the state node
sdi.fileWriter.upsertStateCID(stateModel)
// if we have a leaf, decode and index the account data
if stateNode.NodeType == sdtypes.Leaf {
var i []interface{}
if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
}
if len(i) != 2 {
return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
}
var account types.StateAccount
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
accountModel := models.StateAccountModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
}
sdi.fileWriter.upsertStateAccount(accountModel)
}
// if there are any storage nodes associated with this node, publish and index them
for _, storageNode := range stateNode.StorageNodes {
if storageNode.NodeType == sdtypes.Removed {
for _, storageNode := range stateNode.StorageDiff {
if storageNode.Removed {
if atomic.LoadUint32(sdi.removedCacheFlag) == 0 {
atomic.StoreUint32(sdi.removedCacheFlag, 1)
sdi.fileWriter.upsertIPLDDirect(tx.BlockNumber, shared.RemovedNodeMhKey, []byte{})
sdi.fileWriter.upsertIPLDDirect(tx.BlockNumber, shared.RemovedNodeStorageCID, []byte{})
}
storageModel := models.StorageNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: shared.RemovedNodeStorageCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: storageNode.NodeType.Int(),
Removed: true,
Value: []byte{},
}
sdi.fileWriter.upsertStorageCID(storageModel)
continue
}
storageCIDStr, storageMhKey, err := sdi.fileWriter.upsertIPLDRaw(tx.BlockNumber, ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
}
storageModel := models.StorageNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: storageCIDStr,
MhKey: storageMhKey,
NodeType: storageNode.NodeType.Int(),
CID: storageNode.CID,
Removed: false,
Value: storageNode.Value,
}
sdi.fileWriter.upsertStorageCID(storageModel)
}
@ -531,18 +455,13 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
return nil
}
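
For orientation, PushStateNode now consumes pre-built leaf nodes: each carries its leaf key, CID and decoded account, plus any storage-leaf diffs, rather than raw trie node values and paths. A hedged sketch of constructing such a node; the field paths match those referenced above, but the exact wrapper type names (AccountWrapper, StorageLeafNode) and the pointer-typed Account field are assumptions about the v5 sdtypes package:

package example

import (
    "math/big"

    "github.com/ethereum/go-ethereum/core/types"
    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

// exampleStateLeaf builds the kind of value PushStateNode expects after this
// change. Field paths mirror the indexer code; type names are assumptions.
func exampleStateLeaf(leafKey, storageLeafKey []byte, stateCID, storageCID string) sdtypes.StateLeafNode {
    account := &types.StateAccount{
        Nonce:   1,
        Balance: big.NewInt(0),
        // Root and CodeHash omitted for brevity
    }
    return sdtypes.StateLeafNode{
        Removed: false,
        AccountWrapper: sdtypes.AccountWrapper{ // assumed type name
            LeafKey: leafKey,
            CID:     stateCID,
            Account: account, // assumed to be *types.StateAccount
        },
        StorageDiff: []sdtypes.StorageLeafNode{{ // assumed type name
            Removed: false,
            LeafKey: storageLeafKey,
            CID:     storageCID,
            Value:   []byte{0x01},
        }},
    }
}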
// PushCodeAndCodeHash writes code and codehash pairs insert SQL stmts to a file
func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
// PushIPLD writes iplds to ipld.blocks
func (sdi *StateDiffIndexer) PushIPLD(batch interfaces.Batch, ipld sdtypes.IPLD) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("file: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
}
// codec doesn't matter since db key is multihash-based
mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
if err != nil {
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
}
sdi.fileWriter.upsertIPLDDirect(tx.BlockNumber, mhKey, codeAndCodeHash.Code)
sdi.fileWriter.upsertIPLDDirect(tx.BlockNumber, ipld.CID, ipld.Content)
return nil
}
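
PushIPLD replaces PushCodeAndCodeHash: instead of deriving a multihash key from a keccak code hash inside the indexer, callers now supply the CID and raw content directly. A hedged usage sketch (the helper and its parameters are ours, and it assumes the interfaces.StateDiffIndexer interface exposes PushIPLD after this change):

package example

import (
    "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

// indexContractCode pushes a contract-code IPLD through the new entry point;
// codeCID is expected to be the CID of the keccak256-hashed code payload.
func indexContractCode(sdi interfaces.StateDiffIndexer, batch interfaces.Batch, codeCID string, code []byte) error {
    return sdi.PushIPLD(batch, sdtypes.IPLD{
        CID:     codeCID,
        Content: code,
    })
}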

View File

@ -19,7 +19,7 @@ package file
import (
"math/big"
node "github.com/ipfs/go-ipld-format"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
@ -39,18 +39,15 @@ type FileWriter interface {
upsertHeaderCID(header models.HeaderModel)
upsertUncleCID(uncle models.UncleModel)
upsertTransactionCID(transaction models.TxModel)
upsertAccessListElement(accessListElement models.AccessListElementModel)
upsertReceiptCID(rct *models.ReceiptModel)
upsertLogCID(logs []*models.LogsModel)
upsertStateCID(stateNode models.StateNodeModel)
upsertStateAccount(stateAccount models.StateAccountModel)
upsertStorageCID(storageCID models.StorageNodeModel)
upsertIPLD(ipld models.IPLDModel)
// Methods to upsert IPLD in different ways
upsertIPLDDirect(blockNumber, key string, value []byte)
upsertIPLDNode(blockNumber string, i node.Node)
upsertIPLDRaw(blockNumber string, codec, mh uint64, raw []byte) (string, string, error)
upsertIPLDNode(blockNumber string, i ipld.IPLD)
// Methods to read and write watched addresses
loadWatchedAddresses() ([]common.Address, error)

View File

@ -24,9 +24,6 @@ import (
"math/big"
"os"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
pg_query "github.com/pganalyze/pg_query_go/v2"
"github.com/thoas/go-funk"
@ -140,35 +137,29 @@ const (
nodeInsert = "INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES " +
"('%s', '%s', '%s', '%s', %d);\n"
ipldInsert = "INSERT INTO public.blocks (block_number, key, data) VALUES ('%s', '%s', '\\x%x');\n"
ipldInsert = "INSERT INTO ipld.blocks (block_number, key, data) VALUES ('%s', '%s', '\\x%x');\n"
headerInsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, " +
"state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\\x%x', %d, '%s', %d, '%s');\n"
headerInsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, " +
"state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\\x%x', %d, '%s');\n"
uncleInsert = "INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', '%s');\n"
uncleInsert = "INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, index) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', %d);\n"
txInsert = "INSERT INTO eth.transaction_cids (block_number, header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, " +
"value) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %d, '%s', '\\x%x', %d, '%s');\n"
txInsert = "INSERT INTO eth.transaction_cids (block_number, header_id, tx_hash, cid, dst, src, index, tx_type, " +
"value) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %d, %d, '%s');\n"
alInsert = "INSERT INTO eth.access_list_elements (block_number, tx_id, index, address, storage_keys) VALUES " +
"('%s', '%s', %d, '%s', '%s');\n"
rctInsert = "INSERT INTO eth.receipt_cids (block_number, header_id, tx_id, cid, contract, post_state, " +
"post_status) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %d);\n"
rctInsert = "INSERT INTO eth.receipt_cids (block_number, header_id, tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, " +
"post_status, log_root) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %d, '%s');\n"
logInsert = "INSERT INTO eth.log_cids (block_number, header_id, cid, rct_id, address, index, topic0, topic1, topic2, " +
"topic3) VALUES ('%s', '%s', '%s', '%s', '%s', %d, '%s', '%s', '%s', '%s');\n"
logInsert = "INSERT INTO eth.log_cids (block_number, header_id, leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, " +
"topic3, log_data) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %d, '%s', '%s', '%s', '%s', '\\x%x');\n"
stateInsert = "INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, removed, diff, " +
"balance, nonce, code_hash, storage_root) VALUES ('%s', '%s', '%s', '%s', %t, %t, '%s', %d, '%s', '%s');\n"
stateInsert = "INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) " +
"VALUES ('%s', '%s', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
accountInsert = "INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) " +
"VALUES ('%s', '%s', '\\x%x', '%s', %d, '\\x%x', '%s');\n"
storageInsert = "INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, " +
"node_type, diff, mh_key) VALUES ('%s', '%s', '\\x%x', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
storageInsert = "INSERT INTO eth.storage_cids (block_number, header_id, state_leaf_key, storage_leaf_key, cid, " +
"removed, diff, val) VALUES ('%s', '%s', '%s', '%s', '%s', %t, %t, '\\x%x');\n"
)
func (sqw *SQLWriter) upsertNode(node nodeinfo.Info) {
@ -187,88 +178,59 @@ func (sqw *SQLWriter) upsertIPLDDirect(blockNumber, key string, value []byte) {
})
}
func (sqw *SQLWriter) upsertIPLDNode(blockNumber string, i node.Node) {
func (sqw *SQLWriter) upsertIPLDNode(blockNumber string, i ipld.IPLD) {
sqw.upsertIPLD(models.IPLDModel{
BlockNumber: blockNumber,
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Key: i.Cid().String(),
Data: i.RawData(),
})
}
func (sqw *SQLWriter) upsertIPLDRaw(blockNumber string, codec, mh uint64, raw []byte) (string, string, error) {
c, err := ipld.RawdataToCid(codec, raw, mh)
if err != nil {
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
sqw.upsertIPLD(models.IPLDModel{
BlockNumber: blockNumber,
Key: prefixedKey,
Data: raw,
})
return c.String(), prefixedKey, err
}
func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) {
stmt := fmt.Sprintf(headerInsert, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase)
header.TotalDifficulty, formatPostgresStringArray(header.NodeIDs), header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UnclesHash, header.Bloom, header.Timestamp, header.Coinbase)
sqw.stmts <- []byte(stmt)
indexerMetrics.blocks.Inc(1)
}
func (sqw *SQLWriter) upsertUncleCID(uncle models.UncleModel) {
sqw.stmts <- []byte(fmt.Sprintf(uncleInsert, uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
uncle.Reward, uncle.MhKey))
uncle.Reward, uncle.Index))
}
func (sqw *SQLWriter) upsertTransactionCID(transaction models.TxModel) {
sqw.stmts <- []byte(fmt.Sprintf(txInsert, transaction.BlockNumber, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst,
transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value))
transaction.Src, transaction.Index, transaction.Type, transaction.Value))
indexerMetrics.transactions.Inc(1)
}
func (sqw *SQLWriter) upsertAccessListElement(accessListElement models.AccessListElementModel) {
sqw.stmts <- []byte(fmt.Sprintf(alInsert, accessListElement.BlockNumber, accessListElement.TxID, accessListElement.Index, accessListElement.Address,
formatPostgresStringArray(accessListElement.StorageKeys)))
indexerMetrics.accessListEntries.Inc(1)
}
func (sqw *SQLWriter) upsertReceiptCID(rct *models.ReceiptModel) {
sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.BlockNumber, rct.HeaderID, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey,
rct.PostState, rct.PostStatus, rct.LogRoot))
sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.BlockNumber, rct.HeaderID, rct.TxID, rct.CID, rct.Contract,
rct.PostState, rct.PostStatus))
indexerMetrics.receipts.Inc(1)
}
func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) {
for _, l := range logs {
sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.BlockNumber, l.HeaderID, l.LeafCID, l.LeafMhKey, l.ReceiptID, l.Address, l.Index, l.Topic0,
l.Topic1, l.Topic2, l.Topic3, l.Data))
sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.BlockNumber, l.HeaderID, l.CID, l.ReceiptID, l.Address, l.Index, l.Topic0,
l.Topic1, l.Topic2, l.Topic3))
indexerMetrics.logs.Inc(1)
}
}
func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) {
var stateKey string
if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey
balance := stateNode.Balance
if stateNode.Removed {
balance = "0"
}
sqw.stmts <- []byte(fmt.Sprintf(stateInsert, stateNode.BlockNumber, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path,
stateNode.NodeType, true, stateNode.MhKey))
}
func (sqw *SQLWriter) upsertStateAccount(stateAccount models.StateAccountModel) {
sqw.stmts <- []byte(fmt.Sprintf(accountInsert, stateAccount.BlockNumber, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot))
sqw.stmts <- []byte(fmt.Sprintf(stateInsert, stateNode.BlockNumber, stateNode.HeaderID, stateNode.StateKey, stateNode.CID,
stateNode.Removed, true, balance, stateNode.Nonce, stateNode.CodeHash, stateNode.StorageRoot))
}
func (sqw *SQLWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
var storageKey string
if storageCID.StorageKey != nullHash.String() {
storageKey = storageCID.StorageKey
}
sqw.stmts <- []byte(fmt.Sprintf(storageInsert, storageCID.BlockNumber, storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID,
storageCID.Path, storageCID.NodeType, true, storageCID.MhKey))
sqw.stmts <- []byte(fmt.Sprintf(storageInsert, storageCID.BlockNumber, storageCID.HeaderID, storageCID.StateKey, storageCID.StorageKey, storageCID.CID,
storageCID.Removed, true, storageCID.Value))
}
// LoadWatchedAddresses loads watched addresses from a file

View File

@ -1,186 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
var TableIPLDBlock = Table{
`public.blocks`,
[]column{
{name: "block_number", dbType: bigint},
{name: "key", dbType: text},
{name: "data", dbType: bytea},
},
}
var TableNodeInfo = Table{
Name: `public.nodes`,
Columns: []column{
{name: "genesis_block", dbType: varchar},
{name: "network_id", dbType: varchar},
{name: "node_id", dbType: varchar},
{name: "client_name", dbType: varchar},
{name: "chain_id", dbType: integer},
},
}
var TableHeader = Table{
"eth.header_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "block_hash", dbType: varchar},
{name: "parent_hash", dbType: varchar},
{name: "cid", dbType: text},
{name: "td", dbType: numeric},
{name: "node_id", dbType: varchar},
{name: "reward", dbType: numeric},
{name: "state_root", dbType: varchar},
{name: "tx_root", dbType: varchar},
{name: "receipt_root", dbType: varchar},
{name: "uncle_root", dbType: varchar},
{name: "bloom", dbType: bytea},
{name: "timestamp", dbType: numeric},
{name: "mh_key", dbType: text},
{name: "times_validated", dbType: integer},
{name: "coinbase", dbType: varchar},
},
}
var TableStateNode = Table{
"eth.state_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "header_id", dbType: varchar},
{name: "state_leaf_key", dbType: varchar},
{name: "cid", dbType: text},
{name: "state_path", dbType: bytea},
{name: "node_type", dbType: integer},
{name: "diff", dbType: boolean},
{name: "mh_key", dbType: text},
},
}
var TableStorageNode = Table{
"eth.storage_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "header_id", dbType: varchar},
{name: "state_path", dbType: bytea},
{name: "storage_leaf_key", dbType: varchar},
{name: "cid", dbType: text},
{name: "storage_path", dbType: bytea},
{name: "node_type", dbType: integer},
{name: "diff", dbType: boolean},
{name: "mh_key", dbType: text},
},
}
var TableUncle = Table{
"eth.uncle_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "block_hash", dbType: varchar},
{name: "header_id", dbType: varchar},
{name: "parent_hash", dbType: varchar},
{name: "cid", dbType: text},
{name: "reward", dbType: numeric},
{name: "mh_key", dbType: text},
},
}
var TableTransaction = Table{
"eth.transaction_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "header_id", dbType: varchar},
{name: "tx_hash", dbType: varchar},
{name: "cid", dbType: text},
{name: "dst", dbType: varchar},
{name: "src", dbType: varchar},
{name: "index", dbType: integer},
{name: "mh_key", dbType: text},
{name: "tx_data", dbType: bytea},
{name: "tx_type", dbType: integer},
{name: "value", dbType: numeric},
},
}
var TableAccessListElement = Table{
"eth.access_list_elements",
[]column{
{name: "block_number", dbType: bigint},
{name: "tx_id", dbType: varchar},
{name: "index", dbType: integer},
{name: "address", dbType: varchar},
{name: "storage_keys", dbType: varchar, isArray: true},
},
}
var TableReceipt = Table{
"eth.receipt_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "header_id", dbType: varchar},
{name: "tx_id", dbType: varchar},
{name: "leaf_cid", dbType: text},
{name: "contract", dbType: varchar},
{name: "contract_hash", dbType: varchar},
{name: "leaf_mh_key", dbType: text},
{name: "post_state", dbType: varchar},
{name: "post_status", dbType: integer},
{name: "log_root", dbType: varchar},
},
}
var TableLog = Table{
"eth.log_cids",
[]column{
{name: "block_number", dbType: bigint},
{name: "header_id", dbType: varchar},
{name: "leaf_cid", dbType: text},
{name: "leaf_mh_key", dbType: text},
{name: "rct_id", dbType: varchar},
{name: "address", dbType: varchar},
{name: "index", dbType: integer},
{name: "topic0", dbType: varchar},
{name: "topic1", dbType: varchar},
{name: "topic2", dbType: varchar},
{name: "topic3", dbType: varchar},
{name: "log_data", dbType: bytea},
},
}
var TableStateAccount = Table{
"eth.state_accounts",
[]column{
{name: "block_number", dbType: bigint},
{name: "header_id", dbType: varchar},
{name: "state_path", dbType: bytea},
{name: "balance", dbType: numeric},
{name: "nonce", dbType: bigint},
{name: "code_hash", dbType: bytea},
{name: "storage_root", dbType: varchar},
},
}
var TableWatchedAddresses = Table{
"eth_meta.watched_addresses",
[]column{
{name: "address", dbType: varchar},
{name: "created_at", dbType: bigint},
{name: "watched_at", dbType: bigint},
{name: "last_filled_at", dbType: bigint},
},
}

View File

@ -1,104 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"strings"
"github.com/thoas/go-funk"
)
type colType int
const (
integer colType = iota
boolean
bigint
numeric
bytea
varchar
text
)
type column struct {
name string
dbType colType
isArray bool
}
type Table struct {
Name string
Columns []column
}
func (tbl *Table) ToCsvRow(args ...interface{}) []string {
var row []string
for i, col := range tbl.Columns {
value := col.dbType.formatter()(args[i])
if col.isArray {
valueList := funk.Map(args[i], col.dbType.formatter()).([]string)
value = fmt.Sprintf("{%s}", strings.Join(valueList, ","))
}
row = append(row, value)
}
return row
}
func (tbl *Table) VarcharColumns() []string {
columns := funk.Filter(tbl.Columns, func(col column) bool {
return col.dbType == varchar
}).([]column)
columnNames := funk.Map(columns, func(col column) string {
return col.name
}).([]string)
return columnNames
}
type colfmt = func(interface{}) string
func sprintf(f string) colfmt {
return func(x interface{}) string { return fmt.Sprintf(f, x) }
}
func (typ colType) formatter() colfmt {
switch typ {
case integer:
return sprintf("%d")
case boolean:
return func(x interface{}) string {
if x.(bool) {
return "t"
}
return "f"
}
case bigint:
return sprintf("%s")
case numeric:
return sprintf("%s")
case bytea:
return sprintf(`\x%x`)
case varchar:
return sprintf("%s")
case text:
return sprintf("%s")
}
panic("unreachable")
}
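
The removed helpers above are what rendered Go values into CSV cells; the same table and row machinery now lives in the schema package referenced elsewhere in this diff. The formatting rules boil down to: booleans become t/f, byte slices become \x-prefixed hex, and everything else is printed as-is. A small standalone illustration:

package example

import "fmt"

// formatBool and formatBytea reproduce the boolean and bytea formatters above.
func formatBool(b bool) string {
    if b {
        return "t"
    }
    return "f"
}

func formatBytea(v []byte) string {
    return fmt.Sprintf(`\x%x`, v)
}

// formatBool(true) == "t"; formatBytea([]byte{0xde, 0xad}) == `\xdead`.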

View File

@ -21,9 +21,6 @@ import (
"sync"
"sync/atomic"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
"github.com/lib/pq"
"github.com/ethereum/go-ethereum/log"
@ -59,7 +56,7 @@ func (tx *BatchTx) flush() error {
_, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.BlockNumbers), pq.Array(tx.ipldCache.Keys),
pq.Array(tx.ipldCache.Values))
if err != nil {
log.Debug(insertError{"public.blocks", err, tx.stm,
log.Debug(insertError{"ipld.blocks", err, tx.stm,
struct {
blockNumbers []string
keys []string
@ -69,7 +66,7 @@ func (tx *BatchTx) flush() error {
tx.ipldCache.Keys,
tx.ipldCache.Values,
}}.Error())
return insertError{"public.blocks", err, tx.stm, "too many arguments; use debug mode for full list"}
return insertError{"ipld.blocks", err, tx.stm, "too many arguments; use debug mode for full list"}
}
tx.ipldCache = models.IPLDBatch{}
return nil
@ -100,30 +97,15 @@ func (tx *BatchTx) cacheDirect(key string, value []byte) {
}
}
func (tx *BatchTx) cacheIPLD(i node.Node) {
func (tx *BatchTx) cacheIPLD(i ipld.IPLD) {
tx.cacheWg.Add(1)
tx.iplds <- models.IPLDModel{
BlockNumber: tx.BlockNumber,
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Key: i.Cid().String(),
Data: i.RawData(),
}
}
func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
c, err := ipld.RawdataToCid(codec, raw, mh)
if err != nil {
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
tx.cacheWg.Add(1)
tx.iplds <- models.IPLDModel{
BlockNumber: tx.BlockNumber,
Key: prefixedKey,
Data: raw,
}
return c.String(), prefixedKey, err
}
func (tx *BatchTx) cacheRemoved(key string, value []byte) {
if atomic.LoadUint32(tx.removedCacheFlag) == 0 {
atomic.StoreUint32(tx.removedCacheFlag, 1)

View File

@ -20,13 +20,12 @@
package sql
import (
"bytes"
"context"
"fmt"
"math/big"
"time"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common"
@ -37,7 +36,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
@ -100,16 +99,13 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
}
// Generate the block iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
headerNode, txNodes, rctNodes, logNodes, err := ipld.FromBlockAndReceipts(block, receipts)
if err != nil {
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
}
if len(txTrieNodes) != len(rctTrieNodes) {
return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
if len(txNodes) != len(rctNodes) {
return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d)", len(txNodes), len(rctNodes))
}
// Calculate reward
@ -198,7 +194,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
t = time.Now()
// Publish and index uncles
err = sdi.processUncles(blockTx, headerID, block.Number(), uncleNodes)
err = sdi.processUncles(blockTx, headerID, block.Number(), block.UncleHash(), block.Uncles())
if err != nil {
return nil, err
}
@ -213,12 +209,8 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
receipts: receipts,
txs: transactions,
rctNodes: rctNodes,
rctTrieNodes: rctTrieNodes,
txNodes: txNodes,
txTrieNodes: txTrieNodes,
logTrieNodes: logTrieNodes,
logLeafNodeCIDs: logLeafNodeCIDs,
rctLeafNodeCIDs: rctLeafNodeCIDs,
logNodes: logNodes,
})
if err != nil {
return nil, err
@ -233,7 +225,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (string, error) {
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode ipld.IPLD, reward, td *big.Int) (string, error) {
tx.cacheIPLD(headerNode)
var baseFee *string
@ -245,7 +237,6 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
// index header
return headerID, sdi.dbWriter.upsertHeaderCID(tx.dbtx, models.HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: header.ParentHash.String(),
BlockNumber: header.Number.String(),
BlockHash: headerID,
@ -255,32 +246,44 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
StateRoot: header.Root.String(),
RctRoot: header.ReceiptHash.String(),
TxRoot: header.TxHash.String(),
UncleRoot: header.UncleHash.String(),
UnclesHash: header.UncleHash.String(),
Timestamp: header.Time,
Coinbase: header.Coinbase.String(),
})
}
// processUncles publishes and indexes uncle IPLDs in Postgres
func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, uncleNodes []*ipld2.EthHeader) error {
func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, unclesHash common.Hash, uncles []*types.Header) error {
// publish and index uncles
for _, uncleNode := range uncleNodes {
tx.cacheIPLD(uncleNode)
uncleEncoding, err := rlp.EncodeToBytes(uncles)
if err != nil {
return err
}
preparedHash := crypto.Keccak256Hash(uncleEncoding)
if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
}
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
if err != nil {
return err
}
tx.cacheDirect(unclesCID.String(), uncleEncoding)
for i, uncle := range uncles {
var uncleReward *big.Int
// in PoA networks uncle reward is 0
if sdi.chainConfig.Clique != nil {
uncleReward = big.NewInt(0)
} else {
uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncleNode.Number.Uint64())
uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncle.Number.Uint64())
}
uncle := models.UncleModel{
BlockNumber: blockNumber.String(),
HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
CID: unclesCID.String(),
ParentHash: uncle.ParentHash.String(),
BlockHash: uncle.Hash().String(),
Reward: uncleReward.String(),
Index: int64(i),
}
if err := sdi.dbWriter.upsertUncleCID(tx.dbtx, uncle); err != nil {
return err
@ -295,13 +298,9 @@ type processArgs struct {
blockNumber *big.Int
receipts types.Receipts
txs types.Transactions
rctNodes []*ipld2.EthReceipt
rctTrieNodes []*ipld2.EthRctTrie
txNodes []*ipld2.EthTx
txTrieNodes []*ipld2.EthTxTrie
logTrieNodes [][]node.Node
logLeafNodeCIDs [][]cid.Cid
rctLeafNodeCIDs []cid.Cid
rctNodes []*ipld.EthReceipt
txNodes []*ipld.EthTx
logNodes [][]*ipld.EthLog
}
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
@ -309,11 +308,9 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
// Process receipts and txs
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
for i, receipt := range args.receipts {
for _, logTrieNode := range args.logTrieNodes[i] {
tx.cacheIPLD(logTrieNode)
}
txNode := args.txNodes[i]
tx.cacheIPLD(txNode)
tx.cacheIPLD(args.rctNodes[i])
// index tx
trx := args.txs[i]
@ -336,9 +333,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
Src: shared.HandleZeroAddr(from),
TxHash: txID,
Index: int64(i),
Data: trx.Data(),
CID: txNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
Type: trx.Type(),
Value: val,
}
@ -346,50 +341,20 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return err
}
// index access list if this is one
for j, accessListElement := range trx.AccessList() {
storageKeys := make([]string, len(accessListElement.StorageKeys))
for k, storageKey := range accessListElement.StorageKeys {
storageKeys[k] = storageKey.Hex()
}
accessListElementModel := models.AccessListElementModel{
BlockNumber: args.blockNumber.String(),
TxID: txID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
StorageKeys: storageKeys,
}
if err := sdi.dbWriter.upsertAccessListElement(tx.dbtx, accessListElementModel); err != nil {
return err
}
}
// this is the contract address if this receipt is for a contract creation tx
contract := shared.HandleZeroAddr(receipt.ContractAddress)
var contractHash string
if contract != "" {
contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
// index receipt
if !args.rctLeafNodeCIDs[i].Defined() {
return fmt.Errorf("invalid receipt leaf node cid")
}
rctModel := &models.ReceiptModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
TxID: txID,
Contract: contract,
ContractHash: contractHash,
LeafCID: args.rctLeafNodeCIDs[i].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
LogRoot: args.rctNodes[i].LogRoot.String(),
CID: args.rctNodes[i].Cid().String(),
}
if len(receipt.PostState) == 0 {
rctModel.PostStatus = receipt.Status
} else {
rctModel.PostState = common.Bytes2Hex(receipt.PostState)
rctModel.PostState = common.BytesToHash(receipt.PostState).String()
}
if err := sdi.dbWriter.upsertReceiptCID(tx.dbtx, rctModel); err != nil {
@ -399,24 +364,19 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
// index logs
logDataSet := make([]*models.LogsModel, len(receipt.Logs))
for idx, l := range receipt.Logs {
tx.cacheIPLD(args.logNodes[i][idx])
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
topicSet[ti] = topic.Hex()
}
if !args.logLeafNodeCIDs[i][idx].Defined() {
return fmt.Errorf("invalid log cid")
}
logDataSet[idx] = &models.LogsModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
ReceiptID: txID,
Address: l.Address.String(),
Index: int64(l.Index),
Data: l.Data,
LeafCID: args.logLeafNodeCIDs[i][idx].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
CID: args.logNodes[i][idx].Cid().String(),
Topic0: topicSet[0],
Topic1: topicSet[1],
Topic2: topicSet[2],
@ -429,47 +389,37 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
}
}
// publish trie nodes, these aren't indexed directly
for i, n := range args.txTrieNodes {
tx.cacheIPLD(n)
tx.cacheIPLD(args.rctTrieNodes[i])
}
return nil
}
// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD SQL database
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateLeafNode, headerID string) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("sql: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
}
// publish the state node
var stateModel models.StateNodeModel
if stateNode.NodeType == sdtypes.Removed {
tx.cacheRemoved(shared.RemovedNodeMhKey, []byte{})
if stateNode.Removed {
tx.cacheRemoved(shared.RemovedNodeStateCID, []byte{})
stateModel = models.StateNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
CID: shared.RemovedNodeStateCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: stateNode.NodeType.Int(),
Removed: true,
}
} else {
stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
}
stateModel = models.StateNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
MhKey: stateMhKey,
NodeType: stateNode.NodeType.Int(),
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
CID: stateNode.AccountWrapper.CID,
Removed: false,
Balance: stateNode.AccountWrapper.Account.Balance.String(),
Nonce: stateNode.AccountWrapper.Account.Nonce,
CodeHash: common.BytesToHash(stateNode.AccountWrapper.Account.CodeHash).String(),
StorageRoot: stateNode.AccountWrapper.Account.Root.String(),
}
}
@ -478,65 +428,32 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
return err
}
// if we have a leaf, decode and index the account data
if stateNode.NodeType == sdtypes.Leaf {
var i []interface{}
if err := rlp.DecodeBytes(stateNode.NodeValue, &i); err != nil {
return fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
}
if len(i) != 2 {
return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
}
var account types.StateAccount
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
accountModel := models.StateAccountModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
}
if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel); err != nil {
return err
}
}
// if there are any storage nodes associated with this node, publish and index them
for _, storageNode := range stateNode.StorageNodes {
if storageNode.NodeType == sdtypes.Removed {
tx.cacheRemoved(shared.RemovedNodeMhKey, []byte{})
for _, storageNode := range stateNode.StorageDiff {
if storageNode.Removed {
tx.cacheRemoved(shared.RemovedNodeStorageCID, []byte{})
storageModel := models.StorageNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: shared.RemovedNodeStorageCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: storageNode.NodeType.Int(),
Removed: true,
Value: []byte{},
}
if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
return err
}
continue
}
storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
}
storageModel := models.StorageNodeModel{
BlockNumber: tx.BlockNumber,
HeaderID: headerID,
StatePath: stateNode.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateNode.AccountWrapper.LeafKey).String(),
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: storageCIDStr,
MhKey: storageMhKey,
NodeType: storageNode.NodeType.Int(),
CID: storageNode.CID,
Removed: false,
Value: storageNode.Value,
}
if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
return err
@ -546,18 +463,13 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
return nil
}
// PushCodeAndCodeHash publishes code and codehash pairs to the ipld sql
func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
// PushIPLD publishes iplds to ipld.blocks
func (sdi *StateDiffIndexer) PushIPLD(batch interfaces.Batch, ipld sdtypes.IPLD) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("sql: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
}
// codec doesn't matter since db key is multihash-based
mhKey, err := shared.MultihashKeyFromKeccak256(codeAndCodeHash.Hash)
if err != nil {
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
}
tx.cacheDirect(mhKey, codeAndCodeHash.Code)
tx.cacheDirect(ipld.CID, ipld.Content)
return nil
}

View File

@ -46,15 +46,12 @@ type Statements interface {
InsertHeaderStm() string
InsertUncleStm() string
InsertTxStm() string
InsertAccessListElementStm() string
InsertRctStm() string
InsertLogStm() string
InsertStateStm() string
InsertAccountStm() string
InsertStorageStm() string
InsertIPLDStm() string
InsertIPLDsStm() string
InsertKnownGapsStm() string
}
// Tx interface to accommodate different concrete SQL transaction types

View File

@ -18,6 +18,8 @@ package postgres
import (
"fmt"
"os"
"strconv"
"strings"
"time"
@ -33,6 +35,15 @@ const (
Unknown DriverType = "Unknown"
)
// Env variables
const (
DATABASE_NAME = "DATABASE_NAME"
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
DATABASE_PORT = "DATABASE_PORT"
DATABASE_USER = "DATABASE_USER"
DATABASE_PASSWORD = "DATABASE_PASSWORD"
)
// ResolveDriverType resolves a DriverType from a provided string
func ResolveDriverType(str string) (DriverType, error) {
switch strings.ToLower(str) {
@ -49,7 +60,7 @@ func ResolveDriverType(str string) (DriverType, error) {
var DefaultConfig = Config{
Hostname: "localhost",
Port: 8077,
DatabaseName: "vulcanize_testing",
DatabaseName: "cerc_testing",
Username: "vdbm",
Password: "password",
}
@ -100,3 +111,26 @@ func (c Config) DbConnectionString() string {
}
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", c.Hostname, c.Port, c.DatabaseName)
}
func (c Config) WithEnv() (Config, error) {
if val := os.Getenv(DATABASE_NAME); val != "" {
c.DatabaseName = val
}
if val := os.Getenv(DATABASE_HOSTNAME); val != "" {
c.Hostname = val
}
if val := os.Getenv(DATABASE_PORT); val != "" {
port, err := strconv.Atoi(val)
if err != nil {
return c, err
}
c.Port = port
}
if val := os.Getenv(DATABASE_USER); val != "" {
c.Username = val
}
if val := os.Getenv(DATABASE_PASSWORD); val != "" {
c.Password = val
}
return c, nil
}
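
WithEnv lets deployments override the Postgres defaults through the DATABASE_* environment variables without extra flags. A minimal caller-side sketch (the helper name is ours; the import path follows the package layout visible in this diff):

package example

import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"

// resolveDBConfig starts from the package defaults and applies any
// DATABASE_NAME, DATABASE_HOSTNAME, DATABASE_PORT, DATABASE_USER or
// DATABASE_PASSWORD values present in the environment.
func resolveDBConfig() (postgres.Config, error) {
    conf, err := postgres.DefaultConfig.WithEnv()
    if err != nil {
        return postgres.Config{}, err
    }
    // conf.DbConnectionString() can then be handed to the SQL driver.
    return conf, nil
}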

View File

@ -16,7 +16,10 @@
package postgres
import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
import (
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
)
var _ sql.Database = &DB{}
@ -39,85 +42,45 @@ type DB struct {
// InsertHeaderStm satisfies the sql.Statements interface
// Stm == Statement
func (db *DB) InsertHeaderStm() string {
if db.upsert {
return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)`
}
return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
ON CONFLICT (block_hash, block_number) DO NOTHING`
return schema.TableHeader.ToInsertStatement(db.upsert)
}
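
The hand-written INSERT literals are replaced here by schema.Table*.ToInsertStatement(db.upsert), whose output is not shown in this diff; presumably it mirrors the old literals, with the upsert flag selecting between an ON CONFLICT ... DO UPDATE and a DO NOTHING clause. A hedged expectation for the non-upsert header statement, based on the v5 column list in the new headerInsert constant and the conflict target of the old statement:

// Hypothetical expected output of schema.TableHeader.ToInsertStatement(false);
// this is an assumption, not a quote of the generated SQL.
const expectedHeaderInsert = `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) ON CONFLICT (block_hash, block_number) DO NOTHING`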
// InsertUncleStm satisfies the sql.Statements interface
func (db *DB) InsertUncleStm() string {
return `INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (block_hash, block_number) DO NOTHING`
return schema.TableUncle.ToInsertStatement(db.upsert)
}
// InsertTxStm satisfies the sql.Statements interface
func (db *DB) InsertTxStm() string {
return `INSERT INTO eth.transaction_cids (block_number, header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
ON CONFLICT (tx_hash, header_id, block_number) DO NOTHING`
}
// InsertAccessListElementStm satisfies the sql.Statements interface
func (db *DB) InsertAccessListElementStm() string {
return `INSERT INTO eth.access_list_elements (block_number, tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (tx_id, index, block_number) DO NOTHING`
return schema.TableTransaction.ToInsertStatement(db.upsert)
}
// InsertRctStm satisfies the sql.Statements interface
func (db *DB) InsertRctStm() string {
return `INSERT INTO eth.receipt_cids (block_number, header_id, tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (tx_id, header_id, block_number) DO NOTHING`
return schema.TableReceipt.ToInsertStatement(db.upsert)
}
// InsertLogStm satisfies the sql.Statements interface
func (db *DB) InsertLogStm() string {
return `INSERT INTO eth.log_cids (block_number, header_id, leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
ON CONFLICT (rct_id, index, header_id, block_number) DO NOTHING`
return schema.TableLog.ToInsertStatement(db.upsert)
}
// InsertStateStm satisfies the sql.Statements interface
func (db *DB) InsertStateStm() string {
if db.upsert {
return `INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (block_number, state_leaf_key, cid, node_type, diff, mh_key) = ($1, $3, $4, $6, $7, $8)`
}
return `INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (header_id, state_path, block_number) DO NOTHING`
}
// InsertAccountStm satisfies the sql.Statements interface
func (db *DB) InsertAccountStm() string {
return `INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path, block_number) DO NOTHING`
return schema.TableStateNode.ToInsertStatement(db.upsert)
}
// InsertStorageStm satisfies the sql.Statements interface
func (db *DB) InsertStorageStm() string {
if db.upsert {
return `INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (block_number, storage_leaf_key, cid, node_type, diff, mh_key) = ($1, $4, $5, $7, $8, $9)`
}
return `INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, state_path, storage_path, block_number) DO NOTHING`
return schema.TableStorageNode.ToInsertStatement(db.upsert)
}
// InsertIPLDStm satisfies the sql.Statements interface
func (db *DB) InsertIPLDStm() string {
return `INSERT INTO public.blocks (block_number, key, data) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING`
return schema.TableIPLDBlock.ToInsertStatement(db.upsert)
}
// InsertIPLDsStm satisfies the sql.Statements interface
func (db *DB) InsertIPLDsStm() string {
return `INSERT INTO public.blocks (block_number, key, data) VALUES (unnest($1::BIGINT[]), unnest($2::TEXT[]), unnest($3::BYTEA[])) ON CONFLICT DO NOTHING`
}
// InsertKnownGapsStm satisfies the sql.Statements interface
func (db *DB) InsertKnownGapsStm() string {
return `INSERT INTO eth_meta.known_gaps (starting_block_number, ending_block_number, checked_out, processing_key) VALUES ($1, $2, $3, $4)
ON CONFLICT (starting_block_number) DO UPDATE SET (ending_block_number, processing_key) = ($2, $4)
WHERE eth_meta.known_gaps.ending_block_number <= $2`
return `INSERT INTO ipld.blocks (block_number, key, data) VALUES (unnest($1::BIGINT[]), unnest($2::TEXT[]), unnest($3::BYTEA[])) ON CONFLICT DO NOTHING`
}
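
All of the hand-written insert statements above are replaced by SQL generated from the shared table definitions in statediff/indexer/shared/schema, with the upsert flag selecting between the conflict-ignoring and conflict-updating variants that the old strings spelled out by hand. A rough sketch of that usage, inferred from the calls above (the exact SQL text comes from the schema package and is not reproduced here):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
)

func main() {
	// false: plain insert that ignores conflicting rows (the DO NOTHING variant)
	fmt.Println(schema.TableHeader.ToInsertStatement(false))
	// true: upserting insert that overwrites the conflicting row (the DO UPDATE variant)
	fmt.Println(schema.TableHeader.ToInsertStatement(true))
}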

View File

@ -26,13 +26,13 @@ const (
)
func ErrDBConnectionFailed(connectErr error) error {
return formatError(DbConnectionFailedMsg, connectErr.Error())
return formatError(DbConnectionFailedMsg, connectErr)
}
func ErrUnableToSetNode(setErr error) error {
return formatError(SettingNodeFailedMsg, setErr.Error())
return formatError(SettingNodeFailedMsg, setErr)
}
func formatError(msg, err string) error {
return fmt.Errorf("%s: %s", msg, err)
func formatError(msg string, err error) error {
return fmt.Errorf("%s: %w", msg, err)
}
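
Because formatError now wraps with %w instead of flattening the cause to a string, callers can match the underlying error with the standard errors helpers. A self-contained illustration (the sentinel error here is only for the example):

package main

import (
	"errors"
	"fmt"
)

// Same shape as the helper above: the %w verb keeps the wrapped error reachable.
func formatError(msg string, err error) error {
	return fmt.Errorf("%s: %w", msg, err)
}

var errConnRefused = errors.New("connection refused")

func main() {
	wrapped := formatError("db connection failed", errConnRefused)
	fmt.Println(errors.Is(wrapped, errConnRefused)) // true with %w; false with the old "%s" formatting
}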

View File

@ -39,14 +39,19 @@ type PGXDriver struct {
nodeID string
}
// NewPGXDriver returns a new pgx driver
// it initializes the connection pool and creates the node info table
func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
// ConnectPGX initializes and returns a PGX connection pool
func ConnectPGX(ctx context.Context, config Config) (*pgxpool.Pool, error) {
pgConf, err := MakeConfig(config)
if err != nil {
return nil, err
}
dbPool, err := pgxpool.ConnectConfig(ctx, pgConf)
return pgxpool.ConnectConfig(ctx, pgConf)
}
// NewPGXDriver returns a new pgx driver
// it initializes the connection pool and creates the node info table
func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
dbPool, err := ConnectPGX(ctx, config)
if err != nil {
return nil, ErrDBConnectionFailed(err)
}
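
Extracting ConnectPGX from NewPGXDriver lets callers obtain a bare pgxpool.Pool, for example for health checks or read-only queries, without registering a node entry. A usage sketch under that assumption:

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
)

func main() {
	ctx := context.Background()
	pool, err := postgres.ConnectPGX(ctx, postgres.DefaultConfig)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// Simple liveness probe against the pool.
	var one int
	if err := pool.QueryRow(ctx, "SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
}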

View File

@ -35,12 +35,11 @@ type SQLXDriver struct {
nodeID string
}
// NewSQLXDriver returns a new sqlx driver for Postgres
// it initializes the connection pool and creates the node info table
func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
// ConnectSQLX initializes and returns a SQLX connection pool for postgres
func ConnectSQLX(ctx context.Context, config Config) (*sqlx.DB, error) {
db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString())
if err != nil {
return &SQLXDriver{}, ErrDBConnectionFailed(err)
return nil, ErrDBConnectionFailed(err)
}
if config.MaxConns > 0 {
db.SetMaxOpenConns(config.MaxConns)
@ -49,9 +48,19 @@ func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDri
db.SetConnMaxLifetime(config.MaxConnLifetime)
}
db.SetMaxIdleConns(config.MaxIdle)
return db, nil
}
// NewSQLXDriver returns a new sqlx driver for Postgres
// it initializes the connection pool and creates the node info table
func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
db, err := ConnectSQLX(ctx, config)
if err != nil {
return nil, err
}
driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node}
if err := driver.createNode(); err != nil {
return &SQLXDriver{}, ErrUnableToSetNode(err)
return nil, err
}
return driver, nil
}
@ -59,8 +68,10 @@ func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDri
func (driver *SQLXDriver) createNode() error {
_, err := driver.db.Exec(
createNodeStm,
driver.nodeInfo.GenesisBlock, driver.nodeInfo.NetworkID,
driver.nodeInfo.ID, driver.nodeInfo.ClientName,
driver.nodeInfo.GenesisBlock,
driver.nodeInfo.NetworkID,
driver.nodeInfo.ID,
driver.nodeInfo.ClientName,
driver.nodeInfo.ChainID)
if err != nil {
return ErrUnableToSetNode(err)
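
ConnectSQLX mirrors ConnectPGX for the sqlx driver, and NewSQLXDriver now returns nil (rather than an empty driver value) together with the already-wrapped connection or node-setup error. The call pattern matches the pgx sketch above, e.g.:

	db, err := postgres.ConnectSQLX(ctx, postgres.DefaultConfig)
	if err != nil {
		log.Fatal(err) // already an ErrDBConnectionFailed-wrapped error
	}
	defer db.Close()

	var one int
	if err := db.GetContext(ctx, &one, "SELECT 1"); err != nil {
		log.Fatal(err)
	}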

View File

@ -19,12 +19,9 @@ package sql
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
)
"github.com/lib/pq"
var (
nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
"github.com/ethereum/go-ethereum/statediff/indexer/models"
)
// Writer handles processing and writing of indexed IPLD objects to Postgres
@ -45,15 +42,27 @@ func (w *Writer) Close() error {
}
/*
INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
ON CONFLICT (block_hash, block_number) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
ON CONFLICT (block_hash, block_number) DO NOTHING
*/
func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
nodeIDs := pq.StringArray([]string{w.db.NodeID()})
_, err := tx.Exec(w.db.Context(), w.db.InsertHeaderStm(),
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.db.NodeID(),
header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom,
header.Timestamp, header.MhKey, 1, header.Coinbase)
header.BlockNumber,
header.BlockHash,
header.ParentHash,
header.CID,
header.TotalDifficulty,
nodeIDs,
header.Reward,
header.StateRoot,
header.TxRoot,
header.RctRoot,
header.UnclesHash,
header.Bloom,
header.Timestamp,
header.Coinbase)
if err != nil {
return insertError{"eth.header_cids", err, w.db.InsertHeaderStm(), header}
}
@ -62,12 +71,18 @@ func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
}
/*
INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, index) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (block_hash, block_number) DO NOTHING
*/
func (w *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel) error {
_, err := tx.Exec(w.db.Context(), w.db.InsertUncleStm(),
uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
uncle.BlockNumber,
uncle.BlockHash,
uncle.HeaderID,
uncle.ParentHash,
uncle.CID,
uncle.Reward,
uncle.Index)
if err != nil {
return insertError{"eth.uncle_cids", err, w.db.InsertUncleStm(), uncle}
}
@ -75,13 +90,20 @@ func (w *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel) error {
}
/*
INSERT INTO eth.transaction_cids (block_number, header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
INSERT INTO eth.transaction_cids (block_number, header_id, tx_hash, cid, dst, src, index, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (tx_hash, header_id, block_number) DO NOTHING
*/
func (w *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel) error {
_, err := tx.Exec(w.db.Context(), w.db.InsertTxStm(),
transaction.BlockNumber, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src,
transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value)
transaction.BlockNumber,
transaction.HeaderID,
transaction.TxHash,
transaction.CID,
transaction.Dst,
transaction.Src,
transaction.Index,
transaction.Type,
transaction.Value)
if err != nil {
return insertError{"eth.transaction_cids", err, w.db.InsertTxStm(), transaction}
}
@ -90,28 +112,18 @@ func (w *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel) error {
}
/*
INSERT INTO eth.access_list_elements (block_number, tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (tx_id, index, block_number) DO NOTHING
*/
func (w *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessListElementModel) error {
_, err := tx.Exec(w.db.Context(), w.db.InsertAccessListElementStm(),
accessListElement.BlockNumber, accessListElement.TxID, accessListElement.Index, accessListElement.Address,
accessListElement.StorageKeys)
if err != nil {
return insertError{"eth.access_list_elements", err, w.db.InsertAccessListElementStm(), accessListElement}
}
indexerMetrics.accessListEntries.Inc(1)
return nil
}
/*
INSERT INTO eth.receipt_cids (block_number, header_id, tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
INSERT INTO eth.receipt_cids (block_number, header_id, tx_id, cid, contract, post_state, post_status) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (tx_id, header_id, block_number) DO NOTHING
*/
func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error {
_, err := tx.Exec(w.db.Context(), w.db.InsertRctStm(),
rct.BlockNumber, rct.HeaderID, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState,
rct.PostStatus, rct.LogRoot)
rct.BlockNumber,
rct.HeaderID,
rct.TxID,
rct.CID,
rct.Contract,
rct.PostState,
rct.PostStatus)
if err != nil {
return insertError{"eth.receipt_cids", err, w.db.InsertRctStm(), *rct}
}
@ -120,14 +132,22 @@ func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error {
}
/*
INSERT INTO eth.log_cids (block_number, header_id, leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
INSERT INTO eth.log_cids (block_number, header_id, cid, rct_id, address, index, topic0, topic1, topic2, topic3) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (rct_id, index, header_id, block_number) DO NOTHING
*/
func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
for _, log := range logs {
_, err := tx.Exec(w.db.Context(), w.db.InsertLogStm(),
log.BlockNumber, log.HeaderID, log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1,
log.Topic2, log.Topic3, log.Data)
log.BlockNumber,
log.HeaderID,
log.CID,
log.ReceiptID,
log.Address,
log.Index,
log.Topic0,
log.Topic1,
log.Topic2,
log.Topic3)
if err != nil {
return insertError{"eth.log_cids", err, w.db.InsertLogStm(), *log}
}
@ -137,17 +157,26 @@ func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
}
/*
INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (block_number, state_leaf_key, cid, node_type, diff, mh_key) = ($1 $3, $4, $6, $7, $8)
INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, diff, balance, nonce, code_hash, storage_root, removed) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (header_id, state_leaf_key, block_number) DO NOTHING
*/
func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error {
var stateKey string
if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey
balance := stateNode.Balance
if stateNode.Removed {
balance = "0"
}
_, err := tx.Exec(w.db.Context(), w.db.InsertStateStm(),
stateNode.BlockNumber, stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true,
stateNode.MhKey)
stateNode.BlockNumber,
stateNode.HeaderID,
stateNode.StateKey,
stateNode.CID,
true,
balance,
stateNode.Nonce,
stateNode.CodeHash,
stateNode.StorageRoot,
stateNode.Removed,
)
if err != nil {
return insertError{"eth.state_cids", err, w.db.InsertStateStm(), stateNode}
}
@ -155,31 +184,20 @@ func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error {
}
/*
INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path, block_number) DO NOTHING
*/
func (w *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel) error {
_, err := tx.Exec(w.db.Context(), w.db.InsertAccountStm(),
stateAccount.BlockNumber, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
if err != nil {
return insertError{"eth.state_accounts", err, w.db.InsertAccountStm(), stateAccount}
}
return nil
}
/*
INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (block_number, storage_leaf_key, cid, node_type, diff, mh_key) = ($1, $4, $5, $7, $8, $9)
INSERT INTO eth.storage_cids (block_number, header_id, state_leaf_key, storage_leaf_key, cid, diff, val, removed) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (header_id, state_leaf_key, storage_leaf_key, block_number) DO NOTHING
*/
func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel) error {
var storageKey string
if storageCID.StorageKey != nullHash.String() {
storageKey = storageCID.StorageKey
}
_, err := tx.Exec(w.db.Context(), w.db.InsertStorageStm(),
storageCID.BlockNumber, storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID, storageCID.Path,
storageCID.NodeType, true, storageCID.MhKey)
storageCID.BlockNumber,
storageCID.HeaderID,
storageCID.StateKey,
storageCID.StorageKey,
storageCID.CID,
true,
storageCID.Value,
storageCID.Removed,
)
if err != nil {
return insertError{"eth.storage_cids", err, w.db.InsertStorageStm(), storageCID}
}
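
Since the header row's node_id column has become the array-valued node_ids, the writer binds the local node ID through pq.StringArray, which implements driver.Valuer and renders a Go []string as a Postgres text[] literal. A standalone illustration of that binding:

package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	nodeIDs := pq.StringArray([]string{"node-1"})
	val, err := nodeIDs.Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(val) // e.g. {"node-1"}
}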

View File

@ -30,8 +30,8 @@ import (
// StateDiffIndexer interface required to index statediff data
type StateDiffIndexer interface {
PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error)
PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerID string) error
PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error
PushStateNode(tx Batch, stateNode sdtypes.StateLeafNode, headerID string) error
PushIPLD(tx Batch, ipld sdtypes.IPLD) error
ReportDBMetrics(delay time.Duration, quit <-chan bool)
// Methods used by WatchAddress API/functionality
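
With the interface reshaped around StateLeafNode and IPLD values, a caller drives one Batch per block and pushes state leaves and raw IPLDs against it. A rough, non-authoritative sketch of that flow; everything other than the interface methods shown above (the headerID choice, the slices being ranged over, and how the batch is completed) is an assumption:

	batch, err := indexer.PushBlock(block, receipts, totalDifficulty)
	if err != nil {
		return err
	}
	headerID := block.Hash().String() // assumed: header_id is the block hash string
	for _, leaf := range stateLeafNodes { // []sdtypes.StateLeafNode from the diff builder
		if err := indexer.PushStateNode(batch, leaf, headerID); err != nil {
			return err
		}
	}
	for _, ipld := range iplds { // []sdtypes.IPLD, i.e. a CID plus its raw Content bytes
		if err := indexer.PushIPLD(batch, ipld); err != nil {
			return err
		}
	}
	// committing/closing the batch is elided here; see the Batch type for that.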

View File

@ -1,175 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"math/big"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
)
// EthAccountSnapshot (eth-account-snapshot codec 0x97)
// represents an ethereum account, i.e. a wallet address or
// a smart contract
type EthAccountSnapshot struct {
*EthAccount
cid cid.Cid
rawdata []byte
}
// EthAccount is the building block of EthAccountSnapshot.
// Or, is the former stripped of its cid and rawdata components.
type EthAccount struct {
Nonce uint64
Balance *big.Int
Root []byte // This is the storage root trie
CodeHash []byte // This is the hash of the EVM code
}
// Static (compile time) check that EthAccountSnapshot satisfies the
// node.Node interface.
var _ node.Node = (*EthAccountSnapshot)(nil)
/*
INPUT
*/
// Input should be managed by EthStateTrie
/*
OUTPUT
*/
// Output should be managed by EthStateTrie
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the account snapshot.
func (as *EthAccountSnapshot) RawData() []byte {
return as.rawdata
}
// Cid returns the cid of the transaction.
func (as *EthAccountSnapshot) Cid() cid.Cid {
return as.cid
}
// String is a helper for output
func (as *EthAccountSnapshot) String() string {
return fmt.Sprintf("<EthereumAccountSnapshot %s>", as.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (as *EthAccountSnapshot) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-account-snapshot",
}
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (as *EthAccountSnapshot) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return as, nil, nil
}
if len(p) > 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
}
switch p[0] {
case "balance":
return as.Balance, nil, nil
case "codeHash":
return &node.Link{Cid: keccak256ToCid(RawBinary, as.CodeHash)}, nil, nil
case "nonce":
return as.Nonce, nil, nil
case "root":
return &node.Link{Cid: keccak256ToCid(MEthStorageTrie, as.Root)}, nil, nil
default:
return nil, nil, ErrInvalidLink
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (as *EthAccountSnapshot) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{"balance", "codeHash", "nonce", "root"}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (as *EthAccountSnapshot) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := as.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the interface.
func (as *EthAccountSnapshot) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (as *EthAccountSnapshot) Links() []*node.Link {
return nil
}
// Stat will go away. It is here to comply with the interface.
func (as *EthAccountSnapshot) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (as *EthAccountSnapshot) Size() (uint64, error) {
return 0, nil
}
/*
EthAccountSnapshot functions
*/
// MarshalJSON processes the transaction into readable JSON format.
func (as *EthAccountSnapshot) MarshalJSON() ([]byte, error) {
out := map[string]interface{}{
"balance": as.Balance,
"codeHash": keccak256ToCid(RawBinary, as.CodeHash),
"nonce": as.Nonce,
"root": keccak256ToCid(MEthStorageTrie, as.Root),
}
return json.Marshal(out)
}

View File

@ -1,297 +0,0 @@
package ipld
import (
"encoding/json"
"fmt"
"os"
"regexp"
"testing"
)
/*
Block INTERFACE
*/
func init() {
if os.Getenv("MODE") != "statediff" {
fmt.Println("Skipping statediff test")
os.Exit(0)
}
}
func TestAccountSnapshotBlockElements(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
if fmt.Sprintf("%x", eas.RawData())[:10] != "f84e808a03" {
t.Fatal("Wrong Data")
}
if eas.Cid().String() !=
"baglqcgzasckx2alxk43cksshnztjvhfyvbbh6bkp376gtcndm5cg4fkrkhsa" {
t.Fatal("Wrong Cid")
}
}
func TestAccountSnapshotString(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
if eas.String() !=
"<EthereumAccountSnapshot baglqcgzasckx2alxk43cksshnztjvhfyvbbh6bkp376gtcndm5cg4fkrkhsa>" {
t.Fatalf("Wrong String()")
}
}
func TestAccountSnapshotLoggable(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
l := eas.Loggable()
if _, ok := l["type"]; !ok {
t.Fatal("Loggable map expected the field 'type'")
}
if l["type"] != "eth-account-snapshot" {
t.Fatalf("Wrong Loggable 'type' value\r\nexpected %s\r\ngot %s", "eth-account-snapshot", l["type"])
}
}
/*
Node INTERFACE
*/
func TestAccountSnapshotResolve(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
// Empty path
obj, rest, err := eas.Resolve([]string{})
reas, ok := obj.(*EthAccountSnapshot)
if !ok {
t.Fatalf("Wrong type of returned object\r\nexpected %T\r\ngot %T", &EthAccountSnapshot{}, reas)
}
if reas.Cid() != eas.Cid() {
t.Fatalf("wrong returned CID\r\nexpected %s\r\ngot %s", eas.Cid().String(), reas.Cid().String())
}
if rest != nil {
t.Fatal("rest should be nil")
}
if err != nil {
t.Fatal("err should be nil")
}
// len(p) > 1
badCases := [][]string{
{"two", "elements"},
{"here", "three", "elements"},
{"and", "here", "four", "elements"},
}
for _, bc := range badCases {
obj, rest, err = eas.Resolve(bc)
if obj != nil {
t.Fatal("obj should be nil")
}
if rest != nil {
t.Fatal("rest should be nil")
}
if err.Error() != fmt.Sprintf("unexpected path elements past %s", bc[0]) {
t.Fatal("wrong error")
}
}
moreBadCases := []string{
"i",
"am",
"not",
"an",
"account",
"field",
}
for _, mbc := range moreBadCases {
obj, rest, err = eas.Resolve([]string{mbc})
if obj != nil {
t.Fatal("obj should be nil")
}
if rest != nil {
t.Fatal("rest should be nil")
}
if err != ErrInvalidLink {
t.Fatal("wrong error")
}
}
goodCases := []string{
"balance",
"codeHash",
"nonce",
"root",
}
for _, gc := range goodCases {
_, _, err = eas.Resolve([]string{gc})
if err != nil {
t.Fatalf("error should be nil %v", gc)
}
}
}
func TestAccountSnapshotTree(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
// Bad cases
tree := eas.Tree("non-empty-string", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = eas.Tree("non-empty-string", 1)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = eas.Tree("", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
// Good cases
tree = eas.Tree("", 1)
lookupElements := map[string]interface{}{
"balance": nil,
"codeHash": nil,
"nonce": nil,
"root": nil,
}
if len(tree) != len(lookupElements) {
t.Fatalf("Wrong number of elements\r\nexpected %d\r\ngot %d", len(lookupElements), len(tree))
}
for _, te := range tree {
if _, ok := lookupElements[te]; !ok {
t.Fatalf("Unexpected Element: %v", te)
}
}
}
func TestAccountSnapshotResolveLink(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
// bad case
obj, rest, err := eas.ResolveLink([]string{"supercalifragilist"})
if obj != nil {
t.Fatalf("Expected obj to be nil")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err != ErrInvalidLink {
t.Fatal("Wrong error")
}
// good case
obj, rest, err = eas.ResolveLink([]string{"nonce"})
if obj != nil {
t.Fatalf("Expected obj to be nil")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err.Error() != "resolved item was not a link" {
t.Fatal("Wrong error")
}
}
func TestAccountSnapshotCopy(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
defer func() {
r := recover()
if r == nil {
t.Fatal("Expected panic")
}
if r != "implement me" {
t.Fatalf("Wrong panic message\r\n expected %s\r\ngot %s", "'implement me'", r)
}
}()
_ = eas.Copy()
}
func TestAccountSnapshotLinks(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
if eas.Links() != nil {
t.Fatal("Links() expected to return nil")
}
}
func TestAccountSnapshotStat(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
obj, err := eas.Stat()
if obj == nil {
t.Fatal("Expected a not null object node.NodeStat")
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
func TestAccountSnapshotSize(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
size, err := eas.Size()
if size != uint64(0) {
t.Fatalf("Wrong size\r\nexpected %d\r\ngot %d", 0, size)
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
/*
EthAccountSnapshot functions
*/
func TestAccountSnapshotMarshalJSON(t *testing.T) {
eas := prepareEthAccountSnapshot(t)
jsonOutput, err := eas.MarshalJSON()
checkError(err, t)
var data map[string]interface{}
err = json.Unmarshal(jsonOutput, &data)
checkError(err, t)
balanceExpression := regexp.MustCompile(`{"balance":16011846000000000000000,`)
if !balanceExpression.MatchString(string(jsonOutput)) {
t.Fatal("Balance expression not found")
}
code, _ := data["codeHash"].(map[string]interface{})
if fmt.Sprintf("%s", code["/"]) !=
"bafkrwigf2jdadbxxem6je7t5wlomoa6a4ualmu6kqittw6723acf3bneoa" {
t.Fatalf("Wrong Marshaled Value\r\nexpected %s\r\ngot %s", "bafkrwigf2jdadbxxem6je7t5wlomoa6a4ualmu6kqittw6723acf3bneoa", fmt.Sprintf("%s", code["/"]))
}
if fmt.Sprintf("%v", data["nonce"]) != "0" {
t.Fatalf("Wrong Marshaled Value\r\nexpected %s\r\ngot %s", "0", fmt.Sprintf("%v", data["nonce"]))
}
root, _ := data["root"].(map[string]interface{})
if fmt.Sprintf("%s", root["/"]) !=
"bagmacgzak3ub6fy3zrk2n74dixtjfqhynznurya3tfwk3qabmix3ly3dwqqq" {
t.Fatalf("Wrong Marshaled Value\r\nexpected %s\r\ngot %s", "bagmacgzak3ub6fy3zrk2n74dixtjfqhynznurya3tfwk3qabmix3ly3dwqqq", fmt.Sprintf("%s", root["/"]))
}
}
/*
AUXILIARS
*/
func prepareEthAccountSnapshot(t *testing.T) *EthAccountSnapshot {
fi, err := os.Open("test_data/eth-state-trie-rlp-c9070d")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
return output.elements[1].(*EthAccountSnapshot)
}

View File

@ -17,32 +17,21 @@
package ipld
import (
"encoding/json"
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// EthHeader (eth-block, codec 0x90), represents an ethereum block header
type EthHeader struct {
*types.Header
cid cid.Cid
rawdata []byte
}
// Static (compile time) check that EthHeader satisfies the node.Node interface.
var _ node.Node = (*EthHeader)(nil)
/*
INPUT
*/
var _ IPLD = (*EthHeader)(nil)
// NewEthHeader converts a *types.Header into an EthHeader IPLD node
func NewEthHeader(header *types.Header) (*EthHeader, error) {
@ -55,34 +44,11 @@ func NewEthHeader(header *types.Header) (*EthHeader, error) {
return nil, err
}
return &EthHeader{
Header: header,
cid: c,
rawdata: headerRLP,
}, nil
}
/*
OUTPUT
*/
// DecodeEthHeader takes a cid and its raw binary data
// from IPFS and returns an EthTx object for further processing.
func DecodeEthHeader(c cid.Cid, b []byte) (*EthHeader, error) {
h := new(types.Header)
if err := rlp.DecodeBytes(b, h); err != nil {
return nil, err
}
return &EthHeader{
Header: h,
cid: c,
rawdata: b,
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the block header.
func (b *EthHeader) RawData() []byte {
return b.rawdata
@ -92,202 +58,3 @@ func (b *EthHeader) RawData() []byte {
func (b *EthHeader) Cid() cid.Cid {
return b.cid
}
// String is a helper for output
func (b *EthHeader) String() string {
return fmt.Sprintf("<EthHeader %s>", b.cid)
}
// Loggable returns a map the type of IPLD Link.
func (b *EthHeader) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-header",
}
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (b *EthHeader) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return b, nil, nil
}
first, rest := p[0], p[1:]
switch first {
case "parent":
return &node.Link{Cid: commonHashToCid(MEthHeader, b.ParentHash)}, rest, nil
case "receipts":
return &node.Link{Cid: commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash)}, rest, nil
case "root":
return &node.Link{Cid: commonHashToCid(MEthStateTrie, b.Root)}, rest, nil
case "tx":
return &node.Link{Cid: commonHashToCid(MEthTxTrie, b.TxHash)}, rest, nil
case "uncles":
return &node.Link{Cid: commonHashToCid(MEthHeaderList, b.UncleHash)}, rest, nil
}
if len(p) != 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", first)
}
switch first {
case "bloom":
return b.Bloom, nil, nil
case "coinbase":
return b.Coinbase, nil, nil
case "difficulty":
return b.Difficulty, nil, nil
case "extra":
// This is a []byte. By default they are marshalled into Base64.
return fmt.Sprintf("0x%x", b.Extra), nil, nil
case "gaslimit":
return b.GasLimit, nil, nil
case "gasused":
return b.GasUsed, nil, nil
case "mixdigest":
return b.MixDigest, nil, nil
case "nonce":
return b.Nonce, nil, nil
case "number":
return b.Number, nil, nil
case "time":
return b.Time, nil, nil
default:
return nil, nil, ErrInvalidLink
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (b *EthHeader) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{
"time",
"bloom",
"coinbase",
"difficulty",
"extra",
"gaslimit",
"gasused",
"mixdigest",
"nonce",
"number",
"parent",
"receipts",
"root",
"tx",
"uncles",
}
}
// ResolveLink is a helper function that allows easier traversal of links through blocks
func (b *EthHeader) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := b.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the Node interface.
func (b *EthHeader) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
// HINT: Use `ipfs refs <cid>`
func (b *EthHeader) Links() []*node.Link {
return []*node.Link{
{Cid: commonHashToCid(MEthHeader, b.ParentHash)},
{Cid: commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash)},
{Cid: commonHashToCid(MEthStateTrie, b.Root)},
{Cid: commonHashToCid(MEthTxTrie, b.TxHash)},
{Cid: commonHashToCid(MEthHeaderList, b.UncleHash)},
}
}
// Stat will go away. It is here to comply with the Node interface.
func (b *EthHeader) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the Node interface.
func (b *EthHeader) Size() (uint64, error) {
return 0, nil
}
/*
EthHeader functions
*/
// MarshalJSON processes the block header into readable JSON format,
// converting the right links into their cids, and keeping the original
// hex hash, allowing the user to simplify external queries.
func (b *EthHeader) MarshalJSON() ([]byte, error) {
out := map[string]interface{}{
"time": b.Time,
"bloom": b.Bloom,
"coinbase": b.Coinbase,
"difficulty": b.Difficulty,
"extra": fmt.Sprintf("0x%x", b.Extra),
"gaslimit": b.GasLimit,
"gasused": b.GasUsed,
"mixdigest": b.MixDigest,
"nonce": b.Nonce,
"number": b.Number,
"parent": commonHashToCid(MEthHeader, b.ParentHash),
"receipts": commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash),
"root": commonHashToCid(MEthStateTrie, b.Root),
"tx": commonHashToCid(MEthTxTrie, b.TxHash),
"uncles": commonHashToCid(MEthHeaderList, b.UncleHash),
}
return json.Marshal(out)
}
// objJSONHeader defines the output of the JSON RPC API for either
// "eth_BlockByHash" or "eth_BlockByHeader".
type objJSONHeader struct {
Result objJSONHeaderResult `json:"result"`
}
// objJSONHeaderResult is the nested struct that takes
// the contents of the JSON field "result".
type objJSONHeaderResult struct {
types.Header // Use its fields and unmarshaler
*objJSONHeaderResultExt // Add these fields to the parsing
}
// objJSONHeaderResultExt facilitates the composition
// of the field "result", adding to the
// `types.Header` fields, both ommers (their hashes) and transactions.
type objJSONHeaderResultExt struct {
OmmerHashes []common.Hash `json:"uncles"`
Transactions []*types.Transaction `json:"transactions"`
}
// UnmarshalJSON overrides the function types.Header.UnmarshalJSON, allowing us
// to parse the fields of Header, plus ommer hashes and transactions.
// (yes, ommer hashes. You will need to "eth_getUncleCountByBlockHash" per each ommer)
func (o *objJSONHeaderResult) UnmarshalJSON(input []byte) error {
err := o.Header.UnmarshalJSON(input)
if err != nil {
return err
}
o.objJSONHeaderResultExt = &objJSONHeaderResultExt{}
err = json.Unmarshal(input, o.objJSONHeaderResultExt)
return err
}
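
After this trim the header node only needs to expose a CID and its raw RLP, satisfying the indexer's minimal IPLD interface instead of the full go-ipld-format node.Node interface. A sketch of what that reduced contract looks like, inferred from the methods that remain (the exact interface definition lives elsewhere in the indexer and is an assumption here):

	// assumed shape of the minimal interface the *Eth types now implement
	type IPLD interface {
		Cid() cid.Cid
		RawData() []byte
	}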

View File

@ -1,585 +0,0 @@
package ipld
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"testing"
block "github.com/ipfs/go-block-format"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
)
func TestBlockBodyRlpParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-block-body-rlp-999999")
checkError(err, t)
output, _, _, err := FromBlockRLP(fi)
checkError(err, t)
testEthBlockFields(output, t)
}
func TestBlockHeaderRlpParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-block-header-rlp-999999")
checkError(err, t)
output, _, _, err := FromBlockRLP(fi)
checkError(err, t)
testEthBlockFields(output, t)
}
func TestBlockBodyJsonParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-block-body-json-999999")
checkError(err, t)
output, _, _, err := FromBlockJSON(fi)
checkError(err, t)
testEthBlockFields(output, t)
}
func TestEthBlockProcessTransactionsError(t *testing.T) {
// Let's just change one byte in a field of one of these transactions.
fi, err := os.Open("test_data/error-tx-eth-block-body-json-999999")
checkError(err, t)
_, _, _, err = FromBlockJSON(fi)
if err == nil {
t.Fatal("Expected an error")
}
}
// TestDecodeBlockHeader should work for both inputs (block header and block body)
// as what we are storing is just the block header
func TestDecodeBlockHeader(t *testing.T) {
storedEthBlock := prepareStoredEthBlock("test_data/eth-block-header-rlp-999999", t)
ethBlock, err := DecodeEthHeader(storedEthBlock.Cid(), storedEthBlock.RawData())
checkError(err, t)
testEthBlockFields(ethBlock, t)
}
func TestEthBlockString(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
if ethBlock.String() != "<EthHeader bagiacgzawt5236hkiuvrhfyy4jya3qitlt6icfcqgheew6vsptlraokppm4a>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthHeader bagiacgzawt5236hkiuvrhfyy4jya3qitlt6icfcqgheew6vsptlraokppm4a>", ethBlock.String())
}
}
func TestEthBlockLoggable(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
l := ethBlock.Loggable()
if _, ok := l["type"]; !ok {
t.Fatal("Loggable map expected the field 'type'")
}
if l["type"] != "eth-header" {
t.Fatalf("Wrong Loggable 'type' value\r\nexpected %s\r\ngot %s", "eth-header", l["type"])
}
}
func TestEthBlockJSONMarshal(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
jsonOutput, err := ethBlock.MarshalJSON()
checkError(err, t)
var data map[string]interface{}
err = json.Unmarshal(jsonOutput, &data)
checkError(err, t)
// Testing all fields is boring, but can help us to avoid
// that dreaded regression
if data["bloom"].(string)[:10] != "0x00000000" {
t.Fatalf("Wrong Bloom\r\nexpected %s\r\ngot %s", "0x00000000", data["bloom"].(string)[:10])
t.Fatal("Wrong Bloom")
}
if data["coinbase"] != "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5" {
t.Fatalf("Wrong coinbase\r\nexpected %s\r\ngot %s", "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", data["coinbase"])
}
if parseFloat(data["difficulty"]) != "12555463106190" {
t.Fatalf("Wrong Difficulty\r\nexpected %s\r\ngot %s", "12555463106190", parseFloat(data["difficulty"]))
}
if data["extra"] != "0xd783010303844765746887676f312e342e32856c696e7578" {
t.Fatalf("Wrong Extra\r\nexpected %s\r\ngot %s", "0xd783010303844765746887676f312e342e32856c696e7578", data["extra"])
}
if parseFloat(data["gaslimit"]) != "3141592" {
t.Fatalf("Wrong Gas limit\r\nexpected %s\r\ngot %s", "3141592", parseFloat(data["gaslimit"]))
}
if parseFloat(data["gasused"]) != "231000" {
t.Fatalf("Wrong Gas used\r\nexpected %s\r\ngot %s", "231000", parseFloat(data["gasused"]))
}
if data["mixdigest"] != "0x5b10f4a08a6c209d426f6158bd24b574f4f7b7aa0099c67c14a1f693b4dd04d0" {
t.Fatalf("Wrong Mix digest\r\nexpected %s\r\ngot %s", "0x5b10f4a08a6c209d426f6158bd24b574f4f7b7aa0099c67c14a1f693b4dd04d0", data["mixdigest"])
}
if data["nonce"] != "0xf491f46b60fe04b3" {
t.Fatalf("Wrong nonce\r\nexpected %s\r\ngot %s", "0xf491f46b60fe04b3", data["nonce"])
}
if parseFloat(data["number"]) != "999999" {
t.Fatalf("Wrong block number\r\nexpected %s\r\ngot %s", "999999", parseFloat(data["number"]))
}
if parseMapElement(data["parent"]) != "bagiacgza2m6j3xu774hlvjxhd2fsnuv5ufom6ei4ply3mm3jrleeozt7b62a" {
t.Fatalf("Wrong Parent cid\r\nexpected %s\r\ngot %s", "bagiacgza2m6j3xu774hlvjxhd2fsnuv5ufom6ei4ply3mm3jrleeozt7b62a", parseMapElement(data["parent"]))
}
if parseMapElement(data["receipts"]) != "bagkacgzap6qpnsrkagbdecgybaa63ljx4pr2aa5vlsetdg2f5mpzpbrk2iuq" {
t.Fatalf("Wrong Receipt root cid\r\nexpected %s\r\ngot %s", "bagkacgzap6qpnsrkagbdecgybaa63ljx4pr2aa5vlsetdg2f5mpzpbrk2iuq", parseMapElement(data["receipts"]))
}
if parseMapElement(data["root"]) != "baglacgza5wmkus23dhec7m2tmtyikcfobjw6yzs7uv3ghxfjjroxavkm3yia" {
t.Fatalf("Wrong root hash cid\r\nexpected %s\r\ngot %s", "baglacgza5wmkus23dhec7m2tmtyikcfobjw6yzs7uv3ghxfjjroxavkm3yia", parseMapElement(data["root"]))
}
if parseFloat(data["time"]) != "1455404037" {
t.Fatalf("Wrong Time\r\nexpected %s\r\ngot %s", "1455404037", parseFloat(data["time"]))
}
if parseMapElement(data["tx"]) != "bagjacgzair6l3dci6smknejlccbrzx7vtr737s56onoksked2t5anxgxvzka" {
t.Fatalf("Wrong Tx root cid\r\nexpected %s\r\ngot %s", "bagjacgzair6l3dci6smknejlccbrzx7vtr737s56onoksked2t5anxgxvzka", parseMapElement(data["tx"]))
}
if parseMapElement(data["uncles"]) != "bagiqcgzadxge32g6y5oxvk4fwvt3ntgudljreri3ssfhie7qufbp2qgusndq" {
t.Fatalf("Wrong Uncle hash cid\r\nexpected %s\r\ngot %s", "bagiqcgzadxge32g6y5oxvk4fwvt3ntgudljreri3ssfhie7qufbp2qgusndq", parseMapElement(data["uncles"]))
}
}
func TestEthBlockLinks(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
links := ethBlock.Links()
if links[0].Cid.String() != "bagiacgza2m6j3xu774hlvjxhd2fsnuv5ufom6ei4ply3mm3jrleeozt7b62a" {
t.Fatalf("Wrong cid for parent link\r\nexpected: %s\r\ngot %s", "bagiacgza2m6j3xu774hlvjxhd2fsnuv5ufom6ei4ply3mm3jrleeozt7b62a", links[0].Cid.String())
}
if links[1].Cid.String() != "bagkacgzap6qpnsrkagbdecgybaa63ljx4pr2aa5vlsetdg2f5mpzpbrk2iuq" {
t.Fatalf("Wrong cid for receipt root link\r\nexpected: %s\r\ngot %s", "bagkacgzap6qpnsrkagbdecgybaa63ljx4pr2aa5vlsetdg2f5mpzpbrk2iuq", links[1].Cid.String())
}
if links[2].Cid.String() != "baglacgza5wmkus23dhec7m2tmtyikcfobjw6yzs7uv3ghxfjjroxavkm3yia" {
t.Fatalf("Wrong cid for state root link\r\nexpected: %s\r\ngot %s", "baglacgza5wmkus23dhec7m2tmtyikcfobjw6yzs7uv3ghxfjjroxavkm3yia", links[2].Cid.String())
}
if links[3].Cid.String() != "bagjacgzair6l3dci6smknejlccbrzx7vtr737s56onoksked2t5anxgxvzka" {
t.Fatalf("Wrong cid for tx root link\r\nexpected: %s\r\ngot %s", "bagjacgzair6l3dci6smknejlccbrzx7vtr737s56onoksked2t5anxgxvzka", links[3].Cid.String())
}
if links[4].Cid.String() != "bagiqcgzadxge32g6y5oxvk4fwvt3ntgudljreri3ssfhie7qufbp2qgusndq" {
t.Fatalf("Wrong cid for uncles root link\r\nexpected: %s\r\ngot %s", "bagiqcgzadxge32g6y5oxvk4fwvt3ntgudljreri3ssfhie7qufbp2qgusndq", links[4].Cid.String())
}
}
func TestEthBlockResolveEmptyPath(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
obj, rest, err := ethBlock.Resolve([]string{})
checkError(err, t)
if ethBlock != obj.(*EthHeader) {
t.Fatal("Should have returned the same eth-block object")
}
if len(rest) != 0 {
t.Fatalf("Wrong len of rest of the path returned\r\nexpected %d\r\ngot %d", 0, len(rest))
}
}
func TestEthBlockResolveNoSuchLink(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
_, _, err := ethBlock.Resolve([]string{"wewonthavethisfieldever"})
if err == nil {
t.Fatal("Should have failed with unknown field")
}
if err != ErrInvalidLink {
t.Fatalf("Wrong error message\r\nexpected %s\r\ngot %s", ErrInvalidLink, err.Error())
}
}
func TestEthBlockResolveBloom(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
obj, rest, err := ethBlock.Resolve([]string{"bloom"})
checkError(err, t)
// The marshaler of types.Bloom should output it as 0x
bloomInText := fmt.Sprintf("%x", obj.(types.Bloom))
if bloomInText[:10] != "0000000000" {
t.Fatalf("Wrong Bloom\r\nexpected %s\r\ngot %s", "0000000000", bloomInText[:10])
}
if len(rest) != 0 {
t.Fatalf("Wrong len of rest of the path returned\r\nexpected %d\r\ngot %d", 0, len(rest))
}
}
func TestEthBlockResolveBloomExtraPathElements(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
obj, rest, err := ethBlock.Resolve([]string{"bloom", "unexpected", "extra", "elements"})
if obj != nil {
t.Fatal("Returned obj should be nil")
}
if rest != nil {
t.Fatal("Returned rest should be nil")
}
if err.Error() != "unexpected path elements past bloom" {
t.Fatalf("Wrong error\r\nexpected %s\r\ngot %s", "unexpected path elements past bloom", err.Error())
}
}
func TestEthBlockResolveNonLinkFields(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
testCases := map[string][]string{
"coinbase": {"%x", "52bc44d5378309ee2abf1539bf71de1b7d7be3b5"},
"difficulty": {"%s", "12555463106190"},
"extra": {"%s", "0xd783010303844765746887676f312e342e32856c696e7578"},
"gaslimit": {"%d", "3141592"},
"gasused": {"%d", "231000"},
"mixdigest": {"%x", "5b10f4a08a6c209d426f6158bd24b574f4f7b7aa0099c67c14a1f693b4dd04d0"},
"nonce": {"%x", "f491f46b60fe04b3"},
"number": {"%s", "999999"},
"time": {"%d", "1455404037"},
}
for field, value := range testCases {
obj, rest, err := ethBlock.Resolve([]string{field})
checkError(err, t)
format := value[0]
result := value[1]
if fmt.Sprintf(format, obj) != result {
t.Fatalf("Wrong %v\r\nexpected %v\r\ngot %s", field, result, fmt.Sprintf(format, obj))
}
if len(rest) != 0 {
t.Fatalf("Wrong len of rest of the path returned\r\nexpected %d\r\ngot %d", 0, len(rest))
}
}
}
func TestEthBlockResolveNonLinkFieldsExtraPathElements(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
testCases := []string{
"coinbase",
"difficulty",
"extra",
"gaslimit",
"gasused",
"mixdigest",
"nonce",
"number",
"time",
}
for _, field := range testCases {
obj, rest, err := ethBlock.Resolve([]string{field, "unexpected", "extra", "elements"})
if obj != nil {
t.Fatal("Returned obj should be nil")
}
if rest != nil {
t.Fatal("Returned rest should be nil")
}
if err.Error() != "unexpected path elements past "+field {
t.Fatalf("Wrong error\r\nexpected %s\r\ngot %s", "unexpected path elements past "+field, err.Error())
}
}
}
func TestEthBlockResolveLinkFields(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
testCases := map[string]string{
"parent": "bagiacgza2m6j3xu774hlvjxhd2fsnuv5ufom6ei4ply3mm3jrleeozt7b62a",
"receipts": "bagkacgzap6qpnsrkagbdecgybaa63ljx4pr2aa5vlsetdg2f5mpzpbrk2iuq",
"root": "baglacgza5wmkus23dhec7m2tmtyikcfobjw6yzs7uv3ghxfjjroxavkm3yia",
"tx": "bagjacgzair6l3dci6smknejlccbrzx7vtr737s56onoksked2t5anxgxvzka",
"uncles": "bagiqcgzadxge32g6y5oxvk4fwvt3ntgudljreri3ssfhie7qufbp2qgusndq",
}
for field, result := range testCases {
obj, rest, err := ethBlock.Resolve([]string{field, "anything", "goes", "here"})
checkError(err, t)
lnk, ok := obj.(*node.Link)
if !ok {
t.Fatal("Returned object is not a link")
}
if lnk.Cid.String() != result {
t.Fatalf("Wrong %s cid\r\nexpected %v\r\ngot %v", field, result, lnk.Cid.String())
}
for i, p := range []string{"anything", "goes", "here"} {
if rest[i] != p {
t.Fatalf("Wrong rest of the path returned\r\nexpected %s\r\ngot %s", p, rest[i])
}
}
}
}
func TestEthBlockTreeBadParams(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
tree := ethBlock.Tree("non-empty-string", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = ethBlock.Tree("non-empty-string", 1)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = ethBlock.Tree("", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
}
func TestEThBlockTree(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
tree := ethBlock.Tree("", 1)
lookupElements := map[string]interface{}{
"bloom": nil,
"coinbase": nil,
"difficulty": nil,
"extra": nil,
"gaslimit": nil,
"gasused": nil,
"mixdigest": nil,
"nonce": nil,
"number": nil,
"parent": nil,
"receipts": nil,
"root": nil,
"time": nil,
"tx": nil,
"uncles": nil,
}
if len(tree) != len(lookupElements) {
t.Fatalf("Wrong number of elements\r\nexpected %d\r\ngot %d", len(lookupElements), len(tree))
}
for _, te := range tree {
if _, ok := lookupElements[te]; !ok {
t.Fatalf("Unexpected Element: %v", te)
}
}
}
/*
The two functions above: TestEthBlockResolveNonLinkFields and
TestEthBlockResolveLinkFields did all the heavy lifting. Then, we will
just test two use cases.
*/
func TestEthBlockResolveLinksBadLink(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
obj, rest, err := ethBlock.ResolveLink([]string{"supercalifragilist"})
if obj != nil {
t.Fatalf("Expected obj to be nil")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err != ErrInvalidLink {
t.Fatalf("Expected error\r\nexpected %s\r\ngot %s", ErrInvalidLink, err)
}
}
func TestEthBlockResolveLinksGoodLink(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
obj, rest, err := ethBlock.ResolveLink([]string{"tx", "0", "0", "0"})
if obj == nil {
t.Fatalf("Expected valid *node.Link obj to be returned")
}
if rest == nil {
t.Fatal("Expected rest to be returned")
}
for i, p := range []string{"0", "0", "0"} {
if rest[i] != p {
t.Fatalf("Wrong rest of the path returned\r\nexpected %s\r\ngot %s", p, rest[i])
}
}
if err != nil {
t.Fatal("Non error expected")
}
}
/*
These functions below should go away
We are working on test coverage anyways...
*/
func TestEthBlockCopy(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
defer func() {
r := recover()
if r == nil {
t.Fatal("Expected panic")
}
if r != "implement me" {
t.Fatalf("Wrong panic message\r\nexpected %s\r\ngot %s", "'implement me'", r)
}
}()
_ = ethBlock.Copy()
}
func TestEthBlockStat(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
obj, err := ethBlock.Stat()
if obj == nil {
t.Fatal("Expected a not null object node.NodeStat")
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
func TestEthBlockSize(t *testing.T) {
ethBlock := prepareDecodedEthBlock("test_data/eth-block-header-rlp-999999", t)
size, err := ethBlock.Size()
if size != 0 {
t.Fatalf("Wrong size\r\nexpected %d\r\ngot %d", 0, size)
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
/*
AUXILIARS
*/
// checkError makes 3 lines into 1.
func checkError(err error, t *testing.T) {
if err != nil {
_, fn, line, _ := runtime.Caller(1)
t.Fatalf("[%v:%v] %v", fn, line, err)
}
}
// parseFloat is a convenience function to test json output
func parseFloat(v interface{}) string {
return strconv.FormatFloat(v.(float64), 'f', 0, 64)
}
// parseMapElement is a convenience function to test json output
func parseMapElement(v interface{}) string {
return v.(map[string]interface{})["/"].(string)
}
// prepareStoredEthBlock reads the block from a file source to get its rawdata
// and computes its cid, for then, feeding it into a new IPLD block function.
// So we can pretend that we got this block from the datastore
func prepareStoredEthBlock(filepath string, t *testing.T) *block.BasicBlock {
// Prepare the "fetched block". This one is supposed to be in the datastore
// and given away by github.com/ipfs/go-ipfs/merkledag
fi, err := os.Open(filepath)
checkError(err, t)
b, err := ioutil.ReadAll(fi)
checkError(err, t)
c, err := RawdataToCid(MEthHeader, b, multihash.KECCAK_256)
checkError(err, t)
// It's good to clarify that this one below is an IPLD block
storedEthBlock, err := block.NewBlockWithCid(b, c)
checkError(err, t)
return storedEthBlock
}
// prepareDecodedEthBlock is more complex than function above, as it stores a
// basic block and RLP-decodes it
func prepareDecodedEthBlock(filepath string, t *testing.T) *EthHeader {
// Get the block from the datastore and decode it.
storedEthBlock := prepareStoredEthBlock("test_data/eth-block-header-rlp-999999", t)
ethBlock, err := DecodeEthHeader(storedEthBlock.Cid(), storedEthBlock.RawData())
checkError(err, t)
return ethBlock
}
// testEthBlockFields checks the fields of EthBlock one by one.
func testEthBlockFields(ethBlock *EthHeader, t *testing.T) {
// Was the cid calculated?
if ethBlock.Cid().String() != "bagiacgzawt5236hkiuvrhfyy4jya3qitlt6icfcqgheew6vsptlraokppm4a" {
t.Fatalf("Wrong cid\r\nexpected %s\r\ngot %s", "bagiacgzawt5236hkiuvrhfyy4jya3qitlt6icfcqgheew6vsptlraokppm4a", ethBlock.Cid().String())
}
// Do we have the rawdata available?
if fmt.Sprintf("%x", ethBlock.RawData()[:10]) != "f90218a0d33c9dde9fff" {
t.Fatalf("Wrong Rawdata\r\nexpected %s\r\ngot %s", "f90218a0d33c9dde9fff", fmt.Sprintf("%x", ethBlock.RawData()[:10]))
}
// Proper Fields of types.Header
if fmt.Sprintf("%x", ethBlock.ParentHash) != "d33c9dde9fff0ebaa6e71e8b26d2bda15ccf111c7af1b633698ac847667f0fb4" {
t.Fatalf("Wrong ParentHash\r\nexpected %s\r\ngot %s", "d33c9dde9fff0ebaa6e71e8b26d2bda15ccf111c7af1b633698ac847667f0fb4", fmt.Sprintf("%x", ethBlock.ParentHash))
}
if fmt.Sprintf("%x", ethBlock.UncleHash) != "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347" {
t.Fatalf("Wrong UncleHash field\r\nexpected %s\r\ngot %s", "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", fmt.Sprintf("%x", ethBlock.UncleHash))
}
if fmt.Sprintf("%x", ethBlock.Coinbase) != "52bc44d5378309ee2abf1539bf71de1b7d7be3b5" {
t.Fatalf("Wrong Coinbase\r\nexpected %s\r\ngot %s", "52bc44d5378309ee2abf1539bf71de1b7d7be3b5", fmt.Sprintf("%x", ethBlock.Coinbase))
}
if fmt.Sprintf("%x", ethBlock.Root) != "ed98aa4b5b19c82fb35364f08508ae0a6dec665fa57663dca94c5d70554cde10" {
t.Fatalf("Wrong Root\r\nexpected %s\r\ngot %s", "ed98aa4b5b19c82fb35364f08508ae0a6dec665fa57663dca94c5d70554cde10", fmt.Sprintf("%x", ethBlock.Root))
}
if fmt.Sprintf("%x", ethBlock.TxHash) != "447cbd8c48f498a6912b10831cdff59c7fbfcbbe735ca92883d4fa06dcd7ae54" {
t.Fatalf("Wrong TxHash\r\nexpected %s\r\ngot %s", "447cbd8c48f498a6912b10831cdff59c7fbfcbbe735ca92883d4fa06dcd7ae54", fmt.Sprintf("%x", ethBlock.TxHash))
}
if fmt.Sprintf("%x", ethBlock.ReceiptHash) != "7fa0f6ca2a01823208d80801edad37e3e3a003b55c89319b45eb1f97862ad229" {
t.Fatalf("Wrong ReceiptHash\r\nexpected %s\r\ngot %s", "7fa0f6ca2a01823208d80801edad37e3e3a003b55c89319b45eb1f97862ad229", fmt.Sprintf("%x", ethBlock.ReceiptHash))
}
if len(ethBlock.Bloom) != 256 {
t.Fatalf("Wrong Bloom Length\r\nexpected %d\r\ngot %d", 256, len(ethBlock.Bloom))
}
if fmt.Sprintf("%x", ethBlock.Bloom[71:76]) != "0000000000" { // You wouldn't want me to print out the whole bloom field?
t.Fatalf("Wrong Bloom\r\nexpected %s\r\ngot %s", "0000000000", fmt.Sprintf("%x", ethBlock.Bloom[71:76]))
}
if ethBlock.Difficulty.String() != "12555463106190" {
t.Fatalf("Wrong Difficulty\r\nexpected %s\r\ngot %s", "12555463106190", ethBlock.Difficulty.String())
}
if ethBlock.Number.String() != "999999" {
t.Fatalf("Wrong Block Number\r\nexpected %s\r\ngot %s", "999999", ethBlock.Number.String())
}
if ethBlock.GasLimit != uint64(3141592) {
t.Fatalf("Wrong Gas Limit\r\nexpected %d\r\ngot %d", 3141592, ethBlock.GasLimit)
}
if ethBlock.GasUsed != uint64(231000) {
t.Fatalf("Wrong Gas Used\r\nexpected %d\r\ngot %d", 231000, ethBlock.GasUsed)
}
if ethBlock.Time != uint64(1455404037) {
t.Fatalf("Wrong Time\r\nexpected %d\r\ngot %d", 1455404037, ethBlock.Time)
}
if fmt.Sprintf("%x", ethBlock.Extra) != "d783010303844765746887676f312e342e32856c696e7578" {
t.Fatalf("Wrong Extra\r\nexpected %s\r\ngot %s", "d783010303844765746887676f312e342e32856c696e7578", fmt.Sprintf("%x", ethBlock.Extra))
}
if fmt.Sprintf("%x", ethBlock.Nonce) != "f491f46b60fe04b3" {
t.Fatalf("Wrong Nonce\r\nexpected %s\r\ngot %s", "f491f46b60fe04b3", fmt.Sprintf("%x", ethBlock.Nonce))
}
if fmt.Sprintf("%x", ethBlock.MixDigest) != "5b10f4a08a6c209d426f6158bd24b574f4f7b7aa0099c67c14a1f693b4dd04d0" {
t.Fatalf("Wrong MixDigest\r\nexpected %s\r\ngot %s", "5b10f4a08a6c209d426f6158bd24b574f4f7b7aa0099c67c14a1f693b4dd04d0", fmt.Sprintf("%x", ethBlock.MixDigest))
}
}

View File

@ -1,10 +1,7 @@
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
@ -13,14 +10,12 @@ import (
// EthLog (eth-log, codec 0x9a) represents an ethereum log
type EthLog struct {
*types.Log
rawData []byte
cid cid.Cid
}
// Static (compile time) check that EthLog satisfies the node.Node interface.
var _ node.Node = (*EthLog)(nil)
var _ IPLD = (*EthLog)(nil)
// NewLog create a new EthLog IPLD node
func NewLog(log *types.Log) (*EthLog, error) {
@ -33,29 +28,11 @@ func NewLog(log *types.Log) (*EthLog, error) {
return nil, err
}
return &EthLog{
Log: log,
cid: c,
rawData: logRaw,
}, nil
}
// DecodeEthLogs takes a cid and its raw binary data and returns an EthLog object for further processing.
func DecodeEthLogs(c cid.Cid, b []byte) (*EthLog, error) {
l := new(types.Log)
if err := rlp.DecodeBytes(b, l); err != nil {
return nil, err
}
return &EthLog{
Log: l,
cid: c,
rawData: b,
}, nil
}
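As a quick orientation aid, here is a minimal round-trip sketch of the eth-log codec described above. It is illustrative only: it assumes it lives in the same ipld package (so NewLog and DecodeEthLogs are in scope) and uses made-up log values.

// exampleLogRoundTrip is a hypothetical check that encoding a log with NewLog
// and decoding it again with DecodeEthLogs yields the same CID.
func exampleLogRoundTrip() error {
	log := &types.Log{Data: []byte{0x01}}
	encoded, err := NewLog(log)
	if err != nil {
		return err
	}
	decoded, err := DecodeEthLogs(encoded.Cid(), encoded.RawData())
	if err != nil {
		return err
	}
	if decoded.Cid() != encoded.Cid() {
		return fmt.Errorf("cid mismatch: %s != %s", decoded.Cid(), encoded.Cid())
	}
	return nil
}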
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the log.
func (l *EthLog) RawData() []byte {
return l.rawData
@ -65,94 +42,3 @@ func (l *EthLog) RawData() []byte {
func (l *EthLog) Cid() cid.Cid {
return l.cid
}
// String is a helper for output
func (l *EthLog) String() string {
return fmt.Sprintf("<EthereumLog %s>", l.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (l *EthLog) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-log",
}
}
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (l *EthLog) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return l, nil, nil
}
if len(p) > 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
}
switch p[0] {
case "address":
return l.Address, nil, nil
case "data":
// This is a []byte. By default they are marshalled into Base64.
return fmt.Sprintf("0x%x", l.Data), nil, nil
case "topics":
return l.Topics, nil, nil
case "logIndex":
return l.Index, nil, nil
case "removed":
return l.Removed, nil, nil
default:
return nil, nil, ErrInvalidLink
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (l *EthLog) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{
"address",
"data",
"topics",
"logIndex",
"removed",
}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (l *EthLog) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := l.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the Node interface.
func (l *EthLog) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (l *EthLog) Links() []*node.Link {
return nil
}
// Stat will go away. It is here to comply with the interface.
func (l *EthLog) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (l *EthLog) Size() (uint64, error) {
return 0, nil
}

View File

@ -1,144 +0,0 @@
package ipld
import (
"fmt"
node "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// EthLogTrie (eth-log-trie codec 0x99) represents
// a node from the log trie in ethereum.
type EthLogTrie struct {
*TrieNode
}
/*
OUTPUT
*/
// DecodeEthLogTrie returns an EthLogTrie object from its cid and rawdata.
func DecodeEthLogTrie(c cid.Cid, b []byte) (*EthLogTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthLogTrieLeaf)
if err != nil {
return nil, err
}
return &EthLogTrie{TrieNode: tn}, nil
}
// decodeEthLogTrieLeaf parses an eth-log-trie leaf
// from decoded RLP elements
func decodeEthLogTrieLeaf(i []interface{}) ([]interface{}, error) {
l := new(types.Log)
if err := rlp.DecodeBytes(i[1].([]byte), l); err != nil {
return nil, err
}
c, err := RawdataToCid(MEthLogTrie, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthLog{
Log: l,
cid: c,
rawData: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the log trie node.
func (t *EthLogTrie) RawData() []byte {
return t.rawdata
}
// Cid returns the cid of the log trie node.
func (t *EthLogTrie) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthLogTrie) String() string {
return fmt.Sprintf("<EthereumLogTrie %s>", t.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (t *EthLogTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-log-trie",
}
}
// logTrie wraps a localTrie for use on the log trie.
type logTrie struct {
*localTrie
}
// newLogTrie initializes and returns a logTrie.
func newLogTrie() *logTrie {
return &logTrie{
localTrie: newLocalTrie(),
}
}
// getNodes invokes the localTrie, which computes the root hash of the
// log trie and returns its sql keys, to return a slice
// of EthLogTrie nodes.
func (rt *logTrie) getNodes() ([]node.Node, error) {
keys, err := rt.getKeys()
if err != nil {
return nil, err
}
out := make([]node.Node, 0, len(keys))
for _, k := range keys {
n, err := rt.getNodeFromDB(k)
if err != nil {
return nil, err
}
out = append(out, n)
}
return out, nil
}
func (rt *logTrie) getNodeFromDB(key []byte) (*EthLogTrie, error) {
rawdata, err := rt.db.Get(key)
if err != nil {
return nil, err
}
tn := &TrieNode{
cid: keccak256ToCid(MEthLogTrie, key),
rawdata: rawdata,
}
return &EthLogTrie{TrieNode: tn}, nil
}
// getLeafNodes invokes the localTrie, which returns a slice
// of EthLogTrie leaf nodes.
func (rt *logTrie) getLeafNodes() ([]*EthLogTrie, []*nodeKey, error) {
keys, err := rt.getLeafKeys()
if err != nil {
return nil, nil, err
}
out := make([]*EthLogTrie, 0, len(keys))
for _, k := range keys {
n, err := rt.getNodeFromDB(k.dbKey)
if err != nil {
return nil, nil, err
}
out = append(out, n)
}
return out, keys, nil
}

View File

@ -17,286 +17,78 @@
package ipld
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// FromBlockRLP takes an RLP message representing
// an ethereum block header or body (header, ommers and txs)
// to return it as a set of IPLD nodes for further processing.
func FromBlockRLP(r io.Reader) (*EthHeader, []*EthTx, []*EthTxTrie, error) {
// We may want to use this stream several times
rawdata, err := ioutil.ReadAll(r)
if err != nil {
return nil, nil, nil, err
}
// Let's try to decode the received element as a block body
var decodedBlock types.Block
err = rlp.Decode(bytes.NewBuffer(rawdata), &decodedBlock)
if err != nil {
if err.Error()[:41] != "rlp: expected input list for types.Header" {
return nil, nil, nil, err
}
// Maybe it is just a header... (body sans ommers and txs)
var decodedHeader types.Header
err := rlp.Decode(bytes.NewBuffer(rawdata), &decodedHeader)
if err != nil {
return nil, nil, nil, err
}
c, err := RawdataToCid(MEthHeader, rawdata, multihash.KECCAK_256)
if err != nil {
return nil, nil, nil, err
}
// It was a header
return &EthHeader{
Header: &decodedHeader,
cid: c,
rawdata: rawdata,
}, nil, nil, nil
}
// This is a block body (header + ommers + txs)
// We'll extract the header bits here
headerRawData := getRLP(decodedBlock.Header())
c, err := RawdataToCid(MEthHeader, headerRawData, multihash.KECCAK_256)
if err != nil {
return nil, nil, nil, err
}
ethBlock := &EthHeader{
Header: decodedBlock.Header(),
cid: c,
rawdata: headerRawData,
}
// Process the found eth-tx objects
ethTxNodes, ethTxTrieNodes, err := processTransactions(decodedBlock.Transactions(),
decodedBlock.Header().TxHash[:])
if err != nil {
return nil, nil, nil, err
}
return ethBlock, ethTxNodes, ethTxTrieNodes, nil
}
// FromBlockJSON takes the output of an ethereum client JSON API
// (e.g. parity or geth) and returns a set of IPLD nodes.
func FromBlockJSON(r io.Reader) (*EthHeader, []*EthTx, []*EthTxTrie, error) {
var obj objJSONHeader
dec := json.NewDecoder(r)
err := dec.Decode(&obj)
if err != nil {
return nil, nil, nil, err
}
headerRawData := getRLP(&obj.Result.Header)
c, err := RawdataToCid(MEthHeader, headerRawData, multihash.KECCAK_256)
if err != nil {
return nil, nil, nil, err
}
ethBlock := &EthHeader{
Header: &obj.Result.Header,
cid: c,
rawdata: headerRawData,
}
// Process the found eth-tx objects
ethTxNodes, ethTxTrieNodes, err := processTransactions(obj.Result.Transactions,
obj.Result.Header.TxHash[:])
if err != nil {
return nil, nil, nil, err
}
return ethBlock, ethTxNodes, ethTxTrieNodes, nil
}
// FromBlockAndReceipts takes a block and processes it
// to return a set of IPLD nodes for further processing.
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthTx, []*EthReceipt, [][]*EthLog, error) {
// Process the header
headerNode, err := NewEthHeader(block.Header())
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
}
// Process the uncles
uncleNodes := make([]*EthHeader, len(block.Uncles()))
for i, uncle := range block.Uncles() {
uncleNode, err := NewEthHeader(uncle)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
}
uncleNodes[i] = uncleNode
return nil, nil, nil, nil, err
}
// Process the txs
txNodes, txTrieNodes, err := processTransactions(block.Transactions(),
block.Header().TxHash[:])
txNodes, err := processTransactions(block.Transactions())
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
return nil, nil, nil, nil, err
}
// Process the receipts and logs
rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := processReceiptsAndLogs(receipts,
block.Header().ReceiptHash[:])
rctNodes, logNodes, err := processReceiptsAndLogs(receipts)
return headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err
return headerNode, txNodes, rctNodes, logNodes, err
}
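For orientation, a minimal sketch of how a caller might drive the slimmed-down v5 signature above. The function name and the printing are hypothetical; only FromBlockAndReceipts and the returned node types come from this diff.

// publishBlockIPLDs is an illustrative, in-package consumer of the new return
// values: one header node, flat slices of tx and receipt nodes, and one slice
// of log nodes per receipt.
func publishBlockIPLDs(block *types.Block, receipts []*types.Receipt) error {
	headerNode, txNodes, rctNodes, logNodes, err := FromBlockAndReceipts(block, receipts)
	if err != nil {
		return err
	}
	fmt.Printf("header %s: %d txs, %d receipts\n", headerNode.Cid(), len(txNodes), len(rctNodes))
	for i, logs := range logNodes {
		fmt.Printf("receipt %d (%s): %d logs\n", i, rctNodes[i].Cid(), len(logs))
	}
	return nil
}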
// processTransactions will take the found transactions in a parsed block body
// to return IPLD node slices for eth-tx and eth-tx-trie
func processTransactions(txs []*types.Transaction, expectedTxRoot []byte) ([]*EthTx, []*EthTxTrie, error) {
// to return IPLD node slices for eth-tx
func processTransactions(txs []*types.Transaction) ([]*EthTx, error) {
var ethTxNodes []*EthTx
transactionTrie := newTxTrie()
for idx, tx := range txs {
for _, tx := range txs {
ethTx, err := NewEthTx(tx)
if err != nil {
return nil, nil, err
return nil, err
}
ethTxNodes = append(ethTxNodes, ethTx)
if err := transactionTrie.Add(idx, ethTx.RawData()); err != nil {
return nil, nil, err
}
}
if !bytes.Equal(transactionTrie.rootHash(), expectedTxRoot) {
return nil, nil, fmt.Errorf("wrong transaction hash computed")
}
txTrieNodes, err := transactionTrie.getNodes()
return ethTxNodes, txTrieNodes, err
return ethTxNodes, nil
}
// processReceiptsAndLogs will take in receipts
// to return IPLD node slices for eth-rct, eth-rct-trie, eth-log, eth-log-trie, eth-log-trie-CID, eth-rct-trie-CID
func processReceiptsAndLogs(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
// to return IPLD node slices for eth-rct and eth-log
func processReceiptsAndLogs(rcts []*types.Receipt) ([]*EthReceipt, [][]*EthLog, error) {
// Pre allocating memory.
ethRctNodes := make([]*EthReceipt, 0, len(rcts))
ethLogleafNodeCids := make([][]cid.Cid, 0, len(rcts))
ethLogTrieAndLogNodes := make([][]node.Node, 0, len(rcts))
receiptTrie := NewRctTrie()
ethRctNodes := make([]*EthReceipt, len(rcts))
ethLogNodes := make([][]*EthLog, len(rcts))
for idx, rct := range rcts {
// Process logs for each receipt.
logTrieNodes, leafNodeCids, logTrieHash, err := processLogs(rct.Logs)
logNodes, err := processLogs(rct.Logs)
if err != nil {
return nil, nil, nil, nil, nil, err
return nil, nil, err
}
rct.LogRoot = logTrieHash
ethLogTrieAndLogNodes = append(ethLogTrieAndLogNodes, logTrieNodes)
ethLogleafNodeCids = append(ethLogleafNodeCids, leafNodeCids)
ethRct, err := NewReceipt(rct)
if err != nil {
return nil, nil, nil, nil, nil, err
return nil, nil, err
}
ethRctNodes = append(ethRctNodes, ethRct)
if err = receiptTrie.Add(idx, ethRct.RawData()); err != nil {
return nil, nil, nil, nil, nil, err
}
ethRctNodes[idx] = ethRct
ethLogNodes[idx] = logNodes
}
if !bytes.Equal(receiptTrie.rootHash(), expectedRctRoot) {
return nil, nil, nil, nil, nil, fmt.Errorf("wrong receipt hash computed")
}
rctTrieNodes, err := receiptTrie.GetNodes()
if err != nil {
return nil, nil, nil, nil, nil, err
}
rctLeafNodes, keys, err := receiptTrie.GetLeafNodes()
if err != nil {
return nil, nil, nil, nil, nil, err
}
ethRctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
for i, rln := range rctLeafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
err = rlp.Decode(r, &idx)
if err != nil {
return nil, nil, nil, nil, nil, err
}
ethRctleafNodeCids[idx] = rln.Cid()
}
return ethRctNodes, rctTrieNodes, ethLogTrieAndLogNodes, ethLogleafNodeCids, ethRctleafNodeCids, err
return ethRctNodes, ethLogNodes, nil
}
const keccak256Length = 32
func processLogs(logs []*types.Log) ([]node.Node, []cid.Cid, common.Hash, error) {
logTr := newLogTrie()
shortLog := make(map[uint64]*EthLog, len(logs))
func processLogs(logs []*types.Log) ([]*EthLog, error) {
logNodes := make([]*EthLog, len(logs))
for idx, log := range logs {
logRaw, err := rlp.EncodeToBytes(log)
if err != nil {
return nil, nil, common.Hash{}, err
}
// if len(logRaw) <= keccak256Length it is possible this value's "leaf node"
// will be stored in its parent branch but only if len(partialPathOfTheNode) + len(logRaw) <= keccak256Length
// But we can't tell what the partial path will be until the trie is Commit()-ed
// So wait until we collect all the leaf nodes, and if we are missing any at the indexes we note in shortLog
// we know that these "leaf nodes" were internalized into their parent branch node and we move forward with
// using the cid.Cid we cached in shortLog
if len(logRaw) <= keccak256Length {
logNode, err := NewLog(log)
if err != nil {
return nil, nil, common.Hash{}, err
return nil, err
}
shortLog[uint64(idx)] = logNode
logNodes[idx] = logNode
}
if err = logTr.Add(idx, logRaw); err != nil {
return nil, nil, common.Hash{}, err
}
}
logTrieNodes, err := logTr.getNodes()
if err != nil {
return nil, nil, common.Hash{}, err
}
leafNodes, keys, err := logTr.getLeafNodes()
if err != nil {
return nil, nil, common.Hash{}, err
}
leafNodeCids := make([]cid.Cid, len(logs))
for i, ln := range leafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
err = rlp.Decode(r, &idx)
if err != nil {
return nil, nil, common.Hash{}, err
}
leafNodeCids[idx] = ln.Cid()
}
// this is where we check which logs <= keccak256Length were actually internalized into parent branch node
// and replace those that were with the cid.Cid for the raw log IPLD
for i, l := range shortLog {
if !leafNodeCids[i].Defined() {
leafNodeCids[i] = l.Cid()
// if the leaf node was internalized, we append an IPLD for log itself to the list of IPLDs we need to publish
logTrieNodes = append(logTrieNodes, l)
}
}
return logTrieNodes, leafNodeCids, common.BytesToHash(logTr.rootHash()), err
return logNodes, nil
}
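The pre-v5 comments above about keccak256Length hinge on one rule: a trie value whose RLP encoding is at most 32 bytes may be embedded ("internalized") in its parent branch node instead of getting its own leaf. A small sketch of that predicate, with an invented name, purely to illustrate the per-log check the old code performed:

// mayBeInternalized reports whether a log's RLP encoding is short enough that
// its leaf could be folded into the parent branch node (illustrative only;
// the final decision also depends on the leaf's partial path length).
func mayBeInternalized(log *types.Log) (bool, error) {
	raw, err := rlp.EncodeToBytes(log)
	if err != nil {
		return false, err
	}
	return len(raw) <= keccak256Length, nil // keccak256Length == 32
}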

View File

@ -23,9 +23,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
)
type kind string
@ -92,7 +92,7 @@ func loadBlockData(t *testing.T) []testCase {
func TestFromBlockAndReceipts(t *testing.T) {
testCases := loadBlockData(t)
for _, tc := range testCases {
_, _, _, _, _, _, _, _, _, err := FromBlockAndReceipts(tc.block, tc.receipts)
_, _, _, _, err := FromBlockAndReceipts(tc.block, tc.receipts)
if err != nil {
t.Fatalf("error generating IPLDs from block and receipts, err %v, kind %s, block hash %s", err, tc.kind, tc.block.Hash())
}
@ -100,9 +100,27 @@ func TestFromBlockAndReceipts(t *testing.T) {
}
func TestProcessLogs(t *testing.T) {
logs := []*types.Log{mocks.MockLog1, mocks.MockLog2}
nodes, cids, _, err := processLogs(logs)
logs := []*types.Log{mockLog1, mockLog2}
nodes, err := processLogs(logs)
require.NoError(t, err)
require.GreaterOrEqual(t, len(nodes), len(logs))
require.Equal(t, len(logs), len(cids))
}
var (
address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
anotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
mockTopic11 = common.HexToHash("0x04")
mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
mockLog1 = &types.Log{
Address: address,
Topics: []common.Hash{mockTopic11, mockTopic12},
Data: []byte{},
}
mockLog2 = &types.Log{
Address: anotherAddress,
Topics: []common.Hash{mockTopic21, mockTopic22},
Data: []byte{},
}
)

View File

@ -17,30 +17,19 @@
package ipld
import (
"encoding/json"
"fmt"
"strconv"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
)
type EthReceipt struct {
*types.Receipt
rawdata []byte
cid cid.Cid
}
// Static (compile time) check that EthReceipt satisfies the node.Node interface.
var _ node.Node = (*EthReceipt)(nil)
/*
INPUT
*/
var _ IPLD = (*EthReceipt)(nil)
// NewReceipt converts a types.Receipt to an EthReceipt IPLD node
func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
@ -53,34 +42,11 @@ func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
return nil, err
}
return &EthReceipt{
Receipt: receipt,
cid: c,
rawdata: rctRaw,
}, nil
}
/*
OUTPUT
*/
// DecodeEthReceipt takes a cid and its raw binary data
// from IPFS and returns an EthTx object for further processing.
func DecodeEthReceipt(c cid.Cid, b []byte) (*EthReceipt, error) {
r := new(types.Receipt)
if err := r.UnmarshalBinary(b); err != nil {
return nil, err
}
return &EthReceipt{
Receipt: r,
cid: c,
rawdata: b,
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the receipt.
func (r *EthReceipt) RawData() []byte {
return r.rawdata
@ -90,116 +56,3 @@ func (r *EthReceipt) RawData() []byte {
func (r *EthReceipt) Cid() cid.Cid {
return r.cid
}
// String is a helper for output
func (r *EthReceipt) String() string {
return fmt.Sprintf("<EthereumReceipt %s>", r.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (r *EthReceipt) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-receipt",
}
}
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (r *EthReceipt) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return r, nil, nil
}
first, rest := p[0], p[1:]
if first != "logs" && len(p) != 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", first)
}
switch first {
case "logs":
return &node.Link{Cid: commonHashToCid(MEthLog, r.LogRoot)}, rest, nil
case "root":
return r.PostState, nil, nil
case "status":
return r.Status, nil, nil
case "cumulativeGasUsed":
return r.CumulativeGasUsed, nil, nil
case "logsBloom":
return r.Bloom, nil, nil
case "transactionHash":
return r.TxHash, nil, nil
case "contractAddress":
return r.ContractAddress, nil, nil
case "gasUsed":
return r.GasUsed, nil, nil
case "type":
return r.Type, nil, nil
default:
return nil, nil, ErrInvalidLink
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (r *EthReceipt) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{"type", "root", "status", "cumulativeGasUsed", "logsBloom", "logs", "transactionHash", "contractAddress", "gasUsed"}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (r *EthReceipt) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := r.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
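A short, hypothetical walk-through of the pre-v5 Resolve and ResolveLink behaviour above, assuming rct is an *EthReceipt built with NewReceipt: scalar fields come back directly, while "logs" resolves to an IPLD link to the log trie root. The helper name is invented.

// describeReceipt shows the two kinds of results the Node interface methods
// return for a receipt node (illustrative only).
func describeReceipt(rct *EthReceipt) error {
	status, _, err := rct.Resolve([]string{"status"})
	if err != nil {
		return err
	}
	logsLink, _, err := rct.ResolveLink([]string{"logs"})
	if err != nil {
		return err
	}
	fmt.Printf("status %v, log trie root %s\n", status, logsLink.Cid)
	return nil
}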
// Copy will go away. It is here to comply with the Node interface.
func (r *EthReceipt) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (r *EthReceipt) Links() []*node.Link {
return []*node.Link{
{Cid: commonHashToCid(MEthLog, r.LogRoot)},
}
}
// Stat will go away. It is here to comply with the interface.
func (r *EthReceipt) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (r *EthReceipt) Size() (uint64, error) {
return strconv.ParseUint(r.Receipt.Size().String(), 10, 64)
}
/*
EthReceipt functions
*/
// MarshalJSON processes the receipt into readable JSON format.
func (r *EthReceipt) MarshalJSON() ([]byte, error) {
out := map[string]interface{}{
"root": r.PostState,
"status": r.Status,
"cumulativeGasUsed": r.CumulativeGasUsed,
"logsBloom": r.Bloom,
"logs": r.Logs,
"transactionHash": r.TxHash,
"contractAddress": r.ContractAddress,
"gasUsed": r.GasUsed,
}
return json.Marshal(out)
}

View File

@ -1,175 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
)
// EthRctTrie (eth-rct-trie codec 0x94) represents
// a node from the receipt trie in ethereum.
type EthRctTrie struct {
*TrieNode
}
// Static (compile time) check that EthRctTrie satisfies the node.Node interface.
var _ node.Node = (*EthRctTrie)(nil)
/*
INPUT
*/
// To create a proper trie of the eth-rct-trie objects, it is required
// to input all receipts belonging to a block in a single step.
// We are adding the receipts, and creating their trie at
// block body parsing time.
/*
OUTPUT
*/
// DecodeEthRctTrie returns an EthRctTrie object from its cid and rawdata.
func DecodeEthRctTrie(c cid.Cid, b []byte) (*EthRctTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthRctTrieLeaf)
if err != nil {
return nil, err
}
return &EthRctTrie{TrieNode: tn}, nil
}
// decodeEthRctTrieLeaf parses an eth-rct-trie leaf
// from decoded RLP elements
func decodeEthRctTrieLeaf(i []interface{}) ([]interface{}, error) {
r := new(types.Receipt)
if err := r.UnmarshalBinary(i[1].([]byte)); err != nil {
return nil, err
}
c, err := RawdataToCid(MEthTxReceipt, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthReceipt{
Receipt: r,
cid: c,
rawdata: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the receipt trie node.
func (t *EthRctTrie) RawData() []byte {
return t.rawdata
}
// Cid returns the cid of the receipt trie node.
func (t *EthRctTrie) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthRctTrie) String() string {
return fmt.Sprintf("<EthereumRctTrie %s>", t.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (t *EthRctTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-rct-trie",
}
}
/*
EthRctTrie functions
*/
// rctTrie wraps a localTrie for use on the receipt trie.
type rctTrie struct {
*localTrie
}
// NewRctTrie initializes and returns a rctTrie.
func NewRctTrie() *rctTrie {
return &rctTrie{
localTrie: newLocalTrie(),
}
}
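To make the "single step" remark above concrete, here is a hedged sketch of how this pre-v5 helper would be fed: every receipt of a block is added, in index order, to one rctTrie before any nodes or the root hash are read. Add lives on the embedded localTrie, which is outside this diff, so its use here is an assumption about that interface; buildReceiptTrie itself is an invented name.

// buildReceiptTrie adds every receipt of a block to a fresh rctTrie in index
// order, mirroring how processReceiptsAndLogs used the helper before v5.
func buildReceiptTrie(rcts []*types.Receipt) (*rctTrie, error) {
	rt := NewRctTrie()
	for idx, rct := range rcts {
		raw, err := rct.MarshalBinary()
		if err != nil {
			return nil, err
		}
		if err := rt.Add(idx, raw); err != nil {
			return nil, err
		}
	}
	return rt, nil
}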
// GetNodes invokes the localTrie, which computes the root hash of the
// receipt trie and returns its sql keys, to return a slice
// of EthRctTrie nodes.
func (rt *rctTrie) GetNodes() ([]*EthRctTrie, error) {
keys, err := rt.getKeys()
if err != nil {
return nil, err
}
var out []*EthRctTrie
for _, k := range keys {
n, err := rt.getNodeFromDB(k)
if err != nil {
return nil, err
}
out = append(out, n)
}
return out, nil
}
// GetLeafNodes invokes the localTrie, which returns a slice
// of EthRctTrie leaf nodes.
func (rt *rctTrie) GetLeafNodes() ([]*EthRctTrie, []*nodeKey, error) {
keys, err := rt.getLeafKeys()
if err != nil {
return nil, nil, err
}
out := make([]*EthRctTrie, 0, len(keys))
for _, k := range keys {
n, err := rt.getNodeFromDB(k.dbKey)
if err != nil {
return nil, nil, err
}
out = append(out, n)
}
return out, keys, nil
}
func (rt *rctTrie) getNodeFromDB(key []byte) (*EthRctTrie, error) {
rawdata, err := rt.db.Get(key)
if err != nil {
return nil, err
}
tn := &TrieNode{
cid: keccak256ToCid(MEthTxReceiptTrie, key),
rawdata: rawdata,
}
return &EthRctTrie{TrieNode: tn}, nil
}

View File

@ -1,126 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"io"
"io/ioutil"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/rlp"
)
// EthStateTrie (eth-state-trie, codec 0x96), represents
// a node from the state trie in ethereum.
type EthStateTrie struct {
*TrieNode
}
// Static (compile time) check that EthStateTrie satisfies the node.Node interface.
var _ node.Node = (*EthStateTrie)(nil)
/*
INPUT
*/
// FromStateTrieRLPFile takes the RLP representation of an ethereum
// state trie node to return it as an IPLD node for further processing.
func FromStateTrieRLPFile(r io.Reader) (*EthStateTrie, error) {
raw, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
return FromStateTrieRLP(raw)
}
// FromStateTrieRLP takes the RLP representation of an ethereum
// state trie node to return it as an IPLD node for further processing.
func FromStateTrieRLP(raw []byte) (*EthStateTrie, error) {
c, err := RawdataToCid(MEthStateTrie, raw, multihash.KECCAK_256)
if err != nil {
return nil, err
}
// Let's run the whole mile and process the nodeKind and
// its elements, in case somebody would need this function
// to parse an RLP element from the filesystem
return DecodeEthStateTrie(c, raw)
}
/*
OUTPUT
*/
// DecodeEthStateTrie returns an EthStateTrie object from its cid and rawdata.
func DecodeEthStateTrie(c cid.Cid, b []byte) (*EthStateTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthStateTrieLeaf)
if err != nil {
return nil, err
}
return &EthStateTrie{TrieNode: tn}, nil
}
// decodeEthStateTrieLeaf parses an eth-state-trie leaf
// from decoded RLP elements
func decodeEthStateTrieLeaf(i []interface{}) ([]interface{}, error) {
var account EthAccount
err := rlp.DecodeBytes(i[1].([]byte), &account)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthAccountSnapshot, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthAccountSnapshot{
EthAccount: &account,
cid: c,
rawdata: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the state trie node.
func (st *EthStateTrie) RawData() []byte {
return st.rawdata
}
// Cid returns the cid of the state trie node.
func (st *EthStateTrie) Cid() cid.Cid {
return st.cid
}
// String is a helper for output
func (st *EthStateTrie) String() string {
return fmt.Sprintf("<EthereumStateTrie %s>", st.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (st *EthStateTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-state-trie",
}
}

View File

@ -1,326 +0,0 @@
package ipld
import (
"fmt"
"os"
"testing"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
)
/*
INPUT
OUTPUT
*/
func TestStateTrieNodeEvenExtensionParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-eb2f5f")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "extension" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "extension", output.nodeKind)
}
if len(output.elements) != 2 {
t.Fatalf("Wrong number of elements for an extension node\r\nexpected %d\r\ngot %d", 2, len(output.elements))
}
if fmt.Sprintf("%x", output.elements[0]) != "0d08" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "0d08", fmt.Sprintf("%x", output.elements[0]))
}
if output.elements[1].(cid.Cid).String() !=
"baglacgzalnzmhhnxudxtga6t3do2rctb6ycgyj6mjnycoamlnc733nnbkd6q" {
t.Fatalf("Wrong CID\r\nexpected %s\r\ngot %s", "baglacgzalnzmhhnxudxtga6t3do2rctb6ycgyj6mjnycoamlnc733nnbkd6q", output.elements[1].(cid.Cid).String())
}
}
func TestStateTrieNodeOddExtensionParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-56864f")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "extension" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "extension", output.nodeKind)
}
if len(output.elements) != 2 {
t.Fatalf("Wrong number of elements for an extension node\r\nexpected %d\r\ngot %d", 2, len(output.elements))
}
if fmt.Sprintf("%x", output.elements[0]) != "02" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "02", fmt.Sprintf("%x", output.elements[0]))
}
if output.elements[1].(cid.Cid).String() !=
"baglacgzaizf2czb7wztoox4lu23qkwkbfamqsdzcmejzr3rsszrvkaktpfeq" {
t.Fatalf("Wrong CID\r\nexpected %s\r\ngot %s", "baglacgzaizf2czb7wztoox4lu23qkwkbfamqsdzcmejzr3rsszrvkaktpfeq", output.elements[1].(cid.Cid).String())
}
}
func TestStateTrieNodeEvenLeafParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-0e8b34")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "leaf" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "leaf", output.nodeKind)
}
if len(output.elements) != 2 {
t.Fatalf("Wrong number of elements for an extension node\r\nexpected %d\r\ngot %d", 2, len(output.elements))
}
// bd66f60e5b954e1af93ded1b02cb575ff0ed6d9241797eff7576b0bf0637
if fmt.Sprintf("%x", output.elements[0].([]byte)[0:10]) != "0b0d06060f06000e050b" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "0b0d06060f06000e050b", fmt.Sprintf("%x", output.elements[0].([]byte)[0:10]))
}
if output.elements[1].(*EthAccountSnapshot).String() !=
"<EthereumAccountSnapshot baglqcgzaf5tapdf2fwb6mo4ijtovqpoi4n3f4jv2yx6avvz6sjypp6vytfva>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthereumAccountSnapshot baglqcgzaf5tapdf2fwb6mo4ijtovqpoi4n3f4jv2yx6avvz6sjypp6vytfva>", output.elements[1].(*EthAccountSnapshot).String())
}
}
func TestStateTrieNodeOddLeafParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-c9070d")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "leaf" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "leaf", output.nodeKind)
}
if len(output.elements) != 2 {
t.Fatalf("Wrong number of elements for an extension node\r\nexpected %d\r\ngot %d", 2, len(output.elements))
}
// 6c9db9bb545a03425e300f3ee72bae098110336dd3eaf48c20a2e5b6865fc
if fmt.Sprintf("%x", output.elements[0].([]byte)[0:10]) != "060c090d0b090b0b0504" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "060c090d0b090b0b0504", fmt.Sprintf("%x", output.elements[0].([]byte)[0:10]))
}
if output.elements[1].(*EthAccountSnapshot).String() !=
"<EthereumAccountSnapshot baglqcgzasckx2alxk43cksshnztjvhfyvbbh6bkp376gtcndm5cg4fkrkhsa>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthereumAccountSnapshot baglqcgzasckx2alxk43cksshnztjvhfyvbbh6bkp376gtcndm5cg4fkrkhsa>", output.elements[1].(*EthAccountSnapshot).String())
}
}
/*
Block INTERFACE
*/
func TestStateTrieBlockElements(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-d7f897")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if fmt.Sprintf("%x", output.RawData())[:10] != "f90211a090" {
t.Fatalf("Wrong Data\r\nexpected %s\r\ngot %s", "f90211a090", fmt.Sprintf("%x", output.RawData())[:10])
}
if output.Cid().String() !=
"baglacgza274jot5vvr4ntlajtonnkaml5xbm4cts3liye6qxbhndawapavca" {
t.Fatalf("Wrong Cid\r\nexpected %s\r\ngot %s", "baglacgza274jot5vvr4ntlajtonnkaml5xbm4cts3liye6qxbhndawapavca", output.Cid().String())
}
}
func TestStateTrieString(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-d7f897")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.String() !=
"<EthereumStateTrie baglacgza274jot5vvr4ntlajtonnkaml5xbm4cts3liye6qxbhndawapavca>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthereumStateTrie baglacgza274jot5vvr4ntlajtonnkaml5xbm4cts3liye6qxbhndawapavca>", output.String())
}
}
func TestStateTrieLoggable(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-d7f897")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
l := output.Loggable()
if _, ok := l["type"]; !ok {
t.Fatal("Loggable map expected the field 'type'")
}
if l["type"] != "eth-state-trie" {
t.Fatalf("Wrong Loggable 'type' value\r\nexpected %s\r\ngot %s", "eth-state-trie", l["type"])
}
}
/*
TRIE NODE (Through EthStateTrie)
Node INTERFACE
*/
func TestTraverseStateTrieWithResolve(t *testing.T) {
var err error
stMap := prepareStateTrieMap(t)
// This is the cid of the root of the block 0
// baglacgza274jot5vvr4ntlajtonnkaml5xbm4cts3liye6qxbhndawapavca
currentNode := stMap["baglacgza274jot5vvr4ntlajtonnkaml5xbm4cts3liye6qxbhndawapavca"]
// This is the path we want to traverse
// The eth address is 0x5abfec25f74cd88437631a7731906932776356f9
// Its keccak-256 is cdd3e25edec0a536a05f5e5ab90a5603624c0ed77453b2e8f955cf8b43d4d0fb
// We use the keccak-256(addr) to traverse the state trie in ethereum.
var traversePath []string
for _, s := range "cdd3e25edec0a536a05f5e5ab90a5603624c0ed77453b2e8f955cf8b43d4d0fb" {
traversePath = append(traversePath, string(s))
}
traversePath = append(traversePath, "balance")
var obj interface{}
for {
obj, traversePath, err = currentNode.Resolve(traversePath)
link, ok := obj.(*node.Link)
if !ok {
break
}
if err != nil {
t.Fatal("Error should be nil")
}
currentNode = stMap[link.Cid.String()]
if currentNode == nil {
t.Fatal("state trie node not found in memory map")
}
}
if fmt.Sprintf("%v", obj) != "11901484239480000000000000" {
t.Fatalf("Wrong balance value\r\nexpected %s\r\ngot %s", "11901484239480000000000000", fmt.Sprintf("%v", obj))
}
}
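The path used in the test above can be derived mechanically: hash the account address with keccak-256 and split the hex digest into single-nibble path elements, then append the account field. A small sketch, assuming the go-ethereum crypto and common packages are imported; statePathForAddress is an invented helper name.

// statePathForAddress turns an address into the nibble-by-nibble path used to
// traverse the state trie IPLDs, ending at the requested account field.
func statePathForAddress(addr common.Address, field string) []string {
	hexKey := fmt.Sprintf("%x", crypto.Keccak256(addr.Bytes()))
	path := make([]string, 0, len(hexKey)+1)
	for _, nibble := range hexKey {
		path = append(path, string(nibble))
	}
	return append(path, field)
}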
func TestStateTrieResolveLinks(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-eb2f5f")
checkError(err, t)
stNode, err := FromStateTrieRLPFile(fi)
checkError(err, t)
// bad case
obj, rest, err := stNode.ResolveLink([]string{"supercalifragilist"})
if obj != nil {
t.Fatalf("Expected obj to be nil")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err.Error() != "invalid path element" {
t.Fatalf("Wrong error\r\nexpected %s\r\ngot %s", "invalid path element", err.Error())
}
// good case
obj, rest, err = stNode.ResolveLink([]string{"d8"})
if obj == nil {
t.Fatalf("Expected a not nil obj to be returned")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err != nil {
t.Fatal("Expected error to be nil")
}
}
func TestStateTrieCopy(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-eb2f5f")
checkError(err, t)
stNode, err := FromStateTrieRLPFile(fi)
checkError(err, t)
defer func() {
r := recover()
if r == nil {
t.Fatal("Expected panic")
}
if r != "implement me" {
t.Fatalf("Wrong panic message\r\nexpected %s\r\ngot %s", "'implement me'", r)
}
}()
_ = stNode.Copy()
}
func TestStateTrieStat(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-eb2f5f")
checkError(err, t)
stNode, err := FromStateTrieRLPFile(fi)
checkError(err, t)
obj, err := stNode.Stat()
if obj == nil {
t.Fatal("Expected a not null object node.NodeStat")
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
func TestStateTrieSize(t *testing.T) {
fi, err := os.Open("test_data/eth-state-trie-rlp-eb2f5f")
checkError(err, t)
stNode, err := FromStateTrieRLPFile(fi)
checkError(err, t)
size, err := stNode.Size()
if size != uint64(0) {
t.Fatalf("Wrong size\r\nexpected %d\r\ngot %d", 0, size)
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
func prepareStateTrieMap(t *testing.T) map[string]*EthStateTrie {
filepaths := []string{
"test_data/eth-state-trie-rlp-0e8b34",
"test_data/eth-state-trie-rlp-56864f",
"test_data/eth-state-trie-rlp-6fc2d7",
"test_data/eth-state-trie-rlp-727994",
"test_data/eth-state-trie-rlp-c9070d",
"test_data/eth-state-trie-rlp-d5be90",
"test_data/eth-state-trie-rlp-d7f897",
"test_data/eth-state-trie-rlp-eb2f5f",
}
out := make(map[string]*EthStateTrie)
for _, fp := range filepaths {
fi, err := os.Open(fp)
checkError(err, t)
stateTrieNode, err := FromStateTrieRLPFile(fi)
checkError(err, t)
out[stateTrieNode.Cid().String()] = stateTrieNode
}
return out
}

View File

@ -1,112 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"io"
"io/ioutil"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
)
// EthStorageTrie (eth-storage-trie, codec 0x98), represents
// a node from the storage trie in ethereum.
type EthStorageTrie struct {
*TrieNode
}
// Static (compile time) check that EthStorageTrie satisfies the node.Node interface.
var _ node.Node = (*EthStorageTrie)(nil)
/*
INPUT
*/
// FromStorageTrieRLPFile takes the RLP representation of an ethereum
// storage trie node to return it as an IPLD node for further processing.
func FromStorageTrieRLPFile(r io.Reader) (*EthStorageTrie, error) {
raw, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
return FromStorageTrieRLP(raw)
}
// FromStorageTrieRLP takes the RLP representation of an ethereum
// storage trie node to return it as an IPLD node for further processing.
func FromStorageTrieRLP(raw []byte) (*EthStorageTrie, error) {
c, err := RawdataToCid(MEthStorageTrie, raw, multihash.KECCAK_256)
if err != nil {
return nil, err
}
// Let's run the whole mile and process the nodeKind and
// its elements, in case somebody would need this function
// to parse an RLP element from the filesystem
return DecodeEthStorageTrie(c, raw)
}
/*
OUTPUT
*/
// DecodeEthStorageTrie returns an EthStorageTrie object from its cid and rawdata.
func DecodeEthStorageTrie(c cid.Cid, b []byte) (*EthStorageTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthStorageTrieLeaf)
if err != nil {
return nil, err
}
return &EthStorageTrie{TrieNode: tn}, nil
}
// decodeEthStorageTrieLeaf parses an eth-storage-trie leaf
// from decoded RLP elements
func decodeEthStorageTrieLeaf(i []interface{}) ([]interface{}, error) {
return []interface{}{
i[0].([]byte),
i[1].([]byte),
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the storage trie node.
func (st *EthStorageTrie) RawData() []byte {
return st.rawdata
}
// Cid returns the cid of the storage trie node.
func (st *EthStorageTrie) Cid() cid.Cid {
return st.cid
}
// String is a helper for output
func (st *EthStorageTrie) String() string {
return fmt.Sprintf("<EthereumStorageTrie %s>", st.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (st *EthStorageTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-storage-trie",
}
}

View File

@ -1,140 +0,0 @@
package ipld
import (
"fmt"
"os"
"testing"
"github.com/ipfs/go-cid"
)
/*
INPUT
OUTPUT
*/
func TestStorageTrieNodeExtensionParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-storage-trie-rlp-113049")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "extension" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "extension", output.nodeKind)
}
if len(output.elements) != 2 {
t.Fatalf("Wrong number of elements for an extension node\r\nexpected %d\r\ngot %d", 2, len(output.elements))
}
if fmt.Sprintf("%x", output.elements[0]) != "0a" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "0a", fmt.Sprintf("%x", output.elements[0]))
}
if output.elements[1].(cid.Cid).String() !=
"baglacgzautxeutufae7owyrezfvwpan2vusocmxgzwqhzrhjbwprp2texgsq" {
t.Fatalf("Wrong CID\r\nexpected %s\r\ngot %s", "baglacgzautxeutufae7owyrezfvwpan2vusocmxgzwqhzrhjbwprp2texgsq", output.elements[1].(cid.Cid).String())
}
}
func TestStateTrieNodeLeafParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-storage-trie-rlp-ffbcad")
checkError(err, t)
output, err := FromStorageTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "leaf" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "leaf", output.nodeKind)
}
if len(output.elements) != 2 {
t.Fatalf("Wrong number of elements for an leaf node\r\nexpected %d\r\ngot %d", 2, len(output.elements))
}
// 2ee1ae9c502e48e0ed528b7b39ac569cef69d7844b5606841a7f3fe898a2
if fmt.Sprintf("%x", output.elements[0].([]byte)[:10]) != "020e0e010a0e090c0500" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "020e0e010a0e090c0500", fmt.Sprintf("%x", output.elements[0].([]byte)[:10]))
}
if fmt.Sprintf("%x", output.elements[1]) != "89056c31f304b2530000" {
t.Fatalf("Wrong Value\r\nexpected %s\r\ngot %s", "89056c31f304b2530000", fmt.Sprintf("%x", output.elements[1]))
}
}
func TestStateTrieNodeBranchParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-storage-trie-rlp-ffc25c")
checkError(err, t)
output, err := FromStateTrieRLPFile(fi)
checkError(err, t)
if output.nodeKind != "branch" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "branch", output.nodeKind)
}
if len(output.elements) != 17 {
t.Fatalf("Wrong number of elements for an branch node\r\nexpected %d\r\ngot %d", 17, len(output.elements))
}
if fmt.Sprintf("%s", output.elements[4]) !=
"baglacgzadqhbmlxrxtw5hplcq5jn74p4dceryzw664w3237ra52dnghbjpva" {
t.Fatalf("Wrong Cid\r\nexpected %s\r\ngot %s", "baglacgzadqhbmlxrxtw5hplcq5jn74p4dceryzw664w3237ra52dnghbjpva", fmt.Sprintf("%s", output.elements[4]))
}
if fmt.Sprintf("%s", output.elements[10]) !=
"baglacgza77d37i2v6uhtzeeq4vngragjbgbwq3lylpoc3lihenvzimybzxmq" {
t.Fatalf("Wrong Cid\r\nexpected %s\r\ngot %s", "baglacgza77d37i2v6uhtzeeq4vngragjbgbwq3lylpoc3lihenvzimybzxmq", fmt.Sprintf("%s", output.elements[10]))
}
}
/*
Block INTERFACE
*/
func TestStorageTrieBlockElements(t *testing.T) {
fi, err := os.Open("test_data/eth-storage-trie-rlp-ffbcad")
checkError(err, t)
output, err := FromStorageTrieRLPFile(fi)
checkError(err, t)
if fmt.Sprintf("%x", output.RawData())[:10] != "eb9f202ee1" {
t.Fatalf("Wrong Data\r\nexpected %s\r\ngot %s", "eb9f202ee1", fmt.Sprintf("%x", output.RawData())[:10])
}
if output.Cid().String() !=
"bagmacgza766k3oprj2qxn36eycw55pogmu3dwtfay6zdh6ajrhvw3b2nqg5a" {
t.Fatalf("Wrong Cid\r\nexpected %s\r\ngot %s", "bagmacgza766k3oprj2qxn36eycw55pogmu3dwtfay6zdh6ajrhvw3b2nqg5a", output.Cid().String())
}
}
func TestStorageTrieString(t *testing.T) {
fi, err := os.Open("test_data/eth-storage-trie-rlp-ffbcad")
checkError(err, t)
output, err := FromStorageTrieRLPFile(fi)
checkError(err, t)
if output.String() !=
"<EthereumStorageTrie bagmacgza766k3oprj2qxn36eycw55pogmu3dwtfay6zdh6ajrhvw3b2nqg5a>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthereumStorageTrie bagmacgza766k3oprj2qxn36eycw55pogmu3dwtfay6zdh6ajrhvw3b2nqg5a>", output.String())
}
}
func TestStorageTrieLoggable(t *testing.T) {
fi, err := os.Open("test_data/eth-storage-trie-rlp-ffbcad")
checkError(err, t)
output, err := FromStorageTrieRLPFile(fi)
checkError(err, t)
l := output.Loggable()
if _, ok := l["type"]; !ok {
t.Fatal("Loggable map expected the field 'type'")
}
if l["type"] != "eth-storage-trie" {
t.Fatalf("Wrong Loggable 'type' value\r\nexpected %s\r\ngot %s", "eth-storage-trie", l["type"])
}
}

View File

@ -17,33 +17,20 @@
package ipld
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// EthTx (eth-tx codec 0x93) represents an ethereum transaction
type EthTx struct {
*types.Transaction
cid cid.Cid
rawdata []byte
}
// Static (compile time) check that EthTx satisfies the node.Node interface.
var _ node.Node = (*EthTx)(nil)
/*
INPUT
*/
var _ IPLD = (*EthTx)(nil)
// NewEthTx converts a *types.Transaction to an EthTx IPLD node
func NewEthTx(tx *types.Transaction) (*EthTx, error) {
@ -56,34 +43,11 @@ func NewEthTx(tx *types.Transaction) (*EthTx, error) {
return nil, err
}
return &EthTx{
Transaction: tx,
cid: c,
rawdata: txRaw,
}, nil
}
/*
OUTPUT
*/
// DecodeEthTx takes a cid and its raw binary data
// from IPFS and returns an EthTx object for further processing.
func DecodeEthTx(c cid.Cid, b []byte) (*EthTx, error) {
t := new(types.Transaction)
if err := t.UnmarshalBinary(b); err != nil {
return nil, err
}
return &EthTx{
Transaction: t,
cid: c,
rawdata: b,
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the transaction.
func (t *EthTx) RawData() []byte {
return t.rawdata
@ -93,146 +57,3 @@ func (t *EthTx) RawData() []byte {
func (t *EthTx) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthTx) String() string {
return fmt.Sprintf("<EthereumTx %s>", t.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (t *EthTx) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-tx",
}
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *EthTx) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return t, nil, nil
}
if len(p) > 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
}
switch p[0] {
case "type":
return t.Type(), nil, nil
case "gas":
return t.Gas(), nil, nil
case "gasPrice":
return t.GasPrice(), nil, nil
case "input":
return fmt.Sprintf("%x", t.Data()), nil, nil
case "nonce":
return t.Nonce(), nil, nil
case "r":
_, r, _ := t.RawSignatureValues()
return hexutil.EncodeBig(r), nil, nil
case "s":
_, _, s := t.RawSignatureValues()
return hexutil.EncodeBig(s), nil, nil
case "toAddress":
return t.To(), nil, nil
case "v":
v, _, _ := t.RawSignatureValues()
return hexutil.EncodeBig(v), nil, nil
case "value":
return hexutil.EncodeBig(t.Value()), nil, nil
default:
return nil, nil, ErrInvalidLink
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *EthTx) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{"type", "gas", "gasPrice", "input", "nonce", "r", "s", "toAddress", "v", "value"}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (t *EthTx) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := t.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the interface.
func (t *EthTx) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (t *EthTx) Links() []*node.Link {
return nil
}
// Stat will go away. It is here to comply with the interface.
func (t *EthTx) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface. It returns the byte size for the transaction
func (t *EthTx) Size() (uint64, error) {
spl := strings.Split(t.Transaction.Size().String(), " ")
size, units := spl[0], spl[1]
floatSize, err := strconv.ParseFloat(size, 64)
if err != nil {
return 0, err
}
var byteSize uint64
switch units {
case "B":
byteSize = uint64(floatSize)
case "KB":
byteSize = uint64(floatSize * 1000)
case "MB":
byteSize = uint64(floatSize * 1000000)
case "GB":
byteSize = uint64(floatSize * 1000000000)
case "TB":
byteSize = uint64(floatSize * 1000000000000)
default:
return 0, fmt.Errorf("unrecognized units %s", units)
}
return byteSize, nil
}
/*
EthTx functions
*/
// MarshalJSON processes the transaction into readable JSON format.
func (t *EthTx) MarshalJSON() ([]byte, error) {
v, r, s := t.RawSignatureValues()
out := map[string]interface{}{
"gas": t.Gas(),
"gasPrice": hexutil.EncodeBig(t.GasPrice()),
"input": fmt.Sprintf("%x", t.Data()),
"nonce": t.Nonce(),
"r": hexutil.EncodeBig(r),
"s": hexutil.EncodeBig(s),
"toAddress": t.To(),
"v": hexutil.EncodeBig(v),
"value": hexutil.EncodeBig(t.Value()),
}
return json.Marshal(out)
}

View File

@ -1,411 +0,0 @@
package ipld
import (
"encoding/hex"
"fmt"
"os"
"strconv"
"strings"
"testing"
block "github.com/ipfs/go-block-format"
"github.com/multiformats/go-multihash"
)
/*
EthBlock
INPUT
*/
func TestTxInBlockBodyRlpParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-block-body-rlp-999999")
checkError(err, t)
_, output, _, err := FromBlockRLP(fi)
checkError(err, t)
if len(output) != 11 {
t.Fatalf("Wrong number of parsed txs\r\nexpected %d\r\ngot %d", 11, len(output))
}
// Oh, let's just grab the last element and one from the middle
testTx05Fields(output[5], t)
testTx10Fields(output[10], t)
}
func TestTxInBlockHeaderRlpParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-block-header-rlp-999999")
checkError(err, t)
_, output, _, err := FromBlockRLP(fi)
checkError(err, t)
if len(output) != 0 {
t.Fatalf("Wrong number of txs\r\nexpected %d\r\ngot %d", 0, len(output))
}
}
func TestTxInBlockBodyJsonParsing(t *testing.T) {
fi, err := os.Open("test_data/eth-block-body-json-999999")
checkError(err, t)
_, output, _, err := FromBlockJSON(fi)
checkError(err, t)
if len(output) != 11 {
t.Fatalf("Wrong number of parsed txs\r\nexpected %d\r\ngot %d", 11, len(output))
}
testTx05Fields(output[5], t)
testTx10Fields(output[10], t)
}
/*
OUTPUT
*/
func TestDecodeTransaction(t *testing.T) {
// Prepare the "fetched transaction".
// This one is supposed to be in the datastore already,
// and given away by github.com/ipfs/go-ipfs/merkledag
rawTransactionString :=
"f86c34850df84758008252089432be343b94f860124dc4fee278fdcbd38c102d88880f25" +
"8512af0d4000801ba0e9a25c929c26d1a95232ba75aef419a91b470651eb77614695e16c" +
"5ba023e383a0679fb2fc0d0b0f3549967c0894ee7d947f07d238a83ef745bc3ced5143a4af36"
rawTransaction, err := hex.DecodeString(rawTransactionString)
checkError(err, t)
c, err := RawdataToCid(MEthTx, rawTransaction, multihash.KECCAK_256)
checkError(err, t)
// Just to clarify: This `block` is an IPFS block
storedTransaction, err := block.NewBlockWithCid(rawTransaction, c)
checkError(err, t)
// Now the proper test
ethTransaction, err := DecodeEthTx(storedTransaction.Cid(), storedTransaction.RawData())
checkError(err, t)
testTx05Fields(ethTransaction, t)
}
/*
Block INTERFACE
*/
func TestEthTxLoggable(t *testing.T) {
txs := prepareParsedTxs(t)
l := txs[0].Loggable()
if _, ok := l["type"]; !ok {
t.Fatal("Loggable map expected the field 'type'")
}
if l["type"] != "eth-tx" {
t.Fatalf("Wrong Loggable 'type' value\r\nexpected %s\r\ngot %s", "eth-tx", l["type"])
}
}
/*
Node INTERFACE
*/
func TestEthTxResolve(t *testing.T) {
tx := prepareParsedTxs(t)[0]
// Empty path
obj, rest, err := tx.Resolve([]string{})
rtx, ok := obj.(*EthTx)
if !ok {
t.Fatal("Wrong type of returned object")
}
if rtx.Cid() != tx.Cid() {
t.Fatalf("Wrong CID\r\nexpected %s\r\ngot %s", tx.Cid().String(), rtx.Cid().String())
}
if rest != nil {
t.Fatal("est should be nil")
}
if err != nil {
t.Fatal("err should be nil")
}
// len(p) > 1
badCases := [][]string{
{"two", "elements"},
{"here", "three", "elements"},
{"and", "here", "four", "elements"},
}
for _, bc := range badCases {
obj, rest, err = tx.Resolve(bc)
if obj != nil {
t.Fatal("obj should be nil")
}
if rest != nil {
t.Fatal("rest should be nil")
}
if err.Error() != fmt.Sprintf("unexpected path elements past %s", bc[0]) {
t.Fatalf("wrong error\r\nexpected %s\r\ngot %s", fmt.Sprintf("unexpected path elements past %s", bc[0]), err.Error())
}
}
moreBadCases := []string{
"i",
"am",
"not",
"a",
"tx",
"field",
}
for _, mbc := range moreBadCases {
obj, rest, err = tx.Resolve([]string{mbc})
if obj != nil {
t.Fatal("obj should be nil")
}
if rest != nil {
t.Fatal("rest should be nil")
}
if err != ErrInvalidLink {
t.Fatalf("wrong error\r\nexpected %s\r\ngot %s", ErrInvalidLink, err)
}
}
goodCases := []string{
"gas",
"gasPrice",
"input",
"nonce",
"r",
"s",
"toAddress",
"v",
"value",
}
for _, gc := range goodCases {
_, _, err = tx.Resolve([]string{gc})
if err != nil {
t.Fatalf("error should be nil %v", gc)
}
}
}
func TestEthTxTree(t *testing.T) {
tx := prepareParsedTxs(t)[0]
_ = tx
// Bad cases
tree := tx.Tree("non-empty-string", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = tx.Tree("non-empty-string", 1)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = tx.Tree("", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
// Good cases
tree = tx.Tree("", 1)
lookupElements := map[string]interface{}{
"type": nil,
"gas": nil,
"gasPrice": nil,
"input": nil,
"nonce": nil,
"r": nil,
"s": nil,
"toAddress": nil,
"v": nil,
"value": nil,
}
if len(tree) != len(lookupElements) {
t.Fatalf("Wrong number of elements\r\nexpected %d\r\ngot %d", len(lookupElements), len(tree))
}
for _, te := range tree {
if _, ok := lookupElements[te]; !ok {
t.Fatalf("Unexpected Element: %v", te)
}
}
}
func TestEthTxResolveLink(t *testing.T) {
tx := prepareParsedTxs(t)[0]
// bad case
obj, rest, err := tx.ResolveLink([]string{"supercalifragilist"})
if obj != nil {
t.Fatalf("Expected obj to be nil")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err != ErrInvalidLink {
t.Fatalf("Wrong error\r\nexpected %s\r\ngot %s", ErrInvalidLink, err.Error())
}
// good case
obj, rest, err = tx.ResolveLink([]string{"nonce"})
if obj != nil {
t.Fatalf("Expected obj to be nil")
}
if rest != nil {
t.Fatal("Expected rest to be nil")
}
if err.Error() != "resolved item was not a link" {
t.Fatalf("Wrong error\r\nexpected %s\r\ngot %s", "resolved item was not a link", err.Error())
}
}
func TestEthTxCopy(t *testing.T) {
tx := prepareParsedTxs(t)[0]
defer func() {
r := recover()
if r == nil {
t.Fatal("Expected panic")
}
if r != "implement me" {
t.Fatalf("Wrong panic message\r\nexpected %s\r\ngot %s", "'implement me'", r)
}
}()
_ = tx.Copy()
}
func TestEthTxLinks(t *testing.T) {
tx := prepareParsedTxs(t)[0]
if tx.Links() != nil {
t.Fatal("Links() expected to return nil")
}
}
func TestEthTxStat(t *testing.T) {
tx := prepareParsedTxs(t)[0]
obj, err := tx.Stat()
if obj == nil {
t.Fatal("Expected a not null object node.NodeStat")
}
if err != nil {
t.Fatal("Expected a nil error")
}
}
func TestEthTxSize(t *testing.T) {
tx := prepareParsedTxs(t)[0]
size, err := tx.Size()
checkError(err, t)
spl := strings.Split(tx.Transaction.Size().String(), " ")
expectedSize, units := spl[0], spl[1]
floatSize, err := strconv.ParseFloat(expectedSize, 64)
checkError(err, t)
var byteSize uint64
switch units {
case "B":
byteSize = uint64(floatSize)
case "KB":
byteSize = uint64(floatSize * 1000)
case "MB":
byteSize = uint64(floatSize * 1000000)
case "GB":
byteSize = uint64(floatSize * 1000000000)
case "TB":
byteSize = uint64(floatSize * 1000000000000)
default:
t.Fatal("Unexpected size units")
}
if size != byteSize {
t.Fatalf("Wrong size\r\nexpected %d\r\ngot %d", byteSize, size)
}
}
/*
AUXILIARIES
*/
// prepareParsedTxs is a convenience helper that parses the RLP test block body and returns its EthTx nodes
func prepareParsedTxs(t *testing.T) []*EthTx {
fi, err := os.Open("test_data/eth-block-body-rlp-999999")
checkError(err, t)
_, output, _, err := FromBlockRLP(fi)
checkError(err, t)
return output
}
func testTx05Fields(ethTx *EthTx, t *testing.T) {
// Was the cid calculated?
if ethTx.Cid().String() != "bagjqcgzawhfnvdnpmpcfoug7d3tz53k2ht3cidr45pnw3y7snpd46azbpp2a" {
t.Fatalf("Wrong cid\r\nexpected %s\r\ngot %s\r\n", "bagjqcgzawhfnvdnpmpcfoug7d3tz53k2ht3cidr45pnw3y7snpd46azbpp2a", ethTx.Cid().String())
}
// Do we have the rawdata available?
if fmt.Sprintf("%x", ethTx.RawData()[:10]) != "f86c34850df847580082" {
t.Fatalf("Wrong Rawdata\r\nexpected %s\r\ngot %s", "f86c34850df847580082", fmt.Sprintf("%x", ethTx.RawData()[:10]))
}
// Proper Fields of types.Transaction
if fmt.Sprintf("%x", ethTx.To()) != "32be343b94f860124dc4fee278fdcbd38c102d88" {
t.Fatalf("Wrong Recipient\r\nexpected %s\r\ngot %s", "32be343b94f860124dc4fee278fdcbd38c102d88", fmt.Sprintf("%x", ethTx.To()))
}
if len(ethTx.Data()) != 0 {
t.Fatalf("Wrong len of Data\r\nexpected %d\r\ngot %d", 0, len(ethTx.Data()))
}
if fmt.Sprintf("%v", ethTx.Gas()) != "21000" {
t.Fatalf("Wrong Gas\r\nexpected %s\r\ngot %s", "21000", fmt.Sprintf("%v", ethTx.Gas()))
}
if fmt.Sprintf("%v", ethTx.Value()) != "1091424800000000000" {
t.Fatalf("Wrong Value\r\nexpected %s\r\ngot %s", "1091424800000000000", fmt.Sprintf("%v", ethTx.Value()))
}
if fmt.Sprintf("%v", ethTx.Nonce()) != "52" {
t.Fatalf("Wrong Nonce\r\nexpected %s\r\ngot %s", "52", fmt.Sprintf("%v", ethTx.Nonce()))
}
if fmt.Sprintf("%v", ethTx.GasPrice()) != "60000000000" {
t.Fatalf("Wrong Gas Price\r\nexpected %s\r\ngot %s", "60000000000", fmt.Sprintf("%v", ethTx.GasPrice()))
}
}
func testTx10Fields(ethTx *EthTx, t *testing.T) {
// Was the cid calculated?
if ethTx.Cid().String() != "bagjqcgzaykakwayoec6j55zmq62cbvmplgf5u5j67affge3ksi4ermgitjoa" {
t.Fatalf("Wrong Cid\r\nexpected %s\r\ngot %s", "bagjqcgzaykakwayoec6j55zmq62cbvmplgf5u5j67affge3ksi4ermgitjoa", ethTx.Cid().String())
}
// Do we have the rawdata available?
if fmt.Sprintf("%x", ethTx.RawData()[:10]) != "f8708302a120850ba43b" {
t.Fatalf("Wrong Rawdata\r\nexpected %s\r\ngot %s", "f8708302a120850ba43b", fmt.Sprintf("%x", ethTx.RawData()[:10]))
}
// Proper Fields of types.Transaction
if fmt.Sprintf("%x", ethTx.To()) != "1c51bf013add0857c5d9cf2f71a7f15ca93d4816" {
t.Fatalf("Wrong Recipient\r\nexpected %s\r\ngot %s", "1c51bf013add0857c5d9cf2f71a7f15ca93d4816", fmt.Sprintf("%x", ethTx.To()))
}
if len(ethTx.Data()) != 0 {
t.Fatalf("Wrong len of Data\r\nexpected %d\r\ngot %d", 0, len(ethTx.Data()))
}
if fmt.Sprintf("%v", ethTx.Gas()) != "90000" {
t.Fatalf("Wrong Gas\r\nexpected %s\r\ngot %s", "90000", fmt.Sprintf("%v", ethTx.Gas()))
}
if fmt.Sprintf("%v", ethTx.Value()) != "1049756850000000000" {
t.Fatalf("Wrong Value\r\nexpected %s\r\ngot %s", "1049756850000000000", fmt.Sprintf("%v", ethTx.Value()))
}
if fmt.Sprintf("%v", ethTx.Nonce()) != "172320" {
t.Fatalf("Wrong Nonce\r\nexpected %s\r\ngot %s", "172320", fmt.Sprintf("%v", ethTx.Nonce()))
}
if fmt.Sprintf("%v", ethTx.GasPrice()) != "50000000000" {
t.Fatalf("Wrong Gas Price\r\nexpected %s\r\ngot %s", "50000000000", fmt.Sprintf("%v", ethTx.GasPrice()))
}
}

View File

@ -1,146 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
)
// EthTxTrie (eth-tx-trie codec 0x92) represents
// a node from the transaction trie in Ethereum.
type EthTxTrie struct {
*TrieNode
}
// Static (compile time) check that EthTxTrie satisfies the node.Node interface.
var _ node.Node = (*EthTxTrie)(nil)
/*
INPUT
*/
// To create a proper trie of eth-tx-trie objects, all transactions
// belonging to the block must be input in a single step.
// The transactions are added, and their trie created, at
// block-body parsing time.
/*
OUTPUT
*/
// DecodeEthTxTrie returns an EthTxTrie object from its cid and rawdata.
func DecodeEthTxTrie(c cid.Cid, b []byte) (*EthTxTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthTxTrieLeaf)
if err != nil {
return nil, err
}
return &EthTxTrie{TrieNode: tn}, nil
}
// decodeEthTxTrieLeaf parses an eth-tx-trie leaf
// from its decoded RLP elements.
func decodeEthTxTrieLeaf(i []interface{}) ([]interface{}, error) {
t := new(types.Transaction)
if err := t.UnmarshalBinary(i[1].([]byte)); err != nil {
return nil, err
}
c, err := RawdataToCid(MEthTx, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthTx{
Transaction: t,
cid: c,
rawdata: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the RLP-encoded binary data of the trie node.
func (t *EthTxTrie) RawData() []byte {
return t.rawdata
}
// Cid returns the cid of the trie node.
func (t *EthTxTrie) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthTxTrie) String() string {
return fmt.Sprintf("<EthereumTxTrie %s>", t.cid)
}
// Loggable returns a map with the type of this IPLD node, for logging.
func (t *EthTxTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-tx-trie",
}
}
/*
EthTxTrie functions
*/
// txTrie wraps a localTrie for use on the transaction trie.
type txTrie struct {
*localTrie
}
// newTxTrie initializes and returns a txTrie.
func newTxTrie() *txTrie {
return &txTrie{
localTrie: newLocalTrie(),
}
}
// getNodes invokes the localTrie, which computes the root hash of the
// transaction trie and returns its database keys, to return a slice
// of EthTxTrie nodes.
func (tt *txTrie) getNodes() ([]*EthTxTrie, error) {
keys, err := tt.getKeys()
if err != nil {
return nil, err
}
var out []*EthTxTrie
for _, k := range keys {
rawdata, err := tt.db.Get(k)
if err != nil {
return nil, err
}
tn := &TrieNode{
cid: keccak256ToCid(MEthTxTrie, k),
rawdata: rawdata,
}
out = append(out, &EthTxTrie{TrieNode: tn})
}
return out, nil
}
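// buildTxTrieExample is an illustrative sketch of how the txTrie above can be
// populated from a block body's transaction list. It is not part of this
// change; the parameter and variable names are assumptions for the example.
func buildTxTrieExample(txs []*types.Transaction) ([]*EthTxTrie, error) {
	tt := newTxTrie()
	for i, tx := range txs {
		raw, err := tx.MarshalBinary() // consensus encoding of the transaction
		if err != nil {
			return nil, err
		}
		// the trie key is the RLP-encoded transaction index
		if err := tt.Add(i, raw); err != nil {
			return nil, err
		}
	}
	// one *EthTxTrie per stored trie node
	return tt.getNodes()
}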

View File

@ -1,503 +0,0 @@
package ipld
import (
"encoding/hex"
"encoding/json"
"fmt"
"os"
"testing"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
)
/*
EthBlock
*/
func TestTxTriesInBlockBodyJSONParsing(t *testing.T) {
// HINT: 306 txs
// cat test_data/eth-block-body-json-4139497 | jsontool | grep transactionIndex | wc -l
// or, https://etherscan.io/block/4139497
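// Note: the 331 nodes checked below presumably include the trie's internal
// branch and extension nodes in addition to the per-transaction leaf nodes.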
fi, err := os.Open("test_data/eth-block-body-json-4139497")
checkError(err, t)
_, _, output, err := FromBlockJSON(fi)
checkError(err, t)
if len(output) != 331 {
t.Fatalf("Wrong number of obtained tx trie nodes\r\nexpected %d\r\n got %d", 331, len(output))
}
}
/*
OUTPUT
*/
func TestTxTrieDecodeExtension(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
if ethTxTrie.nodeKind != "extension" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "extension", ethTxTrie.nodeKind)
}
if len(ethTxTrie.elements) != 2 {
t.Fatalf("Wrong number of elements for an extension node\r\nexpected %d\r\ngot %d", 2, len(ethTxTrie.elements))
}
if fmt.Sprintf("%x", ethTxTrie.elements[0].([]byte)) != "0001" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "0001", fmt.Sprintf("%x", ethTxTrie.elements[0].([]byte)))
}
if ethTxTrie.elements[1].(cid.Cid).String() !=
"bagjacgzak6wdjvshdtb7lrvlteweyd7f5qjr3dmzmh7g2xpi4xrwoujsio2a" {
t.Fatalf("Wrong CID\r\nexpected %s\r\ngot %s", "bagjacgzak6wdjvshdtb7lrvlteweyd7f5qjr3dmzmh7g2xpi4xrwoujsio2a", ethTxTrie.elements[1].(cid.Cid).String())
}
}
func TestTxTrieDecodeLeaf(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieLeaf(t)
if ethTxTrie.nodeKind != "leaf" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "leaf", ethTxTrie.nodeKind)
}
if len(ethTxTrie.elements) != 2 {
t.Fatalf("Wrong number of elements for a leaf node\r\nexpected %d\r\ngot %d", 2, len(ethTxTrie.elements))
}
if fmt.Sprintf("%x", ethTxTrie.elements[0].([]byte)) != "" {
t.Fatalf("Wrong key\r\nexpected %s\r\ngot %s", "", fmt.Sprintf("%x", ethTxTrie.elements[0].([]byte)))
}
if _, ok := ethTxTrie.elements[1].(*EthTx); !ok {
t.Fatal("Expected element to be an EthTx")
}
if ethTxTrie.elements[1].(*EthTx).String() !=
"<EthereumTx bagjqcgzaqsbvff5xrqh5lobxmhuharvkqdc4jmsqfalsu2xs4pbyix7dvfzq>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthereumTx bagjqcgzaqsbvff5xrqh5lobxmhuharvkqdc4jmsqfalsu2xs4pbyix7dvfzq>", ethTxTrie.elements[1].(*EthTx).String())
}
}
func TestTxTrieDecodeBranch(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieBranch(t)
if ethTxTrie.nodeKind != "branch" {
t.Fatalf("Wrong nodeKind\r\nexpected %s\r\ngot %s", "branch", ethTxTrie.nodeKind)
}
if len(ethTxTrie.elements) != 17 {
t.Fatalf("Wrong number of elements for a branch node\r\nexpected %d\r\ngot %d", 17, len(ethTxTrie.elements))
}
for i, element := range ethTxTrie.elements {
switch {
case i < 9:
if _, ok := element.(cid.Cid); !ok {
t.Fatal("Expected element to be a cid")
}
continue
default:
if element != nil {
t.Fatal("Expected element to be a nil")
}
}
}
}
/*
Block INTERFACE
*/
func TestEthTxTrieBlockElements(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
if fmt.Sprintf("%x", ethTxTrie.RawData())[:10] != "e4820001a0" {
t.Fatalf("Wrong Data\r\nexpected %s\r\ngot %s", "e4820001a0", fmt.Sprintf("%x", ethTxTrie.RawData())[:10])
}
if ethTxTrie.Cid().String() !=
"bagjacgzaw6ccgrfc3qnrl6joodbjjiet4haufnt2xww725luwgfhijnmg36q" {
t.Fatalf("Wrong Cid\r\nexpected %s\r\ngot %s", "bagjacgzaw6ccgrfc3qnrl6joodbjjiet4haufnt2xww725luwgfhijnmg36q", ethTxTrie.Cid().String())
}
}
func TestEthTxTrieString(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
if ethTxTrie.String() != "<EthereumTxTrie bagjacgzaw6ccgrfc3qnrl6joodbjjiet4haufnt2xww725luwgfhijnmg36q>" {
t.Fatalf("Wrong String()\r\nexpected %s\r\ngot %s", "<EthereumTxTrie bagjacgzaw6ccgrfc3qnrl6joodbjjiet4haufnt2xww725luwgfhijnmg36q>", ethTxTrie.String())
}
}
func TestEthTxTrieLoggable(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
l := ethTxTrie.Loggable()
if _, ok := l["type"]; !ok {
t.Fatal("Loggable map expected the field 'type'")
}
if l["type"] != "eth-tx-trie" {
t.Fatalf("Wrong Loggable 'type' value\r\nexpected %s\r\ngot %s", "eth-tx-trie", l["type"])
}
}
/*
Node INTERFACE
*/
func TestTxTrieResolveExtension(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
_ = ethTxTrie
}
func TestTxTrieResolveLeaf(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieLeaf(t)
_ = ethTxTrie
}
func TestTxTrieResolveBranch(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieBranch(t)
indexes := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"}
for j, index := range indexes {
obj, rest, err := ethTxTrie.Resolve([]string{index, "nonce"})
switch {
case j < 9:
_, ok := obj.(*node.Link)
if !ok {
t.Fatalf("Returned object is not a link (index: %d)", j)
}
if rest[0] != "nonce" {
t.Fatalf("Wrong rest of the path returned\r\nexpected %s\r\ngot %s", "nonce", rest[0])
}
if err != nil {
t.Fatal("Error should be nil")
}
default:
if obj != nil {
t.Fatalf("Returned object should have been nil")
}
if rest != nil {
t.Fatalf("Rest of the path returned should be nil")
}
if err.Error() != "no such link in this branch" {
t.Fatalf("Wrong error")
}
}
}
otherSuccessCases := [][]string{
{"0", "1", "banana"},
{"1", "banana"},
{"7bc", "def"},
{"bc", "def"},
}
for i := 0; i < len(otherSuccessCases); i = i + 2 {
osc := otherSuccessCases[i]
expectedRest := otherSuccessCases[i+1]
obj, rest, err := ethTxTrie.Resolve(osc)
_, ok := obj.(*node.Link)
if !ok {
t.Fatalf("Returned object is not a link")
}
for j := range expectedRest {
if rest[j] != expectedRest[j] {
t.Fatalf("Wrong rest of the path returned\r\nexpected %s\r\ngot %s", expectedRest[j], rest[j])
}
}
if err != nil {
t.Fatal("Error should be nil")
}
}
}
func TestTraverseTxTrieWithResolve(t *testing.T) {
var err error
txMap := prepareTxTrieMap(t)
// This is the cid of the tx root at the block 4,139,497
currentNode := txMap["bagjacgzaqolvvlyflkdiylijcu4ts6myxczkb2y3ewxmln5oyrsrkfc4v7ua"]
// This is the path we want to traverse
// the transaction id 256, which is RLP encoded to 820100
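// (RLP encodes 256 as the two bytes 0x0100 preceded by the short-string
// prefix 0x82, giving 0x820100; each hex character of that key becomes one
// path element below.)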
var traversePath []string
for _, s := range "820100" {
traversePath = append(traversePath, string(s))
}
traversePath = append(traversePath, "value")
var obj interface{}
for {
obj, traversePath, err = currentNode.Resolve(traversePath)
link, ok := obj.(*node.Link)
if !ok {
break
}
if err != nil {
t.Fatal("Error should be nil")
}
currentNode = txMap[link.Cid.String()]
if currentNode == nil {
t.Fatal("transaction trie node not found in memory map")
}
}
if fmt.Sprintf("%v", obj) != "0xc495a958603400" {
t.Fatalf("Wrong value\r\nexpected %s\r\ngot %s", "0xc495a958603400", fmt.Sprintf("%v", obj))
}
}
func TestTxTrieTreeBadParams(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieBranch(t)
tree := ethTxTrie.Tree("non-empty-string", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = ethTxTrie.Tree("non-empty-string", 1)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
tree = ethTxTrie.Tree("", 0)
if tree != nil {
t.Fatal("Expected nil to be returned")
}
}
func TestTxTrieTreeExtension(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
tree := ethTxTrie.Tree("", -1)
if len(tree) != 1 {
t.Fatalf("An extension should have one element")
}
if tree[0] != "01" {
t.Fatalf("Wrong trie element\r\nexpected %s\r\ngot %s", "01", tree[0])
}
}
func TestTxTrieTreeBranch(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieBranch(t)
tree := ethTxTrie.Tree("", -1)
lookupElements := map[string]interface{}{
"0": nil,
"1": nil,
"2": nil,
"3": nil,
"4": nil,
"5": nil,
"6": nil,
"7": nil,
"8": nil,
}
if len(tree) != len(lookupElements) {
t.Fatalf("Wrong number of elements\r\nexpected %d\r\ngot %d", len(lookupElements), len(tree))
}
for _, te := range tree {
if _, ok := lookupElements[te]; !ok {
t.Fatalf("Unexpected Element: %v", te)
}
}
}
func TestTxTrieLinksBranch(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieBranch(t)
desiredValues := []string{
"bagjacgzakhtcfpja453ydiaqxgidqmxhh7jwmxujib663deebwfs3m2n3hoa",
"bagjacgza2p2fuqh4vumknq6x5w7i47usvtu5ixqins6qjjtcks4zge3vx3qq",
"bagjacgza4fkhn7et3ra66yjkzbtvbxjefuketda6jctlut6it7gfahxhywga",
"bagjacgzacnryeybs52xryrka5uxi4eg4hi2mh66esaghu7cetzu6fsukrynq",
"bagjacgzastu5tc7lwz4ap3gznjwkyyepswquub7gvhags5mgdyfynnwbi43a",
"bagjacgza5qgp76ovvorkydni2lchew6ieu5wb55w6hdliiu6vft7zlxtdhjq",
"bagjacgzafnssc4yvln6zxmks5roskw4ckngta5n4yfy2skhlu435ve4b575a",
"bagjacgzagkuei7qxfxefufme2d3xizxokkq4ad3rzl2x4dq2uao6dcr4va2a",
"bagjacgzaxpaehtananrdxjghwukh2wwkkzcqwveppf6xclkrtd26rm27kqwq",
}
links := ethTxTrie.Links()
for i, v := range desiredValues {
if links[i].Cid.String() != v {
t.Fatalf("Wrong cid for link %d\r\nexpected %s\r\ngot %s", i, v, links[i].Cid.String())
}
}
}
/*
EthTxTrie Functions
*/
func TestTxTrieJSONMarshalExtension(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieExtension(t)
jsonOutput, err := ethTxTrie.MarshalJSON()
checkError(err, t)
var data map[string]interface{}
err = json.Unmarshal(jsonOutput, &data)
checkError(err, t)
if parseMapElement(data["01"]) !=
"bagjacgzak6wdjvshdtb7lrvlteweyd7f5qjr3dmzmh7g2xpi4xrwoujsio2a" {
t.Fatalf("Wrong Marshaled Value\r\nexpected %s\r\ngot %s", "bagjacgzak6wdjvshdtb7lrvlteweyd7f5qjr3dmzmh7g2xpi4xrwoujsio2a", parseMapElement(data["01"]))
}
if data["type"] != "extension" {
t.Fatalf("Wrong node type\r\nexpected %s\r\ngot %s", "extension", data["type"])
}
}
func TestTxTrieJSONMarshalLeaf(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieLeaf(t)
jsonOutput, err := ethTxTrie.MarshalJSON()
checkError(err, t)
var data map[string]interface{}
err = json.Unmarshal(jsonOutput, &data)
checkError(err, t)
if data["type"] != "leaf" {
t.Fatalf("Wrong node type\r\nexpected %s\r\ngot %s", "leaf", data["type"])
}
if fmt.Sprintf("%v", data[""].(map[string]interface{})["nonce"]) !=
"40243" {
t.Fatalf("Wrong nonce value\r\nexepcted %s\r\ngot %s", "40243", fmt.Sprintf("%v", data[""].(map[string]interface{})["nonce"]))
}
}
func TestTxTrieJSONMarshalBranch(t *testing.T) {
ethTxTrie := prepareDecodedEthTxTrieBranch(t)
jsonOutput, err := ethTxTrie.MarshalJSON()
checkError(err, t)
var data map[string]interface{}
err = json.Unmarshal(jsonOutput, &data)
checkError(err, t)
desiredValues := map[string]string{
"0": "bagjacgzakhtcfpja453ydiaqxgidqmxhh7jwmxujib663deebwfs3m2n3hoa",
"1": "bagjacgza2p2fuqh4vumknq6x5w7i47usvtu5ixqins6qjjtcks4zge3vx3qq",
"2": "bagjacgza4fkhn7et3ra66yjkzbtvbxjefuketda6jctlut6it7gfahxhywga",
"3": "bagjacgzacnryeybs52xryrka5uxi4eg4hi2mh66esaghu7cetzu6fsukrynq",
"4": "bagjacgzastu5tc7lwz4ap3gznjwkyyepswquub7gvhags5mgdyfynnwbi43a",
"5": "bagjacgza5qgp76ovvorkydni2lchew6ieu5wb55w6hdliiu6vft7zlxtdhjq",
"6": "bagjacgzafnssc4yvln6zxmks5roskw4ckngta5n4yfy2skhlu435ve4b575a",
"7": "bagjacgzagkuei7qxfxefufme2d3xizxokkq4ad3rzl2x4dq2uao6dcr4va2a",
"8": "bagjacgzaxpaehtananrdxjghwukh2wwkkzcqwveppf6xclkrtd26rm27kqwq",
}
for k, v := range desiredValues {
if parseMapElement(data[k]) != v {
t.Fatalf("Wrong Marshaled Value %s\r\nexpected %s\r\ngot %s", k, v, parseMapElement(data[k]))
}
}
for _, v := range []string{"a", "b", "c", "d", "e", "f"} {
if data[v] != nil {
t.Fatal("Expected value to be nil")
}
}
if data["type"] != "branch" {
t.Fatalf("Wrong node type\r\nexpected %s\r\ngot %s", "branch", data["type"])
}
}
/*
AUXILIARIES
*/
// prepareDecodedEthTxTrie simulates an IPLD block available in the datastore,
// checks the source RLP and tests for the absence of errors during the decoding phase.
func prepareDecodedEthTxTrie(branchDataRLP string, t *testing.T) *EthTxTrie {
b, err := hex.DecodeString(branchDataRLP)
checkError(err, t)
c, err := RawdataToCid(MEthTxTrie, b, multihash.KECCAK_256)
checkError(err, t)
storedEthTxTrie, err := block.NewBlockWithCid(b, c)
checkError(err, t)
ethTxTrie, err := DecodeEthTxTrie(storedEthTxTrie.Cid(), storedEthTxTrie.RawData())
checkError(err, t)
return ethTxTrie
}
func prepareDecodedEthTxTrieExtension(t *testing.T) *EthTxTrie {
extensionDataRLP :=
"e4820001a057ac34d6471cc3f5c6ab992c4c0fe5ec131d8d9961fe6d5de8e5e367513243b4"
return prepareDecodedEthTxTrie(extensionDataRLP, t)
}
func prepareDecodedEthTxTrieLeaf(t *testing.T) *EthTxTrie {
leafDataRLP :=
"f87220b86ff86d829d3384ee6b280083015f9094e0e6c781b8cba08bc840" +
"7eac0101b668d1fa6f4987c495a9586034008026a0981b6223c9d3c31971" +
"6da3cf057da84acf0fef897f4003d8a362d7bda42247dba066be134c4bc4" +
"32125209b5056ef274b7423bcac7cc398cf60b83aaff7b95469f"
return prepareDecodedEthTxTrie(leafDataRLP, t)
}
func prepareDecodedEthTxTrieBranch(t *testing.T) *EthTxTrie {
branchDataRLP :=
"f90131a051e622bd20e77781a010b9903832e73fd3665e89407ded8c840d8b2db34dd9" +
"dca0d3f45a40fcad18a6c3d7edbe8e7e92ace9d45e086cbd04a66254b9931375bee1a0" +
"e15476fc93dc41ef612ac86750dd242d14498c1e48a6ba4fc89fcc501ee7c58ca01363" +
"826032eeaf1c4540ed2e8e10dc3a34c3fbc4900c7a7c449e69e2ca8a8e1ba094e9d98b" +
"ebb67807ecd96a6cac608f95a14a07e6a9c06975861e0b86b6c14736a0ec0cfff9d5ab" +
"a2ac0da8d2c4725bc8253b60f7b6f1c6b4229ea967fcaef319d3a02b652173155b7d9b" +
"b152ec5d255b82534d3075bcc171a928eba737da9381effaa032a8447e172dc85a1584" +
"d0f77466ee52a1c00f71caf57e0e1aa01de18a3ca834a0bbc043cc0d03623ba4c7b514" +
"7d5aca56450b548f797d712d5198f5e8b35f542d8080808080808080"
return prepareDecodedEthTxTrie(branchDataRLP, t)
}
func prepareTxTrieMap(t *testing.T) map[string]*EthTxTrie {
fi, err := os.Open("test_data/eth-block-body-json-4139497")
checkError(err, t)
_, _, txTrieNodes, err := FromBlockJSON(fi)
checkError(err, t)
out := make(map[string]*EthTxTrie)
for _, txTrieNode := range txTrieNodes {
decodedNode, err := DecodeEthTxTrie(txTrieNode.Cid(), txTrieNode.RawData())
checkError(err, t)
out[txTrieNode.Cid().String()] = decodedNode
}
return out
}

View File

@ -0,0 +1,8 @@
package ipld
import "github.com/ipfs/go-cid"
type IPLD interface {
Cid() cid.Cid
RawData() []byte
}

View File

@ -17,16 +17,6 @@
package ipld
import (
"bytes"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
)
@ -49,11 +39,6 @@ const (
MEthLog = 0x9a
)
var (
nullHashBytes = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
ErrInvalidLink = errors.New("no such link")
)
// RawdataToCid takes the desired codec and a slice of bytes
// and returns the proper cid of the object.
func RawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
@ -69,146 +54,13 @@ func RawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, erro
return c, nil
}
// keccak256ToCid takes a keccak256 hash and returns its cid based on
// Keccak256ToCid takes a keccak256 hash and returns its cid based on
// the codec given.
func keccak256ToCid(codec uint64, h []byte) cid.Cid {
func Keccak256ToCid(codec uint64, h []byte) cid.Cid {
buf, err := mh.Encode(h, mh.KECCAK_256)
if err != nil {
panic(err)
}
return cid.NewCidV1(codec, mh.Multihash(buf))
}
// commonHashToCid takes a go-ethereum common.Hash and returns its
// cid based on the codec given.
func commonHashToCid(codec uint64, h common.Hash) cid.Cid {
mhash, err := mh.Encode(h[:], mh.KECCAK_256)
if err != nil {
panic(err)
}
return cid.NewCidV1(codec, mhash)
}
// localTrie wraps a go-ethereum trie and its underlying memory db.
// It contributes to the creation of the trie node objects.
type localTrie struct {
db ethdb.Database
trieDB *trie.Database
trie *trie.Trie
}
// newLocalTrie initializes and returns a localTrie object
func newLocalTrie() *localTrie {
var err error
lt := &localTrie{}
lt.db = rawdb.NewMemoryDatabase()
lt.trieDB = trie.NewDatabase(lt.db)
lt.trie, err = trie.New(common.Hash{}, common.Hash{}, lt.trieDB)
if err != nil {
panic(err)
}
return lt
}
// Add receives the index of an object and its rawdata value
// and inserts it into the localTrie
func (lt *localTrie) Add(idx int, rawdata []byte) error {
key, err := rlp.EncodeToBytes(uint(idx))
if err != nil {
panic(err)
}
return lt.trie.TryUpdate(key, rawdata)
}
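// For example (illustrative): index 1 RLP-encodes to the single byte 0x01,
// while index 256 RLP-encodes to 0x820100 (cf. the tx-trie traversal test,
// which resolves transaction 256 through the path "8", "2", "0", "1", "0", "0").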
// rootHash returns the computed trie root.
// Useful for sanity checks on parsed data.
func (lt *localTrie) rootHash() []byte {
return lt.trie.Hash().Bytes()
}
func (lt *localTrie) commit() error {
// commit trie nodes to trieDB
ltHash, trieNodes, err := lt.trie.Commit(true)
if err != nil {
return err
}
// The new trie.Commit method signature also requires an Update call with the returned NodeSet.
if trieNodes != nil {
lt.trieDB.Update(trie.NewWithNodeSet(trieNodes))
}
// commit trieDB to the underlying ethdb.Database
if err := lt.trieDB.Commit(ltHash, false, nil); err != nil {
return err
}
return nil
}
// getKeys returns the stored keys of the memory database
// of the localTrie for further processing.
func (lt *localTrie) getKeys() ([][]byte, error) {
if err := lt.commit(); err != nil {
return nil, err
}
// collect all of the node keys
it := lt.trie.NodeIterator([]byte{})
keyBytes := make([][]byte, 0)
for it.Next(true) {
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
keyBytes = append(keyBytes, it.Hash().Bytes())
}
return keyBytes, nil
}
type nodeKey struct {
dbKey []byte
TrieKey []byte
}
// getLeafKeys returns the stored leaf keys from the memory database
// of the localTrie for further processing.
func (lt *localTrie) getLeafKeys() ([]*nodeKey, error) {
if err := lt.commit(); err != nil {
return nil, err
}
it := lt.trie.NodeIterator([]byte{})
leafKeys := make([]*nodeKey, 0)
for it.Next(true) {
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
continue
}
node, nodeElements, err := sdtrie.ResolveNode(it, lt.trieDB)
if err != nil {
return nil, err
}
if node.NodeType != sdtypes.Leaf {
continue
}
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
valueNodePath := append(node.Path, partialPath...)
encodedPath := trie.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
leafKeys = append(leafKeys, &nodeKey{dbKey: it.Hash().Bytes(), TrieKey: leafKey})
}
return leafKeys, nil
}
// getRLP encodes the given object to RLP returning its bytes.
func getRLP(object interface{}) []byte {
buf := new(bytes.Buffer)
if err := rlp.Encode(buf, object); err != nil {
panic(err)
}
return buf.Bytes()
return cid.NewCidV1(codec, buf)
}
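// cidExample is an illustrative sketch (not part of this change) of the two
// CID helpers above: RawdataToCid hashes the raw object itself, while
// Keccak256ToCid wraps an already-computed keccak256 hash. The function and
// parameter names here are assumptions for the example.
func cidExample(txRLP, trieNodeHash []byte) (cid.Cid, cid.Cid, error) {
	// compute the CID of a raw transaction by hashing its bytes
	txCID, err := RawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
	if err != nil {
		return cid.Undef, cid.Undef, err
	}
	// trieNodeHash is assumed to already be the 32-byte keccak256 hash of a trie node
	nodeCID := Keccak256ToCid(MEthTxTrie, trieNodeHash)
	return txCID, nodeCID, nil
}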

File diff suppressed because one or more lines are too long

View File

@ -1 +0,0 @@
{"jsonrpc":"2.0","id":1,"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x400000000","extraData":"0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa","gasLimit":"0x1388","gasUsed":"0x0","hash":"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000042","number":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000042"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x21c","stateRoot":"0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544","timestamp":"0x0","totalDifficulty":"0x400000000","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}}

File diff suppressed because one or more lines are too long

View File

@ -1 +0,0 @@
{"jsonrpc":"2.0","result":{"author":"0x4bb96091ee9d802ed039c4d1a5f6216f90f81b01","difficulty":"0xae22b2113ed","extraData":"0xd783010400844765746887676f312e352e31856c696e7578","gasLimit":"0x2fefd8","gasUsed":"0x5208","hash":"0x79851e1adb52a8c5490da2df5d8c060b1cc44a3b6eeaada2e20edba5a8e84523","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x4bb96091ee9d802ed039c4d1a5f6216f90f81b01","mixHash":"0x2565992ba4dbd7ab3bb08d1da34051ae1d90c79bc637a21aa2f51f6380bf5f6a","nonce":"0xf7a14147c2320b2d","number":"0xf3892","parentHash":"0x8ad6d5cbe7ec75ed71d5153dd58f2fd413b17c398ad2a7d9309459ce884e6c9b","receiptsRoot":"0xa73a95d90de29c66220c8b8da825cf34ae969efc7f9a878d8ed893565e4b4676","sealFields":["0xa02565992ba4dbd7ab3bb08d1da34051ae1d90c79bc637a21aa2f51f6380bf5f6a","0x88f7a14147c2320b2d"],"sha3Uncles":"0x08793b633d0b21b980107f3e3277c6693f2f3739e0c676a238cbe24d9ae6e252","size":"0x6c0","stateRoot":"0x11e5ea49ecbee25a9b8f267492a5d296ac09cf6179b43bc334242d052bac5963","timestamp":"0x56bf10c5","totalDifficulty":"0x629a0a89232bcd5b","transactions":[{"blockHash":"0x79851e1adb52a8c5490da2df5d8c060b1cc44a3b6eeaada2e20edba5a8e84523","blockNumber":"0xf3892","condition":null,"creates":null,"from":"0x4bb96091ee9d802ed039c4d1a5f6216f90f81b01","gas":"0x15f90","gasPrice":"0xa","hash":"0xd0fc6b051f16468862c462c672532427efef537ea3737b25b10716949d0e2228","input":"0x","networkId":null,"nonce":"0x7c37","publicKey":"0xa9177f27b99a4ad938359d77e0dca4b64e7ce3722c835d8087d4eecb27c8a54d59e2917e6b31ec12e44b1064d102d35815f9707af9571f15e92d1b6fbcd207e9","r":"0x76933e91718154f18db2e993bc96e82abd9a0fac2bae284875341cbecafa837b","raw":"0xf86a827c370a83015f909404a6c6a293340fc3f2244d097b0cfd84d5317ba58844b1eec616322c1c801ba076933e91718154f18db2e993bc96e82abd9a0fac2bae284875341cbecafa837ba02f165c2c4b5f4b786a95e106c48bccc7e065647af5a1942025b6fbfafeabbbf6","s":"0x2f165c2c4b5f4b786a95e106c48bccc7e065647af5a1942025b6fbfafeabbbf6","standardV":"0x0","to":"0x04a6c6a293340fc3f2244d097b0cfd84d5317ba5","transactionIndex":"0x0","v":"0x1b","value":"0x44b1eec616322c1c"}],"transactionsRoot":"0x7ab22cfcf6db5d1628ac888c25e6bc49aba2faaa200fc880f800f1db1e8bd3cc","uncles":["0x319e0dc9a53711579c4ba88062c927a0045443cca57625903ef471d760506a94","0x0324272e484e509c3c9e9e75ad8b48c7d34556e6b269dd72331033fd5cdc1b2a"]},"id":1}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1 +0,0 @@
(binary RLP test-data file; contents not renderable as text)

View File

@ -1,5 +0,0 @@
(binary RLP test-data file; contents not renderable as text)

View File

@ -1 +0,0 @@
(binary RLP test-data file; contents not renderable as text)

View File

@ -1 +0,0 @@
(binary RLP test-data file; contents not renderable as text)

View File

@ -1 +0,0 @@
{"jsonrpc":"2.0","result":{"author":"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da","difficulty":"0xae387bd92cc","extraData":"0xd783010400844765746887676f312e352e31856c696e7578","gasLimit":"0x2fefd8","gasUsed":"0x0","hash":"0x319e0dc9a53711579c4ba88062c927a0045443cca57625903ef471d760506a94","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da","mixHash":"0x2d4fd3be43fd50f5c0ba7f1c86c8f468b5c14f75b6143da927a2994383f26640","nonce":"0x0aaaa7fe9d7cf7f4","number":"0xf388f","parentHash":"0xac74216bbdb0ebec6612ad5f26301ab50e588aabe75a804bc2068f83980eefc6","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa02d4fd3be43fd50f5c0ba7f1c86c8f468b5c14f75b6143da927a2994383f26640","0x880aaaa7fe9d7cf7f4"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":null,"stateRoot":"0xf9309492322aab44243f8c38240874b37dd0c563bac85f1a816941acc945b21d","timestamp":"0x56bf1097","totalDifficulty":"0x6299e9e3fdb6eb4d","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]},"id":1}

View File

@ -1 +0,0 @@
{"jsonrpc":"2.0","result":{"author":"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da","difficulty":"0xae22b4c9b9a","extraData":"0xd783010400844765746887676f312e352e31856c696e7578","gasLimit":"0x2fefd8","gasUsed":"0xf618","hash":"0x0324272e484e509c3c9e9e75ad8b48c7d34556e6b269dd72331033fd5cdc1b2a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da","mixHash":"0x0f3bdea5170d6af74b70fcf0df81969f6bb1b740f4a6c78df1d354f172865594","nonce":"0x4c691de262b2b3d9","number":"0xf3890","parentHash":"0xcb9efe9bc3c59be7fb673576d661aff9ca75b1522f58fd38d03d3d49b32bddb3","receiptsRoot":"0x5cf73738487f67f1c0a1c2d1083ae014f38e1aab5eb26a8929a511c48b07ea03","sealFields":["0xa00f3bdea5170d6af74b70fcf0df81969f6bb1b740f4a6c78df1d354f172865594","0x884c691de262b2b3d9"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":null,"stateRoot":"0x968e8d8d099572ac783f4511724ec646f59bb33f7395edf858f98b37c8c3b265","timestamp":"0x56bf10b1","totalDifficulty":"0x6299f4c6290386e7","transactions":[],"transactionsRoot":"0x9cea6a59a5df69111ead7406a431c764b2357120e5b61425388df62f87cbcbc3","uncles":[]},"id":1}

View File

@ -1,457 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/ethereum/go-ethereum/rlp"
)
const (
extension = "extension"
leaf = "leaf"
branch = "branch"
)
// TrieNode is the general abstraction for
// Ethereum IPLD trie nodes.
type TrieNode struct {
// leaf, extension or branch
nodeKind string
// If leaf or extension: [0] is key, [1] is val.
// If branch: [0] - [16] are children.
elements []interface{}
// IPLD block information
cid cid.Cid
rawdata []byte
}
/*
OUTPUT
*/
type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)
// decodeTrieNode returns a TrieNode object from an IPLD block's
// cid and rawdata.
func decodeTrieNode(c cid.Cid, b []byte,
leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
var (
i, decoded, elements []interface{}
nodeKind string
err error
)
if err = rlp.DecodeBytes(b, &i); err != nil {
return nil, err
}
codec := c.Type()
switch len(i) {
case 2:
nodeKind, decoded, err = decodeCompactKey(i)
if err != nil {
return nil, err
}
if nodeKind == extension {
elements, err = parseTrieNodeExtension(decoded, codec)
if err != nil {
return nil, err
}
}
if nodeKind == leaf {
elements, err = leafDecoder(decoded)
if err != nil {
return nil, err
}
}
if nodeKind != extension && nodeKind != leaf {
return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
}
case 17:
nodeKind = branch
elements, err = parseTrieNodeBranch(i, codec)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown trie node type")
}
return &TrieNode{
nodeKind: nodeKind,
elements: elements,
rawdata: b,
cid: c,
}, nil
}
// decodeCompactKey takes a compact key, and returns its nodeKind and value.
func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
first := i[0].([]byte)
last := i[1].([]byte)
switch first[0] / 16 {
case '\x00':
return extension, []interface{}{
nibbleToByte(first)[2:],
last,
}, nil
case '\x01':
return extension, []interface{}{
nibbleToByte(first)[1:],
last,
}, nil
case '\x02':
return leaf, []interface{}{
nibbleToByte(first)[2:],
last,
}, nil
case '\x03':
return leaf, []interface{}{
nibbleToByte(first)[1:],
last,
}, nil
default:
return "", nil, fmt.Errorf("unknown hex prefix")
}
}
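// Worked example (illustrative): a compact key whose first byte is 0x20 has
// first[0]/16 == 2, i.e. a leaf with an even-length path (both prefix nibbles
// are dropped); a first byte of 0x3a means a leaf with an odd-length path
// whose first nibble is 0xa (only the flag nibble is dropped).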
// parseTrieNodeExtension helper improves readability
func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
return []interface{}{
i[0].([]byte),
keccak256ToCid(codec, i[1].([]byte)),
}, nil
}
// parseTrieNodeBranch helper improves readability
func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
var out []interface{}
for i, vi := range i {
v, ok := vi.([]byte)
// Sometimes this throws "panic: interface conversion: interface {} is []interface {}, not []uint8"
// Figure out why, and if it is okay to continue
if !ok {
return nil, fmt.Errorf("unable to decode branch node entry into []byte at position: %d value: %+v", i, vi)
}
switch len(v) {
case 0:
out = append(out, nil)
case 32:
out = append(out, keccak256ToCid(codec, v))
default:
return nil, fmt.Errorf("unrecognized object: %v", v)
}
}
return out, nil
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
switch t.nodeKind {
case extension:
return t.resolveTrieNodeExtension(p)
case leaf:
return t.resolveTrieNodeLeaf(p)
case branch:
return t.resolveTrieNodeBranch(p)
default:
return nil, nil, fmt.Errorf("nodeKind case not implemented")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *TrieNode) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
var out []string
switch t.nodeKind {
case extension:
var val string
for _, e := range t.elements[0].([]byte) {
val += fmt.Sprintf("%x", e)
}
return []string{val}
case branch:
for i, elem := range t.elements {
if _, ok := elem.(cid.Cid); ok {
out = append(out, fmt.Sprintf("%x", i))
}
}
return out
default:
return nil
}
}
// ResolveLink is a helper function that calls Resolve and asserts that the
// output is a link.
func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := t.Resolve(p)
if err != nil {
return nil, nil, err
}
lnk, ok := obj.(*node.Link)
if !ok {
return nil, nil, fmt.Errorf("was not a link")
}
return lnk, rest, nil
}
// Copy will go away. It is here to comply with the interface.
func (t *TrieNode) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (t *TrieNode) Links() []*node.Link {
var out []*node.Link
for _, i := range t.elements {
c, ok := i.(cid.Cid)
if ok {
out = append(out, &node.Link{Cid: c})
}
}
return out
}
// Stat will go away. It is here to comply with the interface.
func (t *TrieNode) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (t *TrieNode) Size() (uint64, error) {
return 0, nil
}
/*
TrieNode functions
*/
// MarshalJSON processes the trie node into readable JSON format.
func (t *TrieNode) MarshalJSON() ([]byte, error) {
var out map[string]interface{}
switch t.nodeKind {
case extension:
fallthrough
case leaf:
var hexPrefix string
for _, e := range t.elements[0].([]byte) {
hexPrefix += fmt.Sprintf("%x", e)
}
// if we got a byte slice we need to do this cast, otherwise
// it will be marshaled to a base64-encoded value
if _, ok := t.elements[1].([]byte); ok {
var hexVal string
for _, e := range t.elements[1].([]byte) {
hexVal += fmt.Sprintf("%x", e)
}
t.elements[1] = hexVal
}
out = map[string]interface{}{
"type": t.nodeKind,
hexPrefix: t.elements[1],
}
case branch:
out = map[string]interface{}{
"type": branch,
"0": t.elements[0],
"1": t.elements[1],
"2": t.elements[2],
"3": t.elements[3],
"4": t.elements[4],
"5": t.elements[5],
"6": t.elements[6],
"7": t.elements[7],
"8": t.elements[8],
"9": t.elements[9],
"a": t.elements[10],
"b": t.elements[11],
"c": t.elements[12],
"d": t.elements[13],
"e": t.elements[14],
"f": t.elements[15],
}
default:
return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
}
return json.Marshal(out)
}
// nibbleToByte expands the nibbles of a byte slice into their own bytes.
func nibbleToByte(k []byte) []byte {
var out []byte
for _, b := range k {
out = append(out, b/16)
out = append(out, b%16)
}
return out
}
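// For example (illustrative): nibbleToByte([]byte{0x12, 0xab}) returns
// []byte{0x1, 0x2, 0xa, 0xb}, one byte per nibble.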
// Resolve reading conveniences
func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
nibbles := t.elements[0].([]byte)
idx, rest := shiftFromPath(p, len(nibbles))
if len(idx) < len(nibbles) {
return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
}
for _, i := range idx {
if getHexIndex(string(i)) == -1 {
return nil, nil, fmt.Errorf("invalid path element")
}
}
for i, n := range nibbles {
if string(idx[i]) != fmt.Sprintf("%x", n) {
return nil, nil, fmt.Errorf("no such link in this extension")
}
}
return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
}
func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
nibbles := t.elements[0].([]byte)
if len(nibbles) != 0 {
idx, rest := shiftFromPath(p, len(nibbles))
if len(idx) < len(nibbles) {
return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
}
for _, i := range idx {
if getHexIndex(string(i)) == -1 {
return nil, nil, fmt.Errorf("invalid path element")
}
}
for i, n := range nibbles {
if string(idx[i]) != fmt.Sprintf("%x", n) {
return nil, nil, fmt.Errorf("no such link in this extension")
}
}
p = rest
}
link, ok := t.elements[1].(node.Node)
if !ok {
return nil, nil, fmt.Errorf("leaf children is not an IPLD node")
}
return link.Resolve(p)
}
func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
idx, rest := shiftFromPath(p, 1)
hidx := getHexIndex(idx)
if hidx == -1 {
return nil, nil, fmt.Errorf("incorrect path")
}
child := t.elements[hidx]
if child != nil {
return &node.Link{Cid: child.(cid.Cid)}, rest, nil
}
return nil, nil, fmt.Errorf("no such link in this branch")
}
// shiftFromPath extracts from a given path (as a slice of strings)
// the given number of elements as a single string, returning whatever
// it has not taken.
//
// Examples:
// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
func shiftFromPath(p []string, i int) (string, []string) {
var (
out string
rest []string
)
for _, pe := range p {
re := ""
for _, c := range pe {
if len(out) < i {
out += string(c)
} else {
re += string(c)
}
}
if len(out) == i && re != "" {
rest = append(rest, re)
}
}
return out, rest
}
// getHexIndex returns the integer 0-15 equivalent of the given
// string character if applicable, or -1 otherwise.
func getHexIndex(s string) int {
if len(s) != 1 {
return -1
}
c := s[0]
switch {
case '0' <= c && c <= '9':
return int(c - '0')
case 'a' <= c && c <= 'f':
return int(c - 'a' + 10)
}
return -1
}

View File

@ -22,13 +22,15 @@ import (
"crypto/rand"
"math/big"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
@ -37,8 +39,7 @@ import (
// Test variables
var (
// block data
// TODO: Update this to `MainnetChainConfig` when `LondonBlock` is added
TestConfig = params.RopstenChainConfig
TestConfig = params.MainnetChainConfig
BlockNumber = TestConfig.LondonBlock
// canonical block at London height
@ -95,9 +96,9 @@ var (
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
ExpectedPostStatus uint64 = 1
ExpectedPostState1 = common.Bytes2Hex(common.HexToHash("0x1").Bytes())
ExpectedPostState2 = common.Bytes2Hex(common.HexToHash("0x2").Bytes())
ExpectedPostState3 = common.Bytes2Hex(common.HexToHash("0x3").Bytes())
ExpectedPostState1 = common.HexToHash("0x1").String()
ExpectedPostState2 = common.HexToHash("0x2").String()
ExpectedPostState3 = common.HexToHash("0x3").String()
MockLog1 = &types.Log{
Address: Address,
Topics: []common.Hash{mockTopic11, mockTopic12},
@ -137,17 +138,6 @@ var (
Address: AnotherAddress,
StorageKeys: []common.Hash{common.BytesToHash(StorageLeafKey), common.BytesToHash(MockStorageLeafKey)},
}
AccessListEntry1Model = models.AccessListElementModel{
BlockNumber: BlockNumber.String(),
Index: 0,
Address: Address.Hex(),
}
AccessListEntry2Model = models.AccessListElementModel{
BlockNumber: BlockNumber.String(),
Index: 1,
Address: AnotherAddress.Hex(),
StorageKeys: []string{common.BytesToHash(StorageLeafKey).Hex(), common.BytesToHash(MockStorageLeafKey).Hex()},
}
// statediff data
storageLocation = common.HexToHash("0")
@ -160,22 +150,26 @@ var (
StoragePartialPath,
StorageValue,
})
StorageLeafNodeCID = ipld2.Keccak256ToCid(ipld2.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String()
nonce1 = uint64(1)
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
ContractLeafKey = test_helpers.AddressToLeafKey(ContractAddress)
ContractAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
ContractAccount = &types.StateAccount{
Nonce: nonce1,
Balance: big.NewInt(0),
CodeHash: ContractCodeHash.Bytes(),
Root: common.HexToHash(ContractRoot),
})
}
ContractAccountRLP, _ = rlp.EncodeToBytes(ContractAccount)
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
ContractPartialPath,
ContractAccount,
})
ContractLeafNodeCID = ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(ContractLeafNode)).String()
Contract2LeafKey = test_helpers.AddressToLeafKey(ContractAddress2)
storage2Location = common.HexToHash("2")
@ -188,74 +182,108 @@ var (
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
AccountLeafKey = test_helpers.Account2LeafKey
RemovedLeafKey = test_helpers.Account1LeafKey
Account, _ = rlp.EncodeToBytes(&types.StateAccount{
Account = &types.StateAccount{
Nonce: nonce0,
Balance: big.NewInt(1000),
CodeHash: AccountCodeHash.Bytes(),
Root: common.HexToHash(AccountRoot),
})
}
AccountRLP, _ = rlp.EncodeToBytes(Account)
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
AccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
AccountPartialPath,
Account,
})
AccountLeafNodeCID = ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(AccountLeafNode)).String()
StateDiffs = []sdtypes.StateNode{
StateDiffs = []sdtypes.StateLeafNode{
{
Path: []byte{'\x06'},
NodeType: sdtypes.Leaf,
AccountWrapper: sdtypes.AccountWrapper{
Account: ContractAccount,
LeafKey: ContractLeafKey,
NodeValue: ContractLeafNode,
StorageNodes: []sdtypes.StorageNode{
CID: ContractLeafNodeCID,
},
Removed: false,
StorageDiff: []sdtypes.StorageLeafNode{
{
Path: []byte{},
NodeType: sdtypes.Leaf,
Removed: false,
LeafKey: StorageLeafKey,
NodeValue: StorageLeafNode,
Value: StorageValue,
CID: StorageLeafNodeCID,
},
{
Path: []byte{'\x03'},
NodeType: sdtypes.Removed,
Removed: true,
LeafKey: RemovedLeafKey,
NodeValue: []byte{},
CID: shared.RemovedNodeStorageCID,
Value: []byte{},
},
},
},
{
Path: []byte{'\x0c'},
NodeType: sdtypes.Leaf,
AccountWrapper: sdtypes.AccountWrapper{
Account: Account,
LeafKey: AccountLeafKey,
NodeValue: AccountLeafNode,
StorageNodes: []sdtypes.StorageNode{},
CID: AccountLeafNodeCID,
},
Removed: false,
StorageDiff: []sdtypes.StorageLeafNode{},
},
{
Path: []byte{'\x02'},
NodeType: sdtypes.Removed,
AccountWrapper: sdtypes.AccountWrapper{
Account: nil,
LeafKey: RemovedLeafKey,
NodeValue: []byte{},
CID: shared.RemovedNodeStateCID,
},
Removed: true,
StorageDiff: []sdtypes.StorageLeafNode{},
},
{
Path: []byte{'\x07'},
NodeType: sdtypes.Removed,
AccountWrapper: sdtypes.AccountWrapper{
Account: nil,
LeafKey: Contract2LeafKey,
NodeValue: []byte{},
StorageNodes: []sdtypes.StorageNode{
CID: shared.RemovedNodeStateCID,
},
Removed: true,
StorageDiff: []sdtypes.StorageLeafNode{
{
Path: []byte{'\x0e'},
NodeType: sdtypes.Removed,
Removed: true,
CID: shared.RemovedNodeStorageCID,
LeafKey: Storage2LeafKey,
NodeValue: []byte{},
Value: []byte{},
},
{
Path: []byte{'\x0f'},
NodeType: sdtypes.Removed,
Removed: true,
CID: shared.RemovedNodeStorageCID,
LeafKey: Storage3LeafKey,
NodeValue: []byte{},
Value: []byte{},
},
},
},
}
IPLDs = []sdtypes.IPLD{
{
CID: ContractLeafNodeCID,
Content: ContractLeafNode,
},
{
CID: StorageLeafNodeCID,
Content: StorageLeafNode,
},
{
CID: shared.RemovedNodeStorageCID,
Content: []byte{},
},
{
CID: AccountLeafNodeCID,
Content: AccountLeafNode,
},
{
CID: shared.RemovedNodeStateCID,
Content: []byte{},
},
}
// Mock data for testing watched addresses methods
Contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F"
Contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B"
@ -296,7 +324,7 @@ type LegacyData struct {
ContractLeafNode []byte
AccountRoot string
AccountLeafNode []byte
StateDiffs []sdtypes.StateNode
StateDiffs []sdtypes.StateLeafNode
}
func NewLegacyData(config *params.ChainConfig) *LegacyData {
@ -336,7 +364,7 @@ func NewLegacyData(config *params.ChainConfig) *LegacyData {
MockStorageLeafKey: MockStorageLeafKey,
StorageLeafNode: StorageLeafNode,
ContractLeafKey: ContractLeafKey,
ContractAccount: ContractAccount,
ContractAccount: ContractAccountRLP,
ContractPartialPath: ContractPartialPath,
ContractLeafNode: ContractLeafNode,
AccountRoot: AccountRoot,

View File

@ -16,111 +16,9 @@
package models
import "github.com/lib/pq"
// IPLDBatch holds the arguments for a batch insert of IPLD data
type IPLDBatch struct {
BlockNumbers []string
Keys []string
Values [][]byte
}
// UncleBatch holds the arguments for a batch insert of uncle data
type UncleBatch struct {
BlockNumbers []string
HeaderID []string
BlockHashes []string
ParentHashes []string
CIDs []string
MhKeys []string
Rewards []string
}
// TxBatch holds the arguments for a batch insert of tx data
type TxBatch struct {
BlockNumbers []string
HeaderIDs []string
Indexes []int64
TxHashes []string
CIDs []string
MhKeys []string
Dsts []string
Srcs []string
Datas [][]byte
Types []uint8
}
// AccessListBatch holds the arguments for a batch insert of access list data
type AccessListBatch struct {
BlockNumbers []string
Indexes []int64
TxIDs []string
Addresses []string
StorageKeysSets []pq.StringArray
}
// ReceiptBatch holds the arguments for a batch insert of receipt data
type ReceiptBatch struct {
BlockNumbers []string
HeaderIDs []string
TxIDs []string
LeafCIDs []string
LeafMhKeys []string
PostStatuses []uint64
PostStates []string
Contracts []string
ContractHashes []string
LogRoots []string
}
// LogBatch holds the arguments for a batch insert of log data
type LogBatch struct {
BlockNumbers []string
HeaderIDs []string
LeafCIDs []string
LeafMhKeys []string
ReceiptIDs []string
Addresses []string
Indexes []int64
Datas [][]byte
Topic0s []string
Topic1s []string
Topic2s []string
Topic3s []string
}
// StateBatch holds the arguments for a batch insert of state data
type StateBatch struct {
BlockNumbers []string
HeaderIDs []string
Paths [][]byte
StateKeys []string
NodeTypes []int
CIDs []string
MhKeys []string
Diff bool
}
// AccountBatch holds the arguments for a batch insert of account data
type AccountBatch struct {
BlockNumbers []string
HeaderIDs []string
StatePaths [][]byte
Balances []string
Nonces []uint64
CodeHashes [][]byte
StorageRoots []string
}
// StorageBatch holds the arguments for a batch insert of storage data
type StorageBatch struct {
BlockNumbers []string
HeaderIDs []string
StatePaths [][]string
Paths [][]byte
StorageKeys []string
NodeTypes []int
CIDs []string
MhKeys []string
Diff bool
}

View File

@ -18,7 +18,7 @@ package models
import "github.com/lib/pq"
// IPLDModel is the db model for public.blocks
// IPLDModel is the db model for ipld.blocks
type IPLDModel struct {
BlockNumber string `db:"block_number"`
Key string `db:"key"`
@ -31,17 +31,15 @@ type HeaderModel struct {
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
TotalDifficulty string `db:"td"`
NodeID string `db:"node_id"`
NodeIDs pq.StringArray `db:"node_ids"`
Reward string `db:"reward"`
StateRoot string `db:"state_root"`
UncleRoot string `db:"uncle_root"`
UnclesHash string `db:"uncles_hash"`
TxRoot string `db:"tx_root"`
RctRoot string `db:"receipt_root"`
Bloom []byte `db:"bloom"`
Timestamp uint64 `db:"timestamp"`
TimesValidated int64 `db:"times_validated"`
Coinbase string `db:"coinbase"`
}
@ -52,8 +50,8 @@ type UncleModel struct {
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Reward string `db:"reward"`
Index int64 `db:"index"`
}
// TxModel is the db model for eth.transaction_cids
@ -63,85 +61,47 @@ type TxModel struct {
Index int64 `db:"index"`
TxHash string `db:"tx_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Dst string `db:"dst"`
Src string `db:"src"`
Data []byte `db:"tx_data"`
Type uint8 `db:"tx_type"`
Value string `db:"value"`
}
// AccessListElementModel is the db model for eth.access_list_entry
type AccessListElementModel struct {
BlockNumber string `db:"block_number"`
Index int64 `db:"index"`
TxID string `db:"tx_id"`
Address string `db:"address"`
StorageKeys pq.StringArray `db:"storage_keys"`
}
// ReceiptModel is the db model for eth.receipt_cids
type ReceiptModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
TxID string `db:"tx_id"`
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
CID string `db:"cid"`
PostStatus uint64 `db:"post_status"`
PostState string `db:"post_state"`
Contract string `db:"contract"`
ContractHash string `db:"contract_hash"`
LogRoot string `db:"log_root"`
}
// StateNodeModel is the db model for eth.state_cids
type StateNodeModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
Path []byte `db:"state_path"`
StateKey string `db:"state_leaf_key"`
NodeType int `db:"node_type"`
Removed bool `db:"removed"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
Balance string `db:"balance"`
Nonce uint64 `db:"nonce"`
CodeHash string `db:"code_hash"`
StorageRoot string `db:"storage_root"`
}
// StorageNodeModel is the db model for eth.storage_cids
type StorageNodeModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
StatePath []byte `db:"state_path"`
Path []byte `db:"storage_path"`
StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
}
// StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key
type StorageNodeWithStateKeyModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
StatePath []byte `db:"state_path"`
Path []byte `db:"storage_path"`
StateKey string `db:"state_leaf_key"`
StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"`
Removed bool `db:"removed"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
}
// StateAccountModel is a db model for an eth state account (decoded value of state leaf node)
type StateAccountModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
StatePath []byte `db:"state_path"`
Balance string `db:"balance"`
Nonce uint64 `db:"nonce"`
CodeHash []byte `db:"code_hash"`
StorageRoot string `db:"storage_root"`
Value []byte `db:"val"`
}
// LogsModel is the db model for eth.logs
@ -149,21 +109,11 @@ type LogsModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
ReceiptID string `db:"rct_id"`
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
CID string `db:"cid"`
Address string `db:"address"`
Index int64 `db:"index"`
Data []byte `db:"log_data"`
Topic0 string `db:"topic0"`
Topic1 string `db:"topic1"`
Topic2 string `db:"topic2"`
Topic3 string `db:"topic3"`
}
// KnownGapsModel is the db model for eth_meta.known_gaps
type KnownGapsModel struct {
StartingBlockNumber string `db:"starting_block_number"`
EndingBlockNumber string `db:"ending_block_number"`
CheckedOut bool `db:"checked_out"`
ProcessingKey int64 `db:"processing_key"`
}
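The v5 header model drops the mh_key column, replaces the single node_id with a node_ids array, and renames uncle_root to uncles_hash. A minimal, hypothetical sketch of reading the reworked row with database/sql and lib/pq (the DSN and block hash are placeholders, not values from this PR):
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

// Sketch only: pq.StringArray implements sql.Scanner, so the node_ids varchar[]
// column scans directly into a Go slice.
func main() {
	db, err := sql.Open("postgres", "postgres://localhost:5432/vulcanize_public?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	var (
		headerCID string
		nodeIDs   pq.StringArray
	)
	err = db.QueryRow(
		`SELECT cid, node_ids FROM eth.header_cids WHERE block_hash = $1`,
		"0xabc...", // placeholder block hash
	).Scan(&headerCID, &nodeIDs)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(headerCID, []string(nodeIDs))
}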

View File

@ -19,5 +19,4 @@ package shared
const (
RemovedNodeStorageCID = "bagmacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
RemovedNodeStateCID = "baglacgzayxjemamg64rtzet6pwznzrydydsqbnstzkbcoo337lmaixmfurya"
RemovedNodeMhKey = "/blocks/DMQMLUSGAGDPOIZ4SJ7H3MW4Y4B4BZIAWZJ4VARHHN57VWAELWC2I4A"
)

View File

@ -18,10 +18,6 @@ package shared
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
)
// HandleZeroAddrPointer will return an empty string for a nil address pointer
@ -39,19 +35,3 @@ func HandleZeroAddr(to common.Address) string {
}
return to.Hex()
}
// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
func MultihashKeyFromCID(c cid.Cid) string {
dbKey := dshelp.MultihashToDsKey(c.Hash())
return blockstore.BlockPrefix.String() + dbKey.String()
}
// MultihashKeyFromKeccak256 converts keccak256 hash bytes into a blockstore-prefixed multihash db key string
func MultihashKeyFromKeccak256(hash common.Hash) (string, error) {
mh, err := multihash.Encode(hash.Bytes(), multihash.KECCAK_256)
if err != nil {
return "", err
}
dbKey := dshelp.MultihashToDsKey(mh)
return blockstore.BlockPrefix.String() + dbKey.String(), nil
}
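With the blockstore-prefixed multihash keys removed, v5 keys ipld.blocks rows by the CID string itself (the updated tests below pass dc.String() as the key). A minimal sketch of that lookup, assuming a plain *sql.DB handle:
package example

import (
	"database/sql"

	"github.com/ipfs/go-cid"
)

// fetchIPLD is a sketch only: in v5 the ipld.blocks key column holds the CID string,
// so no multihash-key conversion is needed before fetching a block.
func fetchIPLD(db *sql.DB, c cid.Cid, blockNumber uint64) ([]byte, error) {
	var data []byte
	err := db.QueryRow(
		`SELECT data FROM ipld.blocks WHERE key = $1 AND block_number = $2`,
		c.String(), blockNumber,
	).Scan(&data)
	return data, err
}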

View File

@ -0,0 +1,173 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package schema
var TableIPLDBlock = Table{
Name: `ipld.blocks`,
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "key", Type: Dtext},
{Name: "data", Type: Dbytea},
},
}
var TableNodeInfo = Table{
Name: `public.nodes`,
Columns: []Column{
{Name: "genesis_block", Type: Dvarchar},
{Name: "network_id", Type: Dvarchar},
{Name: "node_id", Type: Dvarchar},
{Name: "client_name", Type: Dvarchar},
{Name: "chain_id", Type: Dinteger},
},
}
var TableHeader = Table{
Name: "eth.header_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "block_hash", Type: Dvarchar},
{Name: "parent_hash", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "td", Type: Dnumeric},
{Name: "node_ids", Type: Dvarchar, Array: true},
{Name: "reward", Type: Dnumeric},
{Name: "state_root", Type: Dvarchar},
{Name: "tx_root", Type: Dvarchar},
{Name: "receipt_root", Type: Dvarchar},
{Name: "uncles_hash", Type: Dvarchar},
{Name: "bloom", Type: Dbytea},
{Name: "timestamp", Type: Dnumeric},
{Name: "coinbase", Type: Dvarchar},
},
UpsertClause: OnConflict("block_number", "block_hash").Set(
"parent_hash",
"cid",
"td",
"node_ids",
"reward",
"state_root",
"tx_root",
"receipt_root",
"uncles_hash",
"bloom",
"timestamp",
"coinbase",
)}
var TableStateNode = Table{
Name: "eth.state_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "header_id", Type: Dvarchar},
{Name: "state_leaf_key", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "diff", Type: Dboolean},
{Name: "balance", Type: Dnumeric},
{Name: "nonce", Type: Dbigint},
{Name: "code_hash", Type: Dvarchar},
{Name: "storage_root", Type: Dvarchar},
{Name: "removed", Type: Dboolean},
},
UpsertClause: OnConflict("block_number", "header_id", "state_leaf_key"),
}
var TableStorageNode = Table{
Name: "eth.storage_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "header_id", Type: Dvarchar},
{Name: "state_leaf_key", Type: Dvarchar},
{Name: "storage_leaf_key", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "diff", Type: Dboolean},
{Name: "val", Type: Dbytea},
{Name: "removed", Type: Dboolean},
},
UpsertClause: OnConflict("block_number", "header_id", "state_leaf_key", "storage_leaf_key"),
}
var TableUncle = Table{
Name: "eth.uncle_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "block_hash", Type: Dvarchar},
{Name: "header_id", Type: Dvarchar},
{Name: "parent_hash", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "reward", Type: Dnumeric},
{Name: "index", Type: Dinteger},
},
UpsertClause: OnConflict("block_number", "block_hash"),
}
var TableTransaction = Table{
Name: "eth.transaction_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "header_id", Type: Dvarchar},
{Name: "tx_hash", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "dst", Type: Dvarchar},
{Name: "src", Type: Dvarchar},
{Name: "index", Type: Dinteger},
{Name: "tx_type", Type: Dinteger},
{Name: "value", Type: Dnumeric},
},
UpsertClause: OnConflict("block_number", "header_id", "tx_hash"),
}
var TableReceipt = Table{
Name: "eth.receipt_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "header_id", Type: Dvarchar},
{Name: "tx_id", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "contract", Type: Dvarchar},
{Name: "post_state", Type: Dvarchar},
{Name: "post_status", Type: Dinteger},
},
UpsertClause: OnConflict("block_number", "header_id", "tx_id"),
}
var TableLog = Table{
Name: "eth.log_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "header_id", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "rct_id", Type: Dvarchar},
{Name: "address", Type: Dvarchar},
{Name: "index", Type: Dinteger},
{Name: "topic0", Type: Dvarchar},
{Name: "topic1", Type: Dvarchar},
{Name: "topic2", Type: Dvarchar},
{Name: "topic3", Type: Dvarchar},
},
UpsertClause: OnConflict("block_number", "header_id", "rct_id", "index"),
}
var TableWatchedAddresses = Table{
Name: "eth_meta.watched_addresses",
Columns: []Column{
{Name: "address", Type: Dvarchar},
{Name: "created_at", Type: Dbigint},
{Name: "watched_at", Type: Dbigint},
{Name: "last_filled_at", Type: Dbigint},
},
}

View File

@ -0,0 +1,147 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package schema
import (
"fmt"
"strings"
"github.com/thoas/go-funk"
)
type colType int
const (
Dinteger colType = iota
Dboolean
Dbigint
Dnumeric
Dbytea
Dvarchar
Dtext
)
type ConflictClause struct {
Target []string
Update []string
}
type Column struct {
Name string
Type colType
Array bool
}
type Table struct {
Name string
Columns []Column
UpsertClause ConflictClause
}
type colfmt = func(interface{}) string
func (tbl *Table) ToCsvRow(args ...interface{}) []string {
var row []string
for i, col := range tbl.Columns {
value := col.Type.formatter()(args[i])
if col.Array {
valueList := funk.Map(args[i], col.Type.formatter()).([]string)
value = fmt.Sprintf("{%s}", strings.Join(valueList, ","))
}
row = append(row, value)
}
return row
}
func (tbl *Table) VarcharColumns() []string {
columns := funk.Filter(tbl.Columns, func(col Column) bool {
return col.Type == Dvarchar
}).([]Column)
columnNames := funk.Map(columns, func(col Column) string {
return col.Name
}).([]string)
return columnNames
}
func OnConflict(target ...string) ConflictClause {
return ConflictClause{Target: target}
}
func (c ConflictClause) Set(fields ...string) ConflictClause {
c.Update = fields
return c
}
// ToInsertStatement returns a Postgres-compatible SQL insert statement for the table
// using positional placeholders
func (tbl *Table) ToInsertStatement(upsert bool) string {
var colnames, placeholders []string
for i, col := range tbl.Columns {
colnames = append(colnames, col.Name)
placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
}
suffix := fmt.Sprintf("ON CONFLICT (%s)", strings.Join(tbl.UpsertClause.Target, ", "))
if upsert && len(tbl.UpsertClause.Update) != 0 {
var update_placeholders []string
for _, name := range tbl.UpsertClause.Update {
i := funk.IndexOf(tbl.Columns, func(col Column) bool { return col.Name == name })
update_placeholders = append(update_placeholders, fmt.Sprintf("$%d", i+1))
}
suffix += fmt.Sprintf(
" DO UPDATE SET (%s) = (%s)",
strings.Join(tbl.UpsertClause.Update, ", "), strings.Join(update_placeholders, ", "),
)
} else {
suffix += " DO NOTHING"
}
return fmt.Sprintf(
"INSERT INTO %s (%s) VALUES (%s) %s",
tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "), suffix,
)
}
func sprintf(f string) colfmt {
return func(x interface{}) string { return fmt.Sprintf(f, x) }
}
func (typ colType) formatter() colfmt {
switch typ {
case Dinteger:
return sprintf("%d")
case Dboolean:
return func(x interface{}) string {
if x.(bool) {
return "t"
}
return "f"
}
case Dbigint:
return sprintf("%s")
case Dnumeric:
return sprintf("%s")
case Dbytea:
return sprintf(`\x%x`)
case Dvarchar:
return sprintf("%s")
case Dtext:
return sprintf("%s")
}
panic("unreachable")
}
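For orientation, a hedged sketch (not part of the PR) of what these helpers yield for the v5 tables defined earlier: a conflict clause without a Set list always degrades to DO NOTHING, and ToCsvRow runs each value through its column's formatter.
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
)

// Example is a sketch only, assuming the TableStateNode and TableIPLDBlock
// definitions from this PR; the CID string is a placeholder.
func Example() {
	// TableStateNode declares conflict targets but no Set(...) list, so even with
	// upsert=true the generated statement ends in DO NOTHING:
	fmt.Println(schema.TableStateNode.ToInsertStatement(true))
	// INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, diff,
	// balance, nonce, code_hash, storage_root, removed) VALUES ($1, $2, $3, $4, $5, $6,
	// $7, $8, $9, $10) ON CONFLICT (block_number, header_id, state_leaf_key) DO NOTHING

	// ToCsvRow applies the per-type formatters; bytea values become \x-prefixed hex:
	fmt.Println(schema.TableIPLDBlock.ToCsvRow("1", "bagexamplecid", []byte{0xde, 0xad}))
	// [1 bagexamplecid \xdead]
}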

View File

@ -0,0 +1,53 @@
package schema_test
import (
"testing"
"github.com/stretchr/testify/require"
. "github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
)
var testHeaderTable = Table{
Name: "eth.header_cids",
Columns: []Column{
{Name: "block_number", Type: Dbigint},
{Name: "block_hash", Type: Dvarchar},
{Name: "parent_hash", Type: Dvarchar},
{Name: "cid", Type: Dtext},
{Name: "td", Type: Dnumeric},
{Name: "node_id", Type: Dvarchar},
{Name: "reward", Type: Dnumeric},
{Name: "state_root", Type: Dvarchar},
{Name: "tx_root", Type: Dvarchar},
{Name: "receipt_root", Type: Dvarchar},
{Name: "uncle_root", Type: Dvarchar},
{Name: "bloom", Type: Dbytea},
{Name: "timestamp", Type: Dnumeric},
{Name: "mh_key", Type: Dtext},
{Name: "times_validated", Type: Dinteger},
{Name: "coinbase", Type: Dvarchar},
},
UpsertClause: OnConflict("block_hash", "block_number").Set(
"parent_hash",
"cid",
"td",
"node_id",
"reward",
"state_root",
"tx_root",
"receipt_root",
"uncle_root",
"bloom",
"timestamp",
"mh_key",
"times_validated",
"coinbase",
)}
func TestTable(t *testing.T) {
headerUpsert := `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)`
headerNoUpsert := `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_hash, block_number) DO NOTHING`
require.Equal(t, headerNoUpsert, testHeaderTable.ToInsertStatement(false))
require.Equal(t, headerUpsert, testHeaderTable.ToInsertStatement(true))
}

View File

@ -17,14 +17,13 @@
package test
import (
"bytes"
"context"
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
@ -58,6 +57,10 @@ func SetupTestData(t *testing.T, ind interfaces.StateDiffIndexer) {
err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
require.NoError(t, err)
}
for _, node := range mocks.IPLDs {
err = ind.PushIPLD(tx, node)
require.NoError(t, err)
}
if batchTx, ok := tx.(*sql.BatchTx); ok {
require.Equal(t, mocks.BlockNumber.String(), batchTx.BlockNumber)
@ -96,10 +99,8 @@ func TestPublishAndIndexHeaderIPLDs(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@ -132,10 +133,8 @@ func TestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@ -193,30 +192,6 @@ func TestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
if txRes.Value != transactions[3].Value().String() {
t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
}
accessListElementModels := make([]models.AccessListElementModel, 0)
pgStr = "SELECT cast(access_list_elements.block_number AS TEXT), access_list_elements.index, access_list_elements.tx_id, " +
"access_list_elements.address, access_list_elements.storage_keys FROM eth.access_list_elements " +
"INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC"
err = db.Select(context.Background(), &accessListElementModels, pgStr, c)
if err != nil {
t.Fatal(err)
}
if len(accessListElementModels) != 2 {
t.Fatalf("expected two access list entries, got %d", len(accessListElementModels))
}
model1 := models.AccessListElementModel{
BlockNumber: mocks.BlockNumber.String(),
Index: accessListElementModels[0].Index,
Address: accessListElementModels[0].Address,
}
model2 := models.AccessListElementModel{
BlockNumber: mocks.BlockNumber.String(),
Index: accessListElementModels[1].Index,
Address: accessListElementModels[1].Address,
StorageKeys: accessListElementModels[1].StorageKeys,
}
require.Equal(t, mocks.AccessListEntry1Model, model1)
require.Equal(t, mocks.AccessListEntry2Model, model2)
case trx5CID.String():
require.Equal(t, tx5, data)
txRes := new(txResult)
@ -236,15 +211,15 @@ func TestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
func TestPublishAndIndexLogIPLDs(t *testing.T, db sql.Database) {
rcts := make([]string, 0)
rctsPgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
rctsPgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
logsPgStr := `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
logsPgStr := `SELECT log_cids.index, log_cids.address, blocks.data, log_cids.topic0, log_cids.topic1 FROM eth.log_cids
INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
INNER JOIN ipld.blocks ON (log_cids.cid = blocks.key)
WHERE receipt_cids.cid = $1 ORDER BY eth.log_cids.index ASC`
err = db.Select(context.Background(), &rcts, rctsPgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
@ -268,30 +243,18 @@ func TestPublishAndIndexLogIPLDs(t *testing.T, db sql.Database) {
expectedLogs := mocks.MockReceipts[i].Logs
require.Equal(t, len(expectedLogs), len(results))
var nodeElements []interface{}
for idx, r := range results {
// Attempt to decode the log leaf node.
err = rlp.DecodeBytes(r.Data, &nodeElements)
require.NoError(t, err)
if len(nodeElements) == 2 {
logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx])
require.NoError(t, err)
// 2nd element of the leaf node contains the encoded log data.
require.Equal(t, nodeElements[1].([]byte), logRaw)
} else {
logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx])
require.NoError(t, err)
// raw log was IPLDized
require.Equal(t, r.Data, logRaw)
}
}
}
}
func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
// check receipts were properly indexed and published
rcts := make([]string, 0)
pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1 order by transaction_cids.index`
@ -309,49 +272,41 @@ func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
for idx, c := range rcts {
result := make([]models.IPLDModel, 0)
pgStr = `SELECT data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
WHERE receipt_cids.leaf_cid = $1`
FROM ipld.blocks
WHERE ipld.blocks.key = $1`
err = db.Select(context.Background(), &result, pgStr, c)
if err != nil {
t.Fatal(err)
}
// Decode the receipt leaf node.
var nodeElements []interface{}
err = rlp.DecodeBytes(result[0].Data, &nodeElements)
require.NoError(t, err)
expectedRct, err := mocks.MockReceipts[idx].MarshalBinary()
require.NoError(t, err)
require.Equal(t, nodeElements[1].([]byte), expectedRct)
require.Equal(t, result[0].Data, expectedRct)
dc, err := cid.Decode(c)
if err != nil {
t.Fatal(err)
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1`
postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE cid = $1`
switch c {
case rct1CID.String():
require.Equal(t, rctLeaf1, data)
require.Equal(t, rct1, data)
var postStatus uint64
pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1`
pgStr = `SELECT post_status FROM eth.receipt_cids WHERE cid = $1`
err = db.Get(context.Background(), &postStatus, pgStr, c)
if err != nil {
t.Fatal(err)
}
require.Equal(t, mocks.ExpectedPostStatus, postStatus)
case rct2CID.String():
require.Equal(t, rctLeaf2, data)
require.Equal(t, rct2, data)
var postState string
err = db.Get(context.Background(), &postState, postStatePgStr, c)
if err != nil {
@ -359,7 +314,7 @@ func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
}
require.Equal(t, mocks.ExpectedPostState1, postState)
case rct3CID.String():
require.Equal(t, rctLeaf3, data)
require.Equal(t, rct3, data)
var postState string
err = db.Get(context.Background(), &postState, postStatePgStr, c)
if err != nil {
@ -367,7 +322,7 @@ func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
}
require.Equal(t, mocks.ExpectedPostState2, postState)
case rct4CID.String():
require.Equal(t, rctLeaf4, data)
require.Equal(t, rct4, data)
var postState string
err = db.Get(context.Background(), &postState, postStatePgStr, c)
if err != nil {
@ -375,7 +330,7 @@ func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
}
require.Equal(t, mocks.ExpectedPostState3, postState)
case rct5CID.String():
require.Equal(t, rctLeaf5, data)
require.Equal(t, rct5, data)
var postState string
err = db.Get(context.Background(), &postState, postStatePgStr, c)
if err != nil {
@ -389,9 +344,11 @@ func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
func TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
// check that state nodes were properly indexed and published
stateNodes := make([]models.StateNodeModel, 0)
pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type != 3`
pgStr := `SELECT state_cids.cid, CAST(state_cids.block_number as TEXT), state_cids.state_leaf_key, state_cids.removed,
state_cids.header_id, CAST(state_cids.balance as TEXT), state_cids.nonce, state_cids.code_hash, state_cids.storage_root
FROM eth.state_cids
WHERE block_number = $1
AND removed = false`
err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
@ -403,164 +360,159 @@ func TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
pgStr = `SELECT cast(block_number AS TEXT), header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
var account models.StateAccountModel
err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path)
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
if stateNode.CID == state1CID.String() {
require.Equal(t, 2, stateNode.NodeType)
require.Equal(t, false, stateNode.Removed)
require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).Hex(), stateNode.StateKey)
require.Equal(t, []byte{'\x06'}, stateNode.Path)
require.Equal(t, mocks.ContractLeafNode, data)
require.Equal(t, models.StateAccountModel{
BlockNumber: mocks.BlockNumber.String(),
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "0",
CodeHash: mocks.ContractCodeHash.Bytes(),
StorageRoot: mocks.ContractRoot,
Nonce: 1,
}, account)
require.Equal(t, mocks.BlockNumber.String(), stateNode.BlockNumber)
require.Equal(t, "0", stateNode.Balance)
require.Equal(t, mocks.ContractCodeHash.String(), stateNode.CodeHash)
require.Equal(t, mocks.ContractRoot, stateNode.StorageRoot)
require.Equal(t, uint64(1), stateNode.Nonce)
require.Equal(t, mockBlock.Hash().String(), stateNode.HeaderID)
}
if stateNode.CID == state2CID.String() {
require.Equal(t, 2, stateNode.NodeType)
require.Equal(t, false, stateNode.Removed)
require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).Hex(), stateNode.StateKey)
require.Equal(t, []byte{'\x0c'}, stateNode.Path)
require.Equal(t, mocks.AccountLeafNode, data)
require.Equal(t, models.StateAccountModel{
BlockNumber: mocks.BlockNumber.String(),
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "1000",
CodeHash: mocks.AccountCodeHash.Bytes(),
StorageRoot: mocks.AccountRoot,
Nonce: 0,
}, account)
require.Equal(t, mocks.BlockNumber.String(), stateNode.BlockNumber)
require.Equal(t, "1000", stateNode.Balance)
require.Equal(t, mocks.AccountCodeHash.String(), stateNode.CodeHash)
require.Equal(t, mocks.AccountRoot, stateNode.StorageRoot)
require.Equal(t, uint64(0), stateNode.Nonce)
require.Equal(t, mockBlock.Hash().String(), stateNode.HeaderID)
}
}
// check that Removed state nodes were properly indexed and published
stateNodes = make([]models.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.removed, state_cids.header_id,
state_cids.nonce, CAST(state_cids.balance as TEXT), state_cids.code_hash, state_cids.storage_root
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type = 3
ORDER BY state_path`
WHERE header_cids.block_number = $1 AND removed = true
ORDER BY state_leaf_key`
err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
require.Equal(t, 2, len(stateNodes))
for idx, stateNode := range stateNodes {
for _, stateNode := range stateNodes {
var data []byte
dc, err := cid.Decode(stateNode.CID)
if err != nil {
t.Fatal(err)
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
require.Equal(t, shared.RemovedNodeMhKey, prefixedKey)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
require.Equal(t, shared.RemovedNodeStateCID, dc.String())
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
if idx == 0 {
if common.BytesToHash(mocks.RemovedLeafKey).Hex() == stateNode.StateKey {
require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID)
require.Equal(t, common.BytesToHash(mocks.RemovedLeafKey).Hex(), stateNode.StateKey)
require.Equal(t, []byte{'\x02'}, stateNode.Path)
require.Equal(t, true, stateNode.Removed)
require.Equal(t, []byte{}, data)
}
if idx == 1 {
} else if common.BytesToHash(mocks.Contract2LeafKey).Hex() == stateNode.StateKey {
require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID)
require.Equal(t, common.BytesToHash(mocks.Contract2LeafKey).Hex(), stateNode.StateKey)
require.Equal(t, []byte{'\x07'}, stateNode.Path)
require.Equal(t, true, stateNode.Removed)
require.Equal(t, []byte{}, data)
} else {
t.Fatalf("unexpected stateNode.StateKey value: %s", stateNode.StateKey)
}
}
}
/*
type StorageNodeModel struct {
BlockNumber string `db:"block_number"`
HeaderID string `db:"header_id"`
StateKey []byte `db:"state_leaf_key"`
StorageKey string `db:"storage_leaf_key"`
Removed bool `db:"removed"`
CID string `db:"cid"`
Diff bool `db:"diff"`
Value []byte `db:"val"`
}
*/
func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
// check that storage nodes were properly indexed
storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
AND storage_cids.node_type != 3
ORDER BY storage_path`
storageNodes := make([]models.StorageNodeModel, 0)
pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.header_id, storage_cids.cid,
storage_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.removed, storage_cids.val
FROM eth.storage_cids
WHERE storage_cids.block_number = $1
AND storage_cids.removed = false
ORDER BY storage_leaf_key`
err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
require.Equal(t, 1, len(storageNodes))
require.Equal(t, models.StorageNodeWithStateKeyModel{
require.Equal(t, models.StorageNodeModel{
BlockNumber: mocks.BlockNumber.String(),
HeaderID: mockBlock.Header().Hash().Hex(),
CID: storageCID.String(),
NodeType: 2,
Removed: false,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
Path: []byte{},
Value: mocks.StorageValue,
}, storageNodes[0])
var data []byte
dc, err := cid.Decode(storageNodes[0].CID)
if err != nil {
t.Fatal(err)
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
require.Equal(t, mocks.StorageLeafNode, data)
// check that Removed storage nodes were properly indexed
storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
AND storage_cids.node_type = 3
ORDER BY storage_path`
storageNodes = make([]models.StorageNodeModel, 0)
pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.header_id, storage_cids.cid,
storage_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.removed, storage_cids.val
FROM eth.storage_cids
WHERE storage_cids.block_number = $1
AND storage_cids.removed = true
ORDER BY storage_leaf_key`
err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
require.Equal(t, 3, len(storageNodes))
expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
expectedStorageNodes := []models.StorageNodeModel{ // TODO: ordering is non-deterministic
{
BlockNumber: mocks.BlockNumber.String(),
HeaderID: mockBlock.Header().Hash().Hex(),
CID: shared.RemovedNodeStorageCID,
NodeType: 3,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
Path: []byte{'\x03'},
},
{
BlockNumber: mocks.BlockNumber.String(),
CID: shared.RemovedNodeStorageCID,
NodeType: 3,
Removed: true,
StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(),
StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
Path: []byte{'\x0e'},
Value: []byte{},
},
{
BlockNumber: mocks.BlockNumber.String(),
HeaderID: mockBlock.Header().Hash().Hex(),
CID: shared.RemovedNodeStorageCID,
NodeType: 3,
Removed: true,
StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(),
StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
Path: []byte{'\x0f'},
Value: []byte{},
},
{
BlockNumber: mocks.BlockNumber.String(),
HeaderID: mockBlock.Header().Hash().Hex(),
CID: shared.RemovedNodeStorageCID,
Removed: true,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
Value: []byte{},
},
}
for idx, storageNode := range storageNodes {
@ -569,10 +521,8 @@ func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
mhKey = dshelp.MultihashToDsKey(dc.Hash())
prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
require.Equal(t, shared.RemovedNodeMhKey, prefixedKey)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
require.Equal(t, shared.RemovedNodeStorageCID, dc.String())
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@ -662,7 +612,7 @@ func SetupTestDataNonCanonical(t *testing.T, ind interfaces.StateDiffIndexer) {
func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
// check indexed headers
pgStr := `SELECT CAST(block_number as TEXT), block_hash, cid, cast(td AS TEXT), cast(reward AS TEXT),
tx_root, receipt_root, uncle_root, coinbase
tx_root, receipt_root, uncles_hash, coinbase
FROM eth.header_cids
ORDER BY block_number`
headerRes := make([]models.HeaderModel, 0)
@ -682,7 +632,7 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
TotalDifficulty: mockBlock.Difficulty().String(),
TxRoot: mockBlock.TxHash().String(),
RctRoot: mockBlock.ReceiptHash().String(),
UncleRoot: mockBlock.UncleHash().String(),
UnclesHash: mockBlock.UncleHash().String(),
Coinbase: mocks.MockHeader.Coinbase.String(),
},
{
@ -692,7 +642,7 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
TotalDifficulty: mockNonCanonicalBlock.Difficulty().String(),
TxRoot: mockNonCanonicalBlock.TxHash().String(),
RctRoot: mockNonCanonicalBlock.ReceiptHash().String(),
UncleRoot: mockNonCanonicalBlock.UncleHash().String(),
UnclesHash: mockNonCanonicalBlock.UncleHash().String(),
Coinbase: mocks.MockNonCanonicalHeader.Coinbase.String(),
},
{
@ -702,7 +652,7 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
TotalDifficulty: mockNonCanonicalBlock2.Difficulty().String(),
TxRoot: mockNonCanonicalBlock2.TxHash().String(),
RctRoot: mockNonCanonicalBlock2.ReceiptHash().String(),
UncleRoot: mockNonCanonicalBlock2.UncleHash().String(),
UnclesHash: mockNonCanonicalBlock2.UncleHash().String(),
Coinbase: mocks.MockNonCanonicalHeader2.Coinbase.String(),
},
}
@ -732,8 +682,7 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
headerRLPs := [][]byte{mocks.MockHeaderRlp, mocks.MockNonCanonicalHeaderRlp, mocks.MockNonCanonicalHeader2Rlp}
for i := range expectedRes {
var data []byte
prefixedKey := shared.MultihashKeyFromCID(headerCIDs[i])
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, blockNumbers[i])
err = db.Get(context.Background(), &data, ipfsPgGet, headerCIDs[i].String(), blockNumbers[i])
if err != nil {
t.Fatal(err)
}
@ -744,7 +693,7 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database) {
// check indexed transactions
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_hash, cid, dst, src, index,
tx_data, tx_type, CAST(value as TEXT)
tx_type, CAST(value as TEXT)
FROM eth.transaction_cids
ORDER BY block_number, index`
txRes := make([]models.TxModel, 0)
@ -764,7 +713,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[0].To()),
Src: mocks.SenderAddr.String(),
Index: 0,
Data: mockBlockTxs[0].Data(),
Type: mockBlockTxs[0].Type(),
Value: mockBlockTxs[0].Value().String(),
},
@ -776,7 +724,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[1].To()),
Src: mocks.SenderAddr.String(),
Index: 1,
Data: mockBlockTxs[1].Data(),
Type: mockBlockTxs[1].Type(),
Value: mockBlockTxs[1].Value().String(),
},
@ -788,7 +735,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[2].To()),
Src: mocks.SenderAddr.String(),
Index: 2,
Data: mockBlockTxs[2].Data(),
Type: mockBlockTxs[2].Type(),
Value: mockBlockTxs[2].Value().String(),
},
@ -800,7 +746,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[3].To()),
Src: mocks.SenderAddr.String(),
Index: 3,
Data: mockBlockTxs[3].Data(),
Type: mockBlockTxs[3].Type(),
Value: mockBlockTxs[3].Value().String(),
},
@ -812,7 +757,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[4].To()),
Src: mocks.SenderAddr.String(),
Index: 4,
Data: mockBlockTxs[4].Data(),
Type: mockBlockTxs[4].Type(),
Value: mockBlockTxs[4].Value().String(),
},
@ -829,7 +773,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: mockNonCanonicalBlockTxs[0].To().String(),
Src: mocks.SenderAddr.String(),
Index: 0,
Data: mockNonCanonicalBlockTxs[0].Data(),
Type: mockNonCanonicalBlockTxs[0].Type(),
Value: mockNonCanonicalBlockTxs[0].Value().String(),
},
@ -841,7 +784,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: mockNonCanonicalBlockTxs[1].To().String(),
Src: mocks.SenderAddr.String(),
Index: 1,
Data: mockNonCanonicalBlockTxs[1].Data(),
Type: mockNonCanonicalBlockTxs[1].Type(),
Value: mockNonCanonicalBlockTxs[1].Value().String(),
},
@ -858,7 +800,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: "",
Src: mocks.SenderAddr.String(),
Index: 0,
Data: mockNonCanonicalBlock2Txs[0].Data(),
Type: mockNonCanonicalBlock2Txs[0].Type(),
Value: mockNonCanonicalBlock2Txs[0].Value().String(),
},
@ -870,7 +811,6 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
Dst: mockNonCanonicalBlock2Txs[1].To().String(),
Src: mocks.SenderAddr.String(),
Index: 1,
Data: mockNonCanonicalBlock2Txs[1].Data(),
Type: mockNonCanonicalBlock2Txs[1].Type(),
Value: mockNonCanonicalBlock2Txs[1].Value().String(),
},
@ -903,13 +843,11 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
// check indexed IPLD blocks
var data []byte
var prefixedKey string
txCIDs := []cid.Cid{trx1CID, trx2CID, trx3CID, trx4CID, trx5CID}
txRLPs := [][]byte{tx1, tx2, tx3, tx4, tx5}
for i, txCID := range txCIDs {
prefixedKey = shared.MultihashKeyFromCID(txCID)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, txCID.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@ -919,7 +857,7 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
// check indexed receipts
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_id, leaf_cid, leaf_mh_key, post_status, post_state, contract, contract_hash, log_root
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_id, cid, post_status, post_state, contract
FROM eth.receipt_cids
ORDER BY block_number`
rctRes := make([]models.ReceiptModel, 0)
@ -969,43 +907,39 @@ func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
for i := 0; i < len(expectedBlockRctsMap); i++ {
rct := rctRes[i]
require.Contains(t, expectedBlockRctsMap, rct.LeafCID)
require.Equal(t, expectedBlockRctsMap[rct.LeafCID], rct)
require.Contains(t, expectedBlockRctsMap, rct.CID)
require.Equal(t, expectedBlockRctsMap[rct.CID], rct)
}
for i := 0; i < len(expectedNonCanonicalBlockRctsMap); i++ {
rct := rctRes[len(expectedBlockRctsMap)+i]
require.Contains(t, expectedNonCanonicalBlockRctsMap, rct.LeafCID)
require.Equal(t, expectedNonCanonicalBlockRctsMap[rct.LeafCID], rct)
require.Contains(t, expectedNonCanonicalBlockRctsMap, rct.CID)
require.Equal(t, expectedNonCanonicalBlockRctsMap[rct.CID], rct)
}
for i := 0; i < len(expectedNonCanonicalBlock2RctsMap); i++ {
rct := rctRes[len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+i]
require.Contains(t, expectedNonCanonicalBlock2RctsMap, rct.LeafCID)
require.Equal(t, expectedNonCanonicalBlock2RctsMap[rct.LeafCID], rct)
require.Contains(t, expectedNonCanonicalBlock2RctsMap, rct.CID)
require.Equal(t, expectedNonCanonicalBlock2RctsMap[rct.CID], rct)
}
// check indexed rct IPLD blocks
var data []byte
var prefixedKey string
rctRLPs := [][]byte{
rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5,
nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2,
rct1, rct2, rct3, rct4, rct5, nonCanonicalBlockRct1, nonCanonicalBlockRct2,
}
for i, rctCid := range append(rctCids, nonCanonicalBlockRctCids...) {
prefixedKey = shared.MultihashKeyFromCID(rctCid)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, rctCid.String(), mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
require.Equal(t, rctRLPs[i], data)
}
nonCanonicalBlock2RctRLPs := [][]byte{nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2}
nonCanonicalBlock2RctRLPs := [][]byte{nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2}
for i, rctCid := range nonCanonicalBlock2RctCids {
prefixedKey = shared.MultihashKeyFromCID(rctCid)
err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.Block2Number.Uint64())
err = db.Get(context.Background(), &data, ipfsPgGet, rctCid.String(), mocks.Block2Number.Uint64())
if err != nil {
t.Fatal(err)
}
@ -1015,9 +949,9 @@ func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
// check indexed logs
pgStr := `SELECT address, log_data, topic0, topic1, topic2, topic3, data
pgStr := `SELECT address, topic0, topic1, topic2, topic3, data
FROM eth.log_cids
INNER JOIN public.blocks ON (log_cids.block_number = blocks.block_number AND log_cids.leaf_mh_key = blocks.key)
INNER JOIN ipld.blocks ON (log_cids.block_number = blocks.block_number AND log_cids.cid = blocks.key)
WHERE log_cids.block_number = $1 AND header_id = $2 AND rct_id = $3
ORDER BY log_cids.index ASC`
@ -1073,7 +1007,6 @@ func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
expectedLog := models.LogsModel{
Address: log.Address.String(),
Data: log.Data,
Topic0: topicSet[0],
Topic1: topicSet[1],
Topic2: topicSet[2],
@ -1081,33 +1014,19 @@ func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
}
require.Equal(t, expectedLog, logRes[i].LogsModel)
// check indexed log IPLD block
var nodeElements []interface{}
err = rlp.DecodeBytes(logRes[i].IPLDData, &nodeElements)
require.NoError(t, err)
if len(nodeElements) == 2 {
logRaw, err := rlp.EncodeToBytes(log)
require.NoError(t, err)
// 2nd element of the leaf node contains the encoded log data.
require.Equal(t, nodeElements[1].([]byte), logRaw)
} else {
logRaw, err := rlp.EncodeToBytes(log)
require.NoError(t, err)
// raw log was IPLDized
require.Equal(t, logRes[i].IPLDData, logRaw)
}
require.Equal(t, logRaw, logRes[i].IPLDData)
}
}
}
func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
// check indexed state nodes
pgStr := `SELECT state_path, state_leaf_key, node_type, cid, mh_key, diff
pgStr := `SELECT state_leaf_key, removed, cid, diff
FROM eth.state_cids
WHERE block_number = $1
AND header_id = $2
ORDER BY state_path`
AND header_id = $2`
removedNodeCID, _ := cid.Decode(shared.RemovedNodeStateCID)
stateNodeCIDs := []cid.Cid{state1CID, state2CID, removedNodeCID, removedNodeCID}
@ -1116,31 +1035,20 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
expectedStateNodes := make([]models.StateNodeModel, 0)
for i, stateDiff := range mocks.StateDiffs {
expectedStateNodes = append(expectedStateNodes, models.StateNodeModel{
Path: stateDiff.Path,
StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(),
NodeType: stateDiff.NodeType.Int(),
StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
Removed: stateDiff.Removed,
CID: stateNodeCIDs[i].String(),
MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]),
Diff: true,
})
}
sort.Slice(expectedStateNodes, func(i, j int) bool {
if bytes.Compare(expectedStateNodes[i].Path, expectedStateNodes[j].Path) < 0 {
return true
} else {
return false
}
})
// expected state nodes in the non-canonical block at London height + 1
expectedNonCanonicalBlock2StateNodes := make([]models.StateNodeModel, 0)
for i, stateDiff := range mocks.StateDiffs[:2] {
expectedNonCanonicalBlock2StateNodes = append(expectedNonCanonicalBlock2StateNodes, models.StateNodeModel{
Path: stateDiff.Path,
StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(),
NodeType: stateDiff.NodeType.Int(),
StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
Removed: stateDiff.Removed,
CID: stateNodeCIDs[i].String(),
MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]),
Diff: true,
})
}
@ -1151,11 +1059,9 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedStateNodes), len(stateNodes))
for i, expectedStateNode := range expectedStateNodes {
require.Equal(t, expectedStateNode, stateNodes[i])
}
require.Equal(t, len(expectedStateNodes), len(stateNodes))
assert.ElementsMatch(t, expectedStateNodes, stateNodes)
// check state nodes for non-canonical block at London height
stateNodes = make([]models.StateNodeModel, 0)
@ -1164,10 +1070,7 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
t.Fatal(err)
}
require.Equal(t, len(expectedStateNodes), len(stateNodes))
for i, expectedStateNode := range expectedStateNodes {
require.Equal(t, expectedStateNode, stateNodes[i])
}
assert.ElementsMatch(t, expectedStateNodes, stateNodes)
// check state nodes for non-canonical block at London height + 1
stateNodes = make([]models.StateNodeModel, 0)
@ -1176,19 +1079,15 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
t.Fatal(err)
}
require.Equal(t, len(expectedNonCanonicalBlock2StateNodes), len(stateNodes))
for i, expectedStateNode := range expectedNonCanonicalBlock2StateNodes {
require.Equal(t, expectedStateNode, stateNodes[i])
}
assert.ElementsMatch(t, expectedNonCanonicalBlock2StateNodes, stateNodes)
}
func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
// check indexed storage nodes
pgStr := `SELECT state_path, storage_path, storage_leaf_key, node_type, cid, mh_key, diff
pgStr := `SELECT storage_leaf_key, state_leaf_key, removed, cid, diff, val
FROM eth.storage_cids
WHERE block_number = $1
AND header_id = $2
ORDER BY state_path, storage_path`
AND header_id = $2`
removedNodeCID, _ := cid.Decode(shared.RemovedNodeStorageCID)
storageNodeCIDs := []cid.Cid{storageCID, removedNodeCID, removedNodeCID, removedNodeCID}
@ -1197,40 +1096,31 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
expectedStorageNodes := make([]models.StorageNodeModel, 0)
storageNodeIndex := 0
for _, stateDiff := range mocks.StateDiffs {
for _, storageNode := range stateDiff.StorageNodes {
for _, storageNode := range stateDiff.StorageDiff {
expectedStorageNodes = append(expectedStorageNodes, models.StorageNodeModel{
StatePath: stateDiff.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(),
NodeType: storageNode.NodeType.Int(),
Removed: storageNode.Removed,
CID: storageNodeCIDs[storageNodeIndex].String(),
MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]),
Diff: true,
Value: storageNode.Value,
})
storageNodeIndex++
}
}
sort.Slice(expectedStorageNodes, func(i, j int) bool {
if bytes.Compare(expectedStorageNodes[i].Path, expectedStorageNodes[j].Path) < 0 {
return true
} else {
return false
}
})
// expected storage nodes in the non-canonical block at London height + 1
expectedNonCanonicalBlock2StorageNodes := make([]models.StorageNodeModel, 0)
storageNodeIndex = 0
for _, stateDiff := range mocks.StateDiffs[:2] {
for _, storageNode := range stateDiff.StorageNodes {
for _, storageNode := range stateDiff.StorageDiff {
expectedNonCanonicalBlock2StorageNodes = append(expectedNonCanonicalBlock2StorageNodes, models.StorageNodeModel{
StatePath: stateDiff.Path,
Path: storageNode.Path,
StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(),
NodeType: storageNode.NodeType.Int(),
Removed: storageNode.Removed,
CID: storageNodeCIDs[storageNodeIndex].String(),
MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]),
Diff: true,
Value: storageNode.Value,
})
storageNodeIndex++
}
@ -1242,11 +1132,9 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedStorageNodes), len(storageNodes))
for i, expectedStorageNode := range expectedStorageNodes {
require.Equal(t, expectedStorageNode, storageNodes[i])
}
require.Equal(t, len(expectedStorageNodes), len(storageNodes))
assert.ElementsMatch(t, expectedStorageNodes, storageNodes)
// check storage nodes for non-canonical block at London height
storageNodes = make([]models.StorageNodeModel, 0)
@ -1254,11 +1142,9 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedStorageNodes), len(storageNodes))
for i, expectedStorageNode := range expectedStorageNodes {
require.Equal(t, expectedStorageNode, storageNodes[i])
}
require.Equal(t, len(expectedStorageNodes), len(storageNodes))
assert.ElementsMatch(t, expectedStorageNodes, storageNodes)
// check storage nodes for non-canonical block at London height + 1
storageNodes = make([]models.StorageNodeModel, 0)
@ -1267,8 +1153,5 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
t.Fatal(err)
}
require.Equal(t, len(expectedNonCanonicalBlock2StorageNodes), len(storageNodes))
for i, expectedStorageNode := range expectedNonCanonicalBlock2StorageNodes {
require.Equal(t, expectedStorageNode, storageNodes[i])
}
assert.ElementsMatch(t, expectedNonCanonicalBlock2StorageNodes, storageNodes)
}

View File

@ -24,8 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
@ -36,7 +34,7 @@ import (
var (
err error
ipfsPgGet = `SELECT data FROM public.blocks
ipfsPgGet = `SELECT data FROM ipld.blocks
WHERE key = $1 AND block_number = $2`
watchedAddressesPgGet = `SELECT *
FROM eth_meta.watched_addresses`
@ -49,9 +47,6 @@ var (
rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid
nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid
rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte
nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2 []byte
nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2 []byte
state1CID, state2CID, storageCID cid.Cid
)
@ -157,62 +152,18 @@ func init() {
state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256)
state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)
// create raw receipts
rawRctLeafNodes, rctleafNodeCids := createRctTrie([][]byte{rct1, rct2, rct3, rct4, rct5})
rct1CID = rctleafNodeCids[0]
rct2CID = rctleafNodeCids[1]
rct3CID = rctleafNodeCids[2]
rct4CID = rctleafNodeCids[3]
rct5CID = rctleafNodeCids[4]
rctLeaf1 = rawRctLeafNodes[0]
rctLeaf2 = rawRctLeafNodes[1]
rctLeaf3 = rawRctLeafNodes[2]
rctLeaf4 = rawRctLeafNodes[3]
rctLeaf5 = rawRctLeafNodes[4]
rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct1, multihash.KECCAK_256)
rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct2, multihash.KECCAK_256)
rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct3, multihash.KECCAK_256)
rct4CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct4, multihash.KECCAK_256)
rct5CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct5, multihash.KECCAK_256)
// create raw receipts for non-canonical blocks
nonCanonicalBlockRawRctLeafNodes, nonCanonicalBlockRctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
nonCanonicalBlockRct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, nonCanonicalBlockRct1, multihash.KECCAK_256)
nonCanonicalBlockRct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, nonCanonicalBlockRct2, multihash.KECCAK_256)
nonCanonicalBlockRct1CID = nonCanonicalBlockRctLeafNodeCids[0]
nonCanonicalBlockRct2CID = nonCanonicalBlockRctLeafNodeCids[1]
nonCanonicalBlockRctLeaf1 = nonCanonicalBlockRawRctLeafNodes[0]
nonCanonicalBlockRctLeaf2 = nonCanonicalBlockRawRctLeafNodes[1]
nonCanonicalBlock2RawRctLeafNodes, nonCanonicalBlock2RctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
nonCanonicalBlock2Rct1CID = nonCanonicalBlock2RctLeafNodeCids[0]
nonCanonicalBlock2Rct2CID = nonCanonicalBlock2RctLeafNodeCids[1]
nonCanonicalBlock2RctLeaf1 = nonCanonicalBlock2RawRctLeafNodes[0]
nonCanonicalBlock2RctLeaf2 = nonCanonicalBlock2RawRctLeafNodes[1]
}
// createRctTrie creates a receipt trie from the given raw receipts
// returns receipt leaf nodes and their CIDs
func createRctTrie(rcts [][]byte) ([][]byte, []cid.Cid) {
receiptTrie := ipld.NewRctTrie()
for i, rct := range rcts {
receiptTrie.Add(i, rct)
}
rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
for i, rln := range rctLeafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
rlp.Decode(r, &idx)
rctleafNodeCids[idx] = rln.Cid()
orderedRctLeafNodes[idx] = rln.RawData()
}
return orderedRctLeafNodes, rctleafNodeCids
nonCanonicalBlock2Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, nonCanonicalBlock2Rct1, multihash.KECCAK_256)
nonCanonicalBlock2Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, nonCanonicalBlock2Rct2, multihash.KECCAK_256)
}
// createRctModel creates a models.ReceiptModel object from a given ethereum receipt
@ -221,21 +172,16 @@ func createRctModel(rct *types.Receipt, cid cid.Cid, blockNumber string) models.
BlockNumber: blockNumber,
HeaderID: rct.BlockHash.String(),
TxID: rct.TxHash.String(),
LeafCID: cid.String(),
LeafMhKey: shared.MultihashKeyFromCID(cid),
LogRoot: rct.LogRoot.String(),
CID: cid.String(),
}
contract := shared.HandleZeroAddr(rct.ContractAddress)
rctModel.Contract = contract
if contract != "" {
rctModel.ContractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
if len(rct.PostState) == 0 {
rctModel.PostStatus = rct.Status
} else {
rctModel.PostState = common.Bytes2Hex(rct.PostState)
rctModel.PostState = common.BytesToHash(rct.PostState).String()
}
return rctModel

View File

@ -100,19 +100,11 @@ func TearDownDB(t *testing.T, db sql.Database) {
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(ctx, `DELETE FROM eth.state_accounts`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(ctx, `DELETE FROM eth.access_list_elements`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(ctx, `DELETE FROM eth.log_cids`)
if err != nil {
t.Fatal(err)
}
_, err = tx.Exec(ctx, `DELETE FROM blocks`)
_, err = tx.Exec(ctx, `DELETE FROM ipld.blocks`)
if err != nil {
t.Fatal(err)
}

View File

@ -18,6 +18,7 @@ package statediff_test
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
@ -26,6 +27,8 @@ import (
"sort"
"testing"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
@ -47,18 +50,19 @@ var (
block1CoinbaseAddr, block2CoinbaseAddr, block3CoinbaseAddr common.Address
block1CoinbaseHash, block2CoinbaseHash, block3CoinbaseHash common.Hash
builder statediff.Builder
emptyStorage = make([]sdtypes.StorageNode, 0)
emptyStorage = make([]sdtypes.StorageLeafNode, 0)
// block 1 data
block1CoinbaseAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
block1CoinbaseAccount = &types.StateAccount{
Nonce: 0,
Balance: big.NewInt(5000000000000000000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
})
}
block1CoinbaseAccountRLP, _ = rlp.EncodeToBytes(block1CoinbaseAccount)
block1CoinbaseLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("38251692195afc818c92b485fcb8a4691af89cbe5a2ab557b83a4261be2a9a"),
block1CoinbaseAccount,
block1CoinbaseAccountRLP,
})
block1CoinbaseLeafNodeHash = crypto.Keccak256(block1CoinbaseLeafNode)
block1x040bBranchNode, _ = rlp.EncodeToBytes(&[]interface{}{
@ -122,27 +126,29 @@ var (
})
// block 2 data
block2CoinbaseAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
block2CoinbaseAccount = &types.StateAccount{
Nonce: 0,
Balance: big.NewInt(5000000000000000000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
})
}
block2CoinbaseAccountRLP, _ = rlp.EncodeToBytes(block2CoinbaseAccount)
block2CoinbaseLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("20679cbcf198c1741a6f4e4473845659a30caa8b26f8d37a0be2e2bc0d8892"),
block2CoinbaseAccount,
block2CoinbaseAccountRLP,
})
block2CoinbaseLeafNodeHash = crypto.Keccak256(block2CoinbaseLeafNode)
block2MovedPremineBalance, _ = new(big.Int).SetString("4000000000000000000000", 10)
block2MovedPremineAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
block2MovedPremineAccount = &types.StateAccount{
Nonce: 0,
Balance: block2MovedPremineBalance,
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
})
}
block2MovedPremineAccountRLP, _ = rlp.EncodeToBytes(block2MovedPremineAccount)
block2MovedPremineLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("20f2e24db7943eab4415f99e109698863b0fecca1cf9ffc500f38cefbbe29e"),
block2MovedPremineAccount,
block2MovedPremineAccountRLP,
})
block2MovedPremineLeafNodeHash = crypto.Keccak256(block2MovedPremineLeafNode)
block2x00080dBranchNode, _ = rlp.EncodeToBytes(&[]interface{}{
@ -228,41 +234,44 @@ var (
// block3 data
// path 060e0f
blcok3CoinbaseBalance, _ = new(big.Int).SetString("5156250000000000000", 10)
block3CoinbaseAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
block3CoinbaseAccount = &types.StateAccount{
Nonce: 0,
Balance: blcok3CoinbaseBalance,
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
})
}
block3CoinbaseAccountRLP, _ = rlp.EncodeToBytes(block3CoinbaseAccount)
block3CoinbaseLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("3a174f00e64521a535f35e67c1aa241951c791639b2f3d060f49c5d9fa8b9e"),
block3CoinbaseAccount,
block3CoinbaseAccountRLP,
})
block3CoinbaseLeafNodeHash = crypto.Keccak256(block3CoinbaseLeafNode)
// path 0c0e050703
block3MovedPremineBalance1, _ = new(big.Int).SetString("3750000000000000000", 10)
block3MovedPremineAccount1, _ = rlp.EncodeToBytes(&types.StateAccount{
block3MovedPremineAccount1 = &types.StateAccount{
Nonce: 0,
Balance: block3MovedPremineBalance1,
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
})
}
block3MovedPremineAccount1RLP, _ = rlp.EncodeToBytes(block3MovedPremineAccount1)
block3MovedPremineLeafNode1, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("3ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190"), // ce573ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190
block3MovedPremineAccount1,
block3MovedPremineAccount1RLP,
})
block3MovedPremineLeafNodeHash1 = crypto.Keccak256(block3MovedPremineLeafNode1)
// path 0c0e050708
block3MovedPremineBalance2, _ = new(big.Int).SetString("1999944000000000000000", 10)
block3MovedPremineAccount2, _ = rlp.EncodeToBytes(&types.StateAccount{
block3MovedPremineAccount2 = &types.StateAccount{
Nonce: 0,
Balance: block3MovedPremineBalance2,
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
})
}
block3MovedPremineAccount2RLP, _ = rlp.EncodeToBytes(block3MovedPremineAccount2)
block3MovedPremineLeafNode2, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("33bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012"), // ce5783bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012
block3MovedPremineAccount2,
block3MovedPremineAccount2RLP,
})
block3MovedPremineLeafNodeHash2 = crypto.Keccak256(block3MovedPremineLeafNode2)
@ -443,7 +452,7 @@ func init() {
log.Fatal(err)
}
block2CoinbaseAddr = block2.Coinbase()
block2CoinbaseHash = crypto.Keccak256Hash(block2CoinbaseAddr.Bytes())
block2CoinbaseHash = crypto.Keccak256Hash(block2CoinbaseAddr.Bytes()) // 0x08d4679cbcf198c1741a6f4e4473845659a30caa8b26f8d37a0be2e2bc0d8892
block3, _, err = loadBlockFromRLPFile("./block3_rlp")
if err != nil {
log.Fatal(err)
@ -472,9 +481,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
if err != nil {
t.Error(err)
}
params := statediff.Params{
IntermediateStateNodes: true,
}
params := statediff.Params{}
builder = statediff.NewBuilder(chain.StateCache())
var tests = []struct {
@ -496,31 +503,33 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
&sdtypes.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
Nodes: []sdtypes.StateNode{
Nodes: []sdtypes.StateLeafNode{
{
Path: []byte{},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block1RootBranchNode,
},
{
Path: []byte{'\x04'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block1x04BranchNode,
},
{
Path: []byte{'\x04', '\x0b'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block1x040bBranchNode,
},
{
Path: []byte{'\x04', '\x0b', '\x0e'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: block1CoinbaseAccount,
LeafKey: block1CoinbaseHash.Bytes(),
NodeValue: block1CoinbaseLeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1CoinbaseLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
},
IPLDs: []sdtypes.IPLD{
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1RootBranchNode)).String(),
Content: block1RootBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1x04BranchNode)).String(),
Content: block1x04BranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1x040bBranchNode)).String(),
Content: block1x040bBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1CoinbaseLeafNode)).String(),
Content: block1CoinbaseLeafNode,
},
},
},
@ -539,47 +548,41 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
&sdtypes.StateObject{
BlockNumber: block2.Number(),
BlockHash: block2.Hash(),
Nodes: []sdtypes.StateNode{
Nodes: []sdtypes.StateLeafNode{
{
Path: []byte{},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block2RootBranchNode,
},
{
Path: []byte{'\x00'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block2x00BranchNode,
},
{
Path: []byte{'\x00', '\x08'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block2x0008BranchNode,
},
{
Path: []byte{'\x00', '\x08', '\x0d'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block2x00080dBranchNode,
},
// this new leaf at x00 x08 x0d x00 was "created" when a premine account (leaf) was moved from path x00 x08 x0d
// this occurred because of the creation of the new coinbase receiving account (leaf) at x00 x08 x0d x04
// which necessitates that we create a branch at x00 x08 x0d (as shown in UpdateAccounts below)
{
Path: []byte{'\x00', '\x08', '\x0d', '\x00'},
NodeType: sdtypes.Leaf,
StorageNodes: emptyStorage,
LeafKey: common.HexToHash("08d0f2e24db7943eab4415f99e109698863b0fecca1cf9ffc500f38cefbbe29e").Bytes(),
NodeValue: block2MovedPremineLeafNode,
},
{
Path: []byte{'\x00', '\x08', '\x0d', '\x04'},
NodeType: sdtypes.Leaf,
StorageNodes: emptyStorage,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: block2CoinbaseAccount,
LeafKey: block2CoinbaseHash.Bytes(),
NodeValue: block2CoinbaseLeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2CoinbaseLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
},
IPLDs: []sdtypes.IPLD{
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2RootBranchNode)).String(),
Content: block2RootBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2x00BranchNode)).String(),
Content: block2x00BranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2x0008BranchNode)).String(),
Content: block2x0008BranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2x00080dBranchNode)).String(),
Content: block2x00080dBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2MovedPremineLeafNode)).String(),
Content: block2MovedPremineLeafNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2CoinbaseLeafNode)).String(),
Content: block2CoinbaseLeafNode,
},
},
},
@ -597,69 +600,66 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
&sdtypes.StateObject{
BlockNumber: block3.Number(),
BlockHash: block3.Hash(),
Nodes: []sdtypes.StateNode{
{
Path: []byte{},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3RootBranchNode,
},
{
Path: []byte{'\x06'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3x06BranchNode,
},
{
Path: []byte{'\x06', '\x0e'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3x060eBranchNode,
},
{
Path: []byte{'\x0c'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3x0cBranchNode,
},
{
Path: []byte{'\x0c', '\x0e'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3x0c0eBranchNode,
},
{
Path: []byte{'\x0c', '\x0e', '\x05'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3x0c0e05BranchNode,
},
{
Path: []byte{'\x0c', '\x0e', '\x05', '\x07'},
NodeType: sdtypes.Branch,
StorageNodes: emptyStorage,
NodeValue: block3x0c0e0507BranchNode,
},
Nodes: []sdtypes.StateLeafNode{
{ // How was this account created???
Path: []byte{'\x0c', '\x0e', '\x05', '\x07', '\x03'},
NodeType: sdtypes.Leaf,
StorageNodes: emptyStorage,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: block3MovedPremineAccount1,
LeafKey: common.HexToHash("ce573ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190").Bytes(),
NodeValue: block3MovedPremineLeafNode1,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode1)).String(),
},
{ // This account (leaf) used to be at 0c 0e 05 07; it likely moved because of the new account above
Path: []byte{'\x0c', '\x0e', '\x05', '\x07', '\x08'},
NodeType: sdtypes.Leaf,
StorageNodes: emptyStorage,
LeafKey: common.HexToHash("ce5783bc1e69eedf90f402e11f6862da14ed8e50156635a04d6393bbae154012").Bytes(),
NodeValue: block3MovedPremineLeafNode2,
StorageDiff: emptyStorage,
},
{ // this is the new account created due to the coinbase mining a block; its creation shouldn't affect 0c 0e 05 07
Path: []byte{'\x06', '\x0e', '\x0f'},
NodeType: sdtypes.Leaf,
StorageNodes: emptyStorage,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: block3CoinbaseAccount,
LeafKey: block3CoinbaseHash.Bytes(),
NodeValue: block3CoinbaseLeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3CoinbaseLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
},
IPLDs: []sdtypes.IPLD{
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3RootBranchNode)).String(),
Content: block3RootBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x06BranchNode)).String(),
Content: block3x06BranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x060eBranchNode)).String(),
Content: block3x060eBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0cBranchNode)).String(),
Content: block3x0cBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0c0eBranchNode)).String(),
Content: block3x0c0eBranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0c0e05BranchNode)).String(),
Content: block3x0c0e05BranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0c0e0507BranchNode)).String(),
Content: block3x0c0e0507BranchNode,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode1)).String(),
Content: block3MovedPremineLeafNode1,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode2)).String(),
Content: block3MovedPremineLeafNode2,
},
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3CoinbaseLeafNode)).String(),
Content: block3CoinbaseLeafNode,
},
},
},
@ -682,8 +682,25 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
sort.Slice(receivedStateDiffRlp, func(i, j int) bool { return receivedStateDiffRlp[i] < receivedStateDiffRlp[j] })
sort.Slice(expectedStateDiffRlp, func(i, j int) bool { return expectedStateDiffRlp[i] < expectedStateDiffRlp[j] })
if !bytes.Equal(receivedStateDiffRlp, expectedStateDiffRlp) {
t.Logf("Test failed: %s", test.name)
t.Errorf("actual state diff: %+v\nexpected state diff: %+v", diff, test.expected)
actual, err := json.Marshal(diff)
if err != nil {
t.Error(err)
}
expected, err := json.Marshal(test.expected)
if err != nil {
t.Error(err)
}
t.Logf("Test failed: %s", test.name)
t.Errorf("actual state diff: %s\r\n\r\n\r\nexpected state diff: %s", actual, expected)
}
}
if !bytes.Equal(crypto.Keccak256(block1RootBranchNode), block1.Root().Bytes()) {
t.Errorf("actual state root: %s\r\nexpected state root: %s", crypto.Keccak256(block1RootBranchNode), block1.Root().Bytes())
}
if !bytes.Equal(crypto.Keccak256(block2RootBranchNode), block2.Root().Bytes()) {
t.Errorf("actual state root: %s\r\nexpected state root: %s", crypto.Keccak256(block2RootBranchNode), block2.Root().Bytes())
}
if !bytes.Equal(crypto.Keccak256(block3RootBranchNode), block3.Root().Bytes()) {
t.Errorf("actual state root: %s\r\nexpected state root: %s", crypto.Keccak256(block3RootBranchNode), block3.Root().Bytes())
}
}

View File

@ -26,6 +26,8 @@ import (
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
@ -44,7 +46,6 @@ import (
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
types2 "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/thoas/go-funk"
)
@ -59,8 +60,6 @@ const (
var writeLoopParams = ParamsWithMutex{
Params: Params{
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
IncludeBlock: true,
IncludeReceipts: true,
IncludeTD: true,
@ -95,18 +94,16 @@ type IService interface {
StateDiffAt(blockNumber uint64, params Params) (*Payload, error)
// StateDiffFor method to get state diff object at specific block
StateDiffFor(blockHash common.Hash, params Params) (*Payload, error)
// StateTrieAt method to get state trie object at specific block
StateTrieAt(blockNumber uint64, params Params) (*Payload, error)
// StreamCodeAndCodeHash method to stream out all code and codehash pairs
StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool)
// WriteStateDiffAt method to write state diff object directly to DB
WriteStateDiffAt(blockNumber uint64, params Params) JobID
// WriteStateDiffFor method to write state diff object directly to DB
WriteStateDiffFor(blockHash common.Hash, params Params) error
// WriteLoop event loop for progressively processing and writing diffs directly to DB
WriteLoop(chainEventCh chan core.ChainEvent)
// Method to change the addresses being watched in write loop params
// WatchAddress method to change the addresses being watched in write loop params
WatchAddress(operation types2.OperationType, args []types2.WatchAddressArg) error
// StreamCodeAndCodeHash method to export all the codehash => code mappings at a block height
StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool)
// SubscribeWriteStatus method to subscribe to receive state diff processing output
SubscribeWriteStatus(id rpc.ID, sub chan<- JobStatus, quitChan chan<- bool)
@ -547,31 +544,6 @@ func (sds *Service) newPayload(stateObject []byte, block *types.Block, params Pa
return payload, nil
}
// StateTrieAt returns a state trie object payload at the specified blockheight
// This operation cannot be performed back past the point of db pruning; it requires an archival node for historical data
func (sds *Service) StateTrieAt(blockNumber uint64, params Params) (*Payload, error) {
currentBlock := sds.BlockChain.GetBlockByNumber(blockNumber)
log.Info("sending state trie", "block height", blockNumber)
// compute leaf paths of watched addresses in the params
params.ComputeWatchedAddressesLeafPaths()
return sds.processStateTrie(currentBlock, params)
}
func (sds *Service) processStateTrie(block *types.Block, params Params) (*Payload, error) {
stateNodes, err := sds.Builder.BuildStateTrieObject(block)
if err != nil {
return nil, err
}
stateTrieRlp, err := rlp.EncodeToBytes(&stateNodes)
if err != nil {
return nil, err
}
log.Info("state trie size", "at block height", block.Number().Uint64(), "rlp byte size", len(stateTrieRlp))
return sds.newPayload(stateTrieRlp, block, params)
}
// Subscribe is used by the API to subscribe to the service loop
func (sds *Service) Subscribe(id rpc.ID, sub chan<- Payload, quitChan chan<- bool, params Params) {
log.Info("Subscribing to the statediff service")
@ -732,45 +704,6 @@ func sendNonBlockingQuit(id rpc.ID, sub Subscription) {
}
}
// StreamCodeAndCodeHash subscription method for extracting all the codehash=>code mappings that exist in the trie at the provided height
func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool) {
current := sds.BlockChain.GetBlockByNumber(blockNumber)
log.Info("sending code and codehash", "block height", blockNumber)
currentTrie, err := sds.BlockChain.StateCache().OpenTrie(current.Root())
if err != nil {
log.Error("error creating trie for block", "block height", current.Number(), "err", err)
close(quitChan)
return
}
it := currentTrie.NodeIterator([]byte{})
leafIt := trie.NewIterator(it)
go func() {
defer close(quitChan)
for leafIt.Next() {
select {
case <-sds.QuitChan:
return
default:
}
account := new(types.StateAccount)
if err := rlp.DecodeBytes(leafIt.Value, account); err != nil {
log.Error("error decoding state account", "err", err)
return
}
codeHash := common.BytesToHash(account.CodeHash)
code, err := sds.BlockChain.StateCache().ContractCode(common.Hash{}, codeHash)
if err != nil {
log.Error("error collecting contract code", "err", err)
return
}
outChan <- types2.CodeAndCodeHash{
Hash: codeHash,
Code: code,
}
}
}()
}
// WriteStateDiffAt writes a state diff at the specific blockheight directly to the database
// This operation cannot be performed back past the point of db pruning; it requires an archival node
// for historical data
@ -860,17 +793,17 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
return err
}
output := func(node types2.StateNode) error {
output := func(node types2.StateLeafNode) error {
return sds.indexer.PushStateNode(tx, node, block.Hash().String())
}
codeOutput := func(c types2.CodeAndCodeHash) error {
return sds.indexer.PushCodeAndCodeHash(tx, c)
ipldOutput := func(c types2.IPLD) error {
return sds.indexer.PushIPLD(tx, c)
}
err = sds.Builder.WriteStateDiffObject(types2.StateRoots{
NewStateRoot: block.Root(),
OldStateRoot: parentRoot,
}, params, output, codeOutput)
}, params, output, ipldOutput)
// TODO this anti-pattern needs to be sorted out eventually
if err := tx.Submit(err); err != nil {
return fmt.Errorf("batch transaction submission failed: %s", err.Error())
@ -925,6 +858,45 @@ func (sds *Service) UnsubscribeWriteStatus(id rpc.ID) error {
return nil
}
// StreamCodeAndCodeHash subscription method for extracting all the codehash=>code mappings that exist in the trie at the provided height
func (sds *Service) StreamCodeAndCodeHash(blockNumber uint64, outChan chan<- types2.CodeAndCodeHash, quitChan chan<- bool) {
current := sds.BlockChain.GetBlockByNumber(blockNumber)
log.Info("sending code and codehash", "block height", blockNumber)
currentTrie, err := sds.BlockChain.StateCache().OpenTrie(current.Root())
if err != nil {
log.Error("error creating trie for block", "block height", current.Number(), "err", err)
close(quitChan)
return
}
it := currentTrie.NodeIterator([]byte{})
leafIt := trie.NewIterator(it)
go func() {
defer close(quitChan)
for leafIt.Next() {
select {
case <-sds.QuitChan:
return
default:
}
account := new(types.StateAccount)
if err := rlp.DecodeBytes(leafIt.Value, account); err != nil {
log.Error("error decoding state account", "err", err)
return
}
codeHash := common.BytesToHash(account.CodeHash)
code, err := sds.BlockChain.StateCache().ContractCode(common.Hash{}, codeHash)
if err != nil {
log.Error("error collecting contract code", "err", err)
return
}
outChan <- types2.CodeAndCodeHash{
Hash: codeHash,
Code: code,
}
}
}()
}
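For reference, a caller might consume this stream roughly as follows (a sketch only; sds, blockNumber and the log import are assumed to be in scope, and the service closes quitChan when iteration finishes):

// Illustrative only: drain every codehash => code pair at the given height.
codeChan := make(chan types2.CodeAndCodeHash)
quitChan := make(chan bool)
sds.StreamCodeAndCodeHash(blockNumber, codeChan, quitChan)
loop:
for {
	select {
	case pair := <-codeChan:
		log.Info("received contract code", "codehash", pair.Hash, "size", len(pair.Code))
	case <-quitChan:
		break loop
	}
}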
// WatchAddress performs one of following operations on the watched addresses in writeLoopParams and the db:
// add | remove | set | clear
func (sds *Service) WatchAddress(operation types2.OperationType, args []types2.WatchAddressArg) error {

View File

@ -16,6 +16,7 @@
package statediff_test
/*
import (
"bytes"
"math/big"
@ -437,3 +438,4 @@ func testGetSyncStatus(t *testing.T) {
}
}
}
*/

View File

@ -50,15 +50,15 @@ func TestSelfDestructChainGen(i int, block *core.BlockGen) {
signer := types.HomesteadSigner{}
switch i {
case 0:
// Block 1 is mined by Account1Addr
// Account1Addr creates a new contract
// Block 1 is mined by TestBankAddress
// TestBankAddress creates a new contract
block.SetCoinbase(TestBankAddress)
tx, _ := types.SignTx(types.NewContractCreation(0, big.NewInt(0), 1000000, big.NewInt(params.GWei), ContractCode), signer, TestBankKey)
ContractAddr = crypto.CreateAddress(TestBankAddress, 0)
block.AddTx(tx)
case 1:
// Block 2 is mined by Account1Addr
// Account1Addr self-destructs the contract
// Block 2 is mined by TestBankAddress
// TestBankAddress self-destructs the contract
block.SetCoinbase(TestBankAddress)
data := common.Hex2Bytes("43D726D6")
tx, _ := types.SignTx(types.NewTransaction(1, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.GWei), data), signer, TestBankKey)

View File

@ -22,6 +22,8 @@ import (
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
var _ statediff.Builder = &Builder{}
// Builder is a mock state diff builder
type Builder struct {
Args statediff.Args
@ -42,7 +44,7 @@ func (builder *Builder) BuildStateDiffObject(args statediff.Args, params statedi
}
// WriteStateDiffObject mock method
func (builder *Builder) WriteStateDiffObject(args sdtypes.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
func (builder *Builder) WriteStateDiffObject(args sdtypes.StateRoots, params statediff.Params, output sdtypes.StateNodeSink, iplds sdtypes.IPLDSink) error {
builder.StateRoots = args
builder.Params = params

View File

@ -35,11 +35,11 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
return nil, nil
}
func (sdi *StateDiffIndexer) PushStateNode(tx interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
func (sdi *StateDiffIndexer) PushStateNode(tx interfaces.Batch, stateNode sdtypes.StateLeafNode, headerID string) error {
return nil
}
func (sdi *StateDiffIndexer) PushCodeAndCodeHash(tx interfaces.Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error {
func (sdi *StateDiffIndexer) PushIPLD(tx interfaces.Batch, iplds sdtypes.IPLD) error {
return nil
}

View File

@ -42,6 +42,8 @@ var (
unexpectedOperation = "unexpected operation"
)
var _ statediff.IService = &MockStateDiffService{}
// MockStateDiffService is a mock state diff service
type MockStateDiffService struct {
sync.Mutex
@ -225,25 +227,6 @@ func (sds *MockStateDiffService) WriteLoop(chan core.ChainEvent) {
}
}
// StateTrieAt mock method
func (sds *MockStateDiffService) StateTrieAt(blockNumber uint64, params statediff.Params) (*statediff.Payload, error) {
currentBlock := sds.BlockChain.GetBlockByNumber(blockNumber)
log.Info(fmt.Sprintf("sending state trie at %d", blockNumber))
return sds.stateTrieAt(currentBlock, params)
}
func (sds *MockStateDiffService) stateTrieAt(block *types.Block, params statediff.Params) (*statediff.Payload, error) {
stateNodes, err := sds.Builder.BuildStateTrieObject(block)
if err != nil {
return nil, err
}
stateTrieRlp, err := rlp.EncodeToBytes(&stateNodes)
if err != nil {
return nil, err
}
return sds.newPayload(stateTrieRlp, block, params)
}
// Subscribe is used by the API to subscribe to the service loop
func (sds *MockStateDiffService) Subscribe(id rpc.ID, sub chan<- statediff.Payload, quitChan chan<- bool, params statediff.Params) {
// Subscription type is defined as the hash of the rlp-serialized subscription params

View File

@ -29,54 +29,77 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
var (
emptyStorage = make([]sdtypes.StorageNode, 0)
emptyStorage = make([]sdtypes.StorageLeafNode, 0)
block0, block1 *types.Block
minerLeafKey = test_helpers.AddressToLeafKey(common.HexToAddress("0x0"))
account1, _ = rlp.EncodeToBytes(&types.StateAccount{
account1 = &types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(10000),
CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(),
Root: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
})
}
account1RLP, _ = rlp.EncodeToBytes(account1)
account1LeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("3926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2"),
account1,
account1RLP,
})
minerAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
minerAccount = &types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(2000002625000000000),
CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(),
Root: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
})
}
minerAccountRLP, _ = rlp.EncodeToBytes(minerAccount)
minerAccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("3380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"),
minerAccount,
minerAccountRLP,
})
bankAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
bankAccount = &types.StateAccount{
Nonce: uint64(1),
Balance: big.NewInt(1999978999999990000),
CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(),
Root: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
})
}
bankAccountRLP, _ = rlp.EncodeToBytes(bankAccount)
bankAccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
common.Hex2Bytes("30bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
bankAccount,
bankAccountRLP,
})
mockTotalDifficulty = big.NewInt(1337)
parameters = statediff.Params{
IntermediateStateNodes: false,
IncludeTD: true,
IncludeBlock: true,
IncludeReceipts: true,
}
block1BranchRootNode, _ = rlp.EncodeToBytes(&[]interface{}{
crypto.Keccak256(bankAccountLeafNode),
[]byte{},
[]byte{},
[]byte{},
[]byte{},
crypto.Keccak256(minerAccountLeafNode),
[]byte{},
[]byte{},
[]byte{},
[]byte{},
[]byte{},
[]byte{},
[]byte{},
[]byte{},
crypto.Keccak256(account1LeafNode),
[]byte{},
[]byte{},
})
)
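block1BranchRootNode above is the RLP of a full 17-item branch node (16 child slots plus an empty value slot), so its Keccak-256 hash should reproduce block 1's state root. A quick sanity check in the spirit of the mainnet builder test (illustrative; t, bytes and crypto assumed in scope):

// Illustrative only: the hand-built root branch node must hash to block 1's state root.
if !bytes.Equal(crypto.Keccak256(block1BranchRootNode), block1.Root().Bytes()) {
	t.Fatalf("constructed root branch node does not hash to the block 1 state root")
}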
func init() {
@ -106,27 +129,51 @@ func testSubscriptionAPI(t *testing.T) {
expectedStateDiff := sdtypes.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
Nodes: []sdtypes.StateNode{
Nodes: []sdtypes.StateLeafNode{
{
Path: []byte{'\x05'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: minerAccount,
LeafKey: minerLeafKey,
NodeValue: minerAccountLeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(minerAccountLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
{
Path: []byte{'\x0e'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: account1,
LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1LeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(account1LeafNode)).String(),
},
StorageDiff: emptyStorage,
},
{
Path: []byte{'\x00'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: bankAccount,
LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountLeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(bankAccountLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
},
IPLDs: []sdtypes.IPLD{
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1BranchRootNode)).String(),
Content: block1BranchRootNode,
},
{
Content: minerAccountLeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(minerAccountLeafNode)).String(),
},
{
Content: account1LeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(account1LeafNode)).String(),
},
{
Content: bankAccountLeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(bankAccountLeafNode)).String(),
},
},
}
@ -198,27 +245,51 @@ func testHTTPAPI(t *testing.T) {
expectedStateDiff := sdtypes.StateObject{
BlockNumber: block1.Number(),
BlockHash: block1.Hash(),
Nodes: []sdtypes.StateNode{
Nodes: []sdtypes.StateLeafNode{
{
Path: []byte{'\x05'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: minerAccount,
LeafKey: minerLeafKey,
NodeValue: minerAccountLeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(minerAccountLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
{
Path: []byte{'\x0e'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: account1,
LeafKey: test_helpers.Account1LeafKey,
NodeValue: account1LeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(account1LeafNode)).String(),
},
StorageDiff: emptyStorage,
},
{
Path: []byte{'\x00'},
NodeType: sdtypes.Leaf,
Removed: false,
AccountWrapper: sdtypes.AccountWrapper{
Account: bankAccount,
LeafKey: test_helpers.BankLeafKey,
NodeValue: bankAccountLeafNode,
StorageNodes: emptyStorage,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(bankAccountLeafNode)).String(),
},
StorageDiff: emptyStorage,
},
},
IPLDs: []sdtypes.IPLD{
{
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1BranchRootNode)).String(),
Content: block1BranchRootNode,
},
{
Content: minerAccountLeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(minerAccountLeafNode)).String(),
},
{
Content: account1LeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(account1LeafNode)).String(),
},
{
Content: bankAccountLeafNode,
CID: ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(bankAccountLeafNode)).String(),
},
},
}

View File

@ -20,60 +20,12 @@
package trie_helpers
import (
"fmt"
"sort"
"strings"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
)
// CheckKeyType checks what type of key we have
func CheckKeyType(elements []interface{}) (types.NodeType, error) {
if len(elements) > 2 {
return types.Branch, nil
}
if len(elements) < 2 {
return types.Unknown, fmt.Errorf("node cannot be less than two elements in length")
}
switch elements[0].([]byte)[0] / 16 {
case '\x00':
return types.Extension, nil
case '\x01':
return types.Extension, nil
case '\x02':
return types.Leaf, nil
case '\x03':
return types.Leaf, nil
default:
return types.Unknown, fmt.Errorf("unknown hex prefix")
}
}
// ResolveNode return the state diff node pointed by the iterator.
func ResolveNode(it trie.NodeIterator, trieDB *trie.Database) (types.StateNode, []interface{}, error) {
nodePath := make([]byte, len(it.Path()))
copy(nodePath, it.Path())
node, err := trieDB.Node(it.Hash())
if err != nil {
return types.StateNode{}, nil, err
}
var nodeElements []interface{}
if err = rlp.DecodeBytes(node, &nodeElements); err != nil {
return types.StateNode{}, nil, err
}
ty, err := CheckKeyType(nodeElements)
if err != nil {
return types.StateNode{}, nil, err
}
return types.StateNode{
NodeType: ty,
Path: nodePath,
NodeValue: node,
}, nodeElements, nil
}
// SortKeys sorts the keys in the account map
func SortKeys(data types.AccountMap) []string {
keys := make([]string, 0, len(data))

View File

@ -32,75 +32,50 @@ type StateRoots struct {
type StateObject struct {
BlockNumber *big.Int `json:"blockNumber" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Nodes []StateNode `json:"nodes" gencodec:"required"`
CodeAndCodeHashes []CodeAndCodeHash `json:"codeMapping"`
Nodes []StateLeafNode `json:"nodes" gencodec:"required"`
IPLDs []IPLD `json:"iplds"`
}
// AccountMap is a mapping of hex encoded path => account wrapper
type AccountMap map[string]AccountWrapper
// AccountWrapper is used to temporary associate the unpacked node with its raw values
// AccountWrapper is used to temporarily associate the unpacked node with its raw values
type AccountWrapper struct {
Account *types.StateAccount
NodeType NodeType
Path []byte
NodeValue []byte
LeafKey []byte
CID string
}
// NodeType for explicitly setting type of node
type NodeType string
const (
Unknown NodeType = "Unknown"
Branch NodeType = "Branch"
Extension NodeType = "Extension"
Leaf NodeType = "Leaf"
Removed NodeType = "Removed" // used to represent paths which have been emptied
)
func (n NodeType) Int() int {
switch n {
case Branch:
return 0
case Extension:
return 1
case Leaf:
return 2
case Removed:
return 3
default:
return -1
}
// StateLeafNode holds the data for a single state diff leaf node
type StateLeafNode struct {
Removed bool
AccountWrapper AccountWrapper
StorageDiff []StorageLeafNode
}
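A hypothetical construction of a StateLeafNode, mirroring the fixtures built in the test files earlier in this diff (addr and leafNodeRLP are placeholders; common, core/types, crypto, big and the indexer ipld package are assumed imports):

// Illustrative only: wrap a decoded account, its leaf key and leaf-node CID,
// plus any storage leaf diffs, into the new leaf-centric diff shape.
leaf := StateLeafNode{
	Removed: false,
	AccountWrapper: AccountWrapper{
		Account: &types.StateAccount{
			Nonce:    1,
			Balance:  big.NewInt(1000),
			CodeHash: common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes(), // empty code hash
			Root:     common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),         // empty storage root
		},
		LeafKey: crypto.Keccak256(addr.Bytes()),
		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(leafNodeRLP)).String(),
	},
	StorageDiff: []StorageLeafNode{},
}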
// StateNode holds the data for a single state diff node
type StateNode struct {
NodeType NodeType `json:"nodeType" gencodec:"required"`
Path []byte `json:"path" gencodec:"required"`
NodeValue []byte `json:"value" gencodec:"required"`
StorageNodes []StorageNode `json:"storage"`
LeafKey []byte `json:"leafKey"`
// StorageLeafNode holds the data for a single storage diff leaf node
type StorageLeafNode struct {
Removed bool
Value []byte
LeafKey []byte
CID string
}
// StorageNode holds the data for a single storage diff node
type StorageNode struct {
NodeType NodeType `json:"nodeType" gencodec:"required"`
Path []byte `json:"path" gencodec:"required"`
NodeValue []byte `json:"value" gencodec:"required"`
LeafKey []byte `json:"leafKey"`
// IPLD holds a cid:content pair, e.g. for codehash to code mappings or for intermediate node IPLD objects
type IPLD struct {
CID string
Content []byte
}
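And the matching IPLD pair for the same raw node is produced the way the test expectations above do it (nodeRLP again a placeholder):

// Illustrative only: a cid:content pair for a raw trie node or contract code blob.
nodeIPLD := IPLD{
	CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(nodeRLP)).String(),
	Content: nodeRLP,
}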
// CodeAndCodeHash struct for holding codehash => code mappings
// we can't use an actual map because they are not rlp serializable
// CodeAndCodeHash struct to hold codehash => code mappings
type CodeAndCodeHash struct {
Hash common.Hash `json:"codeHash"`
Code []byte `json:"code"`
Hash common.Hash
Code []byte
}
type StateNodeSink func(StateNode) error
type StorageNodeSink func(StorageNode) error
type CodeSink func(CodeAndCodeHash) error
type StateNodeSink func(node StateLeafNode) error
type StorageNodeSink func(node StorageLeafNode) error
type IPLDSink func(IPLD) error
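These sink types are what Service.writeStateDiff wires to the indexer earlier in this diff; a minimal pair of sinks (indexer, tx and headerID assumed in scope) looks roughly like:

// Illustrative only: forward state leaf nodes and IPLDs from the builder to the indexer.
output := func(node StateLeafNode) error {
	return indexer.PushStateNode(tx, node, headerID)
}
ipldOutput := func(c IPLD) error {
	return indexer.PushIPLD(tx, c)
}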
// OperationType for type of WatchAddress operation
type OperationType string