remove btc stuff

Ian Norden 2020-08-31 10:42:01 -05:00
parent 3fd2af61e8
commit 558599dd32
87 changed files with 0 additions and 11269 deletions

View File

@ -1,27 +0,0 @@
dist: trusty
language: go
go:
- 1.12
services:
- postgresql
addons:
ssh_known_hosts: arch1.vdb.to
postgresql: '11.2'
go_import_path: github.com/vulcanize/ipfs-blockchain-watcher
before_install:
- openssl aes-256-cbc -K $encrypted_e1db309e8776_key -iv $encrypted_e1db309e8776_iv -in temp_rsa.enc -out temp_rsa -d
- eval "$(ssh-agent -s)"
- chmod 600 temp_rsa
- ssh-add temp_rsa
- ssh -4 -fNL 8545:localhost:8545 geth@arch1.vdb.to
- make installtools
- bash ./scripts/install-postgres-11.sh
- curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
- echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
- sudo apt-get update && sudo apt-get install yarn
script:
- env GO111MODULE=on make test
- env GO111MODULE=on make integrationtest
notifications:
email: false

View File

@ -1,110 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/resync"
v "github.com/vulcanize/ipfs-blockchain-watcher/version"
)
// resyncCmd represents the resync command
var resyncCmd = &cobra.Command{
Use: "resync",
Short: "Resync historical data",
Long: `Use this command to fill in sections of missing data in the ipfs-blockchain-watcher database`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
rsyncCmdCommand()
},
}
func rsyncCmdCommand() {
logWithCommand.Infof("running ipfs-blockchain-watcher version: %s", v.VersionWithMeta)
logWithCommand.Debug("loading resync configuration variables")
rConfig, err := resync.NewConfig()
if err != nil {
logWithCommand.Fatal(err)
}
logWithCommand.Infof("resync config: %+v", rConfig)
logWithCommand.Debug("initializing new resync service")
rService, err := resync.NewResyncService(rConfig)
if err != nil {
logWithCommand.Fatal(err)
}
logWithCommand.Info("starting up resync process")
if err := rService.Resync(); err != nil {
logWithCommand.Fatal(err)
}
logWithCommand.Infof("%s %s resync finished", rConfig.Chain.String(), rConfig.ResyncType.String())
}
func init() {
rootCmd.AddCommand(resyncCmd)
// flags
resyncCmd.PersistentFlags().String("resync-chain", "", "which chain to support, options are currently Ethereum or Bitcoin.")
resyncCmd.PersistentFlags().String("resync-type", "", "which type of data to resync")
resyncCmd.PersistentFlags().Int("resync-start", 0, "block height to start resync")
resyncCmd.PersistentFlags().Int("resync-stop", 0, "block height to stop resync")
resyncCmd.PersistentFlags().Int("resync-batch-size", 0, "data fetching batch size")
resyncCmd.PersistentFlags().Int("resync-batch-number", 0, "how many goroutines to fetch data concurrently")
resyncCmd.PersistentFlags().Bool("resync-clear-old-cache", false, "if true, clear out old data of the provided type within the resync range before resyncing")
resyncCmd.PersistentFlags().Bool("resync-reset-validation", false, "if true, reset times_validated to 0")
resyncCmd.PersistentFlags().Int("resync-timeout", 15, "timeout used for resync http requests")
resyncCmd.PersistentFlags().String("btc-http-path", "", "http url for bitcoin node")
resyncCmd.PersistentFlags().String("btc-password", "", "password for btc node")
resyncCmd.PersistentFlags().String("btc-username", "", "username for btc node")
resyncCmd.PersistentFlags().String("btc-node-id", "", "btc node id")
resyncCmd.PersistentFlags().String("btc-client-name", "", "btc client name")
resyncCmd.PersistentFlags().String("btc-genesis-block", "", "btc genesis block hash")
resyncCmd.PersistentFlags().String("btc-network-id", "", "btc network id")
resyncCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
resyncCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
resyncCmd.PersistentFlags().String("eth-client-name", "", "eth client name")
resyncCmd.PersistentFlags().String("eth-genesis-block", "", "eth genesis block hash")
resyncCmd.PersistentFlags().String("eth-network-id", "", "eth network id")
// and their bindings
viper.BindPFlag("resync.chain", resyncCmd.PersistentFlags().Lookup("resync-chain"))
viper.BindPFlag("resync.type", resyncCmd.PersistentFlags().Lookup("resync-type"))
viper.BindPFlag("resync.start", resyncCmd.PersistentFlags().Lookup("resync-start"))
viper.BindPFlag("resync.stop", resyncCmd.PersistentFlags().Lookup("resync-stop"))
viper.BindPFlag("resync.batchSize", resyncCmd.PersistentFlags().Lookup("resync-batch-size"))
viper.BindPFlag("resync.batchNumber", resyncCmd.PersistentFlags().Lookup("resync-batch-number"))
viper.BindPFlag("resync.clearOldCache", resyncCmd.PersistentFlags().Lookup("resync-clear-old-cache"))
viper.BindPFlag("resync.resetValidation", resyncCmd.PersistentFlags().Lookup("resync-reset-validation"))
viper.BindPFlag("resync.timeout", resyncCmd.PersistentFlags().Lookup("resync-timeout"))
viper.BindPFlag("bitcoin.httpPath", resyncCmd.PersistentFlags().Lookup("btc-http-path"))
viper.BindPFlag("bitcoin.pass", resyncCmd.PersistentFlags().Lookup("btc-password"))
viper.BindPFlag("bitcoin.user", resyncCmd.PersistentFlags().Lookup("btc-username"))
viper.BindPFlag("bitcoin.nodeID", resyncCmd.PersistentFlags().Lookup("btc-node-id"))
viper.BindPFlag("bitcoin.clientName", resyncCmd.PersistentFlags().Lookup("btc-client-name"))
viper.BindPFlag("bitcoin.genesisBlock", resyncCmd.PersistentFlags().Lookup("btc-genesis-block"))
viper.BindPFlag("bitcoin.networkID", resyncCmd.PersistentFlags().Lookup("btc-network-id"))
viper.BindPFlag("ethereum.httpPath", resyncCmd.PersistentFlags().Lookup("eth-http-path"))
viper.BindPFlag("ethereum.nodeID", resyncCmd.PersistentFlags().Lookup("eth-node-id"))
viper.BindPFlag("ethereum.clientName", resyncCmd.PersistentFlags().Lookup("eth-client-name"))
viper.BindPFlag("ethereum.genesisBlock", resyncCmd.PersistentFlags().Lookup("eth-genesis-block"))
viper.BindPFlag("ethereum.networkID", resyncCmd.PersistentFlags().Lookup("eth-network-id"))
}

View File

@ -1,5 +0,0 @@
-- +goose Up
CREATE SCHEMA btc;
-- +goose Down
DROP SCHEMA btc;

View File

@ -1,17 +0,0 @@
-- +goose Up
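-- header_cids indexes bitcoin block headers; cid and mh_key point at the raw header IPLD stored in public.blocks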
CREATE TABLE btc.header_cids (
id SERIAL PRIMARY KEY,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
timestamp NUMERIC NOT NULL,
bits BIGINT NOT NULL,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
times_validated INTEGER NOT NULL DEFAULT 1,
UNIQUE (block_number, block_hash)
);
-- +goose Down
DROP TABLE btc.header_cids;

View File

@ -1,14 +0,0 @@
-- +goose Up
CREATE TABLE btc.transaction_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES btc.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
index INTEGER NOT NULL,
tx_hash VARCHAR(66) NOT NULL UNIQUE,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
segwit BOOL NOT NULL,
witness_hash VARCHAR(66)
);
-- +goose Down
DROP TABLE btc.transaction_cids;

View File

@ -1,15 +0,0 @@
-- +goose Up
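-- tx_outputs stores each output's value, locking script (pk_script), decoded script class, extracted addresses, and required signature count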
CREATE TABLE btc.tx_outputs (
id SERIAL PRIMARY KEY,
tx_id INTEGER NOT NULL REFERENCES btc.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
index INTEGER NOT NULL,
value BIGINT NOT NULL,
pk_script BYTEA NOT NULL,
script_class INTEGER NOT NULL,
addresses VARCHAR(66)[],
required_sigs INTEGER NOT NULL,
UNIQUE (tx_id, index)
);
-- +goose Down
DROP TABLE btc.tx_outputs;

View File

@ -1,14 +0,0 @@
-- +goose Up
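-- tx_inputs stores each input's signature script, witness data, and the outpoint (previous tx hash + output index) it spends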
CREATE TABLE btc.tx_inputs (
id SERIAL PRIMARY KEY,
tx_id INTEGER NOT NULL REFERENCES btc.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
index INTEGER NOT NULL,
witness VARCHAR[],
sig_script BYTEA NOT NULL,
outpoint_tx_hash VARCHAR(66) NOT NULL,
outpoint_index NUMERIC NOT NULL,
UNIQUE (tx_id, index)
);
-- +goose Down
DROP TABLE btc.tx_inputs;

View File

@ -1,132 +0,0 @@
# ipfs-blockchain-watcher architecture
1. [Processes](#processes)
1. [Command](#command)
1. [Configuration](#config)
1. [Database](#database)
1. [APIs](#apis)
1. [Resync](#resync)
1. [IPFS Considerations](#ipfs-considerations)
## Processes
ipfs-blockchain-watcher is a [service](../pkg/watch/service.go#L61) composed of the following interfaces:
* [Payload Fetcher](../pkg/shared/interfaces.go#L29): Fetches raw chain data from a half-duplex endpoint (HTTP/IPC), used for historical data fetching. ([BTC](../pkg/btc/payload_fetcher.go), [ETH](../pkg/eth/payload_fetcher.go)).
* [Payload Streamer](../pkg/shared/interfaces.go#L24): Streams raw chain data from a full-duplex endpoint (WebSocket/IPC), used for syncing data at the head of the chain in real-time. ([BTC](../pkg/btc/http_streamer.go), [ETH](../pkg/eth/streamer.go)).
* [Payload Converter](../pkg/shared/interfaces.go#L34): Converts raw chain data into an intermediary form prepared for IPFS publishing. ([BTC](../pkg/btc/converter.go), [ETH](../pkg/eth/converter.go)).
* [IPLD Publisher](../pkg/shared/interfaces.go#L39): Publishes the converted data to IPFS, returning their CIDs and associated metadata for indexing. ([BTC](../pkg/btc/publisher.go), [ETH](../pkg/eth/publisher.go)).
* [CID Indexer](../pkg/shared/interfaces.go#L44): Indexes CIDs in Postgres with their associated metadata. This metadata is chain specific and selected based on utility. ([BTC](../pkg/btc/indexer.go), [ETH](../pkg/eth/indexer.go)).
* [CID Retriever](../pkg/shared/interfaces.go#L54): Retrieves CIDs from Postgres by searching against their associated metadata; used to look up the data needed to serve API requests/subscriptions. ([BTC](../pkg/btc/retriever.go), [ETH](../pkg/eth/retriever.go)).
* [IPLD Fetcher](../pkg/shared/interfaces.go#L62): Fetches the IPLDs needed to service API requests/subscriptions from IPFS using the retrieved CIDs; can route through an IPFS block-exchange to search for objects that are not directly available. ([BTC](../pkg/btc/ipld_fetcher.go), [ETH](../pkg/eth/ipld_fetcher.go)).
* [Response Filterer](../pkg/shared/interfaces.go#L49): Filters converted data payloads served to API subscriptions according to the subscriber-provided parameters. ([BTC](../pkg/btc/filterer.go), [ETH](../pkg/eth/filterer.go)).
* [API](https://github.com/ethereum/go-ethereum/blob/master/rpc/types.go#L31): Exposes RPC methods for clients to interface with the data. Chain-specific APIs should aim to recapitulate as much of the native API as possible. ([VDB](../pkg/api.go), [ETH](../pkg/eth/api.go)).
Adapting the service to a new chain is done by creating underlying types that satisfy these interfaces for the specifics of that chain.
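The sketch below condenses a few of these interfaces into a single Go snippet to show the shape a new chain implementation has to fill in. The method signatures are paraphrased from the BTC implementations in this repository, but the placeholder types (declared here as empty interfaces) simplify the real definitions in `pkg/shared`, so treat it as illustrative rather than authoritative:

```go
// Simplified sketch of the chain-agnostic interfaces; the real definitions
// live in pkg/shared/interfaces.go and use richer types than these stand-ins.
package shared

// Placeholder payload types, reduced to empty interfaces for brevity.
type (
    RawChainData         interface{} // raw data as fetched/streamed from a node
    ConvertedData        interface{} // intermediary form prepared for IPFS publishing
    IPLDs                interface{} // filtered IPLD objects returned to subscribers
    CIDsForFetching      interface{} // indexed CIDs plus metadata used for lookups
    SubscriptionSettings interface{} // subscriber-provided filter parameters
)

// ClientSubscription mirrors the subscription handle returned by Stream.
type ClientSubscription interface {
    Unsubscribe()
    Err() <-chan error
}

// PayloadStreamer streams raw chain data at the head of the chain.
type PayloadStreamer interface {
    Stream(payloadChan chan RawChainData) (ClientSubscription, error)
}

// PayloadConverter converts raw chain data into the intermediary form.
type PayloadConverter interface {
    Convert(payload RawChainData) (ConvertedData, error)
}

// ResponseFilterer filters converted payloads against subscription settings.
type ResponseFilterer interface {
    Filter(filter SubscriptionSettings, payload ConvertedData) (IPLDs, error)
}

// CIDRetriever looks up indexed CIDs by their metadata for a given block.
type CIDRetriever interface {
    Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDsForFetching, bool, error)
}
```
A chain package then supplies concrete types (e.g. `btc.PayloadConverter`, `btc.CIDRetriever`) that satisfy these interfaces, and the watch service is assembled from whichever set matches the configured chain.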
The service uses these interfaces to operate in any combination of three modes: `sync`, `serve`, and `backfill`.
* Sync: Streams raw chain data at the head, converts and publishes it to IPFS, and indexes the resulting set of CIDs in Postgres with useful metadata.
* BackFill: Automatically searches for and detects gaps in the DB; fetches, converts, publishes, and indexes the data to fill these gaps.
* Serve: Opens up IPC, HTTP, and WebSocket servers on top of the ipfs-blockchain-watcher DB and any concurrent sync and/or backfill processes.
These three modes are all operated through a single vulcanizeDB command: `watch`
## Command
Usage: `./ipfs-blockchain-watcher watch --config={config.toml}`
Configuration can also be done through CLI options and/or environmental variables.
CLI options can be found using `./ipfs-blockchain-watcher watch --help`.
## Config
Below is the set of universal config parameters for the ipfs-blockchain-watcher command, in .toml form, with the respective environmental variables commented to the side.
This set of parameters needs to be set no matter the chain type.
```toml
[database]
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD
[watcher]
chain = "bitcoin" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
wsPath = "127.0.0.1:8082" # $SUPERNODE_WS_PATH
httpPath = "127.0.0.1:8083" # $SUPERNODE_HTTP_PATH
sync = true # $SUPERNODE_SYNC
workers = 1 # $SUPERNODE_WORKERS
backFill = true # $SUPERNODE_BACKFILL
frequency = 45 # $SUPERNODE_FREQUENCY
batchSize = 1 # $SUPERNODE_BATCH_SIZE
batchNumber = 50 # $SUPERNODE_BATCH_NUMBER
timeout = 300 # $HTTP_TIMEOUT
validationLevel = 1 # $SUPERNODE_VALIDATION_LEVEL
```
Additional parameters need to be set depending on the specific chain.
For Bitcoin:
```toml
[bitcoin]
wsPath = "127.0.0.1:8332" # $BTC_WS_PATH
httpPath = "127.0.0.1:8332" # $BTC_HTTP_PATH
pass = "password" # $BTC_NODE_PASSWORD
user = "username" # $BTC_NODE_USER
nodeID = "ocd0" # $BTC_NODE_ID
clientName = "Omnicore" # $BTC_CLIENT_NAME
genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" # $BTC_GENESIS_BLOCK
networkID = "0xD9B4BEF9" # $BTC_NETWORK_ID
```
For Ethereum:
```toml
[ethereum]
wsPath = "127.0.0.1:8546" # $ETH_WS_PATH
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
nodeID = "arch1" # $ETH_NODE_ID
clientName = "Geth" # $ETH_CLIENT_NAME
genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK
networkID = "1" # $ETH_NETWORK_ID
```
## Database
Currently, ipfs-blockchain-watcher persists all data to a single Postgres database. The migrations for this DB can be found [here](../db/migrations).
Chain-specific data is populated under a chain-specific schema (e.g. `eth` and `btc`), while shared data, such as the IPFS blocks table, is populated under the `public` schema.
Subsequent watchers which act on the raw chain data should build and populate their own schemas or separate databases entirely.
In the future, the database architecture will move to a foreign-table-based design: a single db holds the shared data, while each watcher uses its own database and accesses the shared data through foreign tables. Isolating watchers in their own databases will prevent complications and conflicts between watcher db migrations.
## APIs
ipfs-blockchain-watcher provides multiple types of APIs by which to interface with its data.
More detailed information on the APIs can be found [here](apis.md).
## Resync
A separate command `resync` is available for directing the resyncing of data within specified ranges.
This is useful if there is a need to re-validate a range of data using a new source or clean out bad/deprecated data.
More detailed information on this command can be found [here](resync.md).
## IPFS Considerations
Currently, the IPLD Publisher and Fetcher can either use internalized IPFS processes that interface with a local IPFS repository, or interface directly with the backing Postgres database.
Both of these options circumvent the need to run a full IPFS daemon with a [go-ipld-eth](https://github.com/ipfs/go-ipld-eth) or [go-ipld-btc](https://github.com/ipld/go-ipld-btc) plugin.
The former approach can lead to lock contention on the IPFS repo if another IPFS process is configured and running at the same $IPFS_PATH, and it requires a locally configured IPFS repository. The latter bypasses the need for a configured IPFS repository/$IPFS_PATH and allows all Postgres write operations at a given block height to occur in a single transaction; the only disadvantage is that, by not moving through an IPFS node intermediary, the direct ability to reach out to the block exchange for data not found locally is lost.
Once go-ipld-eth and go-ipld-btc have been updated to work with a modern version of PG-IPFS, an additional option will be provided to direct all publishing and fetching of IPLD objects through a remote IPFS daemon.
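As a rough, hypothetical illustration of that trade-off, a publisher backend might be selected at startup along the lines below; the interface, constructor, and mode names are illustrative stand-ins rather than actual code in this repository:

```go
// Hypothetical sketch only: neither this constructor nor the mode names exist
// in this repository; they just illustrate the two publishing paths.
package watch

import "fmt"

// IPLDPublisher stands in for the publisher interface described above.
type IPLDPublisher interface {
    Publish(blocks map[string][]byte) error // CID key -> raw IPLD block
}

// ipfsRepoPublisher writes through internalized IPFS processes backed by a
// local IPFS repository at $IPFS_PATH (subject to repo lock contention).
type ipfsRepoPublisher struct{ repoPath string }

// directPGPublisher writes IPLD blocks straight into the Postgres-backed
// blockstore (public.blocks), allowing one transaction per block height.
type directPGPublisher struct{ connStr string }

func (p ipfsRepoPublisher) Publish(map[string][]byte) error { return nil } // elided
func (p directPGPublisher) Publish(map[string][]byte) error { return nil } // elided

// newPublisher chooses a backend: "ipfs" keeps the block-exchange fallback,
// while "postgres" keeps all writes for a height in a single transaction.
func newPublisher(mode, target string) (IPLDPublisher, error) {
    switch mode {
    case "ipfs":
        return ipfsRepoPublisher{repoPath: target}, nil
    case "postgres":
        return directPGPublisher{connStr: target}, nil
    default:
        return nil, fmt.Errorf("unrecognized publisher mode: %s", mode)
    }
}
```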

View File

@ -1,70 +0,0 @@
## ipfs-blockchain-watcher resync
The `resync` command is made available for directing the resyncing of ipfs-blockchain-watcher data within specified ranges.
It also contains utilities for cleaning out old data and resetting the validation level of data.
### Rationale
Manual resyncing of data can be used to re-validate data within specific ranges using a new source.
The option to remove data may be needed to clear out bad/deprecated data or to prepare for breaking changes to the db schemas.
Resetting the validation level of data is useful for designating ranges of data for resyncing by an ongoing ipfs-blockchain-watcher
backfill process.
### Command
Usage: `./ipfs-blockchain-watcher resync --config={config.toml}`
Configuration can also be done through CLI options and/or environmental variables.
CLI options can be found using `./ipfs-blockchain-watcher resync --help`.
### Config
Below is the set of universal config parameters for the resync command, in .toml form, with the respective environmental variables commented to the side.
This set of parameters needs to be set no matter the chain type.
```toml
[database]
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD
[resync]
chain = "ethereum" # $RESYNC_CHAIN
type = "state" # $RESYNC_TYPE
start = 0 # $RESYNC_START
stop = 1000 # $RESYNC_STOP
batchSize = 10 # $RESYNC_BATCH_SIZE
batchNumber = 100 # $RESYNC_BATCH_NUMBER
timeout = 300 # $HTTP_TIMEOUT
clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE
resetValidation = true # $RESYNC_RESET_VALIDATION
```
Additional parameters need to be set depending on the specific chain.
For Bitcoin:
```toml
[bitcoin]
httpPath = "127.0.0.1:8332" # $BTC_HTTP_PATH
pass = "password" # $BTC_NODE_PASSWORD
user = "username" # $BTC_NODE_USER
nodeID = "ocd0" # $BTC_NODE_ID
clientName = "Omnicore" # $BTC_CLIENT_NAME
genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" # $BTC_GENESIS_BLOCK
networkID = "0xD9B4BEF9" # $BTC_NETWORK_ID
```
For Ethereum:
```toml
[ethereum]
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
nodeID = "arch1" # $ETH_NODE_ID
clientName = "Geth" # $ETH_CLIENT_NAME
genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK
networkID = "1" # $ETH_NETWORK_ID
```

View File

@ -1,16 +0,0 @@
These are the components of a VulcanizeDB Watcher:
* Data Fetcher/Streamer sources:
* go-ethereum
* bitcoind
* btcd
* IPFS
* Transformers contain:
* converter
* publisher
* indexer
* Endpoints contain:
* api
* backend
* filterer
* retriever
* ipld_server

View File

@ -1,48 +0,0 @@
[database]
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD
[database.sync]
maxIdle = 1
[database.backFill]
maxIdle = 5
[log]
level = "debug" # $LOGRUS_LEVEL
[resync]
chain = "bitcoin" # $RESYNC_CHAIN
type = "full" # $RESYNC_TYPE
start = 0 # $RESYNC_START
stop = 0 # $RESYNC_STOP
batchSize = 5 # $RESYNC_BATCH_SIZE
batchNumber = 5 # $RESYNC_BATCH_NUMBER
clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE
resetValidation = true # $RESYNC_RESET_VALIDATION
[watcher]
chain = "bitcoin" # $SUPERNODE_CHAIN
server = true # $SUPERNODE_SERVER
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SUPERNODE_IPC_PATH
wsPath = "127.0.0.1:8082" # $SUPERNODE_WS_PATH
httpPath = "127.0.0.1:8083" # $SUPERNODE_HTTP_PATH
sync = true # $SUPERNODE_SYNC
workers = 1 # $SUPERNODE_WORKERS
backFill = true # $SUPERNODE_BACKFILL
frequency = 45 # $SUPERNODE_FREQUENCY
batchSize = 5 # $SUPERNODE_BATCH_SIZE
batchNumber = 5 # $SUPERNODE_BATCH_NUMBER
validationLevel = 1 # $SUPERNODE_VALIDATION_LEVEL
[bitcoin]
wsPath = "127.0.0.1:8332" # $BTC_WS_PATH
httpPath = "127.0.0.1:8332" # $BTC_HTTP_PATH
pass = "password" # $BTC_NODE_PASSWORD
user = "username" # $BTC_NODE_USER
nodeID = "ocd0" # $BTC_NODE_ID
clientName = "Omnicore" # $BTC_CLIENT_NAME
genesisBlock = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" # $BTC_GENESIS_BLOCK
networkID = "0xD9B4BEF9" # $BTC_NETWORK_ID

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestBTCWatcher(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "BTC IPFS Watcher Suite Test")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

View File

@ -1,302 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"database/sql"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils"
)
// CIDRetriever satisfies the CIDRetriever interface for bitcoin
type CIDRetriever struct {
db *postgres.DB
}
// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
func NewCIDRetriever(db *postgres.DB) *CIDRetriever {
return &CIDRetriever{
db: db,
}
}
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (bcr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64
err := bcr.db.Get(&blockNumber, "SELECT block_number FROM btc.header_cids ORDER BY block_number ASC LIMIT 1")
return blockNumber, err
}
// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (bcr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64
err := bcr.db.Get(&blockNumber, "SELECT block_number FROM btc.header_cids ORDER BY block_number DESC LIMIT 1 ")
return blockNumber, err
}
// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
func (bcr *CIDRetriever) Retrieve(filter shared.SubscriptionSettings, blockNumber int64) ([]shared.CIDsForFetching, bool, error) {
streamFilter, ok := filter.(*SubscriptionSettings)
if !ok {
return nil, true, fmt.Errorf("btc retriever expected filter type %T got %T", &SubscriptionSettings{}, filter)
}
log.Debug("retrieving cids")
// Begin new db tx
tx, err := bcr.db.Beginx()
if err != nil {
return nil, true, err
}
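// when this method returns, roll the tx back if an error or panic occurred, otherwise commit it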
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// Retrieve cached header CIDs
headers, err := bcr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
log.Error("header cid retrieval error")
return nil, true, err
}
cws := make([]shared.CIDsForFetching, len(headers))
empty := true
for i, header := range headers {
cw := new(CIDWrapper)
cw.BlockNumber = big.NewInt(blockNumber)
if !streamFilter.HeaderFilter.Off {
cw.Header = header
empty = false
}
// Retrieve cached trx CIDs
if !streamFilter.TxFilter.Off {
cw.Transactions, err = bcr.RetrieveTxCIDs(tx, streamFilter.TxFilter, header.ID)
if err != nil {
log.Error("transaction cid retrieval error")
return nil, true, err
}
if len(cw.Transactions) > 0 {
empty = false
}
}
cws[i] = cw
}
return cws, empty, err
}
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
func (bcr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]HeaderModel, 0)
pgStr := `SELECT * FROM btc.header_cids
WHERE block_number = $1`
return headers, tx.Select(&headers, pgStr, blockNumber)
}
// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
// also returns the ids for the returned transaction cids
func (bcr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]TxModel, error) {
log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3)
results := make([]TxModel, 0)
id := 1
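// id tracks the next positional parameter index ($1, $2, ...) as optional filter clauses are appended below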
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
transaction_cids.segwit, transaction_cids.witness_hash, transaction_cids.index
FROM btc.transaction_cids, btc.header_cids, btc.tx_inputs, btc.tx_outputs
WHERE transaction_cids.header_id = header_cids.id
AND tx_inputs.tx_id = transaction_cids.id
AND tx_outputs.tx_id = transaction_cids.id
AND header_cids.id = $%d`, id)
args = append(args, headerID)
id++
if txFilter.Segwit {
pgStr += ` AND transaction_cids.segwit = true`
}
if txFilter.MultiSig {
pgStr += ` AND tx_outputs.required_sigs > 1`
}
if len(txFilter.WitnessHashes) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.witness_hash = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.WitnessHashes))
id++
}
if len(txFilter.Addresses) > 0 {
pgStr += fmt.Sprintf(` AND tx_outputs.addresses && $%d::VARCHAR(66)[]`, id)
args = append(args, pq.Array(txFilter.Addresses))
id++
}
if len(txFilter.Indexes) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.index = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(txFilter.Indexes))
id++
}
if len(txFilter.PkScriptClasses) > 0 {
pgStr += fmt.Sprintf(` AND tx_outputs.script_class = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(txFilter.PkScriptClasses))
}
return results, tx.Select(&results, pgStr, args...)
}
// RetrieveGapsInData is used to find the block numbers at which we are missing data in the db
func (bcr *CIDRetriever) RetrieveGapsInData(validationLevel int) ([]shared.Gap, error) {
log.Info("searching for gaps in the btc ipfs watcher database")
startingBlock, err := bcr.RetrieveFirstBlockNumber()
if err != nil {
return nil, fmt.Errorf("btc CIDRetriever RetrieveFirstBlockNumber error: %v", err)
}
var initialGap []shared.Gap
if startingBlock != 0 {
stop := uint64(startingBlock - 1)
log.Infof("found gap at the beginning of the btc sync from 0 to %d", stop)
initialGap = []shared.Gap{{
Start: 0,
Stop: stop,
}}
}
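// find gaps by self-joining header_cids: a row with no immediate successor (r IS NULL)
// but with at least one later block present (fr) marks the start of a gap, which ends
// just before the next indexed block (min(fr.block_number) - 1)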
pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM btc.header_cids
LEFT JOIN btc.header_cids r on btc.header_cids.block_number = r.block_number - 1
LEFT JOIN btc.header_cids fr on btc.header_cids.block_number < fr.block_number
WHERE r.block_number is NULL and fr.block_number IS NOT NULL
GROUP BY header_cids.block_number, r.block_number`
results := make([]struct {
Start uint64 `db:"start"`
Stop uint64 `db:"stop"`
}, 0)
if err := bcr.db.Select(&results, pgStr); err != nil && err != sql.ErrNoRows {
return nil, err
}
emptyGaps := make([]shared.Gap, len(results))
for i, res := range results {
emptyGaps[i] = shared.Gap{
Start: res.Start,
Stop: res.Stop,
}
}
// Find sections of blocks where we are below the validation level
// There will be no overlap between these "gaps" and the ones above
pgStr = `SELECT block_number FROM btc.header_cids
WHERE times_validated < $1
ORDER BY block_number`
var heights []uint64
if err := bcr.db.Select(&heights, pgStr, validationLevel); err != nil && err != sql.ErrNoRows {
return nil, err
}
return append(append(initialGap, emptyGaps...), utils.MissingHeightsToGaps(heights)...), nil
}
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
func (bcr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (HeaderModel, []TxModel, error) {
log.Debug("retrieving block cids for block hash ", blockHash.String())
// Begin new db tx
tx, err := bcr.db.Beginx()
if err != nil {
return HeaderModel{}, nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
headerCID, err := bcr.RetrieveHeaderCIDByHash(tx, blockHash)
if err != nil {
log.Error("header cid retrieval error")
return HeaderModel{}, nil, err
}
txCIDs, err := bcr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
if err != nil {
log.Error("tx cid retrieval error")
}
return headerCID, txCIDs, err
}
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
func (bcr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (HeaderModel, []TxModel, error) {
log.Debug("retrieving block cids for block number ", blockNumber)
// Begin new db tx
tx, err := bcr.db.Beginx()
if err != nil {
return HeaderModel{}, nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
headerCID, err := bcr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
log.Error("header cid retrieval error")
return HeaderModel{}, nil, err
}
if len(headerCID) < 1 {
return HeaderModel{}, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
}
txCIDs, err := bcr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
if err != nil {
log.Error("tx cid retrieval error")
}
return headerCID[0], txCIDs, err
}
// RetrieveHeaderCIDByHash returns the header for the given block hash
func (bcr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (HeaderModel, error) {
log.Debug("retrieving header cids for block hash ", blockHash.String())
pgStr := `SELECT * FROM btc.header_cids
WHERE block_hash = $1`
var headerCID HeaderModel
return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
}
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
func (bcr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT * FROM btc.transaction_cids
WHERE header_id = $1`
var txCIDs []TxModel
return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
}

View File

@ -1,193 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// Cleaner satisfies the shared.Cleaner interface for bitcoin
type Cleaner struct {
db *postgres.DB
}
// NewCleaner returns a new Cleaner struct that satisfies the shared.Cleaner interface
func NewCleaner(db *postgres.DB) *Cleaner {
return &Cleaner{
db: db,
}
}
// ResetValidation resets the validation level to 0 to enable revalidation
func (c *Cleaner) ResetValidation(rngs [][2]uint64) error {
tx, err := c.db.Beginx()
if err != nil {
return err
}
for _, rng := range rngs {
logrus.Infof("btc db cleaner resetting validation level to 0 for block range %d to %d", rng[0], rng[1])
pgStr := `UPDATE btc.header_cids
SET times_validated = 0
WHERE block_number BETWEEN $1 AND $2`
if _, err := tx.Exec(pgStr, rng[0], rng[1]); err != nil {
shared.Rollback(tx)
return err
}
}
return tx.Commit()
}
// Clean removes the specified data from the db within the provided block range
func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
tx, err := c.db.Beginx()
if err != nil {
return err
}
for _, rng := range rngs {
logrus.Infof("btc db cleaner cleaning up block range %d to %d", rng[0], rng[1])
if err := c.clean(tx, rng, t); err != nil {
shared.Rollback(tx)
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
logrus.Infof("btc db cleaner vacuum analyzing cleaned tables to free up space from deleted rows")
return c.vacuumAnalyze(t)
}
func (c *Cleaner) clean(tx *sqlx.Tx, rng [2]uint64, t shared.DataType) error {
switch t {
case shared.Full, shared.Headers:
return c.cleanFull(tx, rng)
case shared.Transactions:
if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
return err
}
return c.cleanTransactionMetaData(tx, rng)
default:
return fmt.Errorf("btc cleaner unrecognized type: %s", t.String())
}
}
func (c *Cleaner) vacuumAnalyze(t shared.DataType) error {
switch t {
case shared.Full, shared.Headers:
if err := c.vacuumHeaders(); err != nil {
return err
}
if err := c.vacuumTxs(); err != nil {
return err
}
if err := c.vacuumTxInputs(); err != nil {
return err
}
if err := c.vacuumTxOutputs(); err != nil {
return err
}
case shared.Transactions:
if err := c.vacuumTxs(); err != nil {
return err
}
if err := c.vacuumTxInputs(); err != nil {
return err
}
if err := c.vacuumTxOutputs(); err != nil {
return err
}
default:
return fmt.Errorf("btc cleaner unrecognized type: %s", t.String())
}
return c.vacuumIPLDs()
}
func (c *Cleaner) vacuumHeaders() error {
_, err := c.db.Exec(`VACUUM ANALYZE btc.header_cids`)
return err
}
func (c *Cleaner) vacuumTxs() error {
_, err := c.db.Exec(`VACUUM ANALYZE btc.transaction_cids`)
return err
}
func (c *Cleaner) vacuumTxInputs() error {
_, err := c.db.Exec(`VACUUM ANALYZE btc.tx_inputs`)
return err
}
func (c *Cleaner) vacuumTxOutputs() error {
_, err := c.db.Exec(`VACUUM ANALYZE btc.tx_outputs`)
return err
}
func (c *Cleaner) vacuumIPLDs() error {
_, err := c.db.Exec(`VACUUM ANALYZE public.blocks`)
return err
}
func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanHeaderIPLDs(tx, rng); err != nil {
return err
}
return c.cleanHeaderMetaData(tx, rng)
}
func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING btc.transaction_cids B, btc.header_cids C
WHERE A.key = B.mh_key
AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM btc.transaction_cids A
USING btc.header_cids B
WHERE A.header_id = B.id
AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING btc.header_cids B
WHERE A.key = B.mh_key
AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanHeaderMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM btc.header_cids
WHERE block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}

View File

@ -1,354 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc_test
import (
"math/big"
"github.com/ethereum/go-ethereum/crypto"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var (
// Block 0
// header variables
blockHash1 = crypto.Keccak256Hash([]byte{00, 02})
blocKNumber1 = big.NewInt(0)
headerCid1 = shared.TestCID([]byte("mockHeader1CID"))
headerMhKey1 = shared.MultihashKeyFromCID(headerCid1)
parentHash = crypto.Keccak256Hash([]byte{00, 01})
headerModel1 = btc.HeaderModel{
BlockHash: blockHash1.String(),
BlockNumber: blocKNumber1.String(),
ParentHash: parentHash.String(),
CID: headerCid1.String(),
MhKey: headerMhKey1,
}
// tx variables
tx1CID = shared.TestCID([]byte("mockTx1CID"))
tx1MhKey = shared.MultihashKeyFromCID(tx1CID)
tx2CID = shared.TestCID([]byte("mockTx2CID"))
tx2MhKey = shared.MultihashKeyFromCID(tx2CID)
tx1Hash = crypto.Keccak256Hash([]byte{01, 01})
tx2Hash = crypto.Keccak256Hash([]byte{01, 02})
opHash = crypto.Keccak256Hash([]byte{02, 01})
txModels1 = []btc.TxModelWithInsAndOuts{
{
Index: 0,
CID: tx1CID.String(),
MhKey: tx1MhKey,
TxHash: tx1Hash.String(),
SegWit: true,
TxInputs: []btc.TxInput{
{
Index: 0,
TxWitness: []string{"mockWitness"},
SignatureScript: []byte{01},
PreviousOutPointIndex: 0,
PreviousOutPointHash: opHash.String(),
},
},
TxOutputs: []btc.TxOutput{
{
Index: 0,
Value: 50000000,
PkScript: []byte{02},
ScriptClass: 0,
RequiredSigs: 1,
},
},
},
{
Index: 1,
CID: tx2CID.String(),
MhKey: tx2MhKey,
TxHash: tx2Hash.String(),
SegWit: true,
},
}
mockCIDPayload1 = &btc.CIDPayload{
HeaderCID: headerModel1,
TransactionCIDs: txModels1,
}
// Block 1
// header variables
blockHash2 = crypto.Keccak256Hash([]byte{00, 03})
blocKNumber2 = big.NewInt(1)
headerCid2 = shared.TestCID([]byte("mockHeaderCID2"))
headerMhKey2 = shared.MultihashKeyFromCID(headerCid2)
headerModel2 = btc.HeaderModel{
BlockNumber: blocKNumber2.String(),
BlockHash: blockHash2.String(),
ParentHash: blockHash1.String(),
CID: headerCid2.String(),
MhKey: headerMhKey2,
}
// tx variables
tx3CID = shared.TestCID([]byte("mockTx3CID"))
tx3MhKey = shared.MultihashKeyFromCID(tx3CID)
tx3Hash = crypto.Keccak256Hash([]byte{01, 03})
txModels2 = []btc.TxModelWithInsAndOuts{
{
Index: 0,
CID: tx3CID.String(),
MhKey: tx3MhKey,
TxHash: tx3Hash.String(),
SegWit: true,
},
}
mockCIDPayload2 = &btc.CIDPayload{
HeaderCID: headerModel2,
TransactionCIDs: txModels2,
}
rngs = [][2]uint64{{0, 1}}
mhKeys = []string{
headerMhKey1,
headerMhKey2,
tx1MhKey,
tx2MhKey,
tx3MhKey,
}
mockData = []byte{'\x01'}
)
var _ = Describe("Cleaner", func() {
var (
db *postgres.DB
repo *btc.CIDIndexer
cleaner *btc.Cleaner
)
BeforeEach(func() {
var err error
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = btc.NewCIDIndexer(db)
cleaner = btc.NewCleaner(db)
})
Describe("Clean", func() {
BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var startingIPFSBlocksCount int
pgStr := `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&startingIPFSBlocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingTxCount int
pgStr = `SELECT COUNT(*) FROM btc.transaction_cids`
err = tx.Get(&startingTxCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingHeaderCount int
pgStr = `SELECT COUNT(*) FROM btc.header_cids`
err = tx.Get(&startingHeaderCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(startingIPFSBlocksCount).To(Equal(5))
Expect(startingTxCount).To(Equal(3))
Expect(startingHeaderCount).To(Equal(2))
})
AfterEach(func() {
btc.TearDownDB(db)
})
It("Cleans everything", func() {
err := cleaner.Clean(rngs, shared.Full)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txInCount int
pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
err = tx.Get(&txInCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txOutCount int
pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
err = tx.Get(&txOutCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr = `SELECT COUNT(*) FROM btc.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(blocksCount).To(Equal(0))
Expect(txCount).To(Equal(0))
Expect(txInCount).To(Equal(0))
Expect(txOutCount).To(Equal(0))
Expect(headerCount).To(Equal(0))
})
It("Cleans headers and all linked data", func() {
err := cleaner.Clean(rngs, shared.Headers)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txInCount int
pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
err = tx.Get(&txInCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txOutCount int
pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
err = tx.Get(&txOutCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr = `SELECT COUNT(*) FROM btc.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(blocksCount).To(Equal(0))
Expect(txCount).To(Equal(0))
Expect(txInCount).To(Equal(0))
Expect(txOutCount).To(Equal(0))
Expect(headerCount).To(Equal(0))
})
It("Cleans transactions", func() {
err := cleaner.Clean(rngs, shared.Transactions)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr := `SELECT COUNT(*) FROM btc.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txInCount int
pgStr = `SELECT COUNT(*) FROM btc.tx_inputs`
err = tx.Get(&txInCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txOutCount int
pgStr = `SELECT COUNT(*) FROM btc.tx_outputs`
err = tx.Get(&txOutCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr = `SELECT COUNT(*) FROM btc.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(blocksCount).To(Equal(2))
Expect(txCount).To(Equal(0))
Expect(txInCount).To(Equal(0))
Expect(txOutCount).To(Equal(0))
Expect(headerCount).To(Equal(2))
})
})
Describe("ResetValidation", func() {
BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred())
var validationTimes []int
pgStr := `SELECT times_validated FROM btc.header_cids`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(1))
Expect(validationTimes[1]).To(Equal(1))
err = repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred())
validationTimes = []int{}
pgStr = `SELECT times_validated FROM btc.header_cids ORDER BY block_number`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(2))
Expect(validationTimes[1]).To(Equal(1))
})
AfterEach(func() {
btc.TearDownDB(db)
})
It("Resets the validation level", func() {
err := cleaner.ResetValidation(rngs)
Expect(err).ToNot(HaveOccurred())
var validationTimes []int
pgStr := `SELECT times_validated FROM btc.header_cids`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(0))
Expect(validationTimes[1]).To(Equal(0))
err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred())
validationTimes = []int{}
pgStr = `SELECT times_validated FROM btc.header_cids ORDER BY block_number`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(0))
Expect(validationTimes[1]).To(Equal(1))
})
})
})

View File

@ -1,102 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// PayloadConverter satisfies the PayloadConverter interface for bitcoin
type PayloadConverter struct {
chainConfig *chaincfg.Params
}
// NewPayloadConverter creates a pointer to a new PayloadConverter which satisfies the PayloadConverter interface
func NewPayloadConverter(chainConfig *chaincfg.Params) *PayloadConverter {
return &PayloadConverter{
chainConfig: chainConfig,
}
}
// Convert method is used to convert a bitcoin BlockPayload to an IPLDPayload
// Satisfies the shared.PayloadConverter interface
func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
btcBlockPayload, ok := payload.(BlockPayload)
if !ok {
return nil, fmt.Errorf("btc converter: expected payload type %T got %T", BlockPayload{}, payload)
}
txMeta := make([]TxModelWithInsAndOuts, len(btcBlockPayload.Txs))
for i, tx := range btcBlockPayload.Txs {
txModel := TxModelWithInsAndOuts{
TxHash: tx.Hash().String(),
Index: int64(i),
SegWit: tx.HasWitness(),
TxOutputs: make([]TxOutput, len(tx.MsgTx().TxOut)),
TxInputs: make([]TxInput, len(tx.MsgTx().TxIn)),
}
if tx.HasWitness() {
txModel.WitnessHash = tx.WitnessHash().String()
}
for i, in := range tx.MsgTx().TxIn {
txModel.TxInputs[i] = TxInput{
Index: int64(i),
SignatureScript: in.SignatureScript,
PreviousOutPointHash: in.PreviousOutPoint.Hash.String(),
PreviousOutPointIndex: in.PreviousOutPoint.Index,
TxWitness: convertBytesToHexArray(in.Witness),
}
}
for i, out := range tx.MsgTx().TxOut {
scriptClass, addresses, numberOfSigs, err := txscript.ExtractPkScriptAddrs(out.PkScript, pc.chainConfig)
// if we receive an error but the txscript type isn't NonStandardTy then something went wrong
if err != nil && scriptClass != txscript.NonStandardTy {
return nil, err
}
stringAddrs := make([]string, len(addresses))
for i, addr := range addresses {
stringAddrs[i] = addr.EncodeAddress()
}
txModel.TxOutputs[i] = TxOutput{
Index: int64(i),
Value: out.Value,
PkScript: out.PkScript,
RequiredSigs: int64(numberOfSigs),
ScriptClass: uint8(scriptClass),
Addresses: stringAddrs,
}
}
txMeta[i] = txModel
}
return ConvertedPayload{
BlockPayload: btcBlockPayload,
TxMetaData: txMeta,
}, nil
}
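// convertBytesToHexArray hex-encodes each witness item into a string slice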
func convertBytesToHexArray(bytea [][]byte) []string {
var strs []string
for _, b := range bytea {
strs = append(strs, hex.EncodeToString(b))
}
return strs
}

View File

@ -1,43 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc_test
import (
"github.com/btcsuite/btcd/chaincfg"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc/mocks"
)
var _ = Describe("Converter", func() {
Describe("Convert", func() {
It("Converts mock BlockPayloads into the expected IPLDPayloads", func() {
converter := btc.NewPayloadConverter(&chaincfg.MainNetParams)
payload, err := converter.Convert(mocks.MockBlockPayload)
Expect(err).ToNot(HaveOccurred())
convertedPayload, ok := payload.(btc.ConvertedPayload)
Expect(ok).To(BeTrue())
Expect(convertedPayload).To(Equal(mocks.MockConvertedPayload))
Expect(convertedPayload.BlockHeight).To(Equal(mocks.MockBlockHeight))
Expect(convertedPayload.Header).To(Equal(&mocks.MockBlock.Header))
Expect(convertedPayload.Txs).To(Equal(mocks.MockTransactions))
Expect(convertedPayload.TxMetaData).To(Equal(mocks.MockTxsMetaData))
})
})
})

View File

@ -1,159 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"bytes"
"fmt"
"math/big"
"github.com/multiformats/go-multihash"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// ResponseFilterer satisfies the ResponseFilterer interface for bitcoin
type ResponseFilterer struct{}
// NewResponseFilterer creates a new Filterer satisfying the ResponseFilterer interface
func NewResponseFilterer() *ResponseFilterer {
return &ResponseFilterer{}
}
// Filter is used to filter through btc data to extract and package requested data into a Payload
func (s *ResponseFilterer) Filter(filter shared.SubscriptionSettings, payload shared.ConvertedData) (shared.IPLDs, error) {
btcFilters, ok := filter.(*SubscriptionSettings)
if !ok {
return IPLDs{}, fmt.Errorf("btc filterer expected filter type %T got %T", &SubscriptionSettings{}, filter)
}
btcPayload, ok := payload.(ConvertedPayload)
if !ok {
return IPLDs{}, fmt.Errorf("btc filterer expected payload type %T got %T", ConvertedPayload{}, payload)
}
height := int64(btcPayload.BlockPayload.BlockHeight)
if checkRange(btcFilters.Start.Int64(), btcFilters.End.Int64(), height) {
response := new(IPLDs)
if err := s.filterHeaders(btcFilters.HeaderFilter, response, btcPayload); err != nil {
return IPLDs{}, err
}
if err := s.filterTransactions(btcFilters.TxFilter, response, btcPayload); err != nil {
return IPLDs{}, err
}
response.BlockNumber = big.NewInt(height)
return *response, nil
}
return IPLDs{}, nil
}
func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
if !headerFilter.Off {
headerBuffer := new(bytes.Buffer)
if err := payload.Header.Serialize(headerBuffer); err != nil {
return err
}
data := headerBuffer.Bytes()
cid, err := ipld.RawdataToCid(ipld.MBitcoinHeader, data, multihash.DBL_SHA2_256)
if err != nil {
return err
}
response.Header = ipfs.BlockModel{
Data: data,
CID: cid.String(),
}
}
return nil
}
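// checkRange returns true if actual falls within [start, end]; end <= 0 is treated as an open-ended upper bound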
func checkRange(start, end, actual int64) bool {
if (end <= 0 || end >= actual) && start <= actual {
return true
}
return false
}
func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) error {
if !trxFilter.Off {
response.Transactions = make([]ipfs.BlockModel, 0, len(payload.TxMetaData))
for i, txMeta := range payload.TxMetaData {
if checkTransaction(txMeta, trxFilter) {
trxBuffer := new(bytes.Buffer)
if err := payload.Txs[i].MsgTx().Serialize(trxBuffer); err != nil {
return err
}
data := trxBuffer.Bytes()
cid, err := ipld.RawdataToCid(ipld.MBitcoinTx, data, multihash.DBL_SHA2_256)
if err != nil {
return err
}
response.Transactions = append(response.Transactions, ipfs.BlockModel{
Data: data,
CID: cid.String(),
})
}
}
}
return nil
}
// checkTransaction returns true if the provided transaction has a hit on the filter
func checkTransaction(txMeta TxModelWithInsAndOuts, txFilter TxFilter) bool {
passesSegwitFilter := false
if !txFilter.Segwit || (txFilter.Segwit && txMeta.SegWit) {
passesSegwitFilter = true
}
passesMultiSigFilter := !txFilter.MultiSig
if txFilter.MultiSig {
for _, out := range txMeta.TxOutputs {
if out.RequiredSigs > 1 {
passesMultiSigFilter = true
}
}
}
passesWitnessFilter := len(txFilter.WitnessHashes) == 0
for _, wantedWitnessHash := range txFilter.WitnessHashes {
if wantedWitnessHash == txMeta.WitnessHash {
passesWitnessFilter = true
}
}
passesAddressFilter := len(txFilter.Addresses) == 0
for _, wantedAddress := range txFilter.Addresses {
for _, out := range txMeta.TxOutputs {
for _, actualAddress := range out.Addresses {
if wantedAddress == actualAddress {
passesAddressFilter = true
}
}
}
}
passesIndexFilter := len(txFilter.Indexes) == 0
for _, wantedIndex := range txFilter.Indexes {
if wantedIndex == txMeta.Index {
passesIndexFilter = true
}
}
passesPkScriptClassFilter := len(txFilter.PkScriptClasses) == 0
for _, wantedPkScriptClass := range txFilter.PkScriptClasses {
for _, out := range txMeta.TxOutputs {
if out.ScriptClass == wantedPkScriptClass {
passesPkScriptClassFilter = true
}
}
}
return passesSegwitFilter && passesMultiSigFilter && passesWitnessFilter && passesAddressFilter && passesIndexFilter && passesPkScriptClassFilter
}

View File

@ -1,104 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"bytes"
"time"
"github.com/btcsuite/btcd/rpcclient"
"github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// HTTPPayloadStreamer satisfies the PayloadStreamer interface for bitcoin over http endpoints (since bitcoin core doesn't support websockets)
type HTTPPayloadStreamer struct {
Config *rpcclient.ConnConfig
lastHash []byte
}
// NewHTTPPayloadStreamer creates a pointer to a new PayloadStreamer which satisfies the PayloadStreamer interface for bitcoin
func NewHTTPPayloadStreamer(clientConfig *rpcclient.ConnConfig) *HTTPPayloadStreamer {
return &HTTPPayloadStreamer{
Config: clientConfig,
}
}
// Stream is the main loop for subscribing to data from the btc block notifications
// Satisfies the shared.PayloadStreamer interface
func (ps *HTTPPayloadStreamer) Stream(payloadChan chan shared.RawChainData) (shared.ClientSubscription, error) {
logrus.Debug("streaming block payloads from btc")
client, err := rpcclient.New(ps.Config, nil)
if err != nil {
return nil, err
}
ticker := time.NewTicker(time.Second * 5)
errChan := make(chan error)
go func() {
for {
// start at
select {
case <-ticker.C:
height, err := client.GetBlockCount()
if err != nil {
errChan <- err
continue
}
blockHash, err := client.GetBlockHash(height)
if err != nil {
errChan <- err
continue
}
blockHashBytes := blockHash.CloneBytes()
if bytes.Equal(blockHashBytes, ps.lastHash) {
continue
}
block, err := client.GetBlock(blockHash)
if err != nil {
errChan <- err
continue
}
ps.lastHash = blockHashBytes
payloadChan <- BlockPayload{
Header: &block.Header,
BlockHeight: height,
Txs: msgTxsToUtilTxs(block.Transactions),
}
default:
}
}
}()
return &HTTPClientSubscription{client: client, errChan: errChan}, nil
}
// HTTPClientSubscription is a wrapper around the underlying bitcoind rpc client
// to fit the shared.ClientSubscription interface
type HTTPClientSubscription struct {
client *rpcclient.Client
errChan chan error
}
// Unsubscribe satisfies the rpc.Subscription interface
func (bcs *HTTPClientSubscription) Unsubscribe() {
bcs.client.Shutdown()
}
// Err() satisfies the rpc.Subscription interface
func (bcs *HTTPClientSubscription) Err() <-chan error {
return bcs.errChan
}
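A minimal usage sketch for the HTTP streamer above, assuming a reachable bitcoind JSON-RPC endpoint; the host and credentials are placeholders, and PayloadChanBufferSize comes from this package's streamer.go.

package main

import (
	"log"

	"github.com/btcsuite/btcd/rpcclient"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

func main() {
	connCfg := &rpcclient.ConnConfig{
		Host:         "127.0.0.1:8332", // placeholder bitcoind RPC address
		User:         "rpcuser",        // placeholder credentials
		Pass:         "rpcpass",
		HTTPPostMode: true, // bitcoind only supports HTTP POST
		DisableTLS:   true,
	}
	streamer := btc.NewHTTPPayloadStreamer(connCfg)
	payloadChan := make(chan shared.RawChainData, btc.PayloadChanBufferSize)
	sub, err := streamer.Stream(payloadChan)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()
	for {
		select {
		case payload := <-payloadChan:
			if blockPayload, ok := payload.(btc.BlockPayload); ok {
				log.Printf("received block at height %d", blockPayload.BlockHeight)
			}
		case err := <-sub.Err():
			log.Printf("streamer error: %v", err)
		}
	}
}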

View File

@ -1,132 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
type CIDIndexer struct {
db *postgres.DB
}
func NewCIDIndexer(db *postgres.DB) *CIDIndexer {
return &CIDIndexer{
db: db,
}
}
func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
cidWrapper, ok := cids.(*CIDPayload)
if !ok {
return fmt.Errorf("btc indexer expected cids type %T got %T", &CIDPayload{}, cids)
}
// Begin new db tx
tx, err := in.db.Beginx()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
headerID, err := in.indexHeaderCID(tx, cidWrapper.HeaderCID)
if err != nil {
logrus.Error("btc indexer error when indexing header")
return err
}
err = in.indexTransactionCIDs(tx, cidWrapper.TransactionCIDs, headerID)
if err != nil {
logrus.Error("btc indexer error when indexing transactions")
}
return err
}
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
var headerID int64
err := tx.QueryRowx(`INSERT INTO btc.header_cids (block_number, block_hash, parent_hash, cid, timestamp, bits, node_id, mh_key, times_validated)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, timestamp, bits, node_id, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, btc.header_cids.times_validated + 1)
RETURNING id`,
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.Timestamp, header.Bits, in.db.NodeID, header.MhKey, 1).Scan(&headerID)
return headerID, err
}
func (in *CIDIndexer) indexTransactionCIDs(tx *sqlx.Tx, transactions []TxModelWithInsAndOuts, headerID int64) error {
for _, transaction := range transactions {
txID, err := in.indexTransactionCID(tx, transaction, headerID)
if err != nil {
logrus.Error("btc indexer error when indexing header")
return err
}
for _, input := range transaction.TxInputs {
if err := in.indexTxInput(tx, input, txID); err != nil {
logrus.Error("btc indexer error when indexing tx inputs")
return err
}
}
for _, output := range transaction.TxOutputs {
if err := in.indexTxOutput(tx, output, txID); err != nil {
logrus.Error("btc indexer error when indexing tx outputs")
return err
}
}
}
return nil
}
func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModelWithInsAndOuts, headerID int64) (int64, error) {
var txID int64
err := tx.QueryRowx(`INSERT INTO btc.transaction_cids (header_id, tx_hash, index, cid, segwit, witness_hash, mh_key)
VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (tx_hash) DO UPDATE SET (header_id, index, cid, segwit, witness_hash, mh_key) = ($1, $3, $4, $5, $6, $7)
RETURNING id`,
headerID, transaction.TxHash, transaction.Index, transaction.CID, transaction.SegWit, transaction.WitnessHash, transaction.MhKey).Scan(&txID)
return txID, err
}
func (in *CIDIndexer) indexTxInput(tx *sqlx.Tx, txInput TxInput, txID int64) error {
_, err := tx.Exec(`INSERT INTO btc.tx_inputs (tx_id, index, witness, sig_script, outpoint_tx_hash, outpoint_index)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (tx_id, index) DO UPDATE SET (witness, sig_script, outpoint_tx_hash, outpoint_index) = ($3, $4, $5, $6)`,
txID, txInput.Index, pq.Array(txInput.TxWitness), txInput.SignatureScript, txInput.PreviousOutPointHash, txInput.PreviousOutPointIndex)
return err
}
func (in *CIDIndexer) indexTxOutput(tx *sqlx.Tx, txOutput TxOutput, txID int64) error {
_, err := tx.Exec(`INSERT INTO btc.tx_outputs (tx_id, index, value, pk_script, script_class, addresses, required_sigs)
VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (tx_id, index) DO UPDATE SET (value, pk_script, script_class, addresses, required_sigs) = ($3, $4, $5, $6, $7)`,
txID, txOutput.Index, txOutput.Value, txOutput.PkScript, txOutput.ScriptClass, txOutput.Addresses, txOutput.RequiredSigs)
return err
}
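A minimal sketch of driving the indexer above, assuming an already-opened *postgres.DB and that the referenced IPLD already exists in public.blocks so the FK constraint is satisfied (as the tests in this commit note); every header value is a placeholder.

package example

import (
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
)

// indexHeaderOnly indexes a single placeholder header CID with no transactions attached.
func indexHeaderOnly(db *postgres.DB) error {
	indexer := btc.NewCIDIndexer(db)
	payload := &btc.CIDPayload{
		HeaderCID: btc.HeaderModel{
			BlockNumber: "1337",
			BlockHash:   "placeholderBlockHash",
			ParentHash:  "placeholderParentHash",
			CID:         "placeholderHeaderCID",
			MhKey:       "placeholderHeaderMhKey",
			Timestamp:   1293623863,
			Bits:        0x1b04864c,
		},
		TransactionCIDs: []btc.TxModelWithInsAndOuts{},
	}
	return indexer.Index(payload)
}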

View File

@ -1,94 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var _ = Describe("Indexer", func() {
var (
db *postgres.DB
err error
repo *btc.CIDIndexer
mockData = []byte{1, 2, 3}
)
BeforeEach(func() {
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = btc.NewCIDIndexer(db)
// need entries in the public.blocks with the mhkeys or the FK constraint will fail
shared.PublishMockIPLD(db, mocks.MockHeaderMhKey, mockData)
shared.PublishMockIPLD(db, mocks.MockTrxMhKey1, mockData)
shared.PublishMockIPLD(db, mocks.MockTrxMhKey2, mockData)
shared.PublishMockIPLD(db, mocks.MockTrxMhKey3, mockData)
})
AfterEach(func() {
btc.TearDownDB(db)
})
Describe("Index", func() {
It("Indexes CIDs and related metadata into vulcanizedb", func() {
err = repo.Index(&mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT * FROM btc.header_cids
WHERE block_number = $1`
// check header was properly indexed
header := new(btc.HeaderModel)
err = db.Get(header, pgStr, mocks.MockHeaderMetaData.BlockNumber)
Expect(err).ToNot(HaveOccurred())
Expect(header.CID).To(Equal(mocks.MockHeaderMetaData.CID))
Expect(header.BlockNumber).To(Equal(mocks.MockHeaderMetaData.BlockNumber))
Expect(header.Bits).To(Equal(mocks.MockHeaderMetaData.Bits))
Expect(header.Timestamp).To(Equal(mocks.MockHeaderMetaData.Timestamp))
Expect(header.BlockHash).To(Equal(mocks.MockHeaderMetaData.BlockHash))
Expect(header.ParentHash).To(Equal(mocks.MockHeaderMetaData.ParentHash))
// check trxs were properly indexed
trxs := make([]btc.TxModel, 0)
pgStr = `SELECT transaction_cids.id, transaction_cids.header_id, transaction_cids.index,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.segwit, transaction_cids.witness_hash
FROM btc.transaction_cids INNER JOIN btc.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&trxs, pgStr, mocks.MockHeaderMetaData.BlockNumber)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(3))
for _, tx := range trxs {
Expect(tx.SegWit).To(Equal(false))
Expect(tx.HeaderID).To(Equal(header.ID))
Expect(tx.WitnessHash).To(Equal(""))
switch tx.Index {
case 0:
Expect(tx.CID).To(Equal(mocks.MockTrxCID1.String()))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[0].TxHash().String()))
case 1:
Expect(tx.CID).To(Equal(mocks.MockTrxCID2.String()))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[1].TxHash().String()))
case 2:
Expect(tx.CID).To(Equal(mocks.MockTrxCID3.String()))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[2].TxHash().String()))
}
}
})
})
})

View File

@ -1,107 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"fmt"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// IPLDFetcher satisfies the IPLDFetcher interface for bitcoin
// It interfaces directly with PG-IPFS instead of going through a node-interface or remote node
type IPLDFetcher struct {
db *postgres.DB
}
// NewIPLDFetcher creates a pointer to a new IPLDFetcher
func NewIPLDFetcher(db *postgres.DB) *IPLDFetcher {
return &IPLDFetcher{
db: db,
}
}
// Fetch is the exported method for fetching and returning all the IPLDs specified in the CIDWrapper
func (f *IPLDFetcher) Fetch(cids shared.CIDsForFetching) (shared.IPLDs, error) {
cidWrapper, ok := cids.(*CIDWrapper)
if !ok {
return nil, fmt.Errorf("btc fetcher: expected cids type %T got %T", &CIDWrapper{}, cids)
}
log.Debug("fetching iplds")
iplds := IPLDs{}
iplds.BlockNumber = cidWrapper.BlockNumber
tx, err := f.db.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
iplds.Header, err = f.FetchHeader(tx, cidWrapper.Header)
if err != nil {
return nil, fmt.Errorf("btc pg fetcher: header fetching error: %s", err.Error())
}
iplds.Transactions, err = f.FetchTrxs(tx, cidWrapper.Transactions)
if err != nil {
return nil, fmt.Errorf("btc pg fetcher: transaction fetching error: %s", err.Error())
}
return iplds, err
}
// FetchHeader fetches the header IPLD
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return ipfs.BlockModel{}, err
}
return ipfs.BlockModel{
Data: headerBytes,
CID: c.CID,
}, nil
}
// FetchTrxs fetches transactions
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids {
trxBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return nil, err
}
trxIPLDs[i] = ipfs.BlockModel{
Data: trxBytes,
CID: c.CID,
}
}
return trxIPLDs, nil
}
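A minimal sketch of using the fetcher above, assuming an open *postgres.DB and a btc.CIDWrapper (normally produced by the CID retriever, which is not part of this section); the CIDs and multihash keys are placeholders.

package example

import (
	"math/big"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

// fetchBlockIPLDs pulls the header and transaction IPLDs for one block out of PG-IPFS.
func fetchBlockIPLDs(db *postgres.DB) (shared.IPLDs, error) {
	fetcher := btc.NewIPLDFetcher(db)
	cids := &btc.CIDWrapper{
		BlockNumber: big.NewInt(1337),
		Header:      btc.HeaderModel{CID: "placeholderHeaderCID", MhKey: "placeholderHeaderMhKey"},
		Transactions: []btc.TxModel{
			{CID: "placeholderTxCID", MhKey: "placeholderTxMhKey"},
		},
	}
	return fetcher.Fetch(cids)
}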

View File

@ -1,64 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"fmt"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload btc.BlockPayload
ReturnIPLDPayload btc.ConvertedPayload
ReturnErr error
}
// Convert is used to convert a btc.BlockPayload to a btc.ConvertedPayload
func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(btc.BlockPayload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", btc.BlockPayload{}, payload)
}
pc.PassedStatediffPayload = stateDiffPayload
return pc.ReturnIPLDPayload, pc.ReturnErr
}
// IterativePayloadConverter is the underlying struct for the Converter interface
type IterativePayloadConverter struct {
PassedStatediffPayload []btc.BlockPayload
ReturnIPLDPayload []btc.ConvertedPayload
ReturnErr error
iteration int
}
// Convert is used to convert a btc.BlockPayload to a btc.ConvertedPayload
func (pc *IterativePayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(btc.BlockPayload)
if !ok {
return nil, fmt.Errorf("convert expected payload type %T got %T", btc.BlockPayload{}, payload)
}
pc.PassedStatediffPayload = append(pc.PassedStatediffPayload, stateDiffPayload)
if len(pc.PassedStatediffPayload) < pc.iteration+1 {
return nil, fmt.Errorf("IterativePayloadConverter does not have a payload to return at iteration %d", pc.iteration)
}
returnPayload := pc.ReturnIPLDPayload[pc.iteration]
pc.iteration++
return returnPayload, pc.ReturnErr
}

View File

@ -1,40 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"fmt"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// CIDIndexer is the underlying struct for the Indexer interface
type CIDIndexer struct {
PassedCIDPayload []*btc.CIDPayload
ReturnErr error
}
// Index indexes a cidPayload in Postgres
func (repo *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
cidPayload, ok := cids.(*btc.CIDPayload)
if !ok {
return fmt.Errorf("index expected cids type %T got %T", &btc.CIDPayload{}, cids)
}
repo.PassedCIDPayload = append(repo.PassedCIDPayload, cidPayload)
return repo.ReturnErr
}

View File

@ -1,65 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"fmt"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
PassedIPLDPayload btc.ConvertedPayload
ReturnCIDPayload *btc.CIDPayload
ReturnErr error
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
ipldPayload, ok := payload.(btc.ConvertedPayload)
if !ok {
return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.ConvertedPayload{}, payload)
}
pub.PassedIPLDPayload = ipldPayload
return pub.ReturnCIDPayload, pub.ReturnErr
}
// IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
type IterativeIPLDPublisher struct {
PassedIPLDPayload []btc.ConvertedPayload
ReturnCIDPayload []*btc.CIDPayload
ReturnErr error
iteration int
}
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IterativeIPLDPublisher) Publish(payload shared.ConvertedData) (shared.CIDsForIndexing, error) {
ipldPayload, ok := payload.(btc.ConvertedPayload)
if !ok {
return nil, fmt.Errorf("publish expected payload type %T got %T", &btc.ConvertedPayload{}, payload)
}
pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, ipldPayload)
if len(pub.ReturnCIDPayload) < pub.iteration+1 {
return nil, fmt.Errorf("IterativeIPLDPublisher does not have a payload to return at iteration %d", pub.iteration)
}
returnPayload := pub.ReturnCIDPayload[pub.iteration]
pub.iteration++
return returnPayload, pub.ReturnErr
}

View File

@ -1,709 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"strconv"
"time"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var (
MockHeaderCID = shared.TestCID([]byte("MockHeaderCID"))
MockTrxCID1 = shared.TestCID([]byte("MockTrxCID1"))
MockTrxCID2 = shared.TestCID([]byte("MockTrxCID2"))
MockTrxCID3 = shared.TestCID([]byte("MockTrxCID3"))
MockHeaderMhKey = shared.MultihashKeyFromCID(MockHeaderCID)
MockTrxMhKey1 = shared.MultihashKeyFromCID(MockTrxCID1)
MockTrxMhKey2 = shared.MultihashKeyFromCID(MockTrxCID2)
MockTrxMhKey3 = shared.MultihashKeyFromCID(MockTrxCID3)
MockBlockHeight int64 = 1337
MockBlock = wire.MsgBlock{
Header: wire.BlockHeader{
Version: 1,
PrevBlock: chainhash.Hash([32]byte{ // Make go vet happy.
0x50, 0x12, 0x01, 0x19, 0x17, 0x2a, 0x61, 0x04,
0x21, 0xa6, 0xc3, 0x01, 0x1d, 0xd3, 0x30, 0xd9,
0xdf, 0x07, 0xb6, 0x36, 0x16, 0xc2, 0xcc, 0x1f,
0x1c, 0xd0, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
}), // 000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250
MerkleRoot: chainhash.Hash([32]byte{ // Make go vet happy.
0x66, 0x57, 0xa9, 0x25, 0x2a, 0xac, 0xd5, 0xc0,
0xb2, 0x94, 0x09, 0x96, 0xec, 0xff, 0x95, 0x22,
0x28, 0xc3, 0x06, 0x7c, 0xc3, 0x8d, 0x48, 0x85,
0xef, 0xb5, 0xa4, 0xac, 0x42, 0x47, 0xe9, 0xf3,
}), // f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766
Timestamp: time.Unix(1293623863, 0), // 2010-12-29 11:57:43 +0000 UTC
Bits: 0x1b04864c, // 453281356
Nonce: 0x10572b0f, // 274148111
},
Transactions: []*wire.MsgTx{
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{
0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02,
},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 0x12a05f200, // 5000000000
PkScript: []byte{
0x41, // OP_DATA_65
0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25,
0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73,
0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7,
0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16,
0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24,
0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed,
0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28,
0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf,
0x84, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
},
LockTime: 0,
},
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash([32]byte{ // Make go vet happy.
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87,
}), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03
Index: 0,
},
SignatureScript: []byte{
0x49, // OP_DATA_73
0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3,
0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6,
0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94,
0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58,
0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00,
0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62,
0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c,
0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60,
0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48,
0x01, // 73-byte signature
0x41, // OP_DATA_65
0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d,
0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38,
0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25,
0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e,
0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8,
0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd,
0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b,
0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3,
0xd3, // 65-byte pubkey
},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 0x2123e300, // 556000000
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60,
0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e,
0xf7, 0xf5, 0x8b, 0x32,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
},
{
Value: 0x108e20f00, // 4444000000
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f,
0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
},
},
LockTime: 0,
},
{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash([32]byte{ // Make go vet happy.
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf,
}), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3
Index: 1,
},
SignatureScript: []byte{
0x47, // OP_DATA_71
0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf,
0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5,
0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34,
0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31,
0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee,
0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f,
0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c,
0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e,
0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01,
0x41, // OP_DATA_65
0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78,
0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5,
0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39,
0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21,
0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee,
0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3,
0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95,
0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85,
0x0f, // 65-byte pubkey
},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 0xf4240, // 1000000
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04,
0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d,
0xad, 0xbe, 0x7e, 0x10,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
},
{
Value: 0x11d260c0, // 299000000
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1,
0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab,
0xb3, 0x40, 0x9c, 0xd9,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
},
},
LockTime: 0,
},
},
}
MockTransactions = []*btcutil.Tx{
btcutil.NewTx(MockBlock.Transactions[0]),
btcutil.NewTx(MockBlock.Transactions[1]),
btcutil.NewTx(MockBlock.Transactions[2]),
}
MockBlockPayload = btc.BlockPayload{
Header: &MockBlock.Header,
Txs: MockTransactions,
BlockHeight: MockBlockHeight,
}
sClass1, addresses1, numOfSigs1, _ = txscript.ExtractPkScriptAddrs([]byte{
0x41, // OP_DATA_65
0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25,
0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73,
0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7,
0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16,
0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24,
0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed,
0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28,
0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf,
0x84, // 65-byte signature
0xac, // OP_CHECKSIG
}, &chaincfg.MainNetParams)
sClass2a, addresses2a, numOfSigs2a, _ = txscript.ExtractPkScriptAddrs([]byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60,
0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e,
0xf7, 0xf5, 0x8b, 0x32,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
}, &chaincfg.MainNetParams)
sClass2b, addresses2b, numOfSigs2b, _ = txscript.ExtractPkScriptAddrs([]byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f,
0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
}, &chaincfg.MainNetParams)
sClass3a, addresses3a, numOfSigs3a, _ = txscript.ExtractPkScriptAddrs([]byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04,
0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d,
0xad, 0xbe, 0x7e, 0x10,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
}, &chaincfg.MainNetParams)
sClass3b, addresses3b, numOfSigs3b, _ = txscript.ExtractPkScriptAddrs([]byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1,
0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab,
0xb3, 0x40, 0x9c, 0xd9,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
}, &chaincfg.MainNetParams)
MockTxsMetaData = []btc.TxModelWithInsAndOuts{
{
TxHash: MockBlock.Transactions[0].TxHash().String(),
Index: 0,
SegWit: MockBlock.Transactions[0].HasWitness(),
TxInputs: []btc.TxInput{
{
Index: 0,
SignatureScript: []byte{
0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02,
},
PreviousOutPointHash: chainhash.Hash{}.String(),
PreviousOutPointIndex: 0xffffffff,
},
},
TxOutputs: []btc.TxOutput{
{
Value: 5000000000,
Index: 0,
PkScript: []byte{
0x41, // OP_DATA_65
0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25,
0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73,
0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7,
0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16,
0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24,
0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed,
0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28,
0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf,
0x84, // 65-byte signature
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass1),
RequiredSigs: int64(numOfSigs1),
Addresses: stringSliceFromAddresses(addresses1),
},
},
},
{
TxHash: MockBlock.Transactions[1].TxHash().String(),
Index: 1,
SegWit: MockBlock.Transactions[1].HasWitness(),
TxInputs: []btc.TxInput{
{
Index: 0,
PreviousOutPointHash: chainhash.Hash([32]byte{ // Make go vet happy.
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87,
}).String(),
PreviousOutPointIndex: 0,
SignatureScript: []byte{
0x49, // OP_DATA_73
0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3,
0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6,
0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94,
0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58,
0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00,
0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62,
0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c,
0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60,
0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48,
0x01, // 73-byte signature
0x41, // OP_DATA_65
0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d,
0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38,
0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25,
0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e,
0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8,
0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd,
0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b,
0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3,
0xd3, // 65-byte pubkey
},
},
},
TxOutputs: []btc.TxOutput{
{
Index: 0,
Value: 556000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60,
0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e,
0xf7, 0xf5, 0x8b, 0x32,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass2a),
RequiredSigs: int64(numOfSigs2a),
Addresses: stringSliceFromAddresses(addresses2a),
},
{
Index: 1,
Value: 4444000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f,
0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass2b),
RequiredSigs: int64(numOfSigs2b),
Addresses: stringSliceFromAddresses(addresses2b),
},
},
},
{
TxHash: MockBlock.Transactions[2].TxHash().String(),
Index: 2,
SegWit: MockBlock.Transactions[2].HasWitness(),
TxInputs: []btc.TxInput{
{
Index: 0,
PreviousOutPointHash: chainhash.Hash([32]byte{ // Make go vet happy.
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf,
}).String(),
PreviousOutPointIndex: 1,
SignatureScript: []byte{
0x47, // OP_DATA_71
0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf,
0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5,
0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34,
0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31,
0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee,
0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f,
0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c,
0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e,
0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01,
0x41, // OP_DATA_65
0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78,
0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5,
0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39,
0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21,
0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee,
0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3,
0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95,
0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85,
0x0f, // 65-byte pubkey
},
},
},
TxOutputs: []btc.TxOutput{
{
Index: 0,
Value: 1000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04,
0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d,
0xad, 0xbe, 0x7e, 0x10,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass3a),
RequiredSigs: int64(numOfSigs3a),
Addresses: stringSliceFromAddresses(addresses3a),
},
{
Index: 1,
Value: 299000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1,
0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab,
0xb3, 0x40, 0x9c, 0xd9,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass3b),
RequiredSigs: int64(numOfSigs3b),
Addresses: stringSliceFromAddresses(addresses3b),
},
},
},
}
MockTxsMetaDataPostPublish = []btc.TxModelWithInsAndOuts{
{
CID: MockTrxCID1.String(),
MhKey: MockTrxMhKey1,
TxHash: MockBlock.Transactions[0].TxHash().String(),
Index: 0,
SegWit: MockBlock.Transactions[0].HasWitness(),
TxInputs: []btc.TxInput{
{
Index: 0,
SignatureScript: []byte{
0x04, 0x4c, 0x86, 0x04, 0x1b, 0x02, 0x06, 0x02,
},
PreviousOutPointHash: chainhash.Hash{}.String(),
PreviousOutPointIndex: 0xffffffff,
},
},
TxOutputs: []btc.TxOutput{
{
Value: 5000000000,
Index: 0,
PkScript: []byte{
0x41, // OP_DATA_65
0x04, 0x1b, 0x0e, 0x8c, 0x25, 0x67, 0xc1, 0x25,
0x36, 0xaa, 0x13, 0x35, 0x7b, 0x79, 0xa0, 0x73,
0xdc, 0x44, 0x44, 0xac, 0xb8, 0x3c, 0x4e, 0xc7,
0xa0, 0xe2, 0xf9, 0x9d, 0xd7, 0x45, 0x75, 0x16,
0xc5, 0x81, 0x72, 0x42, 0xda, 0x79, 0x69, 0x24,
0xca, 0x4e, 0x99, 0x94, 0x7d, 0x08, 0x7f, 0xed,
0xf9, 0xce, 0x46, 0x7c, 0xb9, 0xf7, 0xc6, 0x28,
0x70, 0x78, 0xf8, 0x01, 0xdf, 0x27, 0x6f, 0xdf,
0x84, // 65-byte signature
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass1),
RequiredSigs: int64(numOfSigs1),
Addresses: stringSliceFromAddresses(addresses1),
},
},
},
{
CID: MockTrxCID2.String(),
MhKey: MockTrxMhKey2,
TxHash: MockBlock.Transactions[1].TxHash().String(),
Index: 1,
SegWit: MockBlock.Transactions[1].HasWitness(),
TxInputs: []btc.TxInput{
{
Index: 0,
PreviousOutPointHash: chainhash.Hash([32]byte{ // Make go vet happy.
0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60,
0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac,
0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07,
0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87,
}).String(),
PreviousOutPointIndex: 0,
SignatureScript: []byte{
0x49, // OP_DATA_73
0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3,
0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6,
0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94,
0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58,
0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00,
0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62,
0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c,
0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60,
0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48,
0x01, // 73-byte signature
0x41, // OP_DATA_65
0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d,
0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38,
0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25,
0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e,
0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8,
0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd,
0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b,
0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3,
0xd3, // 65-byte pubkey
},
},
},
TxOutputs: []btc.TxOutput{
{
Index: 0,
Value: 556000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60,
0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e,
0xf7, 0xf5, 0x8b, 0x32,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass2a),
RequiredSigs: int64(numOfSigs2a),
Addresses: stringSliceFromAddresses(addresses2a),
},
{
Index: 1,
Value: 4444000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f,
0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass2b),
RequiredSigs: int64(numOfSigs2b),
Addresses: stringSliceFromAddresses(addresses2b),
},
},
},
{
CID: MockTrxCID3.String(),
MhKey: MockTrxMhKey3,
TxHash: MockBlock.Transactions[2].TxHash().String(),
Index: 2,
SegWit: MockBlock.Transactions[2].HasWitness(),
TxInputs: []btc.TxInput{
{
Index: 0,
PreviousOutPointHash: chainhash.Hash([32]byte{ // Make go vet happy.
0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d,
0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27,
0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65,
0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf,
}).String(),
PreviousOutPointIndex: 1,
SignatureScript: []byte{
0x47, // OP_DATA_71
0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf,
0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5,
0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34,
0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31,
0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee,
0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f,
0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c,
0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e,
0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01,
0x41, // OP_DATA_65
0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78,
0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5,
0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39,
0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21,
0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee,
0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3,
0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95,
0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85,
0x0f, // 65-byte pubkey
},
},
},
TxOutputs: []btc.TxOutput{
{
Index: 0,
Value: 1000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04,
0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d,
0xad, 0xbe, 0x7e, 0x10,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass3a),
RequiredSigs: int64(numOfSigs3a),
Addresses: stringSliceFromAddresses(addresses3a),
},
{
Index: 1,
Value: 299000000,
PkScript: []byte{
0x76, // OP_DUP
0xa9, // OP_HASH160
0x14, // OP_DATA_20
0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1,
0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab,
0xb3, 0x40, 0x9c, 0xd9,
0x88, // OP_EQUALVERIFY
0xac, // OP_CHECKSIG
},
ScriptClass: uint8(sClass3b),
RequiredSigs: int64(numOfSigs3b),
Addresses: stringSliceFromAddresses(addresses3b),
},
},
},
}
MockHeaderMetaData = btc.HeaderModel{
CID: MockHeaderCID.String(),
MhKey: MockHeaderMhKey,
ParentHash: MockBlock.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(MockBlockHeight)),
BlockHash: MockBlock.Header.BlockHash().String(),
Timestamp: MockBlock.Header.Timestamp.UnixNano(),
Bits: MockBlock.Header.Bits,
}
MockConvertedPayload = btc.ConvertedPayload{
BlockPayload: MockBlockPayload,
TxMetaData: MockTxsMetaData,
}
MockCIDPayload = btc.CIDPayload{
HeaderCID: MockHeaderMetaData,
TransactionCIDs: MockTxsMetaDataPostPublish,
}
)
func stringSliceFromAddresses(addrs []btcutil.Address) []string {
strs := make([]string, len(addrs))
for i, addr := range addrs {
strs[i] = addr.EncodeAddress()
}
return strs
}

View File

@ -1,82 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import "github.com/lib/pq"
// HeaderModel is the db model for btc.header_cids table
type HeaderModel struct {
ID int64 `db:"id"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Timestamp int64 `db:"timestamp"`
Bits uint32 `db:"bits"`
NodeID int64 `db:"node_id"`
TimesValidated int64 `db:"times_validated"`
}
// TxModel is the db model for btc.transaction_cids table
type TxModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
Index int64 `db:"index"`
TxHash string `db:"tx_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
SegWit bool `db:"segwit"`
WitnessHash string `db:"witness_hash"`
}
// TxModelWithInsAndOuts is the db model for btc.transaction_cids table that includes the children tx_input and tx_output tables
type TxModelWithInsAndOuts struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
Index int64 `db:"index"`
TxHash string `db:"tx_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
SegWit bool `db:"segwit"`
WitnessHash string `db:"witness_hash"`
TxInputs []TxInput
TxOutputs []TxOutput
}
// TxInput is the db model for btc.tx_inputs table
type TxInput struct {
ID int64 `db:"id"`
TxID int64 `db:"tx_id"`
Index int64 `db:"index"`
TxWitness []string `db:"witness"`
SignatureScript []byte `db:"sig_script"`
PreviousOutPointIndex uint32 `db:"outpoint_index"`
PreviousOutPointHash string `db:"outpoint_tx_hash"`
}
// TxOutput is the db model for btc.tx_outputs table
type TxOutput struct {
ID int64 `db:"id"`
TxID int64 `db:"tx_id"`
Index int64 `db:"index"`
Value int64 `db:"value"`
PkScript []byte `db:"pk_script"`
ScriptClass uint8 `db:"script_class"`
RequiredSigs int64 `db:"required_sigs"`
Addresses pq.StringArray `db:"addresses"`
}
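A minimal read-side sketch against these models, assuming *postgres.DB embeds sqlx.DB (as the db.Get/db.Select calls in the tests in this commit suggest); the query simply pulls the highest indexed header row.

package example

import (
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
)

// latestHeader scans the most recently indexed btc.header_cids row into a HeaderModel.
func latestHeader(db *postgres.DB) (btc.HeaderModel, error) {
	var header btc.HeaderModel
	pgStr := `SELECT * FROM btc.header_cids ORDER BY block_number DESC LIMIT 1`
	err := db.Get(&header, pgStr)
	return header, err
}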

View File

@ -1,76 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"fmt"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// PayloadFetcher satisfies the PayloadFetcher interface for bitcoin
type PayloadFetcher struct {
// PayloadFetcher is thread-safe as long as the underlying client is thread-safe, since it holds no other state
// http.Client is thread-safe
client *rpcclient.Client
}
// NewPayloadFetcher returns a new PayloadFetcher
func NewPayloadFetcher(c *rpcclient.ConnConfig) (*PayloadFetcher, error) {
client, err := rpcclient.New(c, nil)
if err != nil {
return nil, err
}
return &PayloadFetcher{
client: client,
}, nil
}
// FetchAt fetches the block payloads at the given block heights
func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]shared.RawChainData, error) {
blockPayloads := make([]shared.RawChainData, len(blockHeights))
for i, height := range blockHeights {
hash, err := fetcher.client.GetBlockHash(int64(height))
if err != nil {
return nil, fmt.Errorf("bitcoin PayloadFetcher GetBlockHash err at blockheight %d: %s", height, err.Error())
}
block, err := fetcher.client.GetBlock(hash)
if err != nil {
return nil, fmt.Errorf("bitcoin PayloadFetcher GetBlock err at blockheight %d: %s", height, err.Error())
}
blockPayloads[i] = BlockPayload{
BlockHeight: int64(height),
Header: &block.Header,
Txs: msgTxsToUtilTxs(block.Transactions),
}
}
return blockPayloads, nil
}
func msgTxsToUtilTxs(msgs []*wire.MsgTx) []*btcutil.Tx {
txs := make([]*btcutil.Tx, len(msgs))
for i, msg := range msgs {
tx := btcutil.NewTx(msg)
tx.SetIndex(i)
txs[i] = tx
}
return txs
}
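A minimal sketch of the fetcher above pulling a small, hypothetical range of block heights over RPC; the connection config is supplied by the caller and the heights are placeholders.

package example

import (
	"github.com/btcsuite/btcd/rpcclient"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

// fetchRange fetches block payloads for a few heights; each element is a btc.BlockPayload.
func fetchRange(connCfg *rpcclient.ConnConfig) ([]shared.RawChainData, error) {
	fetcher, err := btc.NewPayloadFetcher(connCfg)
	if err != nil {
		return nil, err
	}
	return fetcher.FetchAt([]uint64{600000, 600001, 600002})
}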

View File

@ -1,120 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"fmt"
"strconv"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// IPLDPublisher satisfies the IPLDPublisher interface for bitcoin
// It interfaces directly with the public.blocks table of PG-IPFS rather than going through an ipfs intermediary
// It publishes and indexes IPLDs together in a single sqlx.Tx
type IPLDPublisher struct {
indexer *CIDIndexer
}
// NewIPLDPublisher creates a pointer to a new btc IPLDPublisher which satisfies the IPLDPublisher interface
func NewIPLDPublisher(db *postgres.DB) *IPLDPublisher {
return &IPLDPublisher{
indexer: NewCIDIndexer(db),
}
}
// Publish publishes the IPLDs in an IPLDPayload to PG-IPFS and indexes the resulting CIDs in Postgres within a single tx
func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) error {
ipldPayload, ok := payload.(ConvertedPayload)
if !ok {
return fmt.Errorf("btc publisher expected payload type %T got %T", ConvertedPayload{}, payload)
}
// Generate the iplds
headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(ipldPayload.Header, ipldPayload.Txs)
if err != nil {
return err
}
// Begin new db tx
tx, err := pub.indexer.db.Beginx()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// Publish trie nodes
for _, node := range txTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return err
}
}
// Publish and index header
if err := shared.PublishIPLD(tx, headerNode); err != nil {
return err
}
header := HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Header.PrevBlock.String(),
BlockNumber: strconv.Itoa(int(ipldPayload.BlockPayload.BlockHeight)),
BlockHash: ipldPayload.Header.BlockHash().String(),
Timestamp: ipldPayload.Header.Timestamp.UnixNano(),
Bits: ipldPayload.Header.Bits,
}
headerID, err := pub.indexer.indexHeaderCID(tx, header)
if err != nil {
return err
}
// Publish and index txs
for i, txNode := range txNodes {
if err := shared.PublishIPLD(tx, txNode); err != nil {
return err
}
txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String()
txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid())
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil {
return err
}
for _, input := range txModel.TxInputs {
if err := pub.indexer.indexTxInput(tx, input, txID); err != nil {
return err
}
}
for _, output := range txModel.TxOutputs {
if err := pub.indexer.indexTxOutput(tx, output, txID); err != nil {
return err
}
}
}
return err
}
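A minimal sketch of calling the publisher above, assuming an open *postgres.DB and a btc.ConvertedPayload produced upstream by the payload converter (not shown in this section).

package example

import (
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
)

// publishBlock publishes and indexes the header and transaction IPLDs for one block.
func publishBlock(db *postgres.DB, payload btc.ConvertedPayload) error {
	publisher := btc.NewIPLDPublisher(db)
	return publisher.Publish(payload)
}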

View File

@ -1,120 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc_test
import (
"bytes"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var _ = Describe("PublishAndIndexer", func() {
var (
db *postgres.DB
err error
repo *btc.IPLDPublisher
ipfsPgGet = `SELECT data FROM public.blocks
WHERE key = $1`
)
BeforeEach(func() {
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = btc.NewIPLDPublisher(db)
})
AfterEach(func() {
btc.TearDownDB(db)
})
Describe("Publish", func() {
It("Published and indexes header and transaction IPLDs in a single tx", func() {
err = repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT * FROM btc.header_cids
WHERE block_number = $1`
// check header was properly indexed
buf := bytes.NewBuffer(make([]byte, 0, 80))
err = mocks.MockBlock.Header.Serialize(buf)
Expect(err).ToNot(HaveOccurred())
headerBytes := buf.Bytes()
c, _ := ipld.RawdataToCid(ipld.MBitcoinHeader, headerBytes, multihash.DBL_SHA2_256)
header := new(btc.HeaderModel)
err = db.Get(header, pgStr, mocks.MockHeaderMetaData.BlockNumber)
Expect(err).ToNot(HaveOccurred())
Expect(header.CID).To(Equal(c.String()))
Expect(header.BlockNumber).To(Equal(mocks.MockHeaderMetaData.BlockNumber))
Expect(header.Bits).To(Equal(mocks.MockHeaderMetaData.Bits))
Expect(header.Timestamp).To(Equal(mocks.MockHeaderMetaData.Timestamp))
Expect(header.BlockHash).To(Equal(mocks.MockHeaderMetaData.BlockHash))
Expect(header.ParentHash).To(Equal(mocks.MockHeaderMetaData.ParentHash))
dc, err := cid.Decode(header.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(headerBytes))
// check that txs were properly indexed
trxs := make([]btc.TxModel, 0)
pgStr = `SELECT transaction_cids.id, transaction_cids.header_id, transaction_cids.index,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.segwit, transaction_cids.witness_hash
FROM btc.transaction_cids INNER JOIN btc.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&trxs, pgStr, mocks.MockHeaderMetaData.BlockNumber)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(3))
txData := make([][]byte, len(mocks.MockTransactions))
txCIDs := make([]string, len(mocks.MockTransactions))
for i, m := range mocks.MockTransactions {
buf := bytes.NewBuffer(make([]byte, 0))
err = m.MsgTx().Serialize(buf)
Expect(err).ToNot(HaveOccurred())
tx := buf.Bytes()
txData[i] = tx
c, _ := ipld.RawdataToCid(ipld.MBitcoinTx, tx, multihash.DBL_SHA2_256)
txCIDs[i] = c.String()
}
for _, tx := range trxs {
Expect(tx.SegWit).To(Equal(false))
Expect(tx.HeaderID).To(Equal(header.ID))
Expect(tx.WitnessHash).To(Equal(""))
Expect(tx.CID).To(Equal(txCIDs[tx.Index]))
Expect(tx.TxHash).To(Equal(mocks.MockBlock.Transactions[tx.Index].TxHash().String()))
dc, err := cid.Decode(tx.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(txData[tx.Index]))
}
})
})
})

View File

@ -1,86 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
const (
PayloadChanBufferSize = 20000 // the max btc payload channel buffer size
)
// PayloadStreamer satisfies the PayloadStreamer interface for bitcoin
type PayloadStreamer struct {
Config *rpcclient.ConnConfig
}
// NewPayloadStreamer creates a pointer to a new PayloadStreamer which satisfies the PayloadStreamer interface for bitcoin
func NewPayloadStreamer(clientConfig *rpcclient.ConnConfig) *PayloadStreamer {
return &PayloadStreamer{
Config: clientConfig,
}
}
// Stream is the main loop for subscribing to data from the btc block notifications
// Satisfies the shared.PayloadStreamer interface
func (ps *PayloadStreamer) Stream(payloadChan chan shared.RawChainData) (shared.ClientSubscription, error) {
logrus.Info("streaming block payloads from btc")
blockNotificationHandler := rpcclient.NotificationHandlers{
// Notification handler for block connections, forwards new block data to the payloadChan
OnFilteredBlockConnected: func(height int32, header *wire.BlockHeader, txs []*btcutil.Tx) {
payloadChan <- BlockPayload{
BlockHeight: int64(height),
Header: header,
Txs: txs,
}
},
}
// Create a new client, and connect to btc ws server
client, err := rpcclient.New(ps.Config, &blockNotificationHandler)
if err != nil {
return nil, err
}
// Register for block connect notifications.
if err := client.NotifyBlocks(); err != nil {
return nil, err
}
client.WaitForShutdown()
return &ClientSubscription{client: client}, nil
}
// ClientSubscription is a wrapper around the underlying btcd rpc client
// to fit the shared.ClientSubscription interface
type ClientSubscription struct {
client *rpcclient.Client
}
// Unsubscribe satisfies the rpc.Subscription interface
func (bcs *ClientSubscription) Unsubscribe() {
bcs.client.Shutdown()
}
// Err() satisfies the rpc.Subscription interface with a dummy err channel
func (bcs *ClientSubscription) Err() <-chan error {
errChan := make(chan error)
return errChan
}
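A minimal sketch of the websocket streamer above, assuming a btcd node with websocket notifications enabled; the endpoint and credentials are placeholders. Because Stream blocks in client.WaitForShutdown until the client shuts down, the sketch runs it in its own goroutine.

package main

import (
	"log"

	"github.com/btcsuite/btcd/rpcclient"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/btc"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

func main() {
	connCfg := &rpcclient.ConnConfig{
		Host:       "127.0.0.1:8334", // placeholder btcd websocket RPC address
		Endpoint:   "ws",
		User:       "rpcuser", // placeholder credentials
		Pass:       "rpcpass",
		DisableTLS: true,
	}
	streamer := btc.NewPayloadStreamer(connCfg)
	payloadChan := make(chan shared.RawChainData, btc.PayloadChanBufferSize)
	go func() {
		if _, err := streamer.Stream(payloadChan); err != nil {
			log.Fatal(err)
		}
	}()
	for payload := range payloadChan {
		if blockPayload, ok := payload.(btc.BlockPayload); ok {
			log.Printf("block connected at height %d", blockPayload.BlockHeight)
		}
	}
}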

View File

@ -1,115 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"errors"
"math/big"
"github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// SubscriptionSettings is used by a subscriber to specify what bitcoin data to stream from the watcher
type SubscriptionSettings struct {
BackFill bool
BackFillOnly bool
Start *big.Int
End *big.Int // set to 0 or a negative value to have no ending block
HeaderFilter HeaderFilter
TxFilter TxFilter
}
// HeaderFilter contains filter settings for headers
type HeaderFilter struct {
Off bool
}
// TxFilter contains filter settings for txs
type TxFilter struct {
Off bool
Segwit bool // allow filtering for segwit trxs
WitnessHashes []string // allow filtering for specific witness hashes
Indexes []int64 // allow filtering for specific transaction indexes (e.g. 0 for coinbase transactions)
PkScriptClasses []uint8 // allow filtering for txs that have at least one tx output with the specified pkscript class
MultiSig bool // allow filtering for txs that have at least one tx output that requires more than one signature
Addresses []string // allow filtering for txs that have at least one tx output with at least one of the provided addresses
}
// NewBtcSubscriptionConfig is used to initialize a SubscriptionSettings struct from viper config/env variables
func NewBtcSubscriptionConfig() (*SubscriptionSettings, error) {
sc := new(SubscriptionSettings)
// The fields below default to false, which means we do not backfill by default
sc.BackFill = viper.GetBool("watcher.btcSubscription.historicalData")
sc.BackFillOnly = viper.GetBool("watcher.btcSubscription.historicalDataOnly")
// Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely
sc.Start = big.NewInt(viper.GetInt64("watcher.btcSubscription.startingBlock"))
sc.End = big.NewInt(viper.GetInt64("watcher.btcSubscription.endingBlock"))
// The field below defaults to false, which means we get all headers by default
sc.HeaderFilter = HeaderFilter{
Off: viper.GetBool("watcher.btcSubscription.headerFilter.off"),
}
// The fields below default to false and to empty slices,
// which means we get all transactions by default
pksc := viper.Get("watcher.btcSubscription.txFilter.pkScriptClass")
pkScriptClasses, ok := pksc.([]uint8)
if !ok {
return nil, errors.New("watcher.btcSubscription.txFilter.pkScriptClass needs to be an array of uint8s")
}
is := viper.Get("watcher.btcSubscription.txFilter.indexes")
indexes, ok := is.([]int64)
if !ok {
return nil, errors.New("watcher.btcSubscription.txFilter.indexes needs to be an array of int64s")
}
sc.TxFilter = TxFilter{
Off: viper.GetBool("watcher.btcSubscription.txFilter.off"),
Segwit: viper.GetBool("watcher.btcSubscription.txFilter.segwit"),
WitnessHashes: viper.GetStringSlice("watcher.btcSubscription.txFilter.witnessHashes"),
PkScriptClasses: pkScriptClasses,
Indexes: indexes,
MultiSig: viper.GetBool("watcher.btcSubscription.txFilter.multiSig"),
Addresses: viper.GetStringSlice("watcher.btcSubscription.txFilter.addresses"),
}
return sc, nil
}
// StartingBlock satisfies the SubscriptionSettings() interface
func (sc *SubscriptionSettings) StartingBlock() *big.Int {
return sc.Start
}
// EndingBlock satisfies the SubscriptionSettings() interface
func (sc *SubscriptionSettings) EndingBlock() *big.Int {
return sc.End
}
// HistoricalData satisfies the SubscriptionSettings() interface
func (sc *SubscriptionSettings) HistoricalData() bool {
return sc.BackFill
}
// HistoricalDataOnly satisfies the SubscriptionSettings() interface
func (sc *SubscriptionSettings) HistoricalDataOnly() bool {
return sc.BackFillOnly
}
// ChainType satisfies the SubscriptionSettings() interface
func (sc *SubscriptionSettings) ChainType() shared.ChainType {
return shared.Bitcoin
}
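// Hypothetical usage sketch (not part of the deleted file above): populating
// the viper keys read by NewBtcSubscriptionConfig in-process rather than from
// a TOML file. The key names mirror the Get calls above; the values are
// placeholders. Note that pkScriptClass and indexes must be typed slices to
// satisfy the type assertions above.
func exampleBtcSubscriptionConfig() (*SubscriptionSettings, error) {
    viper.Set("watcher.btcSubscription.historicalData", true)
    viper.Set("watcher.btcSubscription.startingBlock", int64(0))
    viper.Set("watcher.btcSubscription.endingBlock", int64(0)) // 0 or negative = no ending block
    viper.Set("watcher.btcSubscription.headerFilter.off", false)
    viper.Set("watcher.btcSubscription.txFilter.segwit", true)
    viper.Set("watcher.btcSubscription.txFilter.pkScriptClass", []uint8{0}) // placeholder pkscript class
    viper.Set("watcher.btcSubscription.txFilter.indexes", []int64{0})       // 0 = coinbase transactions
    return NewBtcSubscriptionConfig()
}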

View File

@ -1,43 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
)
// TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(db *postgres.DB) {
tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM btc.header_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM btc.transaction_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM btc.tx_inputs`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM btc.tx_outputs`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM blocks`)
Expect(err).NotTo(HaveOccurred())
err = tx.Commit()
Expect(err).NotTo(HaveOccurred())
}

View File

@ -1,76 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package btc
import (
"math/big"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
)
// BlockPayload packages the block and tx data received from block connection notifications
type BlockPayload struct {
BlockHeight int64
Header *wire.BlockHeader
Txs []*btcutil.Tx
}
// ConvertedPayload is a custom type which packages raw BTC data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
type ConvertedPayload struct {
BlockPayload
TxMetaData []TxModelWithInsAndOuts
}
// Height satisfies the StreamedIPLDs interface
func (cp ConvertedPayload) Height() int64 {
return cp.BlockPayload.BlockHeight
}
// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
// Returned by IPLDPublisher
// Passed to CIDIndexer
type CIDPayload struct {
HeaderCID HeaderModel
TransactionCIDs []TxModelWithInsAndOuts
}
// CIDWrapper is used to direct fetching of IPLDs from IPFS
// Returned by CIDRetriever
// Passed to IPLDFetcher
type CIDWrapper struct {
BlockNumber *big.Int
Header HeaderModel
Transactions []TxModel
}
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
// Returned by IPLDFetcher and ResponseFilterer
type IPLDs struct {
BlockNumber *big.Int
Header ipfs.BlockModel
Transactions []ipfs.BlockModel
}
// Height satisfies the StreamedIPLDs interface
func (i IPLDs) Height() int64 {
return i.BlockNumber.Int64()
}
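// Hypothetical illustration (not part of the deleted file above) of how these
// types fit together: a BlockPayload from the streamer is embedded in a
// ConvertedPayload, whose Height method exposes the block height to the
// shared pipeline. The header, txs, and height here are placeholder inputs.
func exampleTypeFlow(header *wire.BlockHeader, txs []*btcutil.Tx) int64 {
    raw := BlockPayload{
        BlockHeight: 645000, // placeholder height
        Header:      header,
        Txs:         txs,
    }
    converted := ConvertedPayload{
        BlockPayload: raw,
        TxMetaData:   make([]TxModelWithInsAndOuts, 0, len(txs)),
    }
    return converted.Height()
}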

View File

@ -1,29 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestConfig(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Config Suite")
}

View File

@ -1,48 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config_test
import (
"bytes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/spf13/viper"
)
var vulcanizeConfig = []byte(`
[database]
name = "dbname"
hostname = "localhost"
port = 5432
`)
var _ = Describe("Loading the config", func() {
It("reads the private config using the environment", func() {
viper.SetConfigName("config")
viper.AddConfigPath("$GOPATH/src/github.com/vulcanize/ipfs-blockchain-watcher/environments/")
testConfig := viper.New()
testConfig.SetConfigType("toml")
err := testConfig.ReadConfig(bytes.NewBuffer(vulcanizeConfig))
Expect(err).To(BeNil())
Expect(testConfig.Get("database.hostname")).To(Equal("localhost"))
Expect(testConfig.Get("database.name")).To(Equal("dbname"))
Expect(testConfig.Get("database.port")).To(Equal(int64(5432)))
})
})

View File

@ -1,78 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
import (
"fmt"
"github.com/spf13/viper"
)
// Env variables
const (
DATABASE_NAME = "DATABASE_NAME"
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
DATABASE_PORT = "DATABASE_PORT"
DATABASE_USER = "DATABASE_USER"
DATABASE_PASSWORD = "DATABASE_PASSWORD"
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
)
type Database struct {
Hostname string
Name string
User string
Password string
Port int
MaxIdle int
MaxOpen int
MaxLifetime int
}
func DbConnectionString(dbConfig Database) string {
if len(dbConfig.User) > 0 && len(dbConfig.Password) > 0 {
return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
dbConfig.User, dbConfig.Password, dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
}
if len(dbConfig.User) > 0 && len(dbConfig.Password) == 0 {
return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
dbConfig.User, dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
}
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
}
func (d *Database) Init() {
viper.BindEnv("database.name", DATABASE_NAME)
viper.BindEnv("database.hostname", DATABASE_HOSTNAME)
viper.BindEnv("database.port", DATABASE_PORT)
viper.BindEnv("database.user", DATABASE_USER)
viper.BindEnv("database.password", DATABASE_PASSWORD)
viper.BindEnv("database.maxIdle", DATABASE_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.maxOpen", DATABASE_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.maxLifetime", DATABASE_MAX_CONN_LIFETIME)
d.Name = viper.GetString("database.name")
d.Hostname = viper.GetString("database.hostname")
d.Port = viper.GetInt("database.port")
d.User = viper.GetString("database.user")
d.Password = viper.GetString("database.password")
d.MaxIdle = viper.GetInt("database.maxIdle")
d.MaxOpen = viper.GetInt("database.maxOpen")
d.MaxLifetime = viper.GetInt("database.maxLifetime")
}
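// Hypothetical usage sketch (not part of the deleted file above): building a
// connection string from a populated Database struct. The values are
// placeholders; with both a user and password set, DbConnectionString takes
// the postgresql://user:password@host:port/name?sslmode=disable form.
func exampleConnectionString() string {
    db := Database{
        Name:     "vulcanize_public", // placeholder database name
        Hostname: "localhost",
        Port:     5432,
        User:     "postgres",
        Password: "secret",
    }
    // yields "postgresql://postgres:secret@localhost:5432/vulcanize_public?sslmode=disable"
    return DbConnectionString(db)
}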

View File

@ -1,356 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// Cleaner satisfies the shared.Cleaner interface for ethereum
type Cleaner struct {
db *postgres.DB
}
// NewCleaner returns a new Cleaner struct that satisfies the shared.Cleaner interface
func NewCleaner(db *postgres.DB) *Cleaner {
return &Cleaner{
db: db,
}
}
// ResetValidation resets the validation level to 0 to enable revalidation
func (c *Cleaner) ResetValidation(rngs [][2]uint64) error {
tx, err := c.db.Beginx()
if err != nil {
return err
}
for _, rng := range rngs {
logrus.Infof("eth db cleaner resetting validation level to 0 for block range %d to %d", rng[0], rng[1])
pgStr := `UPDATE eth.header_cids
SET times_validated = 0
WHERE block_number BETWEEN $1 AND $2`
if _, err := tx.Exec(pgStr, rng[0], rng[1]); err != nil {
shared.Rollback(tx)
return err
}
}
return tx.Commit()
}
// Clean removes the specified data from the db within the provided block range
func (c *Cleaner) Clean(rngs [][2]uint64, t shared.DataType) error {
tx, err := c.db.Beginx()
if err != nil {
return err
}
for _, rng := range rngs {
logrus.Infof("eth db cleaner cleaning up block range %d to %d", rng[0], rng[1])
if err := c.clean(tx, rng, t); err != nil {
shared.Rollback(tx)
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
logrus.Infof("eth db cleaner vacuum analyzing cleaned tables to free up space from deleted rows")
return c.vacuumAnalyze(t)
}
func (c *Cleaner) clean(tx *sqlx.Tx, rng [2]uint64, t shared.DataType) error {
switch t {
case shared.Full, shared.Headers:
return c.cleanFull(tx, rng)
case shared.Uncles:
if err := c.cleanUncleIPLDs(tx, rng); err != nil {
return err
}
return c.cleanUncleMetaData(tx, rng)
case shared.Transactions:
if err := c.cleanReceiptIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
return err
}
return c.cleanTransactionMetaData(tx, rng)
case shared.Receipts:
if err := c.cleanReceiptIPLDs(tx, rng); err != nil {
return err
}
return c.cleanReceiptMetaData(tx, rng)
case shared.State:
if err := c.cleanStorageIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanStateIPLDs(tx, rng); err != nil {
return err
}
return c.cleanStateMetaData(tx, rng)
case shared.Storage:
if err := c.cleanStorageIPLDs(tx, rng); err != nil {
return err
}
return c.cleanStorageMetaData(tx, rng)
default:
return fmt.Errorf("eth cleaner unrecognized type: %s", t.String())
}
}
func (c *Cleaner) vacuumAnalyze(t shared.DataType) error {
switch t {
case shared.Full, shared.Headers:
return c.vacuumFull()
case shared.Uncles:
if err := c.vacuumUncles(); err != nil {
return err
}
case shared.Transactions:
if err := c.vacuumTxs(); err != nil {
return err
}
if err := c.vacuumRcts(); err != nil {
return err
}
case shared.Receipts:
if err := c.vacuumRcts(); err != nil {
return err
}
case shared.State:
if err := c.vacuumState(); err != nil {
return err
}
if err := c.vacuumAccounts(); err != nil {
return err
}
if err := c.vacuumStorage(); err != nil {
return err
}
case shared.Storage:
if err := c.vacuumStorage(); err != nil {
return err
}
default:
return fmt.Errorf("eth cleaner unrecognized type: %s", t.String())
}
return c.vacuumIPLDs()
}
func (c *Cleaner) vacuumFull() error {
if err := c.vacuumHeaders(); err != nil {
return err
}
if err := c.vacuumUncles(); err != nil {
return err
}
if err := c.vacuumTxs(); err != nil {
return err
}
if err := c.vacuumRcts(); err != nil {
return err
}
if err := c.vacuumState(); err != nil {
return err
}
if err := c.vacuumAccounts(); err != nil {
return err
}
return c.vacuumStorage()
}
func (c *Cleaner) vacuumHeaders() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.header_cids`)
return err
}
func (c *Cleaner) vacuumUncles() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.uncle_cids`)
return err
}
func (c *Cleaner) vacuumTxs() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.transaction_cids`)
return err
}
func (c *Cleaner) vacuumRcts() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.receipt_cids`)
return err
}
func (c *Cleaner) vacuumState() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.state_cids`)
return err
}
func (c *Cleaner) vacuumAccounts() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.state_accounts`)
return err
}
func (c *Cleaner) vacuumStorage() error {
_, err := c.db.Exec(`VACUUM ANALYZE eth.storage_cids`)
return err
}
func (c *Cleaner) vacuumIPLDs() error {
_, err := c.db.Exec(`VACUUM ANALYZE public.blocks`)
return err
}
func (c *Cleaner) cleanFull(tx *sqlx.Tx, rng [2]uint64) error {
if err := c.cleanStorageIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanStateIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanReceiptIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanTransactionIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanUncleIPLDs(tx, rng); err != nil {
return err
}
if err := c.cleanHeaderIPLDs(tx, rng); err != nil {
return err
}
return c.cleanHeaderMetaData(tx, rng)
}
func (c *Cleaner) cleanStorageIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING eth.storage_cids B, eth.state_cids C, eth.header_cids D
WHERE A.key = B.mh_key
AND B.state_id = C.id
AND C.header_id = D.id
AND D.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanStorageMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM eth.storage_cids A
USING eth.state_cids B, eth.header_cids C
WHERE A.state_id = B.id
AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanStateIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING eth.state_cids B, eth.header_cids C
WHERE A.key = B.mh_key
AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanStateMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM eth.state_cids A
USING eth.header_cids B
WHERE A.header_id = B.id
AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanReceiptIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING eth.receipt_cids B, eth.transaction_cids C, eth.header_cids D
WHERE A.key = B.mh_key
AND B.tx_id = C.id
AND C.header_id = D.id
AND D.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanReceiptMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM eth.receipt_cids A
USING eth.transaction_cids B, eth.header_cids C
WHERE A.tx_id = B.id
AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanTransactionIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING eth.transaction_cids B, eth.header_cids C
WHERE A.key = B.mh_key
AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanTransactionMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM eth.transaction_cids A
USING eth.header_cids B
WHERE A.header_id = B.id
AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanUncleIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING eth.uncle_cids B, eth.header_cids C
WHERE A.key = B.mh_key
AND B.header_id = C.id
AND C.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanUncleMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM eth.uncle_cids A
USING eth.header_cids B
WHERE A.header_id = B.id
AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanHeaderIPLDs(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM public.blocks A
USING eth.header_cids B
WHERE A.key = B.mh_key
AND B.block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
func (c *Cleaner) cleanHeaderMetaData(tx *sqlx.Tx, rng [2]uint64) error {
pgStr := `DELETE FROM eth.header_cids
WHERE block_number BETWEEN $1 AND $2`
_, err := tx.Exec(pgStr, rng[0], rng[1])
return err
}
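// Hypothetical usage sketch (not part of the deleted file above): removing all
// indexed eth data for blocks 0 through 100000 and then resetting the
// validation level so the range can be revalidated on a subsequent pass.
// Assumes an already-configured *postgres.DB; the range and data type are
// placeholders.
func exampleCleanerUsage(db *postgres.DB) error {
    cleaner := NewCleaner(db)
    rngs := [][2]uint64{{0, 100000}}
    if err := cleaner.Clean(rngs, shared.Full); err != nil {
        return err
    }
    return cleaner.ResetValidation(rngs)
}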

View File

@ -1,698 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var (
// Block 0
// header variables
blockHash1 = crypto.Keccak256Hash([]byte{00, 02})
blockNumber1 = big.NewInt(0)
headerCID1 = shared.TestCID([]byte("mockHeader1CID"))
headerMhKey1 = shared.MultihashKeyFromCID(headerCID1)
parentHash = crypto.Keccak256Hash([]byte{00, 01})
totalDifficulty = "50000000000000000000"
reward = "5000000000000000000"
headerModel = eth.HeaderModel{
BlockHash: blockHash1.String(),
BlockNumber: blockNumber1.String(),
CID: headerCID1.String(),
MhKey: headerMhKey1,
ParentHash: parentHash.String(),
TotalDifficulty: totalDifficulty,
Reward: reward,
}
// tx variables
tx1CID = shared.TestCID([]byte("mockTx1CID"))
tx1MhKey = shared.MultihashKeyFromCID(tx1CID)
tx2CID = shared.TestCID([]byte("mockTx2CID"))
tx2MhKey = shared.MultihashKeyFromCID(tx2CID)
tx1Hash = crypto.Keccak256Hash([]byte{01, 01})
tx2Hash = crypto.Keccak256Hash([]byte{01, 02})
txSrc = common.HexToAddress("0x010a")
txDst = common.HexToAddress("0x020a")
txModels1 = []eth.TxModel{
{
CID: tx1CID.String(),
MhKey: tx1MhKey,
TxHash: tx1Hash.String(),
Index: 0,
},
{
CID: tx2CID.String(),
MhKey: tx2MhKey,
TxHash: tx2Hash.String(),
Index: 1,
},
}
// uncle variables
uncleCID = shared.TestCID([]byte("mockUncle1CID"))
uncleMhKey = shared.MultihashKeyFromCID(uncleCID)
uncleHash = crypto.Keccak256Hash([]byte{02, 02})
uncleParentHash = crypto.Keccak256Hash([]byte{02, 01})
uncleReward = "1000000000000000000"
uncleModels1 = []eth.UncleModel{
{
CID: uncleCID.String(),
MhKey: uncleMhKey,
Reward: uncleReward,
BlockHash: uncleHash.String(),
ParentHash: uncleParentHash.String(),
},
}
// receipt variables
rct1CID = shared.TestCID([]byte("mockRct1CID"))
rct1MhKey = shared.MultihashKeyFromCID(rct1CID)
rct2CID = shared.TestCID([]byte("mockRct2CID"))
rct2MhKey = shared.MultihashKeyFromCID(rct2CID)
rct1Contract = common.Address{}
rct2Contract = common.HexToAddress("0x010c")
receiptModels1 = map[common.Hash]eth.ReceiptModel{
tx1Hash: {
CID: rct1CID.String(),
MhKey: rct1MhKey,
ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(),
},
tx2Hash: {
CID: rct2CID.String(),
MhKey: rct2MhKey,
ContractHash: crypto.Keccak256Hash(rct2Contract.Bytes()).String(),
},
}
// state variables
state1CID1 = shared.TestCID([]byte("mockState1CID1"))
state1MhKey1 = shared.MultihashKeyFromCID(state1CID1)
state1Path = []byte{'\x01'}
state1Key = crypto.Keccak256Hash(txSrc.Bytes())
state2CID1 = shared.TestCID([]byte("mockState2CID1"))
state2MhKey1 = shared.MultihashKeyFromCID(state2CID1)
state2Path = []byte{'\x02'}
state2Key = crypto.Keccak256Hash(txDst.Bytes())
stateModels1 = []eth.StateNodeModel{
{
CID: state1CID1.String(),
MhKey: state1MhKey1,
Path: state1Path,
NodeType: 2,
StateKey: state1Key.String(),
},
{
CID: state2CID1.String(),
MhKey: state2MhKey1,
Path: state2Path,
NodeType: 2,
StateKey: state2Key.String(),
},
}
// storage variables
storageCID = shared.TestCID([]byte("mockStorageCID1"))
storageMhKey = shared.MultihashKeyFromCID(storageCID)
storagePath = []byte{'\x01'}
storageKey = crypto.Keccak256Hash(common.Hex2Bytes("0x0000000000000000000000000000000000000000000000000000000000000000"))
storageModels1 = map[string][]eth.StorageNodeModel{
common.Bytes2Hex(state1Path): {
{
CID: storageCID.String(),
MhKey: storageMhKey,
StorageKey: storageKey.String(),
Path: storagePath,
NodeType: 2,
},
},
}
mockCIDPayload1 = &eth.CIDPayload{
HeaderCID: headerModel,
UncleCIDs: uncleModels1,
TransactionCIDs: txModels1,
ReceiptCIDs: receiptModels1,
StateNodeCIDs: stateModels1,
StorageNodeCIDs: storageModels1,
}
// Block 1
// header variables
blockHash2 = crypto.Keccak256Hash([]byte{00, 03})
blockNumber2 = big.NewInt(1)
headerCID2 = shared.TestCID([]byte("mockHeaderCID2"))
headerMhKey2 = shared.MultihashKeyFromCID(headerCID2)
headerModel2 = eth.HeaderModel{
BlockHash: blockHash2.String(),
BlockNumber: blockNumber2.String(),
CID: headerCID2.String(),
MhKey: headerMhKey2,
ParentHash: blockHash1.String(),
TotalDifficulty: totalDifficulty,
Reward: reward,
}
// tx variables
tx3CID = shared.TestCID([]byte("mockTx3CID"))
tx3MhKey = shared.MultihashKeyFromCID(tx3CID)
tx3Hash = crypto.Keccak256Hash([]byte{01, 03})
txModels2 = []eth.TxModel{
{
CID: tx3CID.String(),
MhKey: tx3MhKey,
TxHash: tx3Hash.String(),
Index: 0,
},
}
// receipt variables
rct3CID = shared.TestCID([]byte("mockRct3CID"))
rct3MhKey = shared.MultihashKeyFromCID(rct3CID)
receiptModels2 = map[common.Hash]eth.ReceiptModel{
tx3Hash: {
CID: rct3CID.String(),
MhKey: rct3MhKey,
ContractHash: crypto.Keccak256Hash(rct1Contract.Bytes()).String(),
},
}
// state variables
state1CID2 = shared.TestCID([]byte("mockState1CID2"))
state1MhKey2 = shared.MultihashKeyFromCID(state1CID2)
stateModels2 = []eth.StateNodeModel{
{
CID: state1CID2.String(),
MhKey: state1MhKey2,
Path: state1Path,
NodeType: 2,
StateKey: state1Key.String(),
},
}
mockCIDPayload2 = &eth.CIDPayload{
HeaderCID: headerModel2,
TransactionCIDs: txModels2,
ReceiptCIDs: receiptModels2,
StateNodeCIDs: stateModels2,
}
rngs = [][2]uint64{{0, 1}}
mhKeys = []string{
headerMhKey1,
headerMhKey2,
uncleMhKey,
tx1MhKey,
tx2MhKey,
tx3MhKey,
rct1MhKey,
rct2MhKey,
rct3MhKey,
state1MhKey1,
state2MhKey1,
state1MhKey2,
storageMhKey,
}
mockData = []byte{'\x01'}
)
var _ = Describe("Cleaner", func() {
var (
db *postgres.DB
repo *eth.CIDIndexer
cleaner *eth.Cleaner
)
BeforeEach(func() {
var err error
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = eth.NewCIDIndexer(db)
cleaner = eth.NewCleaner(db)
})
Describe("Clean", func() {
BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var startingIPFSBlocksCount int
pgStr := `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&startingIPFSBlocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingStorageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&startingStorageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingStateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&startingStateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingReceiptCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&startingReceiptCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingTxCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&startingTxCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingUncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&startingUncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var startingHeaderCount int
pgStr = `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&startingHeaderCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(startingIPFSBlocksCount).To(Equal(13))
Expect(startingStorageCount).To(Equal(1))
Expect(startingStateCount).To(Equal(3))
Expect(startingReceiptCount).To(Equal(3))
Expect(startingTxCount).To(Equal(3))
Expect(startingUncleCount).To(Equal(1))
Expect(startingHeaderCount).To(Equal(2))
})
AfterEach(func() {
eth.TearDownDB(db)
})
It("Cleans everything", func() {
err := cleaner.Clean(rngs, shared.Full)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
var headerCount int
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(0))
Expect(uncleCount).To(Equal(0))
Expect(txCount).To(Equal(0))
Expect(rctCount).To(Equal(0))
Expect(stateCount).To(Equal(0))
Expect(storageCount).To(Equal(0))
Expect(blocksCount).To(Equal(0))
})
It("Cleans headers and all linked data (same as full)", func() {
err := cleaner.Clean(rngs, shared.Headers)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(0))
Expect(uncleCount).To(Equal(0))
Expect(txCount).To(Equal(0))
Expect(rctCount).To(Equal(0))
Expect(stateCount).To(Equal(0))
Expect(storageCount).To(Equal(0))
Expect(blocksCount).To(Equal(0))
})
It("Cleans uncles", func() {
err := cleaner.Clean(rngs, shared.Uncles)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(2))
Expect(uncleCount).To(Equal(0))
Expect(txCount).To(Equal(3))
Expect(rctCount).To(Equal(3))
Expect(stateCount).To(Equal(3))
Expect(storageCount).To(Equal(1))
Expect(blocksCount).To(Equal(12))
})
It("Cleans transactions and linked receipts", func() {
err := cleaner.Clean(rngs, shared.Transactions)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(2))
Expect(uncleCount).To(Equal(1))
Expect(txCount).To(Equal(0))
Expect(rctCount).To(Equal(0))
Expect(stateCount).To(Equal(3))
Expect(storageCount).To(Equal(1))
Expect(blocksCount).To(Equal(7))
})
It("Cleans receipts", func() {
err := cleaner.Clean(rngs, shared.Receipts)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(2))
Expect(uncleCount).To(Equal(1))
Expect(txCount).To(Equal(3))
Expect(rctCount).To(Equal(0))
Expect(stateCount).To(Equal(3))
Expect(storageCount).To(Equal(1))
Expect(blocksCount).To(Equal(10))
})
It("Cleans state and linked storage", func() {
err := cleaner.Clean(rngs, shared.State)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(2))
Expect(uncleCount).To(Equal(1))
Expect(txCount).To(Equal(3))
Expect(rctCount).To(Equal(3))
Expect(stateCount).To(Equal(0))
Expect(storageCount).To(Equal(0))
Expect(blocksCount).To(Equal(9))
})
It("Cleans storage", func() {
err := cleaner.Clean(rngs, shared.Storage)
Expect(err).ToNot(HaveOccurred())
tx, err := db.Beginx()
Expect(err).ToNot(HaveOccurred())
var headerCount int
pgStr := `SELECT COUNT(*) FROM eth.header_cids`
err = tx.Get(&headerCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var uncleCount int
pgStr = `SELECT COUNT(*) FROM eth.uncle_cids`
err = tx.Get(&uncleCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var txCount int
pgStr = `SELECT COUNT(*) FROM eth.transaction_cids`
err = tx.Get(&txCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var rctCount int
pgStr = `SELECT COUNT(*) FROM eth.receipt_cids`
err = tx.Get(&rctCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var stateCount int
pgStr = `SELECT COUNT(*) FROM eth.state_cids`
err = tx.Get(&stateCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var storageCount int
pgStr = `SELECT COUNT(*) FROM eth.storage_cids`
err = tx.Get(&storageCount, pgStr)
Expect(err).ToNot(HaveOccurred())
var blocksCount int
pgStr = `SELECT COUNT(*) FROM public.blocks`
err = tx.Get(&blocksCount, pgStr)
Expect(err).ToNot(HaveOccurred())
err = tx.Commit()
Expect(err).ToNot(HaveOccurred())
Expect(headerCount).To(Equal(2))
Expect(uncleCount).To(Equal(1))
Expect(txCount).To(Equal(3))
Expect(rctCount).To(Equal(3))
Expect(stateCount).To(Equal(3))
Expect(storageCount).To(Equal(0))
Expect(blocksCount).To(Equal(12))
})
})
Describe("ResetValidation", func() {
BeforeEach(func() {
for _, key := range mhKeys {
_, err := db.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2)`, key, mockData)
Expect(err).ToNot(HaveOccurred())
}
err := repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred())
err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred())
var validationTimes []int
pgStr := `SELECT times_validated FROM eth.header_cids`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(1))
Expect(validationTimes[1]).To(Equal(1))
err = repo.Index(mockCIDPayload1)
Expect(err).ToNot(HaveOccurred())
validationTimes = []int{}
pgStr = `SELECT times_validated FROM eth.header_cids ORDER BY block_number`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(2))
Expect(validationTimes[1]).To(Equal(1))
})
AfterEach(func() {
eth.TearDownDB(db)
})
It("Resets the validation level", func() {
err := cleaner.ResetValidation(rngs)
Expect(err).ToNot(HaveOccurred())
var validationTimes []int
pgStr := `SELECT times_validated FROM eth.header_cids`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(0))
Expect(validationTimes[1]).To(Equal(0))
err = repo.Index(mockCIDPayload2)
Expect(err).ToNot(HaveOccurred())
validationTimes = []int{}
pgStr = `SELECT times_validated FROM eth.header_cids ORDER BY block_number`
err = db.Select(&validationTimes, pgStr)
Expect(err).ToNot(HaveOccurred())
Expect(len(validationTimes)).To(Equal(2))
Expect(validationTimes[0]).To(Equal(0))
Expect(validationTimes[1]).To(Equal(1))
})
})
})

View File

@ -1,155 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// PayloadConverter satisfies the PayloadConverter interface for ethereum
type PayloadConverter struct {
chainConfig *params.ChainConfig
}
// NewPayloadConverter creates a pointer to a new PayloadConverter which satisfies the PayloadConverter interface
func NewPayloadConverter(chainConfig *params.ChainConfig) *PayloadConverter {
return &PayloadConverter{
chainConfig: chainConfig,
}
}
// Convert is used to convert an eth statediff.Payload into a ConvertedPayload
// Satisfies the shared.PayloadConverter interface
func (pc *PayloadConverter) Convert(payload shared.RawChainData) (shared.ConvertedData, error) {
stateDiffPayload, ok := payload.(statediff.Payload)
if !ok {
return nil, fmt.Errorf("eth converter: expected payload type %T got %T", statediff.Payload{}, payload)
}
// Unpack block rlp to access fields
block := new(types.Block)
if err := rlp.DecodeBytes(stateDiffPayload.BlockRlp, block); err != nil {
return nil, err
}
trxLen := len(block.Transactions())
convertedPayload := ConvertedPayload{
TotalDifficulty: stateDiffPayload.TotalDifficulty,
Block: block,
TxMetaData: make([]TxModel, 0, trxLen),
Receipts: make(types.Receipts, 0, trxLen),
ReceiptMetaData: make([]ReceiptModel, 0, trxLen),
StateNodes: make([]TrieNode, 0),
StorageNodes: make(map[string][]TrieNode),
}
signer := types.MakeSigner(pc.chainConfig, block.Number())
transactions := block.Transactions()
for i, trx := range transactions {
// Extract to and from data from the transactions for indexing
from, err := types.Sender(signer, trx)
if err != nil {
return nil, err
}
txMeta := TxModel{
Dst: shared.HandleZeroAddrPointer(trx.To()),
Src: shared.HandleZeroAddr(from),
TxHash: trx.Hash().String(),
Index: int64(i),
Data: trx.Data(),
}
// txMeta will have the same index as its corresponding trx in the block body
convertedPayload.TxMetaData = append(convertedPayload.TxMetaData, txMeta)
}
// Decode receipts for this block
receipts := make(types.Receipts, 0)
if err := rlp.DecodeBytes(stateDiffPayload.ReceiptsRlp, &receipts); err != nil {
return nil, err
}
// Derive any missing fields
if err := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions()); err != nil {
return nil, err
}
for i, receipt := range receipts {
// Extract topic and contract data from the receipt for indexing
topicSets := make([][]string, 4)
mappedContracts := make(map[string]bool) // use map to avoid duplicate addresses
for _, log := range receipt.Logs {
for i, topic := range log.Topics {
topicSets[i] = append(topicSets[i], topic.Hex())
}
mappedContracts[log.Address.String()] = true
}
// These are the contracts seen in the logs
logContracts := make([]string, 0, len(mappedContracts))
for addr := range mappedContracts {
logContracts = append(logContracts, addr)
}
// This is the contract address if this receipt is for a contract creation tx
contract := shared.HandleZeroAddr(receipt.ContractAddress)
var contractHash string
if contract != "" {
convertedPayload.TxMetaData[i].Deployment = true
contractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
rctMeta := ReceiptModel{
Topic0s: topicSets[0],
Topic1s: topicSets[1],
Topic2s: topicSets[2],
Topic3s: topicSets[3],
Contract: contract,
ContractHash: contractHash,
LogContracts: logContracts,
}
// receipt and rctMeta will have the same indexes
convertedPayload.Receipts = append(convertedPayload.Receipts, receipt)
convertedPayload.ReceiptMetaData = append(convertedPayload.ReceiptMetaData, rctMeta)
}
// Unpack state diff rlp to access fields
stateDiff := new(statediff.StateObject)
if err := rlp.DecodeBytes(stateDiffPayload.StateObjectRlp, stateDiff); err != nil {
return nil, err
}
for _, stateNode := range stateDiff.Nodes {
statePath := common.Bytes2Hex(stateNode.Path)
convertedPayload.StateNodes = append(convertedPayload.StateNodes, TrieNode{
Path: stateNode.Path,
Value: stateNode.NodeValue,
Type: stateNode.NodeType,
LeafKey: common.BytesToHash(stateNode.LeafKey),
})
for _, storageNode := range stateNode.StorageNodes {
convertedPayload.StorageNodes[statePath] = append(convertedPayload.StorageNodes[statePath], TrieNode{
Path: storageNode.Path,
Value: storageNode.NodeValue,
Type: storageNode.NodeType,
LeafKey: common.BytesToHash(storageNode.LeafKey),
})
}
}
return convertedPayload, nil
}
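// Hypothetical usage sketch (not part of the deleted file above): converting a
// raw statediff.Payload from a statediffing geth node into the ConvertedPayload
// consumed downstream. The chain config and payload are assumed inputs; the
// mainnet config is a placeholder choice.
func exampleConvert(payload statediff.Payload) (ConvertedPayload, error) {
    converter := NewPayloadConverter(params.MainnetChainConfig)
    data, err := converter.Convert(payload)
    if err != nil {
        return ConvertedPayload{}, err
    }
    converted, ok := data.(ConvertedPayload)
    if !ok {
        return ConvertedPayload{}, fmt.Errorf("unexpected converted payload type %T", data)
    }
    return converted, nil
}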

View File

@ -1,54 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
)
var _ = Describe("Converter", func() {
Describe("Convert", func() {
It("Converts mock statediff.Payloads into the expected IPLDPayloads", func() {
converter := eth.NewPayloadConverter(params.MainnetChainConfig)
payload, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).ToNot(HaveOccurred())
convertedPayload, ok := payload.(eth.ConvertedPayload)
Expect(ok).To(BeTrue())
Expect(convertedPayload.Block.Number().String()).To(Equal(mocks.BlockNumber.String()))
Expect(convertedPayload.Block.Hash().String()).To(Equal(mocks.MockBlock.Hash().String()))
Expect(convertedPayload.StateNodes).To(Equal(mocks.MockStateNodes))
Expect(convertedPayload.StorageNodes).To(Equal(mocks.MockStorageNodes))
Expect(convertedPayload.TotalDifficulty.Int64()).To(Equal(mocks.MockStateDiffPayload.TotalDifficulty.Int64()))
gotBody, err := rlp.EncodeToBytes(convertedPayload.Block.Body())
Expect(err).ToNot(HaveOccurred())
expectedBody, err := rlp.EncodeToBytes(mocks.MockBlock.Body())
Expect(err).ToNot(HaveOccurred())
Expect(gotBody).To(Equal(expectedBody))
gotHeader, err := rlp.EncodeToBytes(convertedPayload.Block.Header())
Expect(err).ToNot(HaveOccurred())
Expect(gotHeader).To(Equal(mocks.MockHeaderRlp))
Expect(convertedPayload.TxMetaData).To(Equal(mocks.MockTrxMeta))
Expect(convertedPayload.ReceiptMetaData).To(Equal(mocks.MockRctMeta))
})
})
})

View File

@ -1,206 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var (
nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
)
// CIDIndexer satisfies the CIDIndexer interface for ethereum
type CIDIndexer struct {
db *postgres.DB
}
// NewCIDIndexer creates a pointer to a new CIDIndexer which satisfies the CIDIndexer interface
func NewCIDIndexer(db *postgres.DB) *CIDIndexer {
return &CIDIndexer{
db: db,
}
}
// Index indexes a cidPayload in Postgres
func (in *CIDIndexer) Index(cids shared.CIDsForIndexing) error {
cidPayload, ok := cids.(*CIDPayload)
if !ok {
return fmt.Errorf("eth indexer expected cids type %T got %T", &CIDPayload{}, cids)
}
// Begin new db tx
tx, err := in.db.Beginx()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
headerID, err := in.indexHeaderCID(tx, cidPayload.HeaderCID)
if err != nil {
log.Error("eth indexer error when indexing header")
return err
}
for _, uncle := range cidPayload.UncleCIDs {
if err := in.indexUncleCID(tx, uncle, headerID); err != nil {
log.Error("eth indexer error when indexing uncle")
return err
}
}
if err := in.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID); err != nil {
log.Error("eth indexer error when indexing transactions and receipts")
return err
}
err = in.indexStateAndStorageCIDs(tx, cidPayload, headerID)
if err != nil {
log.Error("eth indexer error when indexing state and storage nodes")
}
return err
}
func (in *CIDIndexer) indexHeaderCID(tx *sqlx.Tx, header HeaderModel) (int64, error) {
var headerID int64
err := tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1)
RETURNING id`,
header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, in.db.NodeID, header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1).Scan(&headerID)
return headerID, err
}
func (in *CIDIndexer) indexUncleCID(tx *sqlx.Tx, uncle UncleModel, headerID int64) error {
_, err := tx.Exec(`INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`,
uncle.BlockHash, headerID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
return err
}
func (in *CIDIndexer) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, trxCidMeta := range payload.TransactionCIDs {
var txID int64
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, deployment) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, deployment) = ($3, $4, $5, $6, $7, $8, $9)
RETURNING id`,
headerID, trxCidMeta.TxHash, trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src, trxCidMeta.Index, trxCidMeta.MhKey, trxCidMeta.Data, trxCidMeta.Deployment).Scan(&txID)
if err != nil {
return err
}
receiptCidMeta, ok := payload.ReceiptCIDs[common.HexToHash(trxCidMeta.TxHash)]
if ok {
if err := in.indexReceiptCID(tx, receiptCidMeta, txID); err != nil {
return err
}
}
}
return nil
}
func (in *CIDIndexer) indexTransactionCID(tx *sqlx.Tx, transaction TxModel, headerID int64) (int64, error) {
var txID int64
err := tx.QueryRowx(`INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, deployment) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, deployment) = ($3, $4, $5, $6, $7, $8, $9)
RETURNING id`,
headerID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Deployment).Scan(&txID)
return txID, err
}
func (in *CIDIndexer) indexReceiptCID(tx *sqlx.Tx, cidMeta ReceiptModel, txID int64) error {
_, err := tx.Exec(`INSERT INTO eth.receipt_cids (tx_id, cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (tx_id) DO UPDATE SET (cid, contract, contract_hash, topic0s, topic1s, topic2s, topic3s, log_contracts, mh_key) = ($2, $3, $4, $5, $6, $7, $8, $9, $10)`,
txID, cidMeta.CID, cidMeta.Contract, cidMeta.ContractHash, cidMeta.Topic0s, cidMeta.Topic1s, cidMeta.Topic2s, cidMeta.Topic3s, cidMeta.LogContracts, cidMeta.MhKey)
return err
}
func (in *CIDIndexer) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *CIDPayload, headerID int64) error {
for _, stateCID := range payload.StateNodeCIDs {
var stateID int64
var stateKey string
if stateCID.StateKey != nullHash.String() {
stateKey = stateCID.StateKey
}
err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
RETURNING id`,
headerID, stateKey, stateCID.CID, stateCID.Path, stateCID.NodeType, true, stateCID.MhKey).Scan(&stateID)
if err != nil {
return err
}
// If we have a state leaf node, index the associated account and storage nodes
if stateCID.NodeType == 2 {
statePath := common.Bytes2Hex(stateCID.Path)
for _, storageCID := range payload.StorageNodeCIDs[statePath] {
if err := in.indexStorageCID(tx, storageCID, stateID); err != nil {
return err
}
}
if stateAccount, ok := payload.StateAccounts[statePath]; ok {
if err := in.indexStateAccount(tx, stateAccount, stateID); err != nil {
return err
}
}
}
}
return nil
}
func (in *CIDIndexer) indexStateCID(tx *sqlx.Tx, stateNode StateNodeModel, headerID int64) (int64, error) {
var stateID int64
var stateKey string
if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey
}
err := tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
RETURNING id`,
headerID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
return stateID, err
}
func (in *CIDIndexer) indexStateAccount(tx *sqlx.Tx, stateAccount StateAccountModel, stateID int64) error {
_, err := tx.Exec(`INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`,
stateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)
return err
}
func (in *CIDIndexer) indexStorageCID(tx *sqlx.Tx, storageCID StorageNodeModel, stateID int64) error {
var storageKey string
if storageCID.StorageKey != nullHash.String() {
storageKey = storageCID.StorageKey
}
_, err := tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`,
stateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey)
return err
}
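
For reference, the pattern these indexer methods share — an idempotent INSERT ... ON CONFLICT ... DO UPDATE with RETURNING id inside a sqlx transaction — can be sketched on its own as below; the table name, connection string, and values are hypothetical placeholders, not part of the deleted schema.

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // postgres driver
)

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/example_db?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	tx, err := db.Beginx()
	if err != nil {
		log.Fatal(err)
	}
	var id int64
	// ON CONFLICT turns a re-insert of the same natural key into an update, so reprocessing a
	// block stays idempotent; RETURNING id hands back the row id used to key dependent rows.
	err = tx.QueryRowx(`INSERT INTO example_cids (block_hash, cid) VALUES ($1, $2)
			ON CONFLICT (block_hash) DO UPDATE SET cid = $2
			RETURNING id`,
		"0xabc", "exampleCID").Scan(&id)
	if err != nil {
		tx.Rollback()
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	log.Println("upserted row id:", id)
}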

View File

@ -1,137 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var _ = Describe("Indexer", func() {
var (
db *postgres.DB
err error
repo *eth.CIDIndexer
)
BeforeEach(func() {
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = eth.NewCIDIndexer(db)
// entries with these mhkeys must exist in public.blocks, otherwise the FK constraints will fail
shared.PublishMockIPLD(db, mocks.HeaderMhKey, mockData)
shared.PublishMockIPLD(db, mocks.Trx1MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Trx2MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Trx3MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Rct1MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Rct2MhKey, mockData)
shared.PublishMockIPLD(db, mocks.Rct3MhKey, mockData)
shared.PublishMockIPLD(db, mocks.State1MhKey, mockData)
shared.PublishMockIPLD(db, mocks.State2MhKey, mockData)
shared.PublishMockIPLD(db, mocks.StorageMhKey, mockData)
})
AfterEach(func() {
eth.TearDownDB(db)
})
Describe("Index", func() {
It("Indexes CIDs and related metadata into vulcanizedb", func() {
err = repo.Index(mocks.MockCIDPayload)
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT cid, td, reward, id
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
CID string
TD string
Reward string
ID int
}
headers := new(res)
err = db.QueryRowx(pgStr, 1).StructScan(headers)
Expect(err).ToNot(HaveOccurred())
Expect(headers.CID).To(Equal(mocks.HeaderCID.String()))
Expect(headers.TD).To(Equal(mocks.MockBlock.Difficulty().String()))
Expect(headers.Reward).To(Equal("5000000000000000000"))
// check trxs were properly indexed
trxs := make([]string, 0)
pgStr = `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(3))
Expect(shared.ListContainsString(trxs, mocks.Trx1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, mocks.Trx2CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, mocks.Trx3CID.String())).To(BeTrue())
// check receipts were properly indexed
rcts := make([]string, 0)
pgStr = `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(3))
Expect(shared.ListContainsString(rcts, mocks.Rct1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, mocks.Rct2CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, mocks.Rct3CID.String())).To(BeTrue())
// check that state nodes were properly indexed
stateNodes := make([]eth.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&stateNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(stateNodes)).To(Equal(2))
for _, stateNode := range stateNodes {
if stateNode.CID == mocks.State1CID.String() {
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.ContractLeafKey).Hex()))
Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
}
if stateNode.CID == mocks.State2CID.String() {
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.AccountLeafKey).Hex()))
Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
}
}
// check that storage nodes were properly indexed
storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
err = db.Select(&storageNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1))
Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
CID: mocks.StorageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
Path: []byte{},
}))
})
})
})

View File

@ -1,582 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package mocks
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"math/big"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/ethereum/go-ethereum/statediff/testhelpers"
"github.com/ipfs/go-block-format"
"github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)
// Test variables
var (
// block data
BlockNumber = big.NewInt(1)
MockHeader = types.Header{
Time: 0,
Number: new(big.Int).Set(BlockNumber),
Root: common.HexToHash("0x0"),
TxHash: common.HexToHash("0x0"),
ReceiptHash: common.HexToHash("0x0"),
Difficulty: big.NewInt(5000000),
Extra: []byte{},
}
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts()
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts)
MockBlockRlp, _ = rlp.EncodeToBytes(MockBlock)
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
mockTopic11 = common.HexToHash("0x04")
mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
MockLog1 = &types.Log{
Address: Address,
Topics: []common.Hash{mockTopic11, mockTopic12},
Data: []byte{},
}
MockLog2 = &types.Log{
Address: AnotherAddress,
Topics: []common.Hash{mockTopic21, mockTopic22},
Data: []byte{},
}
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256)
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256)
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
State1MhKey = shared.MultihashKeyFromCID(State1CID)
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
State2MhKey = shared.MultihashKeyFromCID(State2CID)
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
MockTrxMeta = []eth.TxModel{
{
CID: "", // This is empty until we go to publish to ipfs
MhKey: "",
Src: SenderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
Data: []byte{},
Deployment: false,
},
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
TxHash: MockTransactions[1].Hash().String(),
Data: []byte{},
Deployment: false,
},
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: "",
Index: 2,
TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode,
Deployment: true,
},
}
MockTrxMetaPostPublsh = []eth.TxModel{
{
CID: Trx1CID.String(), // CIDs and mh keys are populated after publishing to ipfs
MhKey: Trx1MhKey,
Src: SenderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
Data: []byte{},
Deployment: false,
},
{
CID: Trx2CID.String(),
MhKey: Trx2MhKey,
Src: SenderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
TxHash: MockTransactions[1].Hash().String(),
Data: []byte{},
Deployment: false,
},
{
CID: Trx3CID.String(),
MhKey: Trx3MhKey,
Src: SenderAddr.Hex(),
Dst: "",
Index: 2,
TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode,
Deployment: true,
},
}
MockRctMeta = []eth.ReceiptModel{
{
CID: "",
MhKey: "",
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "",
ContractHash: "",
LogContracts: []string{
Address.String(),
},
},
{
CID: "",
MhKey: "",
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: "",
ContractHash: "",
LogContracts: []string{
AnotherAddress.String(),
},
},
{
CID: "",
MhKey: "",
Contract: ContractAddress.String(),
ContractHash: ContractHash,
LogContracts: []string{},
},
}
MockRctMetaPostPublish = []eth.ReceiptModel{
{
CID: Rct1CID.String(),
MhKey: Rct1MhKey,
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "",
ContractHash: "",
LogContracts: []string{
Address.String(),
},
},
{
CID: Rct2CID.String(),
MhKey: Rct2MhKey,
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: "",
ContractHash: "",
LogContracts: []string{
AnotherAddress.String(),
},
},
{
CID: Rct3CID.String(),
MhKey: Rct3MhKey,
Contract: ContractAddress.String(),
ContractHash: ContractHash,
LogContracts: []string{},
},
}
// statediff data
storageLocation = common.HexToHash("0")
StorageLeafKey = crypto.Keccak256Hash(storageLocation[:]).Bytes()
StorageValue = common.Hex2Bytes("01")
StoragePartialPath = common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
StorageLeafNode, _ = rlp.EncodeToBytes([]interface{}{
StoragePartialPath,
StorageValue,
})
nonce1 = uint64(1)
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
ContractCodeHash = common.HexToHash("0x753f98a8d4328b15636e46f66f2cb4bc860100aa17967cc145fcd17d1d4710ea")
contractPath = common.Bytes2Hex([]byte{'\x06'})
ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
ContractAccount, _ = rlp.EncodeToBytes(state.Account{
Nonce: nonce1,
Balance: big.NewInt(0),
CodeHash: ContractCodeHash.Bytes(),
Root: common.HexToHash(ContractRoot),
})
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
ContractLeafNode, _ = rlp.EncodeToBytes([]interface{}{
ContractPartialPath,
ContractAccount,
})
nonce0 = uint64(0)
AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
accountPath = common.Bytes2Hex([]byte{'\x0c'})
AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
AccountLeafKey = testhelpers.Account2LeafKey
Account, _ = rlp.EncodeToBytes(state.Account{
Nonce: nonce0,
Balance: big.NewInt(1000),
CodeHash: AccountCodeHash.Bytes(),
Root: common.HexToHash(AccountRoot),
})
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
AccountLeafNode, _ = rlp.EncodeToBytes([]interface{}{
AccountPartialPath,
Account,
})
StateDiffs = []statediff.StateNode{
{
Path: []byte{'\x06'},
NodeType: statediff.Leaf,
LeafKey: ContractLeafKey,
NodeValue: ContractLeafNode,
StorageNodes: []statediff.StorageNode{
{
Path: []byte{},
NodeType: statediff.Leaf,
LeafKey: StorageLeafKey,
NodeValue: StorageLeafNode,
},
},
},
{
Path: []byte{'\x0c'},
NodeType: statediff.Leaf,
LeafKey: AccountLeafKey,
NodeValue: AccountLeafNode,
StorageNodes: []statediff.StorageNode{},
},
}
MockStateDiff = statediff.StateObject{
BlockNumber: new(big.Int).Set(BlockNumber),
BlockHash: MockBlock.Hash(),
Nodes: StateDiffs,
}
MockStateDiffBytes, _ = rlp.EncodeToBytes(MockStateDiff)
MockStateNodes = []eth.TrieNode{
{
LeafKey: common.BytesToHash(ContractLeafKey),
Path: []byte{'\x06'},
Value: ContractLeafNode,
Type: statediff.Leaf,
},
{
LeafKey: common.BytesToHash(AccountLeafKey),
Path: []byte{'\x0c'},
Value: AccountLeafNode,
Type: statediff.Leaf,
},
}
MockStateMetaPostPublish = []eth.StateNodeModel{
{
CID: State1CID.String(),
MhKey: State1MhKey,
Path: []byte{'\x06'},
NodeType: 2,
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
},
{
CID: State2CID.String(),
MhKey: State2MhKey,
Path: []byte{'\x0c'},
NodeType: 2,
StateKey: common.BytesToHash(AccountLeafKey).Hex(),
},
}
MockStorageNodes = map[string][]eth.TrieNode{
contractPath: {
{
LeafKey: common.BytesToHash(StorageLeafKey),
Value: StorageLeafNode,
Type: statediff.Leaf,
Path: []byte{},
},
},
}
// aggregate payloads
MockStateDiffPayload = statediff.Payload{
BlockRlp: MockBlockRlp,
StateObjectRlp: MockStateDiffBytes,
ReceiptsRlp: ReceiptsRlp,
TotalDifficulty: MockBlock.Difficulty(),
}
MockConvertedPayload = eth.ConvertedPayload{
TotalDifficulty: MockBlock.Difficulty(),
Block: MockBlock,
Receipts: MockReceipts,
TxMetaData: MockTrxMeta,
ReceiptMetaData: MockRctMeta,
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}
MockCIDPayload = &eth.CIDPayload{
HeaderCID: eth.HeaderModel{
BlockHash: MockBlock.Hash().String(),
BlockNumber: MockBlock.Number().String(),
CID: HeaderCID.String(),
MhKey: HeaderMhKey,
ParentHash: MockBlock.ParentHash().String(),
TotalDifficulty: MockBlock.Difficulty().String(),
Reward: "5000000000000000000",
StateRoot: MockBlock.Root().String(),
RctRoot: MockBlock.ReceiptHash().String(),
TxRoot: MockBlock.TxHash().String(),
UncleRoot: MockBlock.UncleHash().String(),
Bloom: MockBlock.Bloom().Bytes(),
Timestamp: MockBlock.Time(),
},
UncleCIDs: []eth.UncleModel{},
TransactionCIDs: MockTrxMetaPostPublsh,
ReceiptCIDs: map[common.Hash]eth.ReceiptModel{
MockTransactions[0].Hash(): MockRctMetaPostPublish[0],
MockTransactions[1].Hash(): MockRctMetaPostPublish[1],
MockTransactions[2].Hash(): MockRctMetaPostPublish[2],
},
StateNodeCIDs: MockStateMetaPostPublish,
StorageNodeCIDs: map[string][]eth.StorageNodeModel{
contractPath: {
{
CID: StorageCID.String(),
MhKey: StorageMhKey,
Path: []byte{},
StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
NodeType: 2,
},
},
},
StateAccounts: map[string]eth.StateAccountModel{
contractPath: {
Balance: big.NewInt(0).String(),
Nonce: nonce1,
CodeHash: ContractCodeHash.Bytes(),
StorageRoot: common.HexToHash(ContractRoot).String(),
},
accountPath: {
Balance: big.NewInt(1000).String(),
Nonce: nonce0,
CodeHash: AccountCodeHash.Bytes(),
StorageRoot: common.HexToHash(AccountRoot).String(),
},
},
}
MockCIDWrapper = &eth.CIDWrapper{
BlockNumber: new(big.Int).Set(BlockNumber),
Header: eth.HeaderModel{
BlockNumber: "1",
BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
CID: HeaderCID.String(),
MhKey: HeaderMhKey,
TotalDifficulty: MockBlock.Difficulty().String(),
Reward: "5000000000000000000",
StateRoot: MockBlock.Root().String(),
RctRoot: MockBlock.ReceiptHash().String(),
TxRoot: MockBlock.TxHash().String(),
UncleRoot: MockBlock.UncleHash().String(),
Bloom: MockBlock.Bloom().Bytes(),
Timestamp: MockBlock.Time(),
TimesValidated: 1,
},
Transactions: MockTrxMetaPostPublsh,
Receipts: MockRctMetaPostPublish,
Uncles: []eth.UncleModel{},
StateNodes: MockStateMetaPostPublish,
StorageNodes: []eth.StorageNodeWithStateKeyModel{
{
Path: []byte{},
CID: StorageCID.String(),
MhKey: StorageMhKey,
NodeType: 2,
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
},
},
}
HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
Trx1IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), Trx1CID)
Trx2IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), Trx2CID)
Trx3IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(2), Trx3CID)
Rct1IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), Rct1CID)
Rct2IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), Rct2CID)
Rct3IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(2), Rct3CID)
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
MockIPLDs = eth.IPLDs{
BlockNumber: new(big.Int).Set(BlockNumber),
Header: ipfs.BlockModel{
Data: HeaderIPLD.RawData(),
CID: HeaderIPLD.Cid().String(),
},
Transactions: []ipfs.BlockModel{
{
Data: Trx1IPLD.RawData(),
CID: Trx1IPLD.Cid().String(),
},
{
Data: Trx2IPLD.RawData(),
CID: Trx2IPLD.Cid().String(),
},
{
Data: Trx3IPLD.RawData(),
CID: Trx3IPLD.Cid().String(),
},
},
Receipts: []ipfs.BlockModel{
{
Data: Rct1IPLD.RawData(),
CID: Rct1IPLD.Cid().String(),
},
{
Data: Rct2IPLD.RawData(),
CID: Rct2IPLD.Cid().String(),
},
{
Data: Rct3IPLD.RawData(),
CID: Rct3IPLD.Cid().String(),
},
},
StateNodes: []eth.StateNode{
{
StateLeafKey: common.BytesToHash(ContractLeafKey),
Type: statediff.Leaf,
IPLD: ipfs.BlockModel{
Data: State1IPLD.RawData(),
CID: State1IPLD.Cid().String(),
},
Path: []byte{'\x06'},
},
{
StateLeafKey: common.BytesToHash(AccountLeafKey),
Type: statediff.Leaf,
IPLD: ipfs.BlockModel{
Data: State2IPLD.RawData(),
CID: State2IPLD.Cid().String(),
},
Path: []byte{'\x0c'},
},
},
StorageNodes: []eth.StorageNode{
{
StateLeafKey: common.BytesToHash(ContractLeafKey),
StorageLeafKey: common.BytesToHash(StorageLeafKey),
Type: statediff.Leaf,
IPLD: ipfs.BlockModel{
Data: StorageIPLD.RawData(),
CID: StorageIPLD.Cid().String(),
},
Path: []byte{},
},
},
}
)
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
// make transactions
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
if err != nil {
log.Fatal(err)
}
signedTrx1, err := types.SignTx(trx1, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
signedTrx2, err := types.SignTx(trx2, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
signedTrx3, err := types.SignTx(trx3, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same sender for all three transactions
if err != nil {
log.Fatal(err)
}
// make receipts
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
mockReceipt1.Logs = []*types.Log{MockLog1}
mockReceipt1.TxHash = signedTrx1.Hash()
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
mockReceipt2.Logs = []*types.Log{MockLog2}
mockReceipt2.TxHash = signedTrx2.Hash()
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 75)
mockReceipt3.Logs = []*types.Log{}
mockReceipt3.TxHash = signedTrx3.Hash()
return types.Transactions{signedTrx1, signedTrx2, signedTrx3}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3}, SenderAddr
}

View File

@ -1,126 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import "github.com/lib/pq"
// HeaderModel is the db model for eth.header_cids
type HeaderModel struct {
ID int64 `db:"id"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
TotalDifficulty string `db:"td"`
NodeID int64 `db:"node_id"`
Reward string `db:"reward"`
StateRoot string `db:"state_root"`
UncleRoot string `db:"uncle_root"`
TxRoot string `db:"tx_root"`
RctRoot string `db:"receipt_root"`
Bloom []byte `db:"bloom"`
Timestamp uint64 `db:"timestamp"`
TimesValidated int64 `db:"times_validated"`
}
// UncleModel is the db model for eth.uncle_cids
type UncleModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
BlockHash string `db:"block_hash"`
ParentHash string `db:"parent_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Reward string `db:"reward"`
}
// TxModel is the db model for eth.transaction_cids
type TxModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
Index int64 `db:"index"`
TxHash string `db:"tx_hash"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Dst string `db:"dst"`
Src string `db:"src"`
Data []byte `db:"tx_data"`
Deployment bool `db:"deployment"`
}
// ReceiptModel is the db model for eth.receipt_cids
type ReceiptModel struct {
ID int64 `db:"id"`
TxID int64 `db:"tx_id"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Contract string `db:"contract"`
ContractHash string `db:"contract_hash"`
LogContracts pq.StringArray `db:"log_contracts"`
Topic0s pq.StringArray `db:"topic0s"`
Topic1s pq.StringArray `db:"topic1s"`
Topic2s pq.StringArray `db:"topic2s"`
Topic3s pq.StringArray `db:"topic3s"`
}
// StateNodeModel is the db model for eth.state_cids
type StateNodeModel struct {
ID int64 `db:"id"`
HeaderID int64 `db:"header_id"`
Path []byte `db:"state_path"`
StateKey string `db:"state_leaf_key"`
NodeType int `db:"node_type"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
}
// StorageNodeModel is the db model for eth.storage_cids
type StorageNodeModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
Path []byte `db:"storage_path"`
StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
}
// StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_leaf_key
type StorageNodeWithStateKeyModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
Path []byte `db:"storage_path"`
StateKey string `db:"state_leaf_key"`
StorageKey string `db:"storage_leaf_key"`
NodeType int `db:"node_type"`
CID string `db:"cid"`
MhKey string `db:"mh_key"`
Diff bool `db:"diff"`
}
// StateAccountModel is a db model for an eth state account (decoded value of state leaf node)
type StateAccountModel struct {
ID int64 `db:"id"`
StateID int64 `db:"state_id"`
Balance string `db:"balance"`
Nonce uint64 `db:"nonce"`
CodeHash []byte `db:"code_hash"`
StorageRoot string `db:"storage_root"`
}
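
As a rough usage sketch (not taken from the deleted files), sqlx fills these models by matching each selected column to the field carrying the corresponding `db` tag; the connection string below is a placeholder.

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // postgres driver

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
)

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/example_db?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	var header eth.HeaderModel
	// StructScan fills each field from the column named in its `db` tag (block_number, block_hash, cid, td, reward)
	err = db.QueryRowx(`SELECT block_number, block_hash, cid, td, reward
			FROM eth.header_cids WHERE block_number = $1`, 1).StructScan(&header)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("header %s at height %s with cid %s", header.BlockHash, header.BlockNumber, header.CID)
}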

View File

@ -1,88 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// BatchClient is an interface to a batch-fetching geth rpc client; created to allow mock insertion
type BatchClient interface {
BatchCallContext(ctx context.Context, batch []rpc.BatchElem) error
}
// PayloadFetcher satisfies the PayloadFetcher interface for ethereum
type PayloadFetcher struct {
// PayloadFetcher is thread-safe as long as the underlying client is thread-safe, since it has/modifies no other state
// http.Client is thread-safe
client BatchClient
timeout time.Duration
params statediff.Params
}
const method = "statediff_stateDiffAt"
// NewPayloadFetcher returns a PayloadFetcher
func NewPayloadFetcher(bc BatchClient, timeout time.Duration) *PayloadFetcher {
return &PayloadFetcher{
client: bc,
timeout: timeout,
params: statediff.Params{
IncludeReceipts: true,
IncludeTD: true,
IncludeBlock: true,
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
},
}
}
// FetchAt fetches the statediff payloads at the given block heights
// Calls StateDiffAt(ctx context.Context, blockNumber uint64, params Params) (*Payload, error)
func (fetcher *PayloadFetcher) FetchAt(blockHeights []uint64) ([]shared.RawChainData, error) {
batch := make([]rpc.BatchElem, 0)
for _, height := range blockHeights {
batch = append(batch, rpc.BatchElem{
Method: method,
Args: []interface{}{height, fetcher.params},
Result: new(statediff.Payload),
})
}
ctx, cancel := context.WithTimeout(context.Background(), fetcher.timeout)
defer cancel()
if err := fetcher.client.BatchCallContext(ctx, batch); err != nil {
return nil, fmt.Errorf("ethereum PayloadFetcher batch err for block range %d-%d: %s", blockHeights[0], blockHeights[len(blockHeights)-1], err.Error())
}
results := make([]shared.RawChainData, 0, len(blockHeights))
for _, batchElem := range batch {
if batchElem.Error != nil {
return nil, fmt.Errorf("ethereum PayloadFetcher err at blockheight %d: %s", batchElem.Args[0].(uint64), batchElem.Error.Error())
}
payload, ok := batchElem.Result.(*statediff.Payload)
if ok {
results = append(results, *payload)
}
}
return results, nil
}
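
A hedged usage sketch, not part of the deleted file: a go-ethereum *rpc.Client implements BatchCallContext and therefore satisfies BatchClient, so the fetcher can be pointed at any statediff-enabled geth endpoint; the URL and block heights below are placeholders.

package main

import (
	"log"
	"time"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint; must expose the statediff API
	if err != nil {
		log.Fatal(err)
	}
	fetcher := eth.NewPayloadFetcher(client, 60*time.Second)
	payloads, err := fetcher.FetchAt([]uint64{1000000, 1000001}) // placeholder heights
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d statediff payloads", len(payloads))
}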

View File

@ -1,65 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"time"
"github.com/ethereum/go-ethereum/statediff"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
)
var _ = Describe("StateDiffFetcher", func() {
Describe("FetchStateDiffsAt", func() {
var (
mc *mocks.BackFillerClient
stateDiffFetcher *eth.PayloadFetcher
payload2 statediff.Payload
blockNumber2 uint64
)
BeforeEach(func() {
mc = new(mocks.BackFillerClient)
err := mc.SetReturnDiffAt(mocks.BlockNumber.Uint64(), mocks.MockStateDiffPayload)
Expect(err).ToNot(HaveOccurred())
payload2 = mocks.MockStateDiffPayload
payload2.BlockRlp = []byte{}
blockNumber2 = mocks.BlockNumber.Uint64() + 1
err = mc.SetReturnDiffAt(blockNumber2, payload2)
Expect(err).ToNot(HaveOccurred())
stateDiffFetcher = eth.NewPayloadFetcher(mc, time.Second*60)
})
It("Batch calls statediff_stateDiffAt", func() {
blockHeights := []uint64{
mocks.BlockNumber.Uint64(),
blockNumber2,
}
stateDiffPayloads, err := stateDiffFetcher.FetchAt(blockHeights)
Expect(err).ToNot(HaveOccurred())
Expect(len(stateDiffPayloads)).To(Equal(2))
fetchedPayload1, ok := stateDiffPayloads[0].(statediff.Payload)
Expect(ok).To(BeTrue())
fetchedPayload2, ok := stateDiffPayloads[1].(statediff.Payload)
Expect(ok).To(BeTrue())
Expect(fetchedPayload1).To(Equal(mocks.MockStateDiffPayload))
Expect(fetchedPayload2).To(Equal(payload2))
})
})
})

View File

@ -1,228 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
"github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
// IPLDPublisher satisfies the IPLDPublisher interface for ethereum
// It interfaces directly with the public.blocks table of PG-IPFS rather than going through an ipfs intermediary
// It publishes and indexes IPLDs together in a single sqlx.Tx
type IPLDPublisher struct {
indexer *CIDIndexer
}
// NewIPLDPublisher creates a pointer to a new IPLDPublisher which satisfies the IPLDPublisher interface
func NewIPLDPublisher(db *postgres.DB) *IPLDPublisher {
return &IPLDPublisher{
indexer: NewCIDIndexer(db),
}
}
// Publish publishes the IPLDs in a ConvertedPayload directly to Postgres and indexes their CIDs in the same transaction
func (pub *IPLDPublisher) Publish(payload shared.ConvertedData) (err error) {
ipldPayload, ok := payload.(ConvertedPayload)
if !ok {
return fmt.Errorf("eth IPLDPublisher expected payload type %T got %T", ConvertedPayload{}, payload)
}
// Generate the iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, err := ipld.FromBlockAndReceipts(ipldPayload.Block, ipldPayload.Receipts)
if err != nil {
return err
}
// Begin new db tx
tx, err := pub.indexer.db.Beginx()
if err != nil {
return err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// Publish trie nodes
for _, node := range txTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return err
}
}
for _, node := range rctTrieNodes {
if err := shared.PublishIPLD(tx, node); err != nil {
return err
}
}
// Publish and index header
if err := shared.PublishIPLD(tx, headerNode); err != nil {
return err
}
reward := CalcEthBlockReward(ipldPayload.Block.Header(), ipldPayload.Block.Uncles(), ipldPayload.Block.Transactions(), ipldPayload.Receipts)
header := HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: ipldPayload.Block.ParentHash().String(),
BlockNumber: ipldPayload.Block.Number().String(),
BlockHash: ipldPayload.Block.Hash().String(),
TotalDifficulty: ipldPayload.TotalDifficulty.String(),
Reward: reward.String(),
Bloom: ipldPayload.Block.Bloom().Bytes(),
StateRoot: ipldPayload.Block.Root().String(),
RctRoot: ipldPayload.Block.ReceiptHash().String(),
TxRoot: ipldPayload.Block.TxHash().String(),
UncleRoot: ipldPayload.Block.UncleHash().String(),
Timestamp: ipldPayload.Block.Time(),
}
headerID, err := pub.indexer.indexHeaderCID(tx, header)
if err != nil {
return err
}
// Publish and index uncles
for _, uncleNode := range uncleNodes {
if err := shared.PublishIPLD(tx, uncleNode); err != nil {
return err
}
uncleReward := CalcUncleMinerReward(ipldPayload.Block.Number().Int64(), uncleNode.Number.Int64())
uncle := UncleModel{
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
Reward: uncleReward.String(),
}
if err := pub.indexer.indexUncleCID(tx, uncle, headerID); err != nil {
return err
}
}
// Publish and index txs and receipts
for i, txNode := range txNodes {
if err := shared.PublishIPLD(tx, txNode); err != nil {
return err
}
rctNode := rctNodes[i]
if err := shared.PublishIPLD(tx, rctNode); err != nil {
return err
}
txModel := ipldPayload.TxMetaData[i]
txModel.CID = txNode.Cid().String()
txModel.MhKey = shared.MultihashKeyFromCID(txNode.Cid())
txID, err := pub.indexer.indexTransactionCID(tx, txModel, headerID)
if err != nil {
return err
}
// If tx is a contract deployment, publish the data (code)
if txModel.Deployment { // codec doesn't matter in this case since we are not interested in the cid and the db key is multihash-derived
if _, err = shared.PublishRaw(tx, ipld.MEthStorageTrie, multihash.KECCAK_256, txModel.Data); err != nil {
return err
}
}
rctModel := ipldPayload.ReceiptMetaData[i]
rctModel.CID = rctNode.Cid().String()
rctModel.MhKey = shared.MultihashKeyFromCID(rctNode.Cid())
if err := pub.indexer.indexReceiptCID(tx, rctModel, txID); err != nil {
return err
}
}
// Publish and index state and storage
err = pub.publishAndIndexStateAndStorage(tx, ipldPayload, headerID)
return err // return err variable explicitly so that we return the err = tx.Commit() assignment in the defer
}
func (pub *IPLDPublisher) publishAndIndexStateAndStorage(tx *sqlx.Tx, ipldPayload ConvertedPayload, headerID int64) error {
// Publish and index state and storage
for _, stateNode := range ipldPayload.StateNodes {
stateCIDStr, err := shared.PublishRaw(tx, ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.Value)
if err != nil {
return err
}
mhKey, _ := shared.MultihashKeyFromCIDString(stateCIDStr)
stateModel := StateNodeModel{
Path: stateNode.Path,
StateKey: stateNode.LeafKey.String(),
CID: stateCIDStr,
MhKey: mhKey,
NodeType: ResolveFromNodeType(stateNode.Type),
}
stateID, err := pub.indexer.indexStateCID(tx, stateModel, headerID)
if err != nil {
return err
}
// If we have a leaf, decode and index the account data and any associated storage diffs
if stateNode.Type == statediff.Leaf {
var i []interface{}
if err := rlp.DecodeBytes(stateNode.Value, &i); err != nil {
return err
}
if len(i) != 2 {
return fmt.Errorf("eth IPLDPublisher expected state leaf node rlp to decode into two elements")
}
var account state.Account
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return err
}
accountModel := StateAccountModel{
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
}
if err := pub.indexer.indexStateAccount(tx, accountModel, stateID); err != nil {
return err
}
for _, storageNode := range ipldPayload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
storageCIDStr, err := shared.PublishRaw(tx, ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.Value)
if err != nil {
return err
}
mhKey, _ := shared.MultihashKeyFromCIDString(storageCIDStr)
storageModel := StorageNodeModel{
Path: storageNode.Path,
StorageKey: storageNode.LeafKey.Hex(),
CID: storageCIDStr,
MhKey: mhKey,
NodeType: ResolveFromNodeType(storageNode.Type),
}
if err := pub.indexer.indexStorageCID(tx, storageModel, stateID); err != nil {
return err
}
}
}
}
return nil
}
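
The transaction handling in Publish hinges on err being a named return: the deferred closure inspects it after the body finishes and either rolls back or commits, surfacing any commit error to the caller. A standalone sketch of that pattern, with a placeholder connection string and a trivial work function:

package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // postgres driver
)

// withTx mirrors the defer used by Publish: rollback on panic or error, otherwise commit,
// with the commit error surfaced through the named return.
func withTx(db *sqlx.DB, work func(tx *sqlx.Tx) error) (err error) {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-panic after cleaning up
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit() // visible to the caller because err is a named return
		}
	}()
	return work(tx)
}

func main() {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/example_db?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	err = withTx(db, func(tx *sqlx.Tx) error {
		_, execErr := tx.Exec(`SELECT 1`)
		return execErr
	})
	log.Println("transaction result:", err)
}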

View File

@ -1,233 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var _ = Describe("PublishAndIndexer", func() {
var (
db *postgres.DB
err error
repo *eth.IPLDPublisher
ipfsPgGet = `SELECT data FROM public.blocks
WHERE key = $1`
)
BeforeEach(func() {
db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = eth.NewIPLDPublisher(db)
})
AfterEach(func() {
eth.TearDownDB(db)
})
Describe("Publish", func() {
It("Published and indexes header IPLDs in a single tx", func() {
err = repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
pgStr := `SELECT cid, td, reward, id
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
CID string
TD string
Reward string
ID int
}
header := new(res)
err = db.QueryRowx(pgStr, 1).StructScan(header)
Expect(err).ToNot(HaveOccurred())
Expect(header.CID).To(Equal(mocks.HeaderCID.String()))
Expect(header.TD).To(Equal(mocks.MockBlock.Difficulty().String()))
Expect(header.Reward).To(Equal("5000000000000000000"))
dc, err := cid.Decode(header.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(mocks.MockHeaderRlp))
})
It("Publishes and indexes transaction IPLDs in a single tx", func() {
err = repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
// check that txs were properly indexed
trxs := make([]string, 0)
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&trxs, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(trxs)).To(Equal(3))
Expect(shared.ListContainsString(trxs, mocks.Trx1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, mocks.Trx2CID.String())).To(BeTrue())
Expect(shared.ListContainsString(trxs, mocks.Trx3CID.String())).To(BeTrue())
// and published
for _, c := range trxs {
dc, err := cid.Decode(c)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
switch c {
case mocks.Trx1CID.String():
Expect(data).To(Equal(mocks.MockTransactions.GetRlp(0)))
case mocks.Trx2CID.String():
Expect(data).To(Equal(mocks.MockTransactions.GetRlp(1)))
case mocks.Trx3CID.String():
Expect(data).To(Equal(mocks.MockTransactions.GetRlp(2)))
}
}
})
It("Publishes and indexes receipt IPLDs in a single tx", func() {
err = repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
// check receipts were properly indexed
rcts := make([]string, 0)
pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
err = db.Select(&rcts, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(rcts)).To(Equal(3))
Expect(shared.ListContainsString(rcts, mocks.Rct1CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, mocks.Rct2CID.String())).To(BeTrue())
Expect(shared.ListContainsString(rcts, mocks.Rct3CID.String())).To(BeTrue())
// and published
for _, c := range rcts {
dc, err := cid.Decode(c)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
switch c {
case mocks.Rct1CID.String():
Expect(data).To(Equal(mocks.MockReceipts.GetRlp(0)))
case mocks.Rct2CID.String():
Expect(data).To(Equal(mocks.MockReceipts.GetRlp(1)))
case mocks.Rct3CID.String():
Expect(data).To(Equal(mocks.MockReceipts.GetRlp(2)))
}
}
})
It("Publishes and indexes state IPLDs in a single tx", func() {
err = repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
// check that state nodes were properly indexed and published
stateNodes := make([]eth.StateNodeModel, 0)
pgStr := `SELECT state_cids.id, state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_number = $1`
err = db.Select(&stateNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(stateNodes)).To(Equal(2))
for _, stateNode := range stateNodes {
var data []byte
dc, err := cid.Decode(stateNode.CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
var account eth.StateAccountModel
err = db.Get(&account, pgStr, stateNode.ID)
Expect(err).ToNot(HaveOccurred())
if stateNode.CID == mocks.State1CID.String() {
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.ContractLeafKey).Hex()))
Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
Expect(data).To(Equal(mocks.ContractLeafNode))
Expect(account).To(Equal(eth.StateAccountModel{
ID: account.ID,
StateID: stateNode.ID,
Balance: "0",
CodeHash: mocks.ContractCodeHash.Bytes(),
StorageRoot: mocks.ContractRoot,
Nonce: 1,
}))
}
if stateNode.CID == mocks.State2CID.String() {
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(mocks.AccountLeafKey).Hex()))
Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
Expect(data).To(Equal(mocks.AccountLeafNode))
Expect(account).To(Equal(eth.StateAccountModel{
ID: account.ID,
StateID: stateNode.ID,
Balance: "1000",
CodeHash: mocks.AccountCodeHash.Bytes(),
StorageRoot: mocks.AccountRoot,
Nonce: 0,
}))
}
}
pgStr = `SELECT * from eth.state_accounts WHERE state_id = $1`
})
It("Publishes and indexes storage IPLDs in a single tx", func() {
err = repo.Publish(mocks.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
// check that storage nodes were properly indexed
storageNodes := make([]eth.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.block_number = $1`
err = db.Select(&storageNodes, pgStr, 1)
Expect(err).ToNot(HaveOccurred())
Expect(len(storageNodes)).To(Equal(1))
Expect(storageNodes[0]).To(Equal(eth.StorageNodeWithStateKeyModel{
CID: mocks.StorageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
Path: []byte{},
}))
var data []byte
dc, err := cid.Decode(storageNodes[0].CID)
Expect(err).ToNot(HaveOccurred())
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
err = db.Get(&data, ipfsPgGet, prefixedKey)
Expect(err).ToNot(HaveOccurred())
Expect(data).To(Equal(mocks.StorageLeafNode))
})
})
})
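
The tests above repeatedly derive the public.blocks key for a CID by taking its multihash and prefixing the blockstore namespace; in isolation the recipe looks like this (the CID string is just an example value):

package main

import (
	"fmt"
	"log"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-ipfs-ds-help"
)

func main() {
	dc, err := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG") // any valid CID string
	if err != nil {
		log.Fatal(err)
	}
	// public.blocks is keyed by multihash under the /blocks namespace, not by the CID itself
	mhKey := dshelp.MultihashToDsKey(dc.Hash())
	prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
	fmt.Println(prefixedKey) // e.g. /blocks/CIQ..., the `key` column in public.blocks
}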

View File

@ -1,76 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"math/big"
"github.com/ethereum/go-ethereum/core/types"
)
func CalcEthBlockReward(header *types.Header, uncles []*types.Header, txs types.Transactions, receipts types.Receipts) *big.Int {
staticBlockReward := staticRewardByBlockNumber(header.Number.Int64())
transactionFees := calcEthTransactionFees(txs, receipts)
uncleInclusionRewards := calcEthUncleInclusionRewards(header, uncles)
tmp := transactionFees.Add(transactionFees, uncleInclusionRewards)
return tmp.Add(tmp, staticBlockReward)
}
func CalcUncleMinerReward(blockNumber, uncleBlockNumber int64) *big.Int {
staticBlockReward := staticRewardByBlockNumber(blockNumber)
rewardDiv8 := staticBlockReward.Div(staticBlockReward, big.NewInt(8))
mainBlock := big.NewInt(blockNumber)
uncleBlock := big.NewInt(uncleBlockNumber)
uncleBlockPlus8 := uncleBlock.Add(uncleBlock, big.NewInt(8))
uncleBlockPlus8MinusMainBlock := uncleBlockPlus8.Sub(uncleBlockPlus8, mainBlock)
return rewardDiv8.Mul(rewardDiv8, uncleBlockPlus8MinusMainBlock)
}
func staticRewardByBlockNumber(blockNumber int64) *big.Int {
staticBlockReward := new(big.Int)
// Byzantium (block 4,370,000) cut the static reward from 5 to 3 ETH and Constantinople (block 7,280,000) to 2 ETH
// https://blog.ethereum.org/2017/10/12/byzantium-hf-announcement/
if blockNumber >= 7280000 {
staticBlockReward.SetString("2000000000000000000", 10)
} else if blockNumber >= 4370000 {
staticBlockReward.SetString("3000000000000000000", 10)
} else {
staticBlockReward.SetString("5000000000000000000", 10)
}
return staticBlockReward
}
func calcEthTransactionFees(txs types.Transactions, receipts types.Receipts) *big.Int {
transactionFees := new(big.Int)
for i, transaction := range txs {
receipt := receipts[i]
gasPrice := big.NewInt(transaction.GasPrice().Int64())
gasUsed := big.NewInt(int64(receipt.GasUsed))
transactionFee := gasPrice.Mul(gasPrice, gasUsed)
transactionFees = transactionFees.Add(transactionFees, transactionFee)
}
return transactionFees
}
func calcEthUncleInclusionRewards(header *types.Header, uncles []*types.Header) *big.Int {
uncleInclusionRewards := new(big.Int)
for range uncles {
staticBlockReward := staticRewardByBlockNumber(header.Number.Int64())
staticBlockReward.Div(staticBlockReward, big.NewInt(32))
uncleInclusionRewards.Add(uncleInclusionRewards, staticBlockReward)
}
return uncleInclusionRewards
}
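
To make the arithmetic concrete, a small standalone sketch (not part of the deleted file) reproduces the uncle-miner formula: on a post-Constantinople chain an uncle included one block after its own height earns (uncleHeight + 8 - blockHeight)/8 of the 2 ETH static reward, i.e. 1.75 ETH, while the including miner collects an extra 1/32 of the static reward per uncle.

package main

import (
	"fmt"
	"math/big"
)

// uncleMinerReward mirrors CalcUncleMinerReward: (uncleHeight + 8 - blockHeight) * staticReward / 8
func uncleMinerReward(blockNumber, uncleNumber int64, staticReward *big.Int) *big.Int {
	r := new(big.Int).Div(staticReward, big.NewInt(8))
	return r.Mul(r, big.NewInt(uncleNumber+8-blockNumber))
}

func main() {
	twoEth, _ := new(big.Int).SetString("2000000000000000000", 10) // post-Constantinople static reward in wei
	fmt.Println(uncleMinerReward(7280002, 7280001, twoEth))        // 1750000000000000000 wei = 1.75 ETH
	// the including miner's bonus per uncle is staticReward / 32 = 62500000000000000 wei
	fmt.Println(new(big.Int).Div(twoEth, big.NewInt(32)))
}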

View File

@ -1,72 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"context"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
"github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
const (
PayloadChanBufferSize = 20000 // the max eth sub buffer size
)
// StreamClient is an interface for subscribing and streaming from geth
type StreamClient interface {
Subscribe(ctx context.Context, namespace string, payloadChan interface{}, args ...interface{}) (*rpc.ClientSubscription, error)
}
// PayloadStreamer satisfies the PayloadStreamer interface for ethereum
type PayloadStreamer struct {
Client StreamClient
params statediff.Params
}
// NewPayloadStreamer creates a pointer to a new PayloadStreamer which satisfies the PayloadStreamer interface for ethereum
func NewPayloadStreamer(client StreamClient) *PayloadStreamer {
return &PayloadStreamer{
Client: client,
params: statediff.Params{
IncludeBlock: true,
IncludeTD: true,
IncludeReceipts: true,
IntermediateStorageNodes: true,
IntermediateStateNodes: true,
},
}
}
// Stream is the main loop for subscribing to data from the Geth state diff process
// Satisfies the shared.PayloadStreamer interface
func (ps *PayloadStreamer) Stream(payloadChan chan shared.RawChainData) (shared.ClientSubscription, error) {
stateDiffChan := make(chan statediff.Payload, PayloadChanBufferSize)
logrus.Debug("streaming diffs from geth")
go func() {
for payload := range stateDiffChan {
payloadChan <- payload
}
}()
return ps.Client.Subscribe(context.Background(), "statediff", stateDiffChan, "stream", ps.params)
}
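
A hedged usage sketch, not from the deleted file: a go-ethereum *rpc.Client over websockets or IPC satisfies StreamClient, so the streamer can subscribe directly to geth's statediff service; the endpoint below is a placeholder.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
	"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

func main() {
	client, err := rpc.Dial("ws://localhost:8546") // placeholder; subscriptions require ws or ipc transport
	if err != nil {
		log.Fatal(err)
	}
	streamer := eth.NewPayloadStreamer(client)
	payloadChan := make(chan shared.RawChainData, eth.PayloadChanBufferSize)
	if _, err := streamer.Stream(payloadChan); err != nil {
		log.Fatal(err)
	}
	for payload := range payloadChan {
		log.Printf("received raw statediff payload of type %T", payload)
	}
}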

View File

@ -1,34 +0,0 @@
// Copyright 2019 Vulcanize
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eth_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)
var _ = Describe("StateDiff Streamer", func() {
It("subscribes to the geth statediff service", func() {
client := &mocks.StreamClient{}
streamer := eth.NewPayloadStreamer(client)
payloadChan := make(chan shared.RawChainData)
_, err := streamer.Stream(payloadChan)
Expect(err).NotTo(HaveOccurred())
})
})

View File

@ -1,112 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/statediff"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs"
)
// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
type ConvertedPayload struct {
TotalDifficulty *big.Int
Block *types.Block
TxMetaData []TxModel
Receipts types.Receipts
ReceiptMetaData []ReceiptModel
StateNodes []TrieNode
StorageNodes map[string][]TrieNode
}
// Height satisfies the StreamedIPLDs interface
func (i ConvertedPayload) Height() int64 {
return i.Block.Number().Int64()
}
// Trie struct used to flag node as leaf or not
type TrieNode struct {
Path []byte
LeafKey common.Hash
Value []byte
Type statediff.NodeType
}
// CIDPayload is a struct to hold all the CIDs and their associated meta data for indexing in Postgres
// Returned by IPLDPublisher
// Passed to CIDIndexer
type CIDPayload struct {
HeaderCID HeaderModel
UncleCIDs []UncleModel
TransactionCIDs []TxModel
ReceiptCIDs map[common.Hash]ReceiptModel
StateNodeCIDs []StateNodeModel
StateAccounts map[string]StateAccountModel
StorageNodeCIDs map[string][]StorageNodeModel
}
// CIDWrapper is used to direct fetching of IPLDs from IPFS
// Returned by CIDRetriever
// Passed to IPLDFetcher
type CIDWrapper struct {
BlockNumber *big.Int
Header HeaderModel
Uncles []UncleModel
Transactions []TxModel
Receipts []ReceiptModel
StateNodes []StateNodeModel
StorageNodes []StorageNodeWithStateKeyModel
}
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
// Returned by IPLDFetcher and ResponseFilterer
type IPLDs struct {
BlockNumber *big.Int
TotalDifficulty *big.Int
Header ipfs.BlockModel
Uncles []ipfs.BlockModel
Transactions []ipfs.BlockModel
Receipts []ipfs.BlockModel
StateNodes []StateNode
StorageNodes []StorageNode
}
// Height satisfies the StreamedIPLDs interface
func (i IPLDs) Height() int64 {
return i.BlockNumber.Int64()
}
type StateNode struct {
Type statediff.NodeType
StateLeafKey common.Hash
Path []byte
IPLD ipfs.BlockModel
}
type StorageNode struct {
Type statediff.NodeType
StateLeafKey common.Hash
StorageLeafKey common.Hash
Path []byte
IPLD ipfs.BlockModel
}
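The comments above describe a pipeline: raw chain data becomes a ConvertedPayload, publishing it yields a CIDPayload for Postgres indexing, and retrieval walks back from a CIDWrapper to IPLDs. A small illustrative sketch of the first hand-off, using the shared interfaces these comments name (the function itself is hypothetical, not repo code):

package example

import "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"

// flowOnce mirrors the Converter -> Publisher hand-off described above
func flowOnce(converter shared.PayloadConverter, publisher shared.IPLDPublisher, raw shared.RawChainData) error {
    converted, err := converter.Convert(raw) // raw geth payload -> ConvertedPayload
    if err != nil {
        return err
    }
    // publishing writes the IPLDs to IPFS and produces the CID metadata that is indexed in Postgres
    return publisher.Publish(converted)
}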

View File

@ -1,135 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package historical
import (
"fmt"
"time"
"github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils"
)
// Env variables
const (
SUPERNODE_CHAIN = "SUPERNODE_CHAIN"
SUPERNODE_FREQUENCY = "SUPERNODE_FREQUENCY"
SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE"
SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER"
SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL"
BACKFILL_MAX_IDLE_CONNECTIONS = "BACKFILL_MAX_IDLE_CONNECTIONS"
BACKFILL_MAX_OPEN_CONNECTIONS = "BACKFILL_MAX_OPEN_CONNECTIONS"
BACKFILL_MAX_CONN_LIFETIME = "BACKFILL_MAX_CONN_LIFETIME"
)
// Config struct
type Config struct {
Chain shared.ChainType
DBConfig config.Database
DB *postgres.DB
HTTPClient interface{}
Frequency time.Duration
BatchSize uint64
BatchNumber uint64
ValidationLevel int
Timeout time.Duration // HTTP connection timeout in seconds
NodeInfo node.Node
}
// NewConfig is used to initialize a historical config from a .toml file
func NewConfig() (*Config, error) {
c := new(Config)
var err error
viper.BindEnv("watcher.chain", SUPERNODE_CHAIN)
chain := viper.GetString("watcher.chain")
c.Chain, err = shared.NewChainType(chain)
if err != nil {
return nil, err
}
c.DBConfig.Init()
if err := c.init(); err != nil {
return nil, err
}
return c, nil
}
func (c *Config) init() error {
var err error
viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH)
viper.BindEnv("watcher.frequency", SUPERNODE_FREQUENCY)
viper.BindEnv("watcher.batchSize", SUPERNODE_BATCH_SIZE)
viper.BindEnv("watcher.batchNumber", SUPERNODE_BATCH_NUMBER)
viper.BindEnv("watcher.validationLevel", SUPERNODE_VALIDATION_LEVEL)
viper.BindEnv("watcher.timeout", shared.HTTP_TIMEOUT)
timeout := viper.GetInt("watcher.timeout")
if timeout < 15 {
timeout = 15
}
c.Timeout = time.Second * time.Duration(timeout)
switch c.Chain {
case shared.Ethereum:
ethHTTP := viper.GetString("ethereum.httpPath")
c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
if err != nil {
return err
}
case shared.Bitcoin:
btcHTTP := viper.GetString("bitcoin.httpPath")
c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP)
}
freq := viper.GetInt("watcher.frequency")
var frequency time.Duration
if freq <= 0 {
frequency = time.Second * 30
} else {
frequency = time.Second * time.Duration(freq)
}
c.Frequency = frequency
c.BatchSize = uint64(viper.GetInt64("watcher.batchSize"))
c.BatchNumber = uint64(viper.GetInt64("watcher.batchNumber"))
c.ValidationLevel = viper.GetInt("watcher.validationLevel")
dbConn := overrideDBConnConfig(c.DBConfig)
db := utils.LoadPostgres(dbConn, c.NodeInfo)
c.DB = &db
return nil
}
func overrideDBConnConfig(con config.Database) config.Database {
viper.BindEnv("database.backFill.maxIdle", BACKFILL_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.backFill.maxOpen", BACKFILL_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.backFill.maxLifetime", BACKFILL_MAX_CONN_LIFETIME)
con.MaxIdle = viper.GetInt("database.backFill.maxIdle")
con.MaxOpen = viper.GetInt("database.backFill.maxOpen")
con.MaxLifetime = viper.GetInt("database.backFill.maxLifetime")
return con
}
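A minimal sketch of driving NewConfig from the environment. The SUPERNODE_* names come from the constants above; ETH_HTTP_PATH is an assumption about what shared.ETH_HTTP_PATH resolves to, and "ethereum" is assumed to be a value shared.NewChainType accepts. Note that NewConfig also dials the configured node and loads Postgres, so both must be reachable:

package main

import (
    "fmt"
    "os"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical"
)

func main() {
    os.Setenv("SUPERNODE_CHAIN", "ethereum")     // bound to watcher.chain above
    os.Setenv("ETH_HTTP_PATH", "127.0.0.1:8545") // assumed env var name; NewConfig prefixes it with http://
    cfg, err := historical.NewConfig()
    if err != nil {
        panic(err)
    }
    fmt.Printf("backfilling %s every %s in batches of %d\n", cfg.Chain.String(), cfg.Frequency, cfg.BatchSize)
}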

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package historical_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestIPFSWatcher(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "IPFS Watcher Historical Suite Test")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

View File

@ -1,196 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package historical
import (
"sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils"
)
// BackFillInterface for filling in gaps in the ipfs-blockchain-watcher db
type BackFillInterface interface {
// Method for the watcher to periodically check for and fill in gaps in its data using an archival node
BackFill(wg *sync.WaitGroup)
Stop() error
}
// BackFillService for filling in gaps in the watcher
type BackFillService struct {
// Interface for converting payloads into IPLD object payloads
Converter shared.PayloadConverter
// Interface for publishing the IPLD payloads to IPFS
Publisher shared.IPLDPublisher
// Interface for searching and retrieving CIDs from Postgres index
Retriever shared.CIDRetriever
// Interface for fetching payloads at historical block heights over HTTP
Fetcher shared.PayloadFetcher
// Channel for forwarding backfill payloads to the ScreenAndServe process
ScreenAndServeChan chan shared.ConvertedData
// Check frequency
GapCheckFrequency time.Duration
// Size of batch fetches
BatchSize uint64
// Number of goroutines
BatchNumber int64
// Channel for receiving quit signal
QuitChan chan bool
// Chain type
chain shared.ChainType
// Headers with times_validated lower than this will be resynced
validationLevel int
}
// NewBackFillService returns a new BackFillInterface
func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) {
publisher, err := builders.NewIPLDPublisher(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
converter, err := builders.NewPayloadConverter(settings.Chain, settings.NodeInfo.ChainID)
if err != nil {
return nil, err
}
retriever, err := builders.NewCIDRetriever(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
fetcher, err := builders.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout)
if err != nil {
return nil, err
}
batchSize := settings.BatchSize
if batchSize == 0 {
batchSize = shared.DefaultMaxBatchSize
}
batchNumber := int64(settings.BatchNumber)
if batchNumber == 0 {
batchNumber = shared.DefaultMaxBatchNumber
}
return &BackFillService{
Converter: converter,
Publisher: publisher,
Retriever: retriever,
Fetcher: fetcher,
GapCheckFrequency: settings.Frequency,
BatchSize: batchSize,
BatchNumber: batchNumber,
ScreenAndServeChan: screenAndServeChan,
QuitChan: make(chan bool),
chain: settings.Chain,
validationLevel: settings.ValidationLevel,
}, nil
}
// BackFill periodically checks for and fills in gaps in the watcher db
func (bfs *BackFillService) BackFill(wg *sync.WaitGroup) {
ticker := time.NewTicker(bfs.GapCheckFrequency)
wg.Add(1) // register before spawning the goroutine so a caller's wg.Wait() cannot race the Add
go func() {
defer wg.Done()
for {
select {
case <-bfs.QuitChan:
log.Infof("quiting %s BackFill process", bfs.chain.String())
return
case <-ticker.C:
gaps, err := bfs.Retriever.RetrieveGapsInData(bfs.validationLevel)
if err != nil {
log.Errorf("%s watcher db backFill RetrieveGapsInData error: %v", bfs.chain.String(), err)
continue
}
// spin up worker goroutines for this search pass
// we start and kill a new batch of workers for each pass
// so that we know each of the previous workers is done before we search for new gaps
heightsChan := make(chan []uint64)
for i := 1; i <= int(bfs.BatchNumber); i++ {
wg.Add(1) // the worker itself only calls Done; Add here avoids racing wg.Wait()
go bfs.backFill(wg, i, heightsChan)
}
for _, gap := range gaps {
log.Infof("backFilling %s data from %d to %d", bfs.chain.String(), gap.Start, gap.Stop)
blockRangeBins, err := utils.GetBlockHeightBins(gap.Start, gap.Stop, bfs.BatchSize)
if err != nil {
log.Errorf("%s watcher db backFill GetBlockHeightBins error: %v", bfs.chain.String(), err)
continue
}
for _, heights := range blockRangeBins {
select {
case <-bfs.QuitChan:
log.Infof("quiting %s BackFill process", bfs.chain.String())
return
default:
heightsChan <- heights
}
}
}
// send a quit signal to each worker
// this blocks until each worker has finished its current task and is free to receive from the quit channel
for i := 1; i <= int(bfs.BatchNumber); i++ {
bfs.QuitChan <- true
}
}
}
}()
log.Infof("%s BackFill goroutine successfully spun up", bfs.chain.String())
}
func (bfs *BackFillService) backFill(wg *sync.WaitGroup, id int, heightChan chan []uint64) {
defer wg.Done() // the spawning loop calls wg.Add before launching this goroutine
for {
select {
case heights := <-heightChan:
log.Debugf("%s backFill worker %d processing section from %d to %d", bfs.chain.String(), id, heights[0], heights[len(heights)-1])
payloads, err := bfs.Fetcher.FetchAt(heights)
if err != nil {
log.Errorf("%s backFill worker %d fetcher error: %s", bfs.chain.String(), id, err.Error())
}
for _, payload := range payloads {
ipldPayload, err := bfs.Converter.Convert(payload)
if err != nil {
log.Errorf("%s backFill worker %d converter error: %s", bfs.chain.String(), id, err.Error())
}
// If there is a ScreenAndServe process listening, forward converted payload to it
select {
case bfs.ScreenAndServeChan <- ipldPayload:
log.Debugf("%s backFill worker %d forwarded converted payload to server", bfs.chain.String(), id)
default:
log.Debugf("%s backFill worker %d unable to forward converted payload to server; no channel ready to receive", bfs.chain.String(), id)
}
if err := bfs.Publisher.Publish(ipldPayload); err != nil {
log.Errorf("%s backFill worker %d publisher error: %s", bfs.chain.String(), id, err.Error())
continue
}
}
log.Infof("%s backFill worker %d finished section from %d to %d", bfs.chain.String(), id, heights[0], heights[len(heights)-1])
case <-bfs.QuitChan:
log.Infof("%s backFill worker %d shutting down", bfs.chain.String(), id)
return
}
}
}
func (bfs *BackFillService) Stop() error {
log.Infof("Stopping %s backFill service", bfs.chain.String())
close(bfs.QuitChan)
return nil
}
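A short usage sketch for the service above, assuming a *historical.Config has already been built (e.g. via NewConfig) and that dropped ScreenAndServe forwards are acceptable because nothing reads serveChan here:

package example

import (
    "sync"
    "time"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical"
    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
)

func runBackfill(cfg *historical.Config) error {
    wg := new(sync.WaitGroup)
    serveChan := make(chan shared.ConvertedData)
    backfiller, err := historical.NewBackFillService(cfg, serveChan)
    if err != nil {
        return err
    }
    backfiller.BackFill(wg) // spins up the gap-checking goroutine and its workers
    time.Sleep(time.Minute) // illustrative stand-in for the application's lifetime
    if err := backfiller.Stop(); err != nil {
        return err
    }
    wg.Wait()
    return nil
}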

View File

@ -1,180 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package historical_test
import (
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/historical"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks"
)
var _ = Describe("BackFiller", func() {
Describe("FillGaps", func() {
It("Periodically checks for and fills in gaps in the watcher's data", func() {
mockPublisher := &mocks.IterativeIPLDPublisher{
ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload, mocks.MockConvertedPayload},
ReturnErr: nil,
}
mockRetriever := &mocks2.CIDRetriever{
FirstBlockNumberToReturn: 0,
GapsToRetrieve: []shared.Gap{
{
Start: 100, Stop: 101,
},
},
}
mockFetcher := &mocks2.PayloadFetcher{
PayloadsToReturn: map[uint64]shared.RawChainData{
100: mocks.MockStateDiffPayload,
101: mocks.MockStateDiffPayload,
},
}
quitChan := make(chan bool, 1)
backfiller := &historical.BackFillService{
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2,
BatchSize: shared.DefaultMaxBatchSize,
BatchNumber: shared.DefaultMaxBatchNumber,
QuitChan: quitChan,
}
wg := &sync.WaitGroup{}
backfiller.BackFill(wg)
time.Sleep(time.Second * 3)
quitChan <- true
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockConvertedPayload))
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockRetriever.CalledTimes).To(Equal(1))
Expect(len(mockFetcher.CalledAtBlockHeights)).To(Equal(1))
Expect(mockFetcher.CalledAtBlockHeights[0]).To(Equal([]uint64{100, 101}))
})
It("Works for single block `ranges`", func() {
mockPublisher := &mocks.IterativeIPLDPublisher{
ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload},
ReturnErr: nil,
}
mockRetriever := &mocks2.CIDRetriever{
FirstBlockNumberToReturn: 0,
GapsToRetrieve: []shared.Gap{
{
Start: 100, Stop: 100,
},
},
}
mockFetcher := &mocks2.PayloadFetcher{
PayloadsToReturn: map[uint64]shared.RawChainData{
100: mocks.MockStateDiffPayload,
},
}
quitChan := make(chan bool, 1)
backfiller := &historical.BackFillService{
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2,
BatchSize: shared.DefaultMaxBatchSize,
BatchNumber: shared.DefaultMaxBatchNumber,
QuitChan: quitChan,
}
wg := &sync.WaitGroup{}
backfiller.BackFill(wg)
time.Sleep(time.Second * 3)
quitChan <- true
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(1))
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(1))
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockRetriever.CalledTimes).To(Equal(1))
Expect(len(mockFetcher.CalledAtBlockHeights)).To(Equal(1))
Expect(mockFetcher.CalledAtBlockHeights[0]).To(Equal([]uint64{100}))
})
It("Finds beginning gap", func() {
mockPublisher := &mocks.IterativeIPLDPublisher{
ReturnCIDPayload: []*eth.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
ReturnErr: nil,
}
mockConverter := &mocks.IterativePayloadConverter{
ReturnIPLDPayload: []eth.ConvertedPayload{mocks.MockConvertedPayload, mocks.MockConvertedPayload},
ReturnErr: nil,
}
mockRetriever := &mocks2.CIDRetriever{
FirstBlockNumberToReturn: 3,
GapsToRetrieve: []shared.Gap{
{
Start: 0,
Stop: 2,
},
},
}
mockFetcher := &mocks2.PayloadFetcher{
PayloadsToReturn: map[uint64]shared.RawChainData{
1: mocks.MockStateDiffPayload,
2: mocks.MockStateDiffPayload,
},
}
quitChan := make(chan bool, 1)
backfiller := &historical.BackFillService{
Publisher: mockPublisher,
Converter: mockConverter,
Fetcher: mockFetcher,
Retriever: mockRetriever,
GapCheckFrequency: time.Second * 2,
BatchSize: shared.DefaultMaxBatchSize,
BatchNumber: shared.DefaultMaxBatchNumber,
QuitChan: quitChan,
}
wg := &sync.WaitGroup{}
backfiller.BackFill(wg)
time.Sleep(time.Second * 3)
quitChan <- true
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockConvertedPayload))
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockConvertedPayload))
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
Expect(mockRetriever.CalledTimes).To(Equal(1))
Expect(len(mockFetcher.CalledAtBlockHeights)).To(Equal(1))
Expect(mockFetcher.CalledAtBlockHeights[0]).To(Equal([]uint64{0, 1, 2}))
})
})
})

View File

@ -1,183 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/wire"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
type BtcHeader struct {
*wire.BlockHeader
rawdata []byte
cid cid.Cid
}
// Static (compile time) check that BtcHeader satisfies the node.Node interface.
var _ node.Node = (*BtcHeader)(nil)
/*
INPUT
*/
// NewBtcHeader converts a *wire.BlockHeader into a BtcHeader IPLD node
func NewBtcHeader(header *wire.BlockHeader) (*BtcHeader, error) {
w := bytes.NewBuffer(make([]byte, 0, 80))
if err := header.Serialize(w); err != nil {
return nil, err
}
rawdata := w.Bytes()
c, err := RawdataToCid(MBitcoinHeader, rawdata, mh.DBL_SHA2_256)
if err != nil {
return nil, err
}
return &BtcHeader{
BlockHeader: header,
cid: c,
rawdata: rawdata,
}, nil
}
/*
Block INTERFACE
*/
func (b *BtcHeader) Cid() cid.Cid {
return b.cid
}
func (b *BtcHeader) RawData() []byte {
return b.rawdata
}
func (b *BtcHeader) String() string {
return fmt.Sprintf("<BtcHeader %s>", b.cid)
}
func (b *BtcHeader) Loggable() map[string]interface{} {
// TODO: more helpful info here
return map[string]interface{}{
"type": "bitcoin_block",
}
}
/*
Node INTERFACE
*/
func (b *BtcHeader) Links() []*node.Link {
return []*node.Link{
{
Name: "tx",
Cid: sha256ToCid(MBitcoinTx, b.MerkleRoot.CloneBytes()),
},
{
Name: "parent",
Cid: sha256ToCid(MBitcoinHeader, b.PrevBlock.CloneBytes()),
},
}
}
// Resolve attempts to traverse a path through this block.
func (b *BtcHeader) Resolve(path []string) (interface{}, []string, error) {
if len(path) == 0 {
return nil, nil, fmt.Errorf("zero length path")
}
switch path[0] {
case "version":
return b.Version, path[1:], nil
case "timestamp":
return b.Timestamp, path[1:], nil
case "bits":
return b.Bits, path[1:], nil
case "nonce":
return b.Nonce, path[1:], nil
case "parent":
return &node.Link{Cid: sha256ToCid(MBitcoinHeader, b.PrevBlock.CloneBytes())}, path[1:], nil
case "tx":
return &node.Link{Cid: sha256ToCid(MBitcoinTx, b.MerkleRoot.CloneBytes())}, path[1:], nil
default:
return nil, nil, fmt.Errorf("no such link")
}
}
// ResolveLink is a helper function that allows easier traversal of links through blocks
func (b *BtcHeader) ResolveLink(path []string) (*node.Link, []string, error) {
out, rest, err := b.Resolve(path)
if err != nil {
return nil, nil, err
}
lnk, ok := out.(*node.Link)
if !ok {
return nil, nil, fmt.Errorf("object at path was not a link")
}
return lnk, rest, nil
}
func cidToHash(c cid.Cid) []byte {
h := []byte(c.Hash())
return h[len(h)-32:]
}
func hashToCid(hv []byte, t uint64) cid.Cid {
h, _ := mh.Encode(hv, mh.DBL_SHA2_256)
return cid.NewCidV1(t, h)
}
func (b *BtcHeader) Size() (uint64, error) {
return uint64(len(b.rawdata)), nil
}
func (b *BtcHeader) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
func (b *BtcHeader) Tree(p string, depth int) []string {
// TODO: this isn't a correct implementation yet
return []string{"difficulty", "nonce", "version", "timestamp", "tx", "parent"}
}
func (b *BtcHeader) BTCSha() []byte {
blkmh, _ := mh.Sum(b.rawdata, mh.DBL_SHA2_256, -1)
return blkmh[2:]
}
func (b *BtcHeader) HexHash() string {
return hex.EncodeToString(revString(b.BTCSha()))
}
func (b *BtcHeader) Copy() node.Node {
nb := *b // cheating shallow copy
return &nb
}
func revString(s []byte) []byte {
b := make([]byte, len(s))
for i, v := range []byte(s) {
b[len(b)-(i+1)] = v
}
return b
}
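A short sketch of building a BtcHeader node and resolving one of its links; the header values are illustrative and the pkg/ipfs/ipld import path is assumed from the repo layout:

package example

import (
    "fmt"
    "time"

    "github.com/btcsuite/btcd/chaincfg/chainhash"
    "github.com/btcsuite/btcd/wire"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func headerExample() error {
    // illustrative header; real values would come from a bitcoind getblockheader response
    hdr := wire.NewBlockHeader(1, &chainhash.Hash{}, &chainhash.Hash{}, 0x1d00ffff, 0)
    hdr.Timestamp = time.Unix(1231006505, 0)
    hdrNode, err := ipld.NewBtcHeader(hdr)
    if err != nil {
        return err
    }
    fmt.Println("cid:", hdrNode.Cid(), "hash:", hdrNode.HexHash())
    parent, _, err := hdrNode.ResolveLink([]string{"parent"})
    if err != nil {
        return err
    }
    fmt.Println("parent cid:", parent.Cid)
    return nil
}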

View File

@ -1,74 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
node "github.com/ipfs/go-ipld-format"
)
// FromHeaderAndTxs takes a block header and its txs and processes them
// to return a set of IPLD nodes for further processing.
func FromHeaderAndTxs(header *wire.BlockHeader, txs []*btcutil.Tx) (*BtcHeader, []*BtcTx, []*BtcTxTrie, error) {
var txNodes []*BtcTx
for _, tx := range txs {
txNode, err := NewBtcTx(tx.MsgTx())
if err != nil {
return nil, nil, nil, err
}
txNodes = append(txNodes, txNode)
}
txTrie, err := mkMerkleTree(txNodes)
if err != nil {
return nil, nil, nil, err
}
headerNode, err := NewBtcHeader(header)
return headerNode, txNodes, txTrie, err
}
func mkMerkleTree(txs []*BtcTx) ([]*BtcTxTrie, error) {
layer := make([]node.Node, len(txs))
for i, tx := range txs {
layer[i] = tx
}
var out []*BtcTxTrie
var next []node.Node
for len(layer) > 1 {
if len(layer)%2 != 0 {
layer = append(layer, layer[len(layer)-1])
}
for i := 0; i < len(layer)/2; i++ {
var left, right node.Node
left = layer[i*2]
right = layer[(i*2)+1]
t := &BtcTxTrie{
Left: &node.Link{Cid: left.Cid()},
Right: &node.Link{Cid: right.Cid()},
}
out = append(out, t)
next = append(next, t)
}
layer = next
next = nil
}
return out, nil
}
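A sketch of feeding a whole block through FromHeaderAndTxs above; the block would typically come from a bitcoind RPC response deserialized with btcutil (ipld import path assumed as before):

package example

import (
    "fmt"

    "github.com/btcsuite/btcutil"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func blockToBtcIPLDs(block *btcutil.Block) error {
    headerNode, txNodes, txTrieNodes, err := ipld.FromHeaderAndTxs(&block.MsgBlock().Header, block.Transactions())
    if err != nil {
        return err
    }
    fmt.Println("header cid:", headerNode.Cid())
    fmt.Println("tx nodes:", len(txNodes), "tx trie nodes:", len(txTrieNodes))
    return nil
}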

View File

@ -1,258 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"bytes"
"encoding/hex"
"fmt"
"strconv"
"github.com/btcsuite/btcd/wire"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
type BtcTx struct {
*wire.MsgTx
rawdata []byte
cid cid.Cid
}
// Static (compile time) check that BtcTx satisfies the node.Node interface.
var _ node.Node = (*BtcTx)(nil)
/*
INPUT
*/
// NewBtcTx converts a *wire.MsgTx into a BtcTx IPLD node
func NewBtcTx(tx *wire.MsgTx) (*BtcTx, error) {
w := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
if err := tx.Serialize(w); err != nil {
return nil, err
}
rawdata := w.Bytes()
c, err := RawdataToCid(MBitcoinTx, rawdata, mh.DBL_SHA2_256)
if err != nil {
return nil, err
}
return &BtcTx{
MsgTx: tx,
cid: c,
rawdata: rawdata,
}, nil
}
/*
Block INTERFACE
*/
func (t *BtcTx) Cid() cid.Cid {
return t.cid
}
func (t *BtcTx) RawData() []byte {
return t.rawdata
}
func (t *BtcTx) String() string {
return fmt.Sprintf("<BtcTx %s>", t.cid)
}
func (t *BtcTx) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "bitcoinTx",
}
}
/*
Node INTERFACE
*/
func (t *BtcTx) Links() []*node.Link {
var out []*node.Link
for i, in := range t.MsgTx.TxIn {
lnk := &node.Link{Cid: sha256ToCid(MBitcoinTx, in.PreviousOutPoint.Hash.CloneBytes())}
lnk.Name = fmt.Sprintf("inputs/%d/prevTx", i)
out = append(out, lnk)
}
return out
}
func (t *BtcTx) Resolve(path []string) (interface{}, []string, error) {
if len(path) == 0 {
return nil, nil, fmt.Errorf("zero length path")
}
switch path[0] {
case "version":
return t.Version, path[1:], nil
case "lockTime":
return t.LockTime, path[1:], nil
case "inputs":
if len(path) == 1 {
return t.MsgTx.TxIn, nil, nil
}
index, err := strconv.Atoi(path[1])
if err != nil {
return nil, nil, err
}
if index >= len(t.MsgTx.TxIn) || index < 0 {
return nil, nil, fmt.Errorf("index out of range")
}
inp := t.MsgTx.TxIn[index]
if len(path) == 2 {
return inp, nil, nil
}
switch path[2] {
case "prevTx":
return &node.Link{Cid: sha256ToCid(MBitcoinTx, inp.PreviousOutPoint.Hash.CloneBytes())}, path[3:], nil
case "seqNo":
return inp.Sequence, path[3:], nil
case "script":
return inp.SignatureScript, path[3:], nil
default:
return nil, nil, fmt.Errorf("no such link")
}
case "outputs":
if len(path) == 1 {
return t.TxOut, nil, nil
}
index, err := strconv.Atoi(path[1])
if err != nil {
return nil, nil, err
}
if index >= len(t.TxOut) || index < 0 {
return nil, nil, fmt.Errorf("index out of range")
}
outp := t.TxOut[index]
if len(path) == 2 {
return outp, path[2:], nil
}
switch path[2] {
case "value":
return outp.Value, path[3:], nil
case "script":
/*
if outp.Script[0] == 0x6a { // OP_RETURN
c, err := cid.Decode(string(outp.Script[1:]))
if err == nil {
return &node.Link{Cid: c}, path[3:], nil
}
}
*/
return outp.PkScript, path[3:], nil
default:
return nil, nil, fmt.Errorf("no such link")
}
default:
return nil, nil, fmt.Errorf("no such link")
}
}
func (t *BtcTx) ResolveLink(path []string) (*node.Link, []string, error) {
i, rest, err := t.Resolve(path)
if err != nil {
return nil, rest, err
}
lnk, ok := i.(*node.Link)
if !ok {
return nil, nil, fmt.Errorf("value was not a link")
}
return lnk, rest, nil
}
func (t *BtcTx) Size() (uint64, error) {
return uint64(len(t.RawData())), nil
}
func (t *BtcTx) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
func (t *BtcTx) Copy() node.Node {
nt := *t // cheating shallow copy
return &nt
}
func (t *BtcTx) Tree(p string, depth int) []string {
if depth == 0 {
return nil
}
switch p {
case "inputs":
return t.treeInputs(nil, depth+1)
case "outputs":
return t.treeOutputs(nil, depth+1)
case "":
out := []string{"version", "timeLock", "inputs", "outputs"}
out = t.treeInputs(out, depth)
out = t.treeOutputs(out, depth)
return out
default:
return nil
}
}
func (t *BtcTx) treeInputs(out []string, depth int) []string {
if depth < 2 {
return out
}
for i := range t.TxIn {
inp := "inputs/" + fmt.Sprint(i)
out = append(out, inp)
if depth > 2 {
out = append(out, inp+"/prevTx", inp+"/seqNo", inp+"/script")
}
}
return out
}
func (t *BtcTx) treeOutputs(out []string, depth int) []string {
if depth < 2 {
return out
}
for i := range t.TxOut {
o := "outputs/" + fmt.Sprint(i)
out = append(out, o)
if depth > 2 {
out = append(out, o+"/script", o+"/value")
}
}
return out
}
func (t *BtcTx) BTCSha() []byte {
mh, _ := mh.Sum(t.RawData(), mh.DBL_SHA2_256, -1)
return []byte(mh[2:])
}
func (t *BtcTx) HexHash() string {
return hex.EncodeToString(revString(t.BTCSha()))
}
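And a companion sketch of resolving through a transaction node built with NewBtcTx; the path strings follow the Resolve switch above, and the transaction is assumed to have at least one input:

package example

import (
    "fmt"

    "github.com/btcsuite/btcd/wire"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func txExample(msgTx *wire.MsgTx) error {
    txNode, err := ipld.NewBtcTx(msgTx)
    if err != nil {
        return err
    }
    // resolve the previous-transaction link of the first input
    lnk, _, err := txNode.ResolveLink([]string{"inputs", "0", "prevTx"})
    if err != nil {
        return err
    }
    fmt.Println("tx cid:", txNode.Cid(), "first input spends:", lnk.Cid)
    return nil
}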

View File

@ -1,110 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
type BtcTxTrie struct {
Left *node.Link
Right *node.Link
}
func (t *BtcTxTrie) BTCSha() []byte {
return cidToHash(t.Cid())
}
func (t *BtcTxTrie) Cid() cid.Cid {
h, _ := mh.Sum(t.RawData(), mh.DBL_SHA2_256, -1)
return cid.NewCidV1(cid.BitcoinTx, h)
}
func (t *BtcTxTrie) Links() []*node.Link {
return []*node.Link{t.Left, t.Right}
}
func (t *BtcTxTrie) RawData() []byte {
out := make([]byte, 64)
lbytes := cidToHash(t.Left.Cid)
copy(out[:32], lbytes)
rbytes := cidToHash(t.Right.Cid)
copy(out[32:], rbytes)
return out
}
func (t *BtcTxTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "bitcoin_tx_tree",
}
}
func (t *BtcTxTrie) Resolve(path []string) (interface{}, []string, error) {
if len(path) == 0 {
return nil, nil, fmt.Errorf("zero length path")
}
switch path[0] {
case "0":
return t.Left, path[1:], nil
case "1":
return t.Right, path[1:], nil
default:
return nil, nil, fmt.Errorf("no such link")
}
}
func (t *BtcTxTrie) Copy() node.Node {
nt := *t
return &nt
}
func (t *BtcTxTrie) ResolveLink(path []string) (*node.Link, []string, error) {
out, rest, err := t.Resolve(path)
if err != nil {
return nil, nil, err
}
lnk, ok := out.(*node.Link)
if ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("path did not lead to link")
}
func (t *BtcTxTrie) Size() (uint64, error) {
return uint64(len(t.RawData())), nil
}
func (t *BtcTxTrie) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
func (t *BtcTxTrie) String() string {
return fmt.Sprintf("[bitcoin transaction tree]")
}
func (t *BtcTxTrie) Tree(p string, depth int) []string {
return []string{"0", "1"}
}

View File

@ -1,175 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"math/big"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
)
// EthAccountSnapshot (eth-account-snapshot codec 0x97)
// represents an ethereum account, i.e. a wallet address or
// a smart contract
type EthAccountSnapshot struct {
*EthAccount
cid cid.Cid
rawdata []byte
}
// EthAccount is the building block of EthAccountSnapshot;
// it is the snapshot stripped of its cid and rawdata components.
type EthAccount struct {
Nonce uint64
Balance *big.Int
Root []byte // This is the storage root trie
CodeHash []byte // This is the hash of the EVM code
}
// Static (compile time) check that EthAccountSnapshot satisfies the
// node.Node interface.
var _ node.Node = (*EthAccountSnapshot)(nil)
/*
INPUT
*/
// Input should be managed by EthStateTrie
/*
OUTPUT
*/
// Output should be managed by EthStateTrie
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the account snapshot.
func (as *EthAccountSnapshot) RawData() []byte {
return as.rawdata
}
// Cid returns the cid of the account snapshot.
func (as *EthAccountSnapshot) Cid() cid.Cid {
return as.cid
}
// String is a helper for output
func (as *EthAccountSnapshot) String() string {
return fmt.Sprintf("<EthereumAccountSnapshot %s>", as.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (as *EthAccountSnapshot) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-account-snapshot",
}
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (as *EthAccountSnapshot) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return as, nil, nil
}
if len(p) > 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
}
switch p[0] {
case "balance":
return as.Balance, nil, nil
case "codeHash":
return &node.Link{Cid: keccak256ToCid(RawBinary, as.CodeHash)}, nil, nil
case "nonce":
return as.Nonce, nil, nil
case "root":
return &node.Link{Cid: keccak256ToCid(MEthStorageTrie, as.Root)}, nil, nil
default:
return nil, nil, fmt.Errorf("no such link")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (as *EthAccountSnapshot) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{"balance", "codeHash", "nonce", "root"}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (as *EthAccountSnapshot) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := as.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the interface.
func (as *EthAccountSnapshot) Copy() node.Node {
panic("dont use this yet")
}
// Links is a helper function that returns all links within this object
func (as *EthAccountSnapshot) Links() []*node.Link {
return nil
}
// Stat will go away. It is here to comply with the interface.
func (as *EthAccountSnapshot) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (as *EthAccountSnapshot) Size() (uint64, error) {
return 0, nil
}
/*
EthAccountSnapshot functions
*/
// MarshalJSON processes the transaction into readable JSON format.
func (as *EthAccountSnapshot) MarshalJSON() ([]byte, error) {
out := map[string]interface{}{
"balance": as.Balance,
"codeHash": keccak256ToCid(RawBinary, as.CodeHash),
"nonce": as.Nonce,
"root": keccak256ToCid(MEthStorageTrie, as.Root),
}
return json.Marshal(out)
}
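Account snapshots are produced while decoding state trie leaves (see the eth-state-trie file later in this diff), so a consumer normally receives one rather than constructing it; a small illustrative reader (ipld import path assumed as before):

package example

import (
    "encoding/json"
    "fmt"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func printAccount(acct *ipld.EthAccountSnapshot) error {
    balance, _, err := acct.Resolve([]string{"balance"})
    if err != nil {
        return err
    }
    fmt.Println("balance:", balance)
    out, err := json.Marshal(acct) // uses the MarshalJSON defined above
    if err != nil {
        return err
    }
    fmt.Println(string(out))
    return nil
}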

View File

@ -1,256 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
// EthHeader (eth-block, codec 0x90), represents an ethereum block header
type EthHeader struct {
*types.Header
cid cid.Cid
rawdata []byte
}
// Static (compile time) check that EthHeader satisfies the node.Node interface.
var _ node.Node = (*EthHeader)(nil)
/*
INPUT
*/
// NewEthHeader converts a *types.Header into an EthHeader IPLD node
func NewEthHeader(header *types.Header) (*EthHeader, error) {
headerRLP, err := rlp.EncodeToBytes(header)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
if err != nil {
return nil, err
}
return &EthHeader{
Header: header,
cid: c,
rawdata: headerRLP,
}, nil
}
/*
OUTPUT
*/
// DecodeEthHeader takes a cid and its raw binary data
// from IPFS and returns an EthHeader object for further processing.
func DecodeEthHeader(c cid.Cid, b []byte) (*EthHeader, error) {
var h types.Header
// decode into an addressable value; decoding into a nil *types.Header would fail
if err := rlp.DecodeBytes(b, &h); err != nil {
return nil, err
}
return &EthHeader{
Header: &h,
cid: c,
rawdata: b,
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the block header.
func (b *EthHeader) RawData() []byte {
return b.rawdata
}
// Cid returns the cid of the block header.
func (b *EthHeader) Cid() cid.Cid {
return b.cid
}
// String is a helper for output
func (b *EthHeader) String() string {
return fmt.Sprintf("<EthHeader %s>", b.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (b *EthHeader) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-block",
}
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (b *EthHeader) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return b, nil, nil
}
first, rest := p[0], p[1:]
switch first {
case "parent":
return &node.Link{Cid: commonHashToCid(MEthHeader, b.ParentHash)}, rest, nil
case "receipts":
return &node.Link{Cid: commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash)}, rest, nil
case "root":
return &node.Link{Cid: commonHashToCid(MEthStateTrie, b.Root)}, rest, nil
case "tx":
return &node.Link{Cid: commonHashToCid(MEthTxTrie, b.TxHash)}, rest, nil
case "uncles":
return &node.Link{Cid: commonHashToCid(MEthHeaderList, b.UncleHash)}, rest, nil
}
if len(p) != 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", first)
}
switch first {
case "bloom":
return b.Bloom, nil, nil
case "coinbase":
return b.Coinbase, nil, nil
case "difficulty":
return b.Difficulty, nil, nil
case "extra":
// This is a []byte. By default they are marshalled into Base64.
return fmt.Sprintf("0x%x", b.Extra), nil, nil
case "gaslimit":
return b.GasLimit, nil, nil
case "gasused":
return b.GasUsed, nil, nil
case "mixdigest":
return b.MixDigest, nil, nil
case "nonce":
return b.Nonce, nil, nil
case "number":
return b.Number, nil, nil
case "time":
return b.Time, nil, nil
default:
return nil, nil, fmt.Errorf("no such link")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (b *EthHeader) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{
"time",
"bloom",
"coinbase",
"difficulty",
"extra",
"gaslimit",
"gasused",
"mixdigest",
"nonce",
"number",
"parent",
"receipts",
"root",
"tx",
"uncles",
}
}
// ResolveLink is a helper function that allows easier traversal of links through blocks
func (b *EthHeader) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := b.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the Node interface.
func (b *EthHeader) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
// HINT: Use `ipfs refs <cid>`
func (b *EthHeader) Links() []*node.Link {
return []*node.Link{
{Cid: commonHashToCid(MEthHeader, b.ParentHash)},
{Cid: commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash)},
{Cid: commonHashToCid(MEthStateTrie, b.Root)},
{Cid: commonHashToCid(MEthTxTrie, b.TxHash)},
{Cid: commonHashToCid(MEthHeaderList, b.UncleHash)},
}
}
// Stat will go away. It is here to comply with the Node interface.
func (b *EthHeader) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the Node interface.
func (b *EthHeader) Size() (uint64, error) {
return 0, nil
}
/*
EthHeader functions
*/
// MarshalJSON processes the block header into readable JSON format,
// converting the right links into their cids, and keeping the original
// hex hash, allowing the user to simplify external queries.
func (b *EthHeader) MarshalJSON() ([]byte, error) {
out := map[string]interface{}{
"time": b.Time,
"bloom": b.Bloom,
"coinbase": b.Coinbase,
"difficulty": b.Difficulty,
"extra": fmt.Sprintf("0x%x", b.Extra),
"gaslimit": b.GasLimit,
"gasused": b.GasUsed,
"mixdigest": b.MixDigest,
"nonce": b.Nonce,
"number": b.Number,
"parent": commonHashToCid(MEthHeader, b.ParentHash),
"receipts": commonHashToCid(MEthTxReceiptTrie, b.ReceiptHash),
"root": commonHashToCid(MEthStateTrie, b.Root),
"tx": commonHashToCid(MEthTxTrie, b.TxHash),
"uncles": commonHashToCid(MEthHeaderList, b.UncleHash),
}
return json.Marshal(out)
}
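A minimal sketch of turning a header into an IPLD node with NewEthHeader; the header values are illustrative and the ipld import path is assumed as before:

package example

import (
    "encoding/json"
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/core/types"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func headerToIPLD() error {
    header := &types.Header{
        Number:     big.NewInt(1),
        Difficulty: big.NewInt(131072),
        GasLimit:   8000000,
    }
    hdrNode, err := ipld.NewEthHeader(header)
    if err != nil {
        return err
    }
    fmt.Println("cid:", hdrNode.Cid())
    out, err := json.Marshal(hdrNode) // MarshalJSON above turns the hash fields into cid links
    if err != nil {
        return err
    }
    fmt.Println(string(out))
    return nil
}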

View File

@ -1,97 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/core/types"
)
// FromBlockAndReceipts takes a block and its receipts and processes them
// to return a set of IPLD nodes for further processing.
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, error) {
// Process the header
headerNode, err := NewEthHeader(block.Header())
if err != nil {
return nil, nil, nil, nil, nil, nil, err
}
// Process the uncles
uncleNodes := make([]*EthHeader, len(block.Uncles()))
for i, uncle := range block.Uncles() {
uncleNode, err := NewEthHeader(uncle)
if err != nil {
return nil, nil, nil, nil, nil, nil, err
}
uncleNodes[i] = uncleNode
}
// Process the txs
ethTxNodes, ethTxTrieNodes, err := processTransactions(block.Transactions(),
block.Header().TxHash[:])
if err != nil {
return nil, nil, nil, nil, nil, nil, err
}
// Process the receipts
ethRctNodes, ethRctTrieNodes, err := processReceipts(receipts,
block.Header().ReceiptHash[:])
return headerNode, uncleNodes, ethTxNodes, ethTxTrieNodes, ethRctNodes, ethRctTrieNodes, err
}
// processTransactions will take the found transactions in a parsed block body
// to return IPLD node slices for eth-tx and eth-tx-trie
func processTransactions(txs []*types.Transaction, expectedTxRoot []byte) ([]*EthTx, []*EthTxTrie, error) {
var ethTxNodes []*EthTx
transactionTrie := newTxTrie()
for idx, tx := range txs {
ethTx, err := NewEthTx(tx)
if err != nil {
return nil, nil, err
}
ethTxNodes = append(ethTxNodes, ethTx)
transactionTrie.add(idx, ethTx.RawData())
}
if !bytes.Equal(transactionTrie.rootHash(), expectedTxRoot) {
return nil, nil, fmt.Errorf("wrong transaction hash computed")
}
return ethTxNodes, transactionTrie.getNodes(), nil
}
// processReceipts will take in receipts
// to return IPLD node slices for eth-rct and eth-rct-trie
func processReceipts(rcts []*types.Receipt, expectedRctRoot []byte) ([]*EthReceipt, []*EthRctTrie, error) {
var ethRctNodes []*EthReceipt
receiptTrie := newRctTrie()
for idx, rct := range rcts {
ethRct, err := NewReceipt(rct)
if err != nil {
return nil, nil, err
}
ethRctNodes = append(ethRctNodes, ethRct)
receiptTrie.add(idx, ethRct.RawData())
}
if !bytes.Equal(receiptTrie.rootHash(), expectedRctRoot) {
return nil, nil, fmt.Errorf("wrong receipt hash computed")
}
return ethRctNodes, receiptTrie.getNodes(), nil
}
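A sketch of calling FromBlockAndReceipts on a block and receipts fetched elsewhere (for example over RPC); it simply reports what was produced:

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func blockToEthIPLDs(block *types.Block, receipts []*types.Receipt) error {
    header, uncles, txs, txTrie, rcts, rctTrie, err := ipld.FromBlockAndReceipts(block, receipts)
    if err != nil {
        return err // e.g. "wrong transaction hash computed" when the txs do not match the header
    }
    fmt.Println("header cid:", header.Cid())
    fmt.Printf("uncles=%d txs=%d txTrieNodes=%d rcts=%d rctTrieNodes=%d\n",
        len(uncles), len(txs), len(txTrie), len(rcts), len(rctTrie))
    return nil
}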

View File

@ -1,199 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"strconv"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
type EthReceipt struct {
*types.Receipt
rawdata []byte
cid cid.Cid
}
// Static (compile time) check that EthReceipt satisfies the node.Node interface.
var _ node.Node = (*EthReceipt)(nil)
/*
INPUT
*/
// NewReceipt converts a *types.Receipt into an EthReceipt IPLD node
func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
receiptRLP, err := rlp.EncodeToBytes(receipt)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthTxReceipt, receiptRLP, mh.KECCAK_256)
if err != nil {
return nil, err
}
return &EthReceipt{
Receipt: receipt,
cid: c,
rawdata: receiptRLP,
}, nil
}
/*
OUTPUT
*/
// DecodeEthReceipt takes a cid and its raw binary data
// from IPFS and returns an EthReceipt object for further processing.
func DecodeEthReceipt(c cid.Cid, b []byte) (*EthReceipt, error) {
var r types.Receipt
// decode into an addressable value; decoding into a nil *types.Receipt would fail
if err := rlp.DecodeBytes(b, &r); err != nil {
return nil, err
}
return &EthReceipt{
Receipt: &r,
cid: c,
rawdata: b,
}, nil
}
/*
Block INTERFACE
*/
func (node *EthReceipt) RawData() []byte {
return node.rawdata
}
func (node *EthReceipt) Cid() cid.Cid {
return node.cid
}
// String is a helper for output
func (r *EthReceipt) String() string {
return fmt.Sprintf("<EthereumReceipt %s>", r.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (r *EthReceipt) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-receipt",
}
}
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (r *EthReceipt) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return r, nil, nil
}
if len(p) > 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
}
switch p[0] {
case "root":
return r.PostState, nil, nil
case "status":
return r.Status, nil, nil
case "cumulativeGasUsed":
return r.CumulativeGasUsed, nil, nil
case "logsBloom":
return r.Bloom, nil, nil
case "logs":
return r.Logs, nil, nil
case "transactionHash":
return r.TxHash, nil, nil
case "contractAddress":
return r.ContractAddress, nil, nil
case "gasUsed":
return r.GasUsed, nil, nil
default:
return nil, nil, fmt.Errorf("no such link")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (r *EthReceipt) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{"root", "status", "cumulativeGasUsed", "logsBloom", "logs", "transactionHash", "contractAddress", "gasUsed"}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (r *EthReceipt) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := r.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the Node interface.
func (*EthReceipt) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (*EthReceipt) Links() []*node.Link {
return nil
}
// Stat will go away. It is here to comply with the interface.
func (r *EthReceipt) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (r *EthReceipt) Size() (uint64, error) {
return strconv.ParseUint(r.Receipt.Size().String(), 10, 64)
}
/*
EthReceipt functions
*/
// MarshalJSON processes the receipt into readable JSON format.
func (r *EthReceipt) MarshalJSON() ([]byte, error) {
out := map[string]interface{}{
"root": r.PostState,
"status": r.Status,
"cumulativeGasUsed": r.CumulativeGasUsed,
"logsBloom": r.Bloom,
"logs": r.Logs,
"transactionHash": r.TxHash,
"contractAddress": r.ContractAddress,
"gasUsed": r.GasUsed,
}
return json.Marshal(out)
}
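A short sketch of wrapping a receipt with NewReceipt and resolving a field; the receipt values are illustrative (ipld import path assumed as before):

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"

    "github.com/vulcanize/ipfs-blockchain-watcher/pkg/ipfs/ipld"
)

func receiptToIPLD() error {
    rct := &types.Receipt{
        Status:            types.ReceiptStatusSuccessful,
        CumulativeGasUsed: 21000,
    }
    rctNode, err := ipld.NewReceipt(rct)
    if err != nil {
        return err
    }
    status, _, err := rctNode.Resolve([]string{"status"})
    if err != nil {
        return err
    }
    fmt.Println("cid:", rctNode.Cid(), "status:", status)
    return nil
}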

View File

@ -1,152 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// EthRctTrie (eth-tx-receipt-trie, codec 0x94) represents
// a node from the receipt trie in ethereum.
type EthRctTrie struct {
*TrieNode
}
// Static (compile time) check that EthRctTrie satisfies the node.Node interface.
var _ node.Node = (*EthRctTrie)(nil)
/*
INPUT
*/
// To create a proper trie of the eth-rct-trie objects, it is required
// to input all receipts of a block in a single step.
// The receipts are added and their trie is built at
// block body parsing time.
/*
OUTPUT
*/
// DecodeEthRctTrie returns an EthRctTrie object from its cid and rawdata.
func DecodeEthRctTrie(c cid.Cid, b []byte) (*EthRctTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthRctTrieLeaf)
if err != nil {
return nil, err
}
return &EthRctTrie{TrieNode: tn}, nil
}
// decodeEthRctTrieLeaf parses an eth-rct-trie leaf
// from decoded RLP elements
func decodeEthRctTrieLeaf(i []interface{}) ([]interface{}, error) {
var r types.Receipt
err := rlp.DecodeBytes(i[1].([]byte), &r)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthTxReceipt, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthReceipt{
Receipt: &r,
cid: c,
rawdata: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the binary of the RLP encode of the receipt trie node.
func (t *EthRctTrie) RawData() []byte {
return t.rawdata
}
// Cid returns the cid of the receipt trie node.
func (t *EthRctTrie) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthRctTrie) String() string {
return fmt.Sprintf("<EthereumRctTrie %s>", t.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (t *EthRctTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-rct-trie",
}
}
/*
EthRctTrie functions
*/
// rctTrie wraps a localTrie for use on the receipt trie.
type rctTrie struct {
*localTrie
}
// newRctTrie initializes and returns a rctTrie.
func newRctTrie() *rctTrie {
return &rctTrie{
localTrie: newLocalTrie(),
}
}
// getNodes invokes the localTrie, which computes the root hash of the
// receipt trie and returns its database keys, to return a slice
// of EthRctTrie nodes.
func (rt *rctTrie) getNodes() []*EthRctTrie {
keys := rt.getKeys()
var out []*EthRctTrie
it := rt.trie.NodeIterator([]byte{})
for it.Next(true) {
}
for _, k := range keys {
rawdata, err := rt.db.Get(k)
if err != nil {
panic(err)
}
c, err := RawdataToCid(MEthTxReceiptTrie, rawdata, multihash.KECCAK_256)
if err != nil {
return nil
}
tn := &TrieNode{
cid: c,
rawdata: rawdata,
}
out = append(out, &EthRctTrie{TrieNode: tn})
}
return out
}

View File

@ -1,114 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/rlp"
)
// EthStateTrie (eth-state-trie, codec 0x96), represents
// a node from the state trie in ethereum.
type EthStateTrie struct {
*TrieNode
}
// Static (compile time) check that EthStateTrie satisfies the node.Node interface.
var _ node.Node = (*EthStateTrie)(nil)
/*
INPUT
*/
// FromStateTrieRLP takes the RLP representation of an ethereum
// state trie node to return it as an IPLD node for further processing.
func FromStateTrieRLP(raw []byte) (*EthStateTrie, error) {
c, err := RawdataToCid(MEthStateTrie, raw, multihash.KECCAK_256)
if err != nil {
return nil, err
}
// Go the extra mile and process the nodeKind and
// its elements, in case somebody needs this function
// to parse an RLP element from the filesystem
return DecodeEthStateTrie(c, raw)
}
/*
OUTPUT
*/
// DecodeEthStateTrie returns an EthStateTrie object from its cid and rawdata.
func DecodeEthStateTrie(c cid.Cid, b []byte) (*EthStateTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthStateTrieLeaf)
if err != nil {
return nil, err
}
return &EthStateTrie{TrieNode: tn}, nil
}
// decodeEthStateTrieLeaf parses an eth-state-trie leaf
// from decoded RLP elements
func decodeEthStateTrieLeaf(i []interface{}) ([]interface{}, error) {
var account EthAccount
err := rlp.DecodeBytes(i[1].([]byte), &account)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthAccountSnapshot, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthAccountSnapshot{
EthAccount: &account,
cid: c,
rawdata: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the RLP-encoded binary of the state trie node.
func (st *EthStateTrie) RawData() []byte {
return st.rawdata
}
// Cid returns the cid of the state trie node.
func (st *EthStateTrie) Cid() cid.Cid {
return st.cid
}
// String is a helper for output
func (st *EthStateTrie) String() string {
return fmt.Sprintf("<EthereumStateTrie %s>", st.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (st *EthStateTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-state-trie",
}
}
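// Example sketch (not part of the original source): decoding a raw state trie
// node into an IPLD and printing its CID. exampleDecodeStateNode is a
// hypothetical name and rawNode is a placeholder for RLP bytes fetched from a
// client or the filesystem.
func exampleDecodeStateNode(rawNode []byte) error {
	stNode, err := FromStateTrieRLP(rawNode)
	if err != nil {
		return err
	}
	fmt.Printf("decoded %s (cid: %s)\n", stNode.String(), stNode.Cid())
	return nil
}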

View File

@ -1,100 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
)
// EthStorageTrie (eth-storage-trie, codec 0x98), represents
// a node from the storage trie in ethereum.
type EthStorageTrie struct {
*TrieNode
}
// Static (compile time) check that EthStorageTrie satisfies the node.Node interface.
var _ node.Node = (*EthStorageTrie)(nil)
/*
INPUT
*/
// FromStorageTrieRLP takes the RLP representation of an ethereum
// storage trie node to return it as an IPLD node for further processing.
func FromStorageTrieRLP(raw []byte) (*EthStorageTrie, error) {
c, err := RawdataToCid(MEthStorageTrie, raw, multihash.KECCAK_256)
if err != nil {
return nil, err
}
// Decode the nodeKind and its elements as well, in case this
// function is used to parse an RLP element read from the filesystem
return DecodeEthStorageTrie(c, raw)
}
/*
OUTPUT
*/
// DecodeEthStorageTrie returns an EthStorageTrie object from its cid and rawdata.
func DecodeEthStorageTrie(c cid.Cid, b []byte) (*EthStorageTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthStorageTrieLeaf)
if err != nil {
return nil, err
}
return &EthStorageTrie{TrieNode: tn}, nil
}
// decodeEthStorageTrieLeaf parses an eth-storage-trie leaf
// from its decoded RLP elements
func decodeEthStorageTrieLeaf(i []interface{}) ([]interface{}, error) {
return []interface{}{
i[0].([]byte),
i[1].([]byte),
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the RLP-encoded binary of the storage trie node.
func (st *EthStorageTrie) RawData() []byte {
return st.rawdata
}
// Cid returns the cid of the storage trie node.
func (st *EthStorageTrie) Cid() cid.Cid {
return st.cid
}
// String is a helper for output
func (st *EthStorageTrie) String() string {
return fmt.Sprintf("<EthereumStorageTrie %s>", st.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (st *EthStorageTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-storage-trie",
}
}

View File

@ -1,215 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
)
// EthTx (eth-tx codec 0x93) represents an ethereum transaction
type EthTx struct {
*types.Transaction
cid cid.Cid
rawdata []byte
}
// Static (compile time) check that EthTx satisfies the node.Node interface.
var _ node.Node = (*EthTx)(nil)
/*
INPUT
*/
// NewEthTx converts a *types.Transaction to an EthTx IPLD node
func NewEthTx(tx *types.Transaction) (*EthTx, error) {
txRLP, err := rlp.EncodeToBytes(tx)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthTx, txRLP, mh.KECCAK_256)
if err != nil {
return nil, err
}
return &EthTx{
Transaction: tx,
cid: c,
rawdata: txRLP,
}, nil
}
/*
OUTPUT
*/
// DecodeEthTx takes a cid and its raw binary data
// from IPFS and returns an EthTx object for further processing.
func DecodeEthTx(c cid.Cid, b []byte) (*EthTx, error) {
t := new(types.Transaction)
if err := rlp.DecodeBytes(b, t); err != nil {
return nil, err
}
return &EthTx{
Transaction: t,
cid: c,
rawdata: b,
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the RLP-encoded binary of the transaction.
func (t *EthTx) RawData() []byte {
return t.rawdata
}
// Cid returns the cid of the transaction.
func (t *EthTx) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthTx) String() string {
return fmt.Sprintf("<EthereumTx %s>", t.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (t *EthTx) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-tx",
}
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *EthTx) Resolve(p []string) (interface{}, []string, error) {
if len(p) == 0 {
return t, nil, nil
}
if len(p) > 1 {
return nil, nil, fmt.Errorf("unexpected path elements past %s", p[0])
}
switch p[0] {
case "gas":
return t.Gas(), nil, nil
case "gasPrice":
return t.GasPrice(), nil, nil
case "input":
return fmt.Sprintf("%x", t.Data()), nil, nil
case "nonce":
return t.Nonce(), nil, nil
case "r":
_, r, _ := t.RawSignatureValues()
return hexutil.EncodeBig(r), nil, nil
case "s":
_, _, s := t.RawSignatureValues()
return hexutil.EncodeBig(s), nil, nil
case "toAddress":
return t.To(), nil, nil
case "v":
v, _, _ := t.RawSignatureValues()
return hexutil.EncodeBig(v), nil, nil
case "value":
return hexutil.EncodeBig(t.Value()), nil, nil
default:
return nil, nil, fmt.Errorf("no such link")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *EthTx) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
return []string{"gas", "gasPrice", "input", "nonce", "r", "s", "toAddress", "v", "value"}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (t *EthTx) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := t.Resolve(p)
if err != nil {
return nil, nil, err
}
if lnk, ok := obj.(*node.Link); ok {
return lnk, rest, nil
}
return nil, nil, fmt.Errorf("resolved item was not a link")
}
// Copy will go away. It is here to comply with the interface.
func (t *EthTx) Copy() node.Node {
panic("implement me")
}
// Links is a helper function that returns all links within this object
func (t *EthTx) Links() []*node.Link {
return nil
}
// Stat will go away. It is here to comply with the interface.
func (t *EthTx) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (t *EthTx) Size() (uint64, error) {
return strconv.ParseUint(t.Transaction.Size().String(), 10, 64)
}
/*
EthTx functions
*/
// MarshalJSON processes the transaction into readable JSON format.
func (t *EthTx) MarshalJSON() ([]byte, error) {
v, r, s := t.RawSignatureValues()
out := map[string]interface{}{
"gas": t.Gas(),
"gasPrice": hexutil.EncodeBig(t.GasPrice()),
"input": fmt.Sprintf("%x", t.Data()),
"nonce": t.Nonce(),
"r": hexutil.EncodeBig(r),
"s": hexutil.EncodeBig(s),
"toAddress": t.To(),
"v": hexutil.EncodeBig(v),
"value": hexutil.EncodeBig(t.Value()),
}
return json.Marshal(out)
}
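// Example sketch (not part of the original source): wrapping a signed
// transaction as an EthTx IPLD and resolving one of its fields through the
// Node interface. exampleResolveTxValue is a hypothetical name and tx is
// assumed to be a *types.Transaction obtained elsewhere.
func exampleResolveTxValue(tx *types.Transaction) error {
	n, err := NewEthTx(tx)
	if err != nil {
		return err
	}
	val, rest, err := n.Resolve([]string{"value"})
	if err != nil {
		return err
	}
	fmt.Printf("value=%v remaining=%v\n", val, rest)
	return nil
}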

View File

@ -1,152 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"fmt"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// EthTxTrie (eth-tx-trie codec 0x92) represents
// a node from the transaction trie in ethereum.
type EthTxTrie struct {
*TrieNode
}
// Static (compile time) check that EthTxTrie satisfies the node.Node interface.
var _ node.Node = (*EthTxTrie)(nil)
/*
INPUT
*/
// To create a proper trie of the eth-tx-trie objects, it is required
// to input all transactions belonging to a block in a single step.
// The transactions are added and their trie is created at
// block body parsing time.
/*
OUTPUT
*/
// DecodeEthTxTrie returns an EthTxTrie object from its cid and rawdata.
func DecodeEthTxTrie(c cid.Cid, b []byte) (*EthTxTrie, error) {
tn, err := decodeTrieNode(c, b, decodeEthTxTrieLeaf)
if err != nil {
return nil, err
}
return &EthTxTrie{TrieNode: tn}, nil
}
// decodeEthTxTrieLeaf parses an eth-tx-trie leaf
// from its decoded RLP elements
func decodeEthTxTrieLeaf(i []interface{}) ([]interface{}, error) {
var t types.Transaction
err := rlp.DecodeBytes(i[1].([]byte), &t)
if err != nil {
return nil, err
}
c, err := RawdataToCid(MEthTx, i[1].([]byte), multihash.KECCAK_256)
if err != nil {
return nil, err
}
return []interface{}{
i[0].([]byte),
&EthTx{
Transaction: &t,
cid: c,
rawdata: i[1].([]byte),
},
}, nil
}
/*
Block INTERFACE
*/
// RawData returns the RLP-encoded binary of the transaction trie node.
func (t *EthTxTrie) RawData() []byte {
return t.rawdata
}
// Cid returns the cid of the transaction trie node.
func (t *EthTxTrie) Cid() cid.Cid {
return t.cid
}
// String is a helper for output
func (t *EthTxTrie) String() string {
return fmt.Sprintf("<EthereumTxTrie %s>", t.cid)
}
// Loggable returns in a map the type of IPLD Link.
func (t *EthTxTrie) Loggable() map[string]interface{} {
return map[string]interface{}{
"type": "eth-tx-trie",
}
}
/*
EthTxTrie functions
*/
// txTrie wraps a localTrie for use on the transaction trie.
type txTrie struct {
*localTrie
}
// newTxTrie initializes and returns a txTrie.
func newTxTrie() *txTrie {
return &txTrie{
localTrie: newLocalTrie(),
}
}
// getNodes invokes the localTrie, which computes the root hash of the
// transaction trie and returns its database keys, to return a slice
// of EthTxTrie nodes.
func (tt *txTrie) getNodes() []*EthTxTrie {
keys := tt.getKeys()
var out []*EthTxTrie
// fully traverse the trie before reading the stored values back out by key
it := tt.trie.NodeIterator([]byte{})
for it.Next(true) {
}
for _, k := range keys {
rawdata, err := tt.db.Get(k)
if err != nil {
panic(err)
}
c, err := RawdataToCid(MEthTxTrie, rawdata, multihash.KECCAK_256)
if err != nil {
return nil
}
tn := &TrieNode{
cid: c,
rawdata: rawdata,
}
out = append(out, &EthTxTrie{TrieNode: tn})
}
return out
}

View File

@ -1,151 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"bytes"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
)
// IPLD Codecs for Ethereum
// See the authoritative document:
// https://github.com/multiformats/multicodec/blob/master/table.csv
const (
RawBinary = 0x55
MEthHeader = 0x90
MEthHeaderList = 0x91
MEthTxTrie = 0x92
MEthTx = 0x93
MEthTxReceiptTrie = 0x94
MEthTxReceipt = 0x95
MEthStateTrie = 0x96
MEthAccountSnapshot = 0x97
MEthStorageTrie = 0x98
MBitcoinHeader = 0xb0
MBitcoinTx = 0xb1
)
// RawdataToCid takes the desired codec and a slice of bytes
// and returns the proper cid of the object.
func RawdataToCid(codec uint64, rawdata []byte, multiHash uint64) (cid.Cid, error) {
c, err := cid.Prefix{
Codec: codec,
Version: 1,
MhType: multiHash,
MhLength: -1,
}.Sum(rawdata)
if err != nil {
return cid.Cid{}, err
}
return c, nil
}
// keccak256ToCid takes a keccak256 hash and returns its cid based on
// the codec given.
func keccak256ToCid(codec uint64, h []byte) cid.Cid {
buf, err := mh.Encode(h, mh.KECCAK_256)
if err != nil {
panic(err)
}
return cid.NewCidV1(codec, mh.Multihash(buf))
}
// commonHashToCid takes a go-ethereum common.Hash and returns its
// cid based on the codec given.
func commonHashToCid(codec uint64, h common.Hash) cid.Cid {
mhash, err := mh.Encode(h[:], mh.KECCAK_256)
if err != nil {
panic(err)
}
return cid.NewCidV1(codec, mhash)
}
// sha256ToCid takes a sha256 hash and returns its cid based on the
// codec given.
func sha256ToCid(codec uint64, h []byte) cid.Cid {
hash, err := mh.Encode(h, mh.DBL_SHA2_256)
if err != nil {
panic(err)
}
return cid.NewCidV1(codec, hash)
}
// getRLP encodes the given object to RLP returning its bytes.
func getRLP(object interface{}) []byte {
buf := new(bytes.Buffer)
if err := rlp.Encode(buf, object); err != nil {
panic(err)
}
return buf.Bytes()
}
// localTrie wraps a go-ethereum trie and its underlying memory db.
// It contributes to the creation of the trie node objects.
type localTrie struct {
keys [][]byte
db ethdb.Database
trie *trie.Trie
}
// newLocalTrie initializes and returns a localTrie object
func newLocalTrie() *localTrie {
var err error
lt := &localTrie{}
lt.db = rawdb.NewMemoryDatabase()
lt.trie, err = trie.New(common.Hash{}, trie.NewDatabase(lt.db))
if err != nil {
panic(err)
}
return lt
}
// add receives the index of an object and its rawdata value
// and inserts them into the localTrie
func (lt *localTrie) add(idx int, rawdata []byte) {
key, err := rlp.EncodeToBytes(uint(idx))
if err != nil {
panic(err)
}
lt.keys = append(lt.keys, key)
if err := lt.db.Put(key, rawdata); err != nil {
panic(err)
}
lt.trie.Update(key, rawdata)
}
// rootHash returns the computed trie root.
// Useful for sanity checks on parsed data.
func (lt *localTrie) rootHash() []byte {
return lt.trie.Hash().Bytes()
}
// getKeys returns the stored keys of the memory database
// of the localTrie for further processing.
func (lt *localTrie) getKeys() [][]byte {
return lt.keys
}
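// Example sketch (not part of the original source): deriving the CID for an
// RLP-encoded header with the codec table above. An eth-header IPLD (0x90)
// pairs the codec with a keccak-256 multihash, so the resulting CID embeds the
// same hash geth computes for the header. exampleHeaderCid is a hypothetical
// name and headerRLP is a placeholder for bytes obtained elsewhere.
func exampleHeaderCid(headerRLP []byte) (cid.Cid, error) {
	return RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
}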

View File

@ -1,444 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipld
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
)
// TrieNode is the general abstraction for
// ethereum IPLD trie nodes.
type TrieNode struct {
// leaf, extension or branch
nodeKind string
// If leaf or extension: [0] is key, [1] is val.
// If branch: [0] - [16] are children.
elements []interface{}
// IPLD block information
cid cid.Cid
rawdata []byte
}
/*
OUTPUT
*/
type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)
// decodeTrieNode returns a TrieNode object from an IPLD block's
// cid and rawdata.
func decodeTrieNode(c cid.Cid, b []byte,
leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
var (
i, decoded, elements []interface{}
nodeKind string
err error
)
if err = rlp.DecodeBytes(b, &i); err != nil {
return nil, err
}
codec := c.Type()
switch len(i) {
case 2:
nodeKind, decoded, err = decodeCompactKey(i)
if err != nil {
return nil, err
}
if nodeKind == "extension" {
elements, err = parseTrieNodeExtension(decoded, codec)
}
if nodeKind == "leaf" {
elements, err = leafDecoder(decoded)
}
if nodeKind != "extension" && nodeKind != "leaf" {
return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
}
case 17:
nodeKind = "branch"
elements, err = parseTrieNodeBranch(i, codec)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown trie node type")
}
return &TrieNode{
nodeKind: nodeKind,
elements: elements,
rawdata: b,
cid: c,
}, nil
}
// decodeCompactKey takes a compact key, and returns its nodeKind and value.
func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
first := i[0].([]byte)
last := i[1].([]byte)
switch first[0] / 16 {
case '\x00':
return "extension", []interface{}{
nibbleToByte(first)[2:],
last,
}, nil
case '\x01':
return "extension", []interface{}{
nibbleToByte(first)[1:],
last,
}, nil
case '\x02':
return "leaf", []interface{}{
nibbleToByte(first)[2:],
last,
}, nil
case '\x03':
return "leaf", []interface{}{
nibbleToByte(first)[1:],
last,
}, nil
default:
return "", nil, fmt.Errorf("unknown hex prefix")
}
}
// parseTrieNodeExtension helper improves readability
func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
return []interface{}{
i[0].([]byte),
keccak256ToCid(codec, i[1].([]byte)),
}, nil
}
// parseTrieNodeBranch helper improves readability
func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
var out []interface{}
for i, vi := range i {
v, ok := vi.([]byte)
// Sometimes this throws "panic: interface conversion: interface {} is []interface {}, not []uint8"
// Figure out why, and if it is okay to continue
if !ok {
return nil, fmt.Errorf("unable to decode branch node entry into []byte at position: %d value: %+v", i, vi)
}
switch len(v) {
case 0:
out = append(out, nil)
case 32:
out = append(out, keccak256ToCid(codec, v))
default:
return nil, fmt.Errorf("unrecognized object: %v", v)
}
}
return out, nil
}
/*
Node INTERFACE
*/
// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
switch t.nodeKind {
case "extension":
return t.resolveTrieNodeExtension(p)
case "leaf":
return t.resolveTrieNodeLeaf(p)
case "branch":
return t.resolveTrieNodeBranch(p)
default:
return nil, nil, fmt.Errorf("nodeKind case not implemented")
}
}
// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *TrieNode) Tree(p string, depth int) []string {
if p != "" || depth == 0 {
return nil
}
var out []string
switch t.nodeKind {
case "extension":
var val string
for _, e := range t.elements[0].([]byte) {
val += fmt.Sprintf("%x", e)
}
return []string{val}
case "branch":
for i, elem := range t.elements {
if _, ok := elem.(cid.Cid); ok {
out = append(out, fmt.Sprintf("%x", i))
}
}
return out
default:
return nil
}
}
// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
obj, rest, err := t.Resolve(p)
if err != nil {
return nil, nil, err
}
lnk, ok := obj.(*node.Link)
if !ok {
return nil, nil, fmt.Errorf("was not a link")
}
return lnk, rest, nil
}
// Copy will go away. It is here to comply with the interface.
func (t *TrieNode) Copy() node.Node {
panic("don't use this yet")
}
// Links is a helper function that returns all links within this object
func (t *TrieNode) Links() []*node.Link {
var out []*node.Link
for _, i := range t.elements {
c, ok := i.(cid.Cid)
if ok {
out = append(out, &node.Link{Cid: c})
}
}
return out
}
// Stat will go away. It is here to comply with the interface.
func (t *TrieNode) Stat() (*node.NodeStat, error) {
return &node.NodeStat{}, nil
}
// Size will go away. It is here to comply with the interface.
func (t *TrieNode) Size() (uint64, error) {
return 0, nil
}
/*
TrieNode functions
*/
// MarshalJSON processes the trie node into readable JSON format.
func (t *TrieNode) MarshalJSON() ([]byte, error) {
var out map[string]interface{}
switch t.nodeKind {
case "extension":
fallthrough
case "leaf":
var hexPrefix string
for _, e := range t.elements[0].([]byte) {
hexPrefix += fmt.Sprintf("%x", e)
}
// if we got a byte we need to do this casting otherwise
// it will be marshaled to a base64 encoded value
if _, ok := t.elements[1].([]byte); ok {
var hexVal string
for _, e := range t.elements[1].([]byte) {
hexVal += fmt.Sprintf("%x", e)
}
t.elements[1] = hexVal
}
out = map[string]interface{}{
"type": t.nodeKind,
hexPrefix: t.elements[1],
}
case "branch":
out = map[string]interface{}{
"type": "branch",
"0": t.elements[0],
"1": t.elements[1],
"2": t.elements[2],
"3": t.elements[3],
"4": t.elements[4],
"5": t.elements[5],
"6": t.elements[6],
"7": t.elements[7],
"8": t.elements[8],
"9": t.elements[9],
"a": t.elements[10],
"b": t.elements[11],
"c": t.elements[12],
"d": t.elements[13],
"e": t.elements[14],
"f": t.elements[15],
}
default:
return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
}
return json.Marshal(out)
}
// nibbleToByte expands the nibbles of a byte slice into their own bytes.
func nibbleToByte(k []byte) []byte {
var out []byte
for _, b := range k {
out = append(out, b/16)
out = append(out, b%16)
}
return out
}
// Resolve reading conveniences
func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
nibbles := t.elements[0].([]byte)
idx, rest := shiftFromPath(p, len(nibbles))
if len(idx) < len(nibbles) {
return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
}
for _, i := range idx {
if getHexIndex(string(i)) == -1 {
return nil, nil, fmt.Errorf("invalid path element")
}
}
for i, n := range nibbles {
if string(idx[i]) != fmt.Sprintf("%x", n) {
return nil, nil, fmt.Errorf("no such link in this extension")
}
}
return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
}
func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
nibbles := t.elements[0].([]byte)
if len(nibbles) != 0 {
idx, rest := shiftFromPath(p, len(nibbles))
if len(idx) < len(nibbles) {
return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
}
for _, i := range idx {
if getHexIndex(string(i)) == -1 {
return nil, nil, fmt.Errorf("invalid path element")
}
}
for i, n := range nibbles {
if string(idx[i]) != fmt.Sprintf("%x", n) {
return nil, nil, fmt.Errorf("no such link in this leaf")
}
}
p = rest
}
link, ok := t.elements[1].(node.Node)
if !ok {
return nil, nil, fmt.Errorf("leaf child is not an IPLD node")
}
return link.Resolve(p)
}
func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
idx, rest := shiftFromPath(p, 1)
hidx := getHexIndex(idx)
if hidx == -1 {
return nil, nil, fmt.Errorf("incorrect path")
}
child := t.elements[hidx]
if child != nil {
return &node.Link{Cid: child.(cid.Cid)}, rest, nil
}
return nil, nil, fmt.Errorf("no such link in this branch")
}
// shiftFromPath extracts from a given path (as a slice of strings)
// the given number of elements as a single string, returning whatever
// it has not taken.
//
// Examples:
// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
func shiftFromPath(p []string, i int) (string, []string) {
var (
out string
rest []string
)
for _, pe := range p {
re := ""
for _, c := range pe {
if len(out) < i {
out += string(c)
} else {
re += string(c)
}
}
if len(out) == i && re != "" {
rest = append(rest, re)
}
}
return out, rest
}
// getHexIndex returns the integer 0-15 equivalent of the given
// hex character string, or -1 otherwise.
func getHexIndex(s string) int {
if len(s) != 1 {
return -1
}
c := byte(s[0])
switch {
case '0' <= c && c <= '9':
return int(c - '0')
case 'a' <= c && c <= 'f':
return int(c - 'a' + 10)
}
return -1
}
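// Example sketch (not part of the original source): resolving one step of a
// hex-nibble path through a decoded trie node. exampleResolveStep is a
// hypothetical name; branchRLP is a placeholder for the raw RLP of a state
// trie branch node, decoded via FromStateTrieRLP from elsewhere in this package.
func exampleResolveStep(branchRLP []byte) error {
	stNode, err := FromStateTrieRLP(branchRLP)
	if err != nil {
		return err
	}
	// a branch consumes one nibble of the path and returns a link to the
	// child node along with the remaining path elements
	obj, rest, err := stNode.Resolve([]string{"0", "a", "b"})
	if err != nil {
		return err
	}
	fmt.Printf("resolved %v, remaining path %v\n", obj, rest)
	return nil
}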

View File

@ -1,22 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package ipfs
type BlockModel struct {
CID string `db:"key"`
Data []byte `db:"data"`
}

View File

@ -1,25 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package node
type Node struct {
GenesisBlock string
NetworkID string
ChainID uint64
ID string
ClientName string
}

View File

@ -1,37 +0,0 @@
package postgres
import (
"fmt"
)
const (
BeginTransactionFailedMsg = "failed to begin transaction"
DbConnectionFailedMsg = "db connection failed"
DeleteQueryFailedMsg = "delete query failed"
InsertQueryFailedMsg = "insert query failed"
SettingNodeFailedMsg = "unable to set db node"
)
func ErrBeginTransactionFailed(beginErr error) error {
return formatError(BeginTransactionFailedMsg, beginErr.Error())
}
func ErrDBConnectionFailed(connectErr error) error {
return formatError(DbConnectionFailedMsg, connectErr.Error())
}
func ErrDBDeleteFailed(deleteErr error) error {
return formatError(DeleteQueryFailedMsg, deleteErr.Error())
}
func ErrDBInsertFailed(insertErr error) error {
return formatError(InsertQueryFailedMsg, insertErr.Error())
}
func ErrUnableToSetNode(setErr error) error {
return formatError(SettingNodeFailedMsg, setErr.Error())
}
func formatError(msg, err string) error {
return fmt.Errorf("%s: %s", msg, err)
}

View File

@ -1,76 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package postgres
import (
"time"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //postgres driver
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
)
type DB struct {
*sqlx.DB
Node node.Node
NodeID int64
}
func NewDB(databaseConfig config.Database, node node.Node) (*DB, error) {
connectString := config.DbConnectionString(databaseConfig)
db, connectErr := sqlx.Connect("postgres", connectString)
if connectErr != nil {
return &DB{}, ErrDBConnectionFailed(connectErr)
}
if databaseConfig.MaxOpen > 0 {
db.SetMaxOpenConns(databaseConfig.MaxOpen)
}
if databaseConfig.MaxIdle > 0 {
db.SetMaxIdleConns(databaseConfig.MaxIdle)
}
if databaseConfig.MaxLifetime > 0 {
lifetime := time.Duration(databaseConfig.MaxLifetime) * time.Second
db.SetConnMaxLifetime(lifetime)
}
pg := DB{DB: db, Node: node}
nodeErr := pg.CreateNode(&node)
if nodeErr != nil {
return &DB{}, ErrUnableToSetNode(nodeErr)
}
return &pg, nil
}
func (db *DB) CreateNode(node *node.Node) error {
var nodeID int64
err := db.QueryRow(
`INSERT INTO nodes (genesis_block, network_id, node_id, client_name)
VALUES ($1, $2, $3, $4)
ON CONFLICT (genesis_block, network_id, node_id)
DO UPDATE
SET genesis_block = $1,
network_id = $2,
node_id = $3,
client_name = $4
RETURNING id`,
node.GenesisBlock, node.NetworkID, node.ID, node.ClientName).Scan(&nodeID)
if err != nil {
return ErrUnableToSetNode(err)
}
db.NodeID = nodeID
return nil
}
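// Example sketch (not part of the original source): opening the watcher's
// Postgres handle. exampleOpenDB is a hypothetical name; the config.Database
// value is assumed to be populated from the application's toml/env
// configuration, and the node.Node fields below are placeholders.
func exampleOpenDB(dbConfig config.Database) (*DB, error) {
	n := node.Node{
		GenesisBlock: "GENESIS",
		NetworkID:    "1",
		ID:           "x123",
		ClientName:   "geth",
	}
	return NewDB(dbConfig, n)
}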

View File

@ -1,36 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package postgres_test
import (
"io/ioutil"
"testing"
log "github.com/sirupsen/logrus"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func init() {
log.SetOutput(ioutil.Discard)
}
func TestPostgres(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Postgres Suite")
}

View File

@ -1,104 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package postgres_test
import (
"fmt"
"strings"
"math/big"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/test_config"
)
var _ = Describe("Postgres DB", func() {
var sqlxdb *sqlx.DB
It("connects to the database", func() {
var err error
pgConfig := config.DbConnectionString(test_config.DBConfig)
sqlxdb, err = sqlx.Connect("postgres", pgConfig)
Expect(err).Should(BeNil())
Expect(sqlxdb).ShouldNot(BeNil())
})
It("serializes big.Int to db", func() {
// postgres driver doesn't support go big.Int type
// various casts in golang uint64, int64, overflow for
// transaction value (in wei) even though
// postgres numeric can handle an arbitrary
// sized int, so use string representation of big.Int
// and cast on insert
pgConnectString := config.DbConnectionString(test_config.DBConfig)
db, err := sqlx.Connect("postgres", pgConnectString)
Expect(err).NotTo(HaveOccurred())
bi := new(big.Int)
bi.SetString("34940183920000000000", 10)
Expect(bi.String()).To(Equal("34940183920000000000"))
defer db.Exec(`DROP TABLE IF EXISTS example`)
_, err = db.Exec("CREATE TABLE example ( id INTEGER, data NUMERIC )")
Expect(err).ToNot(HaveOccurred())
sqlStatement := `
INSERT INTO example (id, data)
VALUES (1, cast($1 AS NUMERIC))`
_, err = db.Exec(sqlStatement, bi.String())
Expect(err).ToNot(HaveOccurred())
var data string
err = db.QueryRow(`SELECT data FROM example WHERE id = 1`).Scan(&data)
Expect(err).ToNot(HaveOccurred())
Expect(bi.String()).To(Equal(data))
actual := new(big.Int)
actual.SetString(data, 10)
Expect(actual).To(Equal(bi))
})
It("throws error when can't connect to the database", func() {
invalidDatabase := config.Database{}
node := node.Node{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
_, err := postgres.NewDB(invalidDatabase, node)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring(postgres.DbConnectionFailedMsg))
})
It("throws error when can't create node", func() {
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
node := node.Node{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
_, err := postgres.NewDB(test_config.DBConfig, node)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring(postgres.SettingNodeFailedMsg))
})
})

View File

@ -1,128 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package resync
import (
"fmt"
"time"
"github.com/spf13/viper"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/config"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/node"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/postgres"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils"
)
// Env variables
const (
RESYNC_CHAIN = "RESYNC_CHAIN"
RESYNC_START = "RESYNC_START"
RESYNC_STOP = "RESYNC_STOP"
RESYNC_BATCH_SIZE = "RESYNC_BATCH_SIZE"
RESYNC_BATCH_NUMBER = "RESYNC_BATCH_NUMBER"
RESYNC_CLEAR_OLD_CACHE = "RESYNC_CLEAR_OLD_CACHE"
RESYNC_TYPE = "RESYNC_TYPE"
RESYNC_RESET_VALIDATION = "RESYNC_RESET_VALIDATION"
)
// Config holds the parameters needed to perform a resync
type Config struct {
Chain shared.ChainType // The type of chain to resync
ResyncType shared.DataType // The type of data to resync
ClearOldCache bool // Resync will first clear all the data within the range
ResetValidation bool // If true, resync will reset the validation level to 0 for the given range
// DB info
DB *postgres.DB
DBConfig config.Database
HTTPClient interface{} // Note this client is expected to support the retrieval of the specified data type(s)
NodeInfo node.Node // Info for the associated node
Ranges [][2]uint64 // The block height ranges to resync
BatchSize uint64 // BatchSize for the resync http calls (client has to support batch sizing)
Timeout time.Duration // HTTP connection timeout in seconds
BatchNumber uint64
}
// NewConfig fills and returns a resync config from toml parameters
func NewConfig() (*Config, error) {
c := new(Config)
var err error
viper.BindEnv("resync.start", RESYNC_START)
viper.BindEnv("resync.stop", RESYNC_STOP)
viper.BindEnv("resync.clearOldCache", RESYNC_CLEAR_OLD_CACHE)
viper.BindEnv("resync.type", RESYNC_TYPE)
viper.BindEnv("resync.chain", RESYNC_CHAIN)
viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
viper.BindEnv("bitcoin.httpPath", shared.BTC_HTTP_PATH)
viper.BindEnv("resync.batchSize", RESYNC_BATCH_SIZE)
viper.BindEnv("resync.batchNumber", RESYNC_BATCH_NUMBER)
viper.BindEnv("resync.resetValidation", RESYNC_RESET_VALIDATION)
viper.BindEnv("resync.timeout", shared.HTTP_TIMEOUT)
timeout := viper.GetInt("resync.timeout")
if timeout < 5 {
timeout = 5
}
c.Timeout = time.Second * time.Duration(timeout)
start := uint64(viper.GetInt64("resync.start"))
stop := uint64(viper.GetInt64("resync.stop"))
c.Ranges = [][2]uint64{{start, stop}}
c.ClearOldCache = viper.GetBool("resync.clearOldCache")
c.ResetValidation = viper.GetBool("resync.resetValidation")
resyncType := viper.GetString("resync.type")
c.ResyncType, err = shared.GenerateDataTypeFromString(resyncType)
if err != nil {
return nil, err
}
chain := viper.GetString("resync.chain")
c.Chain, err = shared.NewChainType(chain)
if err != nil {
return nil, err
}
if ok, err := shared.SupportedDataType(c.ResyncType, c.Chain); !ok {
if err != nil {
return nil, err
}
return nil, fmt.Errorf("chain type %s does not support data type %s", c.Chain.String(), c.ResyncType.String())
}
switch c.Chain {
case shared.Ethereum:
ethHTTP := viper.GetString("ethereum.httpPath")
c.NodeInfo, c.HTTPClient, err = shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
if err != nil {
return nil, err
}
case shared.Bitcoin:
btcHTTP := viper.GetString("bitcoin.httpPath")
c.NodeInfo, c.HTTPClient = shared.GetBtcNodeAndClient(btcHTTP)
}
c.DBConfig.Init()
db := utils.LoadPostgres(c.DBConfig, c.NodeInfo)
c.DB = &db
c.BatchSize = uint64(viper.GetInt64("resync.batchSize"))
c.BatchNumber = uint64(viper.GetInt64("resync.batchNumber"))
return c, nil
}

View File

@ -1,174 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package resync
import (
"fmt"
"github.com/sirupsen/logrus"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/builders"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
"github.com/vulcanize/ipfs-blockchain-watcher/utils"
)
type Resync interface {
Resync() error
}
type Service struct {
// Interface for converting payloads into IPLD object payloads
Converter shared.PayloadConverter
// Interface for publishing the IPLD payloads to IPFS
Publisher shared.IPLDPublisher
// Interface for searching and retrieving CIDs from Postgres index
Retriever shared.CIDRetriever
// Interface for fetching payloads at historical blocks over http
Fetcher shared.PayloadFetcher
// Interface for cleaning out data before resyncing (if clearOldCache is on)
Cleaner shared.Cleaner
// Size of batch fetches
BatchSize uint64
// Number of goroutines
BatchNumber int64
// Channel for receiving quit signal
quitChan chan bool
// Chain type
chain shared.ChainType
// Resync data type
data shared.DataType
// Resync ranges
ranges [][2]uint64
// Flag to turn on or off old cache destruction
clearOldCache bool
// Flag to turn on or off validation level reset
resetValidation bool
}
// NewResyncService creates and returns a resync service from the provided settings
func NewResyncService(settings *Config) (Resync, error) {
publisher, err := builders.NewIPLDPublisher(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
converter, err := builders.NewPayloadConverter(settings.Chain, settings.NodeInfo.ChainID)
if err != nil {
return nil, err
}
retriever, err := builders.NewCIDRetriever(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
fetcher, err := builders.NewPaylaodFetcher(settings.Chain, settings.HTTPClient, settings.Timeout)
if err != nil {
return nil, err
}
cleaner, err := builders.NewCleaner(settings.Chain, settings.DB)
if err != nil {
return nil, err
}
batchSize := settings.BatchSize
if batchSize == 0 {
batchSize = shared.DefaultMaxBatchSize
}
batchNumber := int64(settings.BatchNumber)
if batchNumber == 0 {
batchNumber = shared.DefaultMaxBatchNumber
}
return &Service{
Converter: converter,
Publisher: publisher,
Retriever: retriever,
Fetcher: fetcher,
Cleaner: cleaner,
BatchSize: batchSize,
BatchNumber: int64(batchNumber),
quitChan: make(chan bool),
chain: settings.Chain,
ranges: settings.Ranges,
data: settings.ResyncType,
clearOldCache: settings.ClearOldCache,
resetValidation: settings.ResetValidation,
}, nil
}
func (rs *Service) Resync() error {
if rs.resetValidation {
logrus.Infof("resetting validation level")
if err := rs.Cleaner.ResetValidation(rs.ranges); err != nil {
return fmt.Errorf("validation reset failed: %v", err)
}
}
if rs.clearOldCache {
logrus.Infof("cleaning out old data from Postgres")
if err := rs.Cleaner.Clean(rs.ranges, rs.data); err != nil {
return fmt.Errorf("%s %s data resync cleaning error: %v", rs.chain.String(), rs.data.String(), err)
}
}
// spin up worker goroutines
heightsChan := make(chan []uint64)
for i := 1; i <= int(rs.BatchNumber); i++ {
go rs.resync(i, heightsChan)
}
for _, rng := range rs.ranges {
if rng[1] < rng[0] {
logrus.Errorf("%s resync range ending block number needs to be greater than the starting block number", rs.chain.String())
continue
}
logrus.Infof("resyncing %s data from %d to %d", rs.chain.String(), rng[0], rng[1])
// break the range up into bins of smaller ranges
blockRangeBins, err := utils.GetBlockHeightBins(rng[0], rng[1], rs.BatchSize)
if err != nil {
return err
}
for _, heights := range blockRangeBins {
heightsChan <- heights
}
}
// send a quit signal to each worker
// this blocks until each worker has finished its current task and can receive from the quit channel
for i := 1; i <= int(rs.BatchNumber); i++ {
rs.quitChan <- true
}
return nil
}
func (rs *Service) resync(id int, heightChan chan []uint64) {
for {
select {
case heights := <-heightChan:
logrus.Debugf("%s resync worker %d processing section from %d to %d", rs.chain.String(), id, heights[0], heights[len(heights)-1])
payloads, err := rs.Fetcher.FetchAt(heights)
if err != nil {
logrus.Errorf("%s resync worker %d fetcher error: %s", rs.chain.String(), id, err.Error())
}
for _, payload := range payloads {
ipldPayload, err := rs.Converter.Convert(payload)
if err != nil {
logrus.Errorf("%s resync worker %d converter error: %s", rs.chain.String(), id, err.Error())
}
if err := rs.Publisher.Publish(ipldPayload); err != nil {
logrus.Errorf("%s resync worker %d publisher error: %s", rs.chain.String(), id, err.Error())
}
}
logrus.Infof("%s resync worker %d finished section from %d to %d", rs.chain.String(), id, heights[0], heights[len(heights)-1])
case <-rs.quitChan:
logrus.Infof("%s resync worker %d goroutine shutting down", rs.chain.String(), id)
return
}
}
}

View File

@ -1,144 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
import (
"fmt"
"strings"
)
// DataType is an enum to loosely represent type of chain data
type DataType int
const (
UnknownDataType DataType = iota - 1
Full
Headers
Uncles
Transactions
Receipts
State
Storage
)
// String resolves a DataType enum to its string representation
func (r DataType) String() string {
switch r {
case Full:
return "full"
case Headers:
return "headers"
case Uncles:
return "uncles"
case Transactions:
return "transactions"
case Receipts:
return "receipts"
case State:
return "state"
case Storage:
return "storage"
default:
return "unknown"
}
}
// GenerateDataTypeFromString returns the DataType corresponding to the provided string
func GenerateDataTypeFromString(str string) (DataType, error) {
switch strings.ToLower(str) {
case "full", "f":
return Full, nil
case "headers", "header", "h":
return Headers, nil
case "uncles", "u":
return Uncles, nil
case "transactions", "transaction", "trxs", "txs", "trx", "tx", "t":
return Transactions, nil
case "receipts", "receipt", "rcts", "rct", "r":
return Receipts, nil
case "state":
return State, nil
case "storage":
return Storage, nil
default:
return UnknownDataType, fmt.Errorf("unrecognized resync type: %s", str)
}
}
// SupportedDataType returns whether the given DataType is supported by the given ChainType
func SupportedDataType(d DataType, c ChainType) (bool, error) {
switch c {
case Ethereum:
switch d {
case Full:
return true, nil
case Headers:
return true, nil
case Uncles:
return true, nil
case Transactions:
return true, nil
case Receipts:
return true, nil
case State:
return true, nil
case Storage:
return true, nil
default:
return true, nil
}
case Bitcoin:
switch d {
case Full:
return true, nil
case Headers:
return true, nil
case Uncles:
return false, nil
case Transactions:
return true, nil
case Receipts:
return false, nil
case State:
return false, nil
case Storage:
return false, nil
default:
return false, nil
}
case Omni:
switch d {
case Full:
return false, nil
case Headers:
return false, nil
case Uncles:
return false, nil
case Transactions:
return false, nil
case Receipts:
return false, nil
case State:
return false, nil
case Storage:
return false, nil
default:
return false, nil
}
default:
return false, fmt.Errorf("unrecognized chain type %s", c.String())
}
}
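// Example sketch (not part of the original source): validating a requested
// resync by pairing a parsed DataType with a ChainType before any work starts.
// exampleValidate is a hypothetical helper name.
func exampleValidate(chain ChainType, typ string) error {
	dt, err := GenerateDataTypeFromString(typ)
	if err != nil {
		return err
	}
	ok, err := SupportedDataType(dt, chain)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("chain %s does not support data type %s", chain.String(), dt.String())
	}
	return nil
}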

View File

@ -1,83 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
import (
"math/big"
)
// PayloadStreamer streams chain-specific payloads to the provided channel
type PayloadStreamer interface {
Stream(payloadChan chan RawChainData) (ClientSubscription, error)
}
// PayloadFetcher fetches chain-specific payloads
type PayloadFetcher interface {
FetchAt(blockHeights []uint64) ([]RawChainData, error)
}
// PayloadConverter converts chain-specific payloads into IPLD payloads for publishing
type PayloadConverter interface {
Convert(payload RawChainData) (ConvertedData, error)
}
// IPLDPublisher publishes IPLD payloads and returns a CID payload for indexing
type IPLDPublisher interface {
Publish(payload ConvertedData) error
}
// ResponseFilterer applies a filter to an IPLD payload to return a subscription response packet
type ResponseFilterer interface {
Filter(filter SubscriptionSettings, payload ConvertedData) (response IPLDs, err error)
}
// CIDRetriever retrieves cids according to a provided filter and returns a CID wrapper
type CIDRetriever interface {
Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDsForFetching, bool, error)
RetrieveFirstBlockNumber() (int64, error)
RetrieveLastBlockNumber() (int64, error)
RetrieveGapsInData(validationLevel int) ([]Gap, error)
}
// IPLDFetcher uses a CID wrapper to fetch an IPLD wrapper
type IPLDFetcher interface {
Fetch(cids CIDsForFetching) (IPLDs, error)
}
// ClientSubscription is a general interface for chain data subscriptions
type ClientSubscription interface {
Err() <-chan error
Unsubscribe()
}
// Cleaner is for cleaning out data from the cache within the given ranges
type Cleaner interface {
Clean(rngs [][2]uint64, t DataType) error
ResetValidation(rngs [][2]uint64) error
}
// SubscriptionSettings is the interface every subscription filter type needs to satisfy, no matter the chain
// Further specifics of the underlying filter type depend on the internal needs of the types
// which satisfy the ResponseFilterer and CIDRetriever interfaces for a specific chain
// The underlying type needs to be rlp serializable
type SubscriptionSettings interface {
StartingBlock() *big.Int
EndingBlock() *big.Int
ChainType() ChainType
HistoricalData() bool
HistoricalDataOnly() bool
}
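// Example sketch (not part of the original source): how the interfaces above
// compose into a minimal stream-convert-publish loop. Shutdown handling is
// elided and examplePipeline is a hypothetical helper name.
func examplePipeline(s PayloadStreamer, c PayloadConverter, p IPLDPublisher) error {
	payloadChan := make(chan RawChainData)
	sub, err := s.Stream(payloadChan)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()
	for {
		select {
		case raw := <-payloadChan:
			converted, err := c.Convert(raw)
			if err != nil {
				return err
			}
			if err := p.Publish(converted); err != nil {
				return err
			}
		case err := <-sub.Err():
			return err
		}
	}
}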

View File

@ -1,41 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
// Very loose interface types for generic processing of different blockchains
// TODO: split different blockchain support into separate repos
// These types serve as very loose wrappers around a generic underlying interface{}
type RawChainData interface{}
// The concrete type underneath StreamedIPLDs should not be a pointer
type ConvertedData interface {
Height() int64
}
type CIDsForIndexing interface{}
type CIDsForFetching interface{}
type IPLDs interface {
Height() int64
}
type Gap struct {
Start uint64
Stop uint64
}

View File

@ -1,72 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package watch_test
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/eth/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared"
mocks2 "github.com/vulcanize/ipfs-blockchain-watcher/pkg/shared/mocks"
"github.com/vulcanize/ipfs-blockchain-watcher/pkg/watch"
)
var _ = Describe("Service", func() {
Describe("Sync", func() {
It("Streams statediff.Payloads, converts them to IPLDPayloads, publishes IPLDPayloads, and indexes CIDPayloads", func() {
wg := new(sync.WaitGroup)
payloadChan := make(chan shared.RawChainData, 1)
quitChan := make(chan bool, 1)
mockPublisher := &mocks.IPLDPublisher{
ReturnCIDPayload: mocks.MockCIDPayload,
ReturnErr: nil,
}
mockStreamer := &mocks2.PayloadStreamer{
ReturnSub: &rpc.ClientSubscription{},
StreamPayloads: []shared.RawChainData{
mocks.MockStateDiffPayload,
},
ReturnErr: nil,
}
mockConverter := &mocks.PayloadConverter{
ReturnIPLDPayload: mocks.MockConvertedPayload,
ReturnErr: nil,
}
processor := &watch.Service{
Publisher: mockPublisher,
Streamer: mockStreamer,
Converter: mockConverter,
PayloadChan: payloadChan,
QuitChan: quitChan,
WorkerPoolSize: 1,
}
err := processor.Sync(wg, nil)
Expect(err).ToNot(HaveOccurred())
time.Sleep(2 * time.Second)
close(quitChan)
wg.Wait()
Expect(mockConverter.PassedStatediffPayload).To(Equal(mocks.MockStateDiffPayload))
Expect(mockPublisher.PassedIPLDPayload).To(Equal(mocks.MockConvertedPayload))
Expect(mockStreamer.PassedPayloadChan).To(Equal(payloadChan))
})
})
})

View File

@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package watch_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestIPFSWatcher(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "IPFS Watcher Suite Test")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

Binary file not shown.