Function To Add entry to known_gaps

This commit contains all the code needed to update the `known_gaps` table. It uses a generic function `PushKnownGaps` to insert gaps into the database.

The current test is very basic: all it does is insert a dummy gap into the database. As long as there is no error, it will return successfully.

I have also removed the foundry subfolder. I am using the `related-repository` setup for testing.
This commit is contained in:
Abdul Rabbani 2022-03-21 16:28:13 -04:00
parent 3045068a45
commit 921538ba64
24 changed files with 109 additions and 459 deletions

5
.gitignore vendored
View File

@ -49,3 +49,8 @@ profile.cov
**/yarn-error.log
foundry/deployments/local-private-network/geth-linux-amd64
foundry/projects/local-private-network/geth-linux-amd64
# Helpful repos
related-repositories/foundry-test/**
related-repositories/hive/**
related-repositories/ipld-eth-db/**

3
.gitmodules vendored
View File

@ -6,6 +6,3 @@
path = tests/evm-benchmarks
url = https://github.com/ipsilon/evm-benchmarks
shallow = true
[submodule "foundry/projects/local-private-network/Stateful/lib/ds-test"]
path = foundry/projects/local-private-network/Stateful/lib/ds-test
url = https://github.com/dapphub/ds-test

View File

@ -1,59 +0,0 @@
# Foundry README
# Overview
This document will go through the steps needed to test using Foundry. Currently, we use Foundry in the following capacity.
1. Create a private network with our internal version of Geth.
2. Deploy a smart contract to the private network.
3. Test the smart contract on the private network.
4. Create a transaction on the private network.
# Steps
The steps to create a new project are as follows.
## 1. Creating New Project
1. `cd foundry/projects`.
2. Create a directory that captures your project: `mkdir local-private-network; cd local-private-network`.
3. Create a [new foundry project](https://onbjerg.github.io/foundry-book/forge/creating-a-new-project.html): `forge init stateful`.
4. Follow the foundry [documentation](https://onbjerg.github.io/foundry-book/forge/tests.html) for writing smart contract tests.
## 2. Deployments
You can choose to have custom deployments for your workflow. However, it is recommended to utilize Docker.
# Existing Projects
Below, you can find existing projects and their descriptions.
## `local-private-network`
The purpose of this project is as follows:
1. Compile the geth from the local source.
2. Build a docker container with `ipld-eth-db` and another container for the `local-private-network`.
3. Run the compiled version of geth.
4. Deploy a smart contract to the private blockchain.
5. Trigger a transaction on the newly deployed smart contract.
## Using This Project
If you want to test your local geth code, do the following:
1. cd `foundry/projects/local-private-network`.
2. `./wrapper.sh` - This script will do all the heavy lifting for you.
3. Keep an eye out for the outputs from the docker container.
4. Enter the docker container and do as you please.
5. If you want to change your geth code, you will have to run `./wrapper.sh` for subsequent runs.
6. If you do not change your geth code, you have to run: `docker-compose up --build`.
### Key Notes:
- The command to [deploy](https://onbjerg.github.io/foundry-book/forge/deploying.html) the smart contract is: `forge create --keystore $ETH_KEYSTORE_FILE --rpc-url [http://127.0.0.1:8545](http://127.0.0.1:8545/) --constructor-args 1 --password "" --legacy /root/stateful/src/Stateful.sol:Stateful`
- The command to interact create a [transaction](https://onbjerg.github.io/foundry-book/reference/cast.html) is: `cast send --keystore $ETH_KEYSTORE_FILE --rpc-url [http://127.0.0.1:8545](http://127.0.0.1:8545/) --password "" --legacy $DEPLOYED_ADDRESS "off()"`
- The `Dockerfile` compiles `cast` and `forge`.
- The `foundry/projects/local-private-network/deploy-local-network.sh` file does most heavy lifting. It spins up geth and triggers various events.
- The `foundry/projects/local-private-network/start-private-network.sh` file triggers `deploy-local-network.sh`. This file runs all the tests.
- The `geth` node will stay running even after the tests are terminated.

View File

@ -1,37 +0,0 @@
FROM frolvlad/alpine-bash
# copy all files
RUN apk update ; apk add --no-cache --allow-untrusted ca-certificates curl bash git jq
ENV GLIBC_REPO=https://github.com/sgerrand/alpine-pkg-glibc
ENV GLIBC_VERSION=2.35-r0
RUN set -ex && \
apk --update add libstdc++ curl ca-certificates && \
for pkg in glibc-${GLIBC_VERSION} glibc-bin-${GLIBC_VERSION}; \
do curl -sSL ${GLIBC_REPO}/releases/download/${GLIBC_VERSION}/${pkg}.apk -o /tmp/${pkg}.apk; done && \
apk add --allow-untrusted /tmp/*.apk ; \
rm -v /tmp/*.apk ;/usr/glibc-compat/sbin/ldconfig /lib /usr/glibc-compat/lib
RUN apk add gcompat; echo "Sorry"
WORKDIR /root
COPY stateful ./stateful
ADD ./start-private-network.sh .
ADD ./deploy-local-network.sh .
ADD ../../geth-linux-amd64 /bin/geth
RUN curl -L https://foundry.paradigm.xyz | bash; \
/bin/bash -c 'source $HOME/.bashrc'; \
/root/.foundry/bin/foundryup
ENV PATH "$PATH:/root/.foundry/bin/"
RUN echo "export PATH=${PATH}" >> $HOME/.bashrc;
RUN chmod +x /bin/geth
EXPOSE 8545
EXPOSE 8546
ENTRYPOINT ["./start-private-network.sh"]

@ -1 +0,0 @@
Subproject commit 0a5da56b0d65960e6a994d2ec8245e6edd38c248

View File

@ -1,16 +0,0 @@
#!/bin/bash
set -e
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m'
start_path=$(pwd)
cd ../../../
echo -e "${GREEN}Building geth!${NC}"
docker build -t vulcanize/go-ethereum -f Dockerfile .
docker run --rm --entrypoint cat vulcanize/go-ethereum /usr/local/bin/geth > foundry/projects/local-private-network/geth-linux-amd64
chmod +x foundry/projects/local-private-network/geth-linux-amd64
echo -e "${GREEN}geth build complete!${NC}"
cd $start_path

View File

@ -1,201 +0,0 @@
#!/bin/bash
set -e
OPTS="./deploy-local-network.sh [<options>] <args>...
./deploy-local-network.sh --help
--
db-user=name database user
db-password=password database password
db-name=name database name
db-host=address database host
db-port=port database port
db-write=bool turn on database write mode
db-type=name the type of database
db-driver=name the driver used for the database
db-waitforsync=bool Should the statediff service start once geth has synced to head (default: false)
rpc-port=port change RPC port (default: 8545)
rpc-addr=address change RPC address (default: 127.0.0.1)
chain-id=number change chain ID (default: 99)
period=seconds use a block time instead of instamine
accounts=number create multiple accounts (default: 1)
address=address eth address to add to genesis
save=name after finishing, save snapshot
load=name start from a previously saved snapshot
dir=directory testnet directory
"
eval "$(
git rev-parse --parseopt -- "$@" <<<"$OPTS" || echo exit $?
)"
DB_USER=vdbm
DB_PASSWORD=password
DB_NAME=vulcanize_public
DB_HOST=127.0.0.1
DB_PORT=5432
DB_TYPE=postgres
DB_DRIVER=sqlx
DB_WAIT_FOR_SYNC=false
RPC_PORT=8545
RPC_ADDRESS=127.0.0.1
PERIOD=0
CHAINID=99
ACCOUNTS=0
ADDRESS=
gethdir=$HOME/testnet
while [[ $1 ]]; do
case $1 in
--) shift; break;;
--db-user) shift; DB_USER=$1;;
--db-password) shift; DB_PASSWORD=$1;;
--db-name) shift; DB_NAME=$1;;
--db-host) shift; DB_HOST=$1;;
--db-port) shift; DB_PORT=$1;;
--db-write) shift; DB_WRITE=$1;;
--db-type) shift; DB_TYPE=$1;;
--db-driver) shift; DB_DRIVER=$1;;
--db-waitforsync) shift; DB_WAIT_FOR_SYNC=$1;;
--rpc-port) shift; RPC_PORT=$1;;
--rpc-addr) shift; RPC_ADDRESS=$1;;
--chain-id) shift; CHAINID=$1;;
--period) shift; PERIOD=$1;;
--accounts) shift; ACCOUNTS=$1;;
--save) shift; SAVE=$1;;
--address) shift; ADDRESS=$1;;
--load) shift; LOAD=$1;;
--dir) shift; gethdir=$1;;
*) printf "${0##*/}: internal error: %q\\n" "$1"; exit 1
esac; shift
done
chaindir=$gethdir/$RPC_PORT
#while true; do
# if [[ ! -d "$gethdir/$CHAINID" ]]; then break; fi
# CHAINID=$((CHAINID + 1))
#done
mkdir -p "$chaindir/config"
#if [ -n "$ADDRESS" ]; then
# balance+=(-n {} -s "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" -i balance \
# -i "$ADDRESS")
#fi
for i in $(seq 0 "$ACCOUNTS"); do
address+=( "$(
geth 2>/dev/null account new --datadir "$chaindir" --password=<(exit) 2>/dev/null \
| grep -o -E "0x[A-Fa-f0-9]*" )" )
# balance+=(-n {} -s "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" -i balance \
# -i "${address[i]}")
balance+=(' "'"${address[i]}"'": { "balance": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}')
done
if [[ ! -f "./genesis.json" ]]
then
EXTRA_DATA="0x3132333400000000000000000000000000000000000000000000000000000000${address[0]#0x}0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
JSON_VAL='{
"config": {
"chainId": '"$CHAINID"',
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"clique": {
"period": '"$PERIOD"',
"epoch": 3000
}
},
"difficulty": "0x1",
"gaslimit": "0xffffffffffff",
"extraData": "'"$EXTRA_DATA"'",
"alloc": {'"$balance"'}
}'
echo $JSON_VAL | jq . > $chaindir/config/genesis.json
geth 2>/dev/null --datadir "$chaindir" init "$chaindir/config/genesis.json"
else
echo "Using local genesis file"
geth 2>/dev/null --datadir "$chaindir" init "./genesis.json"
fi
export ETH_RPC_URL=http://$RPC_ADDRESS:$RPC_PORT
port=$((RPC_PORT + 30000))
geth version
echo >&2 "testnet: RPC URL: $ETH_RPC_URL"
echo >&2 "testnet: DB ADDRESS: $DB_HOST"
echo >&2 "testnet: TCP port: $port"
echo >&2 "testnet: Chain ID: $CHAINID"
echo >&2 "testnet: Database: $chaindir"
echo >&2 "testnet: Geth log: $chaindir/geth.log"
printf "%s\n" "${address[@]}" > "$chaindir/config/account"
echo "$ETH_RPC_URL" > "$chaindir/config/rpc-url"
echo "$port" > "$chaindir/config/node-port"
set +m
# Uncomment below once waitforsync has been merged
# geth \
# 2> >(tee "$chaindir/geth.log" | grep --line-buffered Success | sed 's/^/geth: /' >&2) \
# --datadir "$chaindir" --networkid "$CHAINID" --port="$port" \
# --mine --miner.threads=1 --allow-insecure-unlock \
# --http --http.api "web3,eth,net,debug,personal,statediff" --http.corsdomain '*' --http.vhosts '*' --nodiscover \
# --http.addr="$RPC_ADDRESS" --http.port="$RPC_PORT" --syncmode=full --gcmode=archive \
# --statediff --statediff.db.host="$DB_HOST" --statediff.db.port="$DB_PORT" --statediff.db.user="$DB_USER" \
# --statediff.db.password="$DB_PASSWORD" --statediff.db.name="$DB_NAME" \
# --statediff.db.nodeid 1 --statediff.db.clientname test1 --statediff.writing="$DB_WRITE" \
# --statediff.db.type="$DB_TYPE" --statediff.db.driver="$DB_DRIVER" --statediff.waitforsync="$DB_WAIT_FOR_SYNC" \
# --ws --ws.addr="0.0.0.0" --unlock="$(IFS=,; echo "${address[*]}")" --password=<(exit) &
echo "Starting Geth with following flags"
geth \
2> >(tee "$chaindir/geth.log" | grep --line-buffered Success | sed 's/^/geth: /' >&2) \
--datadir "$chaindir" --networkid "$CHAINID" --port="$port" \
--mine --miner.threads=1 --allow-insecure-unlock \
--http --http.api "admin,debug,eth,miner,net,personal,txpool,web3,statediff" --http.corsdomain '*' --http.vhosts '*' --nodiscover \
--http.addr="$RPC_ADDRESS" --http.port="$RPC_PORT" --syncmode=full --gcmode=archive \
--statediff --statediff.db.host="$DB_HOST" --statediff.db.port="$DB_PORT" --statediff.db.user="$DB_USER" \
--statediff.db.password="$DB_PASSWORD" --statediff.db.name="$DB_NAME" \
--statediff.db.nodeid 1 --statediff.db.clientname test1 --statediff.writing="$DB_WRITE" \
--statediff.db.type="$DB_TYPE" --statediff.db.driver="$DB_DRIVER" \
--ws --ws.addr="0.0.0.0" --ws.origins '*' --ws.api=admin,debug,eth,miner,net,personal,txpool,web3 \
--nat=none --miner.gasprice 16000000000 --nat=none \
--unlock="$(IFS=,; echo "${address[*]}")" --password=<(exit) &
gethpid=$!
echo "Geth started"
echo "Geth PID: $gethpid"
clean() {
( set -x; kill -INT $gethpid; wait )
if [[ $SAVE ]]; then
echo >&2 "testnet: saving $gethdir/snapshots/$SAVE"
mkdir -p "$gethdir/snapshots/$SAVE"
cp -r "$chaindir/keystore" "$gethdir/snapshots/$SAVE"
cp -r "$chaindir/config" "$gethdir/snapshots/$SAVE"
geth >/dev/null 2>&1 --datadir "$chaindir" \
export "$gethdir/snapshots/$SAVE/backup"
fi
( set -x; rm -rf "$chaindir" )
}
trap clean EXIT
echo "Curling: $ETH_RPC_URL"
until curl -s "$ETH_RPC_URL"; do sleep 1; done
echo "Curling: $ETH_RPC_URL complete"
# UPDATE
#ETH_FROM=$(seth --rpc-url="$ETH_RPC_URL" rpc eth_coinbase)
#export ETH_FROM
export ETH_KEYSTORE=$chaindir/keystore
export ETH_PASSWORD=/dev/null
printf 'testnet: Account: %s (default)\n' "${address[0]}" >&2
[[ "${#address[@]}" -gt 1 ]] && printf 'testnet: Account: %s\n' "${address[@]:1}" >&2
echo "Geth Start up completed!"
while true; do sleep 3600; done

View File

@ -1,37 +0,0 @@
version: "3.2"
services:
foundry:
restart: unless-stopped
depends_on:
- ipld-eth-db
build: ./
environment:
DB_USER: vdbm
DB_NAME: vulcanize_testing
DB_HOST: ipld-eth-db
DB_PORT: 5432
DB_PASSWORD: password
DB_WRITE: "true"
DB_TYPE: postgres
DB_DRIVER: sqlx
DB_WAIT_FOR_SYNC: "true"
ports:
- "127.0.0.1:8545:8545"
- "127.0.0.1:8546:8546"
ipld-eth-db:
restart: always
image: vulcanize/ipld-eth-db:v3.0.6
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_testing"
POSTGRES_PASSWORD: "password"
volumes:
- vdb_db_eth_server:/var/lib/postgresql/data
ports:
- "127.0.0.1:8077:5432"
command: ["postgres", "-c", "log_statement=all"]
volumes:
vdb_db_eth_server:

View File

@ -1,50 +0,0 @@
#!/bin/bash
set -ex
# clean up
trap 'killall geth && rm -rf "$TMPDIR"' EXIT
trap "exit 1" SIGINT SIGTERM
TMPDIR=$(mktemp -d)
/bin/bash deploy-local-network.sh --rpc-addr 0.0.0.0 --chain-id 4 --db-user $DB_USER --db-password $DB_PASSWORD --db-name $DB_NAME \
--db-host $DB_HOST --db-port $DB_PORT --db-write $DB_WRITE --dir "$TMPDIR" --address $ADDRESS \
--db-type $DB_TYPE --db-driver $DB_DRIVER --db-waitforsync $DB_WAIT_FOR_SYNC --chain-id $CHAIN_ID &
echo "sleeping 90 sec"
# give it a few secs to start up
sleep 90
# Run tests
cd stateful
forge build
forge test --fork-url http://localhost:8545
# Deploy contracts
ETH_KEYSTORE_FILES=()
echo "ETH KEYSTORE: $TMPDIR/8545/keystore"
for entry in `ls $TMPDIR/8545/keystore`; do
ETH_KEYSTORE_FILES+=("${TMPDIR}/8545/keystore/${entry}")
done
echo "ETH_KEYSTORE_FILES: $ETH_KEYSTORE_FILES"
ETH_KEYSTORE_FILE=${ETH_KEYSTORE_FILES[0]}
if [ "${#ETH_KEYSTORE_FILES[@]}" -eq 1 ]; then
echo "Only one KEYSTORE"
else
echo "WARNING: More than one file in keystore: ${ETH_KEYSTORE_FILES}"
fi
DEPLOYED_ADDRESS=$(forge create --keystore $ETH_KEYSTORE_FILE --rpc-url http://127.0.0.1:8545 --constructor-args 1 --password "" --legacy /root/stateful/src/Stateful.sol:Stateful | grep "Deployed to:" | cut -d " " -f 3)
echo "Contract has been deployed to: $DEPLOYED_ADDRESS"
# Call a transaction
TX_OUT=$(cast send --keystore $ETH_KEYSTORE_FILE --rpc-url http://127.0.0.1:8545 --password "" --legacy $DEPLOYED_ADDRESS "off()")
echo "TX OUTPUT: $TX_OUT"
# Run forever
tail -f /dev/null

View File

@ -1,7 +0,0 @@
[default]
src = 'src'
out = 'out'
libs = ['lib']
remappings = ['ds-test/=lib/ds-test/src/']
# See more config options https://github.com/gakonst/foundry/tree/master/config

View File

@ -1,20 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity 0.8.10;
/// @title Stateful - minimal on/off toggle contract used to exercise the
/// private test network (deployed with constructor arg 1, then `off()` is
/// called on it).
contract Stateful {
    uint x; // current state; the functions below assume it is 0 or 1

    /// @param y initial value for x.
    // NOTE: constructor visibility (`public`) was removed in Solidity 0.7+
    // and is invalid under the 0.8.10 pragma, so it is omitted here.
    constructor(uint y) {
        x = y;
    }

    /// Switch off: only allowed when currently on (x == 1).
    function off() public {
        require(x == 1);
        x = 0;
    }

    /// Switch on: only allowed when currently off (x == 0).
    function on() public {
        require(x == 0);
        x = 1;
    }
}

View File

@ -1,19 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity 0.8.10;
import "ds-test/test.sol";
import {Stateful} from "../Stateful.sol";
// Forge/DSTest suite for Stateful.
contract StatefulTest is DSTest {
    Stateful contractA; // instance under test, rebuilt in setUp()
    //contractA A;
    uint x; // constructor argument passed to Stateful

    // Deploy a fresh Stateful with x = 1 before each test.
    function setUp() public {
        x = 1;
        contractA = new Stateful(x);
    }

    // off() requires x == 1, which setUp establishes, so this call must succeed.
    function testExample() public {
        contractA.off();
    }
}

View File

@ -1,7 +0,0 @@
#!/bin/bash
# This script will run everything for you. Sit back and enjoy the show.
# It compiles geth via Docker (compile-geth.sh) and then brings up the
# docker-compose stack, rebuilding the images.
set -e
./compile-geth.sh
docker-compose up --build

BIN
geth-linux-amd64 Executable file

Binary file not shown.

View File

@ -0,0 +1,15 @@
# Overview
The folder will allow developers to clone/move related repositories to this directory. This will allow developers to locally work with these related repositories and `vulcanize/go-ethereum` in one place. This can make testing easier.
# Recommended Setup
## Moving Repositories
It is recommended that you move the following repositories under this folder. Keep the repository names!
- `vulcanize/foundry-tests`
- `vulcanize/hive`
- `vulcanize/ipld-eth-db`
## Symlinks

View File

@ -496,3 +496,7 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAnd
// Close satisfies io.Closer, delegating to the underlying dump destination.
func (sdi *StateDiffIndexer) Close() error {
	return sdi.dump.Close()
}
// PushKnownGaps satisfies the StateDiffIndexer interface; the dump indexer
// does not persist known gaps, so this is a no-op that always returns nil.
func (sdi *StateDiffIndexer) PushKnownGaps(startingBlockNumber *big.Int, endingBlockNumber *big.Int, checkedOut bool, processingKey int64) error {
	return nil
}

View File

@ -478,3 +478,7 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAnd
// Close satisfies io.Closer, delegating to the underlying file writer.
func (sdi *StateDiffIndexer) Close() error {
	return sdi.fileWriter.Close()
}
// PushKnownGaps satisfies the StateDiffIndexer interface; the file indexer
// does not persist known gaps, so this is a no-op that always returns nil.
func (sdi *StateDiffIndexer) PushKnownGaps(startingBlockNumber *big.Int, endingBlockNumber *big.Int, checkedOut bool, processingKey int64) error {
	return nil
}

View File

@ -553,3 +553,27 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAnd
// Close satisfies io.Closer, delegating to the underlying database writer.
func (sdi *StateDiffIndexer) Close() error {
	return sdi.dbWriter.Close()
}
// PushKnownGaps records the block range [startingBlockNumber,
// endingBlockNumber] as a gap in the eth.known_gaps table, together with its
// checked-out flag and processing key. The insert runs in its own
// transaction, which is rolled back if the write fails so no open
// transaction is leaked.
func (sdi *StateDiffIndexer) PushKnownGaps(startingBlockNumber *big.Int, endingBlockNumber *big.Int, checkedOut bool, processingKey int64) error {
	// Block numbers are stored as strings to match the model/schema.
	knownGap := models.KnownGapsModel{
		StartingBlockNumber: startingBlockNumber.String(),
		EndingBlockNumber:   endingBlockNumber.String(),
		CheckedOut:          checkedOut,
		ProcessingKey:       processingKey,
	}
	tx, err := sdi.dbWriter.db.Begin(sdi.ctx)
	if err != nil {
		return err
	}
	if err := sdi.dbWriter.upsertKnownGaps(tx, knownGap); err != nil {
		// Best-effort rollback; the write error is the one callers care about.
		_ = tx.Rollback(sdi.ctx)
		return err
	}
	return tx.Commit(sdi.ctx)
}

View File

@ -54,6 +54,7 @@ type Statements interface {
InsertStorageStm() string
InsertIPLDStm() string
InsertIPLDsStm() string
InsertKnownGapsStm() string
}
// Tx interface to accommodate different concrete SQL transaction types

View File

@ -75,12 +75,17 @@ func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Rece
})
}
func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) {
// setupDb opens a SQLX-backed Postgres connection and constructs a
// StateDiffIndexer over it. Note that db and err are assigned with `=`,
// i.e. they are package-level variables shared with the other helpers.
func setupDb(t *testing.T) (interfaces.StateDiffIndexer, error) {
	db, err = postgres.SetupSQLXDB()
	if err != nil {
		t.Fatal(err)
	}
	ind, err = sql.NewStateDiffIndexer(context.Background(), chainConf, db)
	return ind, err
}
func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) (interfaces.StateDiffIndexer, interfaces.Batch) {
ind, err = setupDb(t)
require.NoError(t, err)
var tx interfaces.Batch
tx, err = ind.PushBlock(
@ -100,6 +105,7 @@ func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) {
}
test_helpers.ExpectEqual(t, tx.(*sql.BatchTx).BlockNumber, testBlock.Number().Uint64())
return ind, tx
}
func tearDown(t *testing.T) {
@ -110,3 +116,24 @@ func tearDown(t *testing.T) {
err = ind.Close()
require.NoError(t, err)
}
// TestKnownGapsUpsert inserts a dummy gap via PushKnownGaps and fails on any
// error. It does not yet verify the row contents.
// TODO: read the inserted row back from the database and compare it.
func TestKnownGapsUpsert(t *testing.T) {
	var blockNumber int64 = 111
	startBlock := big.NewInt(blockNumber)
	endBlock := big.NewInt(blockNumber + 10)

	// Connect to the database and build the indexer under test.
	// (Previously err was checked twice, with t.Fatal followed by
	// require.NoError on the same value; one check suffices.)
	ind, err := setupDb(t)
	require.NoError(t, err)

	// Insert the dummy gap; success is currently the only assertion.
	require.NoError(t, ind.PushKnownGaps(startBlock, endBlock, false, 1))
}

View File

@ -36,6 +36,7 @@ type DB struct {
}
// InsertHeaderStm satisfies the sql.Statements interface
// Stm == Statement
func (db *DB) InsertHeaderStm() string {
return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
@ -99,3 +100,8 @@ func (db *DB) InsertIPLDStm() string {
// InsertIPLDsStm satisfies the sql.Statements interface
func (db *DB) InsertIPLDsStm() string {
	return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING`
}
// InsertKnownGapsStm satisfies the sql.Statements interface
// (leftover commented-out debug statement with hard-coded values removed).
func (db *DB) InsertKnownGapsStm() string {
	return `INSERT INTO eth.known_gaps (starting_block_number, ending_block_number, checked_out, processing_key) VALUES ($1, $2, $3, $4)`
}

View File

@ -182,3 +182,15 @@ func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel) err
}
return nil
}
// upsertKnownGaps writes a known-gaps entry within the supplied transaction
// using the InsertKnownGapsStm statement.
func (w *Writer) upsertKnownGaps(tx Tx, knownGaps models.KnownGapsModel) error {
	// The debug RowsAffected print (which also ignored its error) was removed;
	// the result is not otherwise needed.
	_, err := tx.Exec(w.db.Context(), w.db.InsertKnownGapsStm(),
		knownGaps.StartingBlockNumber, knownGaps.EndingBlockNumber, knownGaps.CheckedOut, knownGaps.ProcessingKey)
	if err != nil {
		// %w keeps the underlying driver error inspectable via errors.Is/As.
		return fmt.Errorf("error upserting known_gaps entry: %w", err)
	}
	return nil
}

View File

@ -32,6 +32,7 @@ type StateDiffIndexer interface {
PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerID string) error
PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error
ReportDBMetrics(delay time.Duration, quit <-chan bool)
PushKnownGaps(startingBlockNumber *big.Int, endingBlockNumber *big.Int, checkedOut bool, processingKey int64) error
io.Closer
}

View File

@ -147,3 +147,11 @@ type LogsModel struct {
Topic2 string `db:"topic2"`
Topic3 string `db:"topic3"`
}
// KnownGapsModel is the data structure for a row of the eth.known_gaps table.
// Block numbers are held as decimal strings (populated via big.Int.String()).
type KnownGapsModel struct {
	StartingBlockNumber string `db:"starting_block_number"`
	EndingBlockNumber   string `db:"ending_block_number"`
	CheckedOut          bool   `db:"checked_out"`
	ProcessingKey       int64  `db:"processing_key"`
}