Fix SQL query to check for empty storage value.

This commit is contained in:
Arijit Das 2021-09-24 15:34:09 +05:30
parent f09f665b11
commit 500bba43b4
7 changed files with 25 additions and 29 deletions

View File

@@ -1,18 +1,26 @@
-- +goose Up
-- +goose StatementBegin
-- returns if a storage node at the provided path was removed in the range >= the provided height and <= the provided block hash
CREATE OR REPLACE FUNCTION was_state_leaf_removed(state_leaf_key BYTEA, block_num BIGINT) RETURNS BOOLEAN
-- returns if a state leaf node was removed within the provided block number
CREATE OR REPLACE FUNCTION was_state_leaf_removed(key character varying, hash character varying) RETURNS boolean
LANGUAGE plpgsql
AS $$
SELECT exists(SELECT 1
DECLARE
rec RECORD;
BEGIN
FOR rec IN SELECT state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE state_leaf_key = state_leaf_key
AND block_number <= block_num
AND state_cids.node_type = 3
LIMIT 1);
$$ LANGUAGE SQL;
WHERE state_leaf_key = key
AND block_number <= (SELECT block_number FROM eth.header_cids WHERE block_hash = hash)
ORDER BY state_cids.id DESC LIMIT 1
LOOP
IF rec.node_type = 3 THEN
RETURN TRUE;
END IF;
END LOOP;
RETURN FALSE;
END;
$$;
-- +goose StatementEnd
-- +goose StatementBegin

View File

@@ -20,7 +20,7 @@ services:
restart: on-failure
depends_on:
- db
image: vulcanize/statediff-migrations:v0.7.0
image: vulcanize/statediff-migrations:0.7.0
environment:
DATABASE_USER: vdbm
DATABASE_NAME: vulcanize_public
@@ -39,7 +39,6 @@ services:
- vdb_db_eth_server:/var/lib/postgresql/data
ports:
- "127.0.0.1:8077:5432"
command: ["postgres", "-c", "log_statement=all"]
eth-server:
restart: unless-stopped

View File

@@ -127,7 +127,7 @@ const (
AND block_number <= $2
ORDER BY block_number DESC
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed(state_leaf_key, block_number) AS state_leaf_removed
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
@@ -137,7 +137,7 @@ const (
AND block_number <= $3
ORDER BY block_number DESC
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed(state_leaf_key, block_number) AS state_leaf_removed
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)

View File

@@ -7,7 +7,7 @@ docker-compose down --remove-orphans --volumes
# Build and start the containers.
# Note: Build only if `ipld-eth-server` or other container code is modified. Otherwise comment this line.
docker-compose -f docker-compose.test.yml -f docker-compose.yml build eth-server
docker-compose -f docker-compose.test.yml -f docker-compose.yml up -d db dapptools contract eth-server 2>&1 | tee docker.logs
docker-compose -f docker-compose.test.yml -f docker-compose.yml up -d db dapptools contract eth-server
export PGPASSWORD=password
export DATABASE_USER=vdbm

View File

@@ -15,8 +15,6 @@ fastify.get('/v1/deployContract', async (req, reply) => {
const token = await GLDToken.deploy();
await token.deployed();
console.log(`Deployed block ${token.deployTransaction.blockNumber}`)
return {
address: token.address,
txHash: token.deployTransaction.hash,
@@ -33,7 +31,6 @@ fastify.get('/v1/destroyContract', async (req, reply) => {
await token.destroy();
const blockNum = await hre.ethers.provider.getBlockNumber()
console.log(`Destroyed block ${blockNum}`)
return {
blockNumber: blockNum,

View File

@@ -54,7 +54,6 @@ func DestroyContract(addr string) (*ContractDestroyed, error) {
}
defer res.Body.Close()
fmt.Println(res.Body)
var data ContractDestroyed
decoder := json.NewDecoder(res.Body)

View File

@@ -2,7 +2,6 @@ package integration_test
import (
"context"
"fmt"
"math/big"
"time"
@@ -318,8 +317,6 @@ var _ = Describe("Integration test", func() {
contract, contractErr = integration.DeployContract()
erc20TotalSupply, bigIntResult = new(big.Int).SetString("1000000000000000000000", 10)
fmt.Printf("Deployed address: %d\n", contract.BlockNumber)
time.Sleep(sleepInterval)
})
@@ -396,7 +393,7 @@ var _ = Describe("Integration test", func() {
Expect(gethStorage).To(Equal(ipldStorage))
})
FIt("get storage after self destruct", func() {
It("get storage after self destruct", func() {
totalSupplyIndex := "0x2"
tx, err := integration.DestroyContract(contract.Address)
@@ -404,10 +401,6 @@ var _ = Describe("Integration test", func() {
time.Sleep(sleepInterval)
fmt.Printf("Destroyed address: %d\n", tx.BlockNumber)
fmt.Printf("Contract Address: %s \n", contract.Address)
gethStorage1, err := gethClient.StorageAt(ctx, common.HexToAddress(contract.Address), common.HexToHash(totalSupplyIndex), big.NewInt(tx.BlockNumber-1))
Expect(err).ToNot(HaveOccurred())
gethStorage2, err := gethClient.StorageAt(ctx, common.HexToAddress(contract.Address), common.HexToHash(totalSupplyIndex), big.NewInt(tx.BlockNumber))