1162162c0a
* Write state diff to CSV (#2)
* port statediff from 9b7fd9af80/statediff/statediff.go
; minor fixes
* integrating state diff extracting, building, and persisting into geth processes
* work towards persisting created statediffs in ipfs; based off github.com/vulcanize/eth-block-extractor
* Add a state diff service
* Remove diff extractor from blockchain
* Update imports
* Move statediff on/off check to geth cmd config
* Update starting state diff service
* Add debugging logs for creating diff
* Add statediff extractor and builder tests and small refactoring
* Start to write statediff to a CSV
* Restructure statediff directory
* Pull CSV publishing methods into their own file
* Reformatting due to go fmt
* Add gomega to vendor dir
* Remove testing focuses
* Update statediff tests to use golang test pkg
instead of ginkgo
- builder_test
- extractor_test
- publisher_test
* Use hexutil.Encode instead of deprecated common.ToHex
* Remove OldValue from DiffBigInt and DiffUint64 fields
* Update builder test
* Remove old storage value from updated accounts
* Remove old values from created/deleted accounts
* Update publisher to account for only storing current account values
* Update service loop and fetching previous block
* Update testing
- remove statediff ginkgo test suite file
- move mocks to their own dir
* Updates per go fmt
* Updates to tests
* Pass statediff mode and path in through cli
* Return filename from publisher
* Remove some duplication in builder
* Remove code field from state diff output
this is the contract byte code, and it can still be obtained by querying
the db by the codeHash
* Consolidate acct diff structs for updated & updated/deleted accts
* Include block number in csv filename
* Clean up error logging
* Cleanup formatting, spelling, etc
* Address PR comments
* Add contract address and storage value to csv
* Refactor accumulating account row in csv publisher
* Add DiffStorage struct
* Add storage key to csv
* Address PR comments
* Fix publisher to include rows for accounts that don't have store updates
* Update builder test after merging in release/1.8
* Update test contract to include storage on contract initialization
- so that we're able to test that storage diffing works for created and
deleted accounts (not just updated accounts).
* Factor out a common trie iterator method in builder
* Apply goimports to statediff
* Apply gosimple changes to statediff
* Gracefully exit geth command(#4)
* Statediff for full node (#6)
* Open a trie from the in-memory database
* Use a node's LeafKey as an identifier instead of the address
It was proving difficult to look the address up from a given path
with a full node (sometimes the value wouldn't exist in the disk db).
So, instead, for now we are using the node's LeafKey which is a Keccak256
hash of the address, so if we know the address we can figure out which
LeafKey it matches up to.
* Make sure that statediff has been processed before pruning
* Use blockchain stateCache.OpenTrie for storage diffs
* Clean up log lines and remove unnecessary fields from builder
* Apply go fmt changes
* Add a sleep to the blockchain test
* Address PR comments
* Address PR comments
* refactoring/reorganizing packages
* refactoring statediff builder and types and adjusted to relay proofs and paths (still need to make this optional)
* refactoring state diff service and adding api which allows for streaming state diff payloads over an rpc websocket subscription
* make proofs and paths optional + compress service loop into single for loop (may be missing something here)
* option to process intermediate nodes
* make state diff rlp serializable
* cli parameter to limit statediffing to select account addresses + test
* review fixes and fixes for issues ran into in integration
* review fixes; proper method signature for api; adjust service so that statediff processing is halted/paused until there is at least one subscriber listening for the results
* adjust buffering to improve stability; doc.go; fix notifier
err handling
* relay receipts with the rest of the data + review fixes/changes
* rpc method to get statediff at specific block; requires archival node or the block be within the pruning range
* review fixes
* fixes after rebase
* statediff version meta
* fix linter issues
* include total difficulty to the payload
* fix state diff builder: emit actual leaf nodes instead of value nodes; diff on the leaf not on the value; emit correct path for intermediate nodes
* adjust statediff builder tests to changes and extend to test intermediate nodes; golint
* add genesis block to test; handle block 0 in StateDiffAt
* rlp files for mainnet blocks 0-3, for tests
* builder test on mainnet blocks
* common.BytesToHash(path) => crypto.Keccak256(hash) in builder; BytesToHash produces same hash for e.g. []byte{} and []byte{\x00} - prefix \x00 steps are inconsequential to the hash result
* complete tests for early mainnet blocks
* diff type for representing deleted accounts
* fix builder so that we handle account deletions properly and properly diff storage when an account is moved to a new path; update params
* remove cli params; moving them to subscriber defined
* remove unneeded bc methods
* update service and api; statediffing params are now defined by user through api rather than by service provider by cli
* update top level tests
* add ability to watch specific storage slots (leaf keys) only
* comments; explain logic
* update mainnet blocks test
* update api_test.go
* storage leafkey filter test
* cleanup chain maker
* adjust chain maker for tests to add an empty account in block1 and switch to EIP-158 afterwards (now we just need to generate enough accounts until one causes the empty account to be touched and removed post-EIP-158 so we can simulate and test that process...); also added 2 new blocks where more contract storage is set and old slots are set to zero so they are removed so we can test that
* found an account whose creation causes the empty account to be moved to a new path; this should count as 'touching' the empty account and cause it to be removed according to eip-158... but it doesn't
* use new contract in unit tests that has self-destruct ability, so we can test eip-158 since simply moving an account to a new path doesn't count as 'touching' it
* handle storage deletions
* tests for eip-158 account removal and storage value deletions; there is one edge case left to test where we remove 1 account when only two exist such that the remaining account is moved up and replaces the root branch node
* finish testing known edge cases
* add endpoint to fetch all state and storage nodes at a given blockheight; useful for generating a recent state cache/snapshot that we can diff forward from rather than needing to collect all diffs from genesis
* test for state trie builder
* minor changes/fixes
* update version meta
* if statediffing is on, lock tries in triedb until the statediffing service signals they are done using them
* update version meta
* fix mock blockchain; golint; bump patch
* increase maxRequestContentLength; bump patch
* log the sizes of the state objects we are sending
* CI build (#20)
* CI: run build on PR and on push to master
* CI: debug building geth
* CI: fix copying file
* CI: fix copying file v2
* CI: temporary upload file to release asset
* CI: get release upload_url by tag, upload asset to current release
* CI: fix tag name
* fix ci build on statediff_at_anyblock-1.9.11 branch
* fix publishing assets in release
* bump version meta
* use context deadline for timeout in eth_call
* collect and emit codehash=>code mappings for state objects
* subscription endpoint for retrieving all the codehash=>code mappings that exist at provided height
* bump version meta
* Implement WriteStateDiffAt
* Writes state diffs directly to postgres
* Adds CLI flags to configure PG
* Refactors builder output with callbacks
* Copies refactored postgres handling code from ipld-eth-indexer
* rename PostgresCIDWriter.{index->upsert}*
* less ambiguous
* go.mod update
* rm unused
* cleanup
* output code & codehash iteratively
* had to rf some types for this
* prometheus metrics output
* duplicate recent eth-indexer changes
* migrations and metrics...
* [wip] prom.Init() here? another CLI flag?
* cleanup
* tidy & DRY
* statediff WriteLoop service + CLI flag
* [wip] update test mocks
* todo - do something meaningful to test write loop
* logging
* use geth log
* port tests to go testing
* drop ginkgo/gomega
* fix and cleanup tests
* fail before defer statement
* delete vendor/ dir
* unused
* bump version meta
* fixes after rebase onto 1.9.23
* bump version meta
* fix API registration
* bump version meta
* use golang 1.15.5 version (#34)
* bump version meta; add 0.0.11 branch to actions
* bump version meta; update github actions workflows
* statediff: refactor metrics
* Remove redundant statediff/indexer/prom tooling and use existing
prometheus integration.
* cleanup
* "indexer" namespace for metrics
* add reporting loop for db metrics
* doc
* metrics for statediff stats
* metrics namespace/subsystem = statediff/{indexer,service}
* statediff: use a worker pool (for direct writes)
* fix test
* fix chain event subscription
* log tweaks
* func name
* unused import
* intermediate chain event channel for metrics
* cleanup
* bump version meta
* update github actions; linting
* add poststate and status to receipt ipld indexes
* bump statediff version
* stateDiffFor endpoints for fetching or writing statediff object by blockhash; bump statediff version
* fixes after rebase on to v1.10.1
* update github actions and version meta; go fmt
* add leaf key to removed 'nodes'
* include Postgres migrations and schema
* service documentation
* touching up
158 lines
5.5 KiB
PL/PgSQL
158 lines
5.5 KiB
PL/PgSQL
-- +goose Up
-- +goose StatementBegin
-- Returns whether a storage node at the provided path was removed (node_type = 3)
-- in the range > the provided height and <= the block of the provided hash.
CREATE OR REPLACE FUNCTION was_storage_removed(path BYTEA, height BIGINT, hash VARCHAR(66)) RETURNS BOOLEAN
AS $$
SELECT exists(SELECT 1
              FROM eth.storage_cids
                  INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
                  INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
              WHERE storage_path = path
                AND block_number > height
                AND block_number <= (SELECT block_number
                                     FROM eth.header_cids
                                     WHERE block_hash = hash)
                -- node_type = 3 marks a removed node in the *_cids tables
                AND storage_cids.node_type = 3);
$$ LANGUAGE SQL;
-- +goose StatementEnd
|
|
|
|
-- +goose StatementBegin
-- Returns whether a state node at the provided path was removed (node_type = 3)
-- in the range > the provided height and <= the block of the provided hash.
CREATE OR REPLACE FUNCTION was_state_removed(path BYTEA, height BIGINT, hash VARCHAR(66)) RETURNS BOOLEAN
AS $$
SELECT exists(SELECT 1
              FROM eth.state_cids
                  INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
              WHERE state_path = path
                AND block_number > height
                AND block_number <= (SELECT block_number
                                     FROM eth.header_cids
                                     WHERE block_hash = hash)
                -- node_type = 3 marks a removed node in the *_cids tables
                AND state_cids.node_type = 3);
$$ LANGUAGE SQL;
-- +goose StatementEnd
|
|
|
|
-- +goose StatementBegin
-- Composite result for has_child: whether any child headers exist, plus the
-- collected child header rows themselves.
CREATE TYPE child_result AS (
    has_child BOOLEAN,
    children  eth.header_cids[]
);

-- Returns whether the header identified by (hash, height) has any child headers
-- at the next block height, and if so, collects them all.
CREATE OR REPLACE FUNCTION has_child(hash VARCHAR(66), height BIGINT) RETURNS child_result AS
$BODY$
DECLARE
    next_height INT;
    candidate   eth.header_cids;
    result      child_result;
BEGIN
    -- children of a header live exactly one block above it
    next_height = height + 1;
    -- cheap existence probe so we can skip row collection when there are no children
    SELECT exists(SELECT 1
                  FROM eth.header_cids
                  WHERE parent_hash = hash
                    AND block_number = next_height
                  LIMIT 1)
    INTO result.has_child;
    IF result.has_child THEN
        -- gather every child header for this parent
        FOR candidate IN
            SELECT * FROM eth.header_cids WHERE parent_hash = hash AND block_number = next_height
        LOOP
            result.children = array_append(result.children, candidate);
        END LOOP;
    END IF;
    RETURN result;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
|
|
|
|
-- +goose StatementBegin
-- Given a set of headers (all at the same height), determine which is canonical
-- by recursively following child headers: a header with descendants outranks one
-- without, and ties are broken by recursing on the combined child set.
CREATE OR REPLACE FUNCTION canonical_header_from_array(headers eth.header_cids[]) RETURNS eth.header_cids AS
$BODY$
DECLARE
    winner          eth.header_cids;
    winning_child   eth.header_cids;
    candidate       eth.header_cids;
    child_check     child_result;
    all_children    eth.header_cids[];
    parent_of_child eth.header_cids;
    parents_count   INT DEFAULT 0;
BEGIN
    -- inspect every candidate header in the provided set for children
    FOREACH candidate IN ARRAY headers
    LOOP
        child_check = has_child(candidate.block_hash, candidate.block_number);
        IF child_check.has_child THEN
            -- note that this candidate has descendants...
            parents_count = parents_count + 1;
            parent_of_child = candidate;
            -- ...and accumulate those descendants in case we need to recurse
            all_children = array_cat(all_children, child_check.children);
        END IF;
    END LOOP;
    IF parents_count = 0 THEN
        -- no candidate has children: none is more canonical than another,
        -- so return the first one selected
        SELECT * INTO winner FROM unnest(headers) LIMIT 1;
    ELSIF parents_count = 1 THEN
        -- exactly one candidate has children: it is the heaviest/canonical header
        winner = parent_of_child;
    ELSE
        -- several candidates have children: find the canonical header among the
        -- combined child set; its parent is the canonical header at this level
        winning_child = canonical_header_from_array(all_children);
        SELECT * INTO winner FROM unnest(headers)
        WHERE block_hash = winning_child.parent_hash;
    END IF;
    RETURN winner;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
|
|
|
|
-- +goose StatementBegin
-- Resolves the id of the canonical header at the given block height,
-- or NULL when no header exists at that height.
CREATE OR REPLACE FUNCTION canonical_header_id(height BIGINT) RETURNS INTEGER AS
$BODY$
DECLARE
    winner     eth.header_cids;
    collected  eth.header_cids[];
    total      INT;
    row_buffer eth.header_cids;
BEGIN
    -- gather every header recorded at this height
    FOR row_buffer IN
        SELECT * FROM eth.header_cids WHERE block_number = height
    LOOP
        collected = array_append(collected, row_buffer);
    END LOOP;
    -- array_length returns NULL for an empty/unset array
    total = array_length(collected, 1);
    IF total IS NULL OR total < 1 THEN
        -- nothing recorded at this height
        RETURN NULL;
    ELSIF total = 1 THEN
        -- a single header is trivially canonical
        RETURN collected[1].id;
    ELSE
        -- multiple headers: defer to the recursive resolver to pick the canonical one
        winner = canonical_header_from_array(collected);
        RETURN winner.id;
    END IF;
END;
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
|
|
|
|
-- +goose Down
-- Drop in dependency order: the functions that reference the child_result type
-- (has_child, canonical_header_from_array) must be dropped before the type itself.
-- IF EXISTS keeps the down migration idempotent if it is re-run.
DROP FUNCTION IF EXISTS was_storage_removed;
DROP FUNCTION IF EXISTS was_state_removed;
DROP FUNCTION IF EXISTS canonical_header_id;
DROP FUNCTION IF EXISTS canonical_header_from_array;
DROP FUNCTION IF EXISTS has_child;
DROP TYPE IF EXISTS child_result;