* Write state diff to CSV (#2)
* port statediff from 9b7fd9af80/statediff/statediff.go; minor fixes
* integrating state diff extracting, building, and persisting into geth processes
* work towards persisting created statediffs in ipfs; based off github.com/vulcanize/eth-block-extractor
* Add a state diff service
* Remove diff extractor from blockchain
* Update imports
* Move statediff on/off check to geth cmd config
* Update starting state diff service
* Add debugging logs for creating diff
* Add statediff extractor and builder tests and small refactoring
* Start to write statediff to a CSV
* Restructure statediff directory
* Pull CSV publishing methods into their own file
* Reformatting due to go fmt
* Add gomega to vendor dir
* Remove testing focuses
* Update statediff tests to use golang test pkg instead of ginkgo
- builder_test
- extractor_test
- publisher_test
* Use hexutil.Encode instead of deprecated common.ToHex
* Remove OldValue from DiffBigInt and DiffUint64 fields
* Update builder test
* Remove old storage value from updated accounts
* Remove old values from created/deleted accounts
* Update publisher to account for only storing current account values
* Update service loop and fetching previous block
* Update testing
- remove statediff ginkgo test suite file
- move mocks to their own dir
* Updates per go fmt
* Updates to tests
* Pass statediff mode and path in through cli
* Return filename from publisher
* Remove some duplication in builder
* Remove code field from state diff output; this is the contract bytecode, and it can still be obtained by querying the db by the codeHash (a code-lookup sketch follows this log)
* Consolidate acct diff structs for updated & updated/deleted accts
* Include block number in csv filename
* Clean up error logging
* Cleanup formatting, spelling, etc
* Address PR comments
* Add contract address and storage value to csv
* Refactor accumulating account row in csv publisher
* Add DiffStorage struct
* Add storage key to csv
* Address PR comments
* Fix publisher to include rows for accounts that don't have store updates
* Update builder test after merging in release/1.8
* Update test contract to include storage on contract initialization
- so that we're able to test that storage diffing works for created and deleted accounts (not just updated accounts).
* Factor out a common trie iterator method in builder
* Apply goimports to statediff
* Apply gosimple changes to statediff
* Gracefully exit geth command (#4)
* Statediff for full node (#6)
* Open a trie from the in-memory database
* Use a node's LeafKey as an identifier instead of the address
It was proving difficult to look the address up from a given path with a full node (sometimes the value wouldn't exist in the disk db). So, instead, for now we are using the node's LeafKey, which is a Keccak256 hash of the address; if we know the address we can figure out which LeafKey it matches up to (see the LeafKey sketch after this log).
* Make sure that statediff has been processed before pruning
* Use blockchain stateCache.OpenTrie for storage diffs
* Clean up log lines and remove unnecessary fields from builder
* Apply go fmt changes
* Add a sleep to the blockchain test
* Address PR comments
* Address PR comments
* refactoring/reorganizing packages
* refactoring statediff builder and types and adjusted to relay proofs and paths (still need to make this optional)
* refactoring state diff service and adding api which allows for streaming state diff payloads over an rpc websocket subscription
* make proofs and paths optional + compress service loop into single for loop (may be missing something here)
* option to process intermediate nodes
* make state diff rlp serializable
* cli parameter to limit statediffing to select account addresses + test
* review fixes and fixes for issues encountered in integration
* review fixes; proper method signature for api; adjust service so that statediff processing is halted/paused until there is at least one subscriber listening for the results
* adjust buffering to improve stability; doc.go; fix notifier err handling
* relay receipts with the rest of the data + review fixes/changes
* rpc method to get statediff at specific block; requires archival node or the block be within the pruning range
* review fixes
* fixes after rebase
* statediff version meta
* fix linter issues
* include total difficulty to the payload
* fix state diff builder: emit actual leaf nodes instead of value nodes; diff on the leaf not on the value; emit correct path for intermediate nodes
* adjust statediff builder tests to changes and extend to test intermediate nodes; golint
* add genesis block to test; handle block 0 in StateDiffAt
* rlp files for mainnet blocks 0-3, for tests
* builder test on mainnet blocks
* common.BytesToHash(path) => crypto.Keccak256(hash) in builder; BytesToHash produces the same hash for e.g. []byte{} and []byte{\x00} - prefix \x00 steps are inconsequential to the hash result (see the BytesToHash sketch after this log)
* complete tests for early mainnet blocks
* diff type for representing deleted accounts
* fix builder so that we handle account deletions properly and properly diff storage when an account is moved to a new path; update params
* remove cli params; moving them to subscriber defined
* remove unneeded bc methods
* update service and api; statediffing params are now defined by user through api rather than by service provider by cli
* update top level tests
* add ability to watch specific storage slots (leaf keys) only
* comments; explain logic
* update mainnet blocks test
* update api_test.go
* storage leafkey filter test
* cleanup chain maker
* adjust chain maker for tests to add an empty account in block 1 and switch to EIP-158 afterwards (now we just need to generate enough accounts until one causes the empty account to be touched and removed post-EIP-158, so we can simulate and test that process...); also added 2 new blocks where more contract storage is set and old slots are set to zero so they are removed, so we can test that
* found an account whose creation causes the empty account to be moved to a new path; this should count as 'touching' the empty account and cause it to be removed according to EIP-158... but it doesn't
* use new contract in unit tests that has self-destruct ability, so we can test EIP-158, since simply moving an account to a new path doesn't count as 'touching' it
* handle storage deletions
* tests for eip-158 account removal and storage value deletions; there is one edge case left to test where we remove 1 account when only two exist such that the remaining account is moved up and replaces the root branch node
* finish testing known edge cases
* add endpoint to fetch all state and storage nodes at a given blockheight; useful for generating a recent state cache/snapshot that we can diff forward from rather than needing to collect all diffs from genesis
* test for state trie builder
* minor changes/fixes
* update version meta
* if statediffing is on, lock tries in triedb until the statediffing service signals they are done using them
* update version meta
* fix mock blockchain; golint; bump patch
* increase maxRequestContentLength; bump patch
* log the sizes of the state objects we are sending
* CI build (#20)
* CI: run build on PR and on push to master
* CI: debug building geth
* CI: fix copying file
* CI: fix copying file v2
* CI: temporarily upload file to release asset
* CI: get release upload_url by tag, upload asset to current release
* CI: fix tag name
* fix ci build on statediff_at_anyblock-1.9.11 branch
* fix publishing assets in release
* bump version meta
* use context deadline for timeout in eth_call (a client-side sketch follows this log)
* collect and emit codehash=>code mappings for state objects
* subscription endpoint for retrieving all the codehash=>code mappings that exist at provided height
* bump version meta
* Implement WriteStateDiffAt
* Writes state diffs directly to postgres
* Adds CLI flags to configure PG
* Refactors builder output with callbacks
* Copies refactored postgres handling code from ipld-eth-indexer
* rename PostgresCIDWriter.{index->upsert}*
* less ambiguous
* go.mod update
* rm unused
* cleanup
* output code & codehash iteratively
* had to refactor some types for this
* prometheus metrics output
* duplicate recent eth-indexer changes
* migrations and metrics...
* [wip] prom.Init() here? another CLI flag?
* cleanup
* tidy & DRY
* statediff WriteLoop service + CLI flag
* [wip] update test mocks
* todo - do something meaningful to test write loop
* logging
* use geth log
* port tests to go testing
* drop ginkgo/gomega
* fix and cleanup tests
* fail before defer statement
* delete vendor/ dir
* unused
* bump version meta
* fixes after rebase onto 1.9.23
* bump version meta
* fix API registration
* bump version meta
* use golang 1.15.5 version (#34)
* bump version meta; add 0.0.11 branch to actions
* bump version meta; update github actions workflows
* statediff: refactor metrics
* Remove redundant statediff/indexer/prom tooling and use existing
prometheus integration.
* cleanup
* "indexer" namespace for metrics
* add reporting loop for db metrics
* doc
* metrics for statediff stats
* metrics namespace/subsystem = statediff/{indexer,service}
* statediff: use a worker pool for direct writes (a generic worker-pool sketch follows this log)
* fix test
* fix chain event subscription
* log tweaks
* func name
* unused import
* intermediate chain event channel for metrics
* cleanup
* bump version meta
* update github actions; linting
* add poststate and status to receipt ipld indexes
* bump statediff version
* stateDiffFor endpoints for fetching or writing statediff object by blockhash; bump statediff version
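
To illustrate the note above about removing the code field from the diff output: contract bytecode is stored in the database keyed by its Keccak256 hash, so a consumer that has the codeHash can read the code back on demand. A minimal sketch, assuming a geth version that exposes rawdb.ReadCode and a hypothetical, already-open key-value store:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
)

// codeByHash reads contract bytecode back out of the database by its code hash.
// db is a hypothetical, already-open chaindata store.
func codeByHash(db ethdb.KeyValueReader, codeHash common.Hash) []byte {
	return rawdb.ReadCode(db, codeHash)
}

func main() {
	code := []byte{0x60, 0x80, 0x60, 0x40} // illustrative bytecode fragment
	codeHash := crypto.Keccak256Hash(code) // the hash recorded in the account's codeHash field
	fmt.Println("codeHash:", codeHash.Hex())
}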
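The LeafKey change above relies on the fact that, in the secure state trie, an account's leaf is keyed by the Keccak256 hash of its address, so a known address can always be mapped to the LeafKey reported in a diff. A minimal sketch (the address is an arbitrary example):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Arbitrary example address; any address of interest can be mapped the same way.
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")

	// In the secure state trie the account leaf sits at Keccak256(address),
	// which is the LeafKey the statediff output reports.
	leafKey := crypto.Keccak256Hash(addr.Bytes())

	fmt.Printf("address %s -> leaf key %s\n", addr.Hex(), leafKey.Hex())
}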
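The BytesToHash => Keccak256 fix above boils down to common.BytesToHash left-padding its input to 32 bytes, so inputs that differ only in leading zero bytes collapse to the same value, whereas Keccak256 distinguishes them. A small demonstration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// common.BytesToHash pads on the left, so an empty path and a single zero byte collide.
	a := common.BytesToHash([]byte{})
	b := common.BytesToHash([]byte{0x00})
	fmt.Println("BytesToHash equal:", a == b) // true

	// Keccak256 treats them as distinct inputs.
	c := crypto.Keccak256Hash([]byte{})
	d := crypto.Keccak256Hash([]byte{0x00})
	fmt.Println("Keccak256 equal:  ", c == d) // false
}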
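On the context-deadline change for eth_call: from a caller's perspective this means the deadline attached to the request context bounds the call, rather than only a fixed server-side timeout. A client-side sketch; the endpoint URL and contract address here are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // hypothetical node endpoint
	if err != nil {
		log.Fatal(err)
	}

	// Attach a deadline to the request context; the eth_call handler can observe it and abort.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	to := common.HexToAddress("0x0000000000000000000000000000000000000001") // hypothetical contract
	msg := ethereum.CallMsg{To: &to}
	res, err := client.CallContract(ctx, msg, nil) // nil block number = latest
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("eth_call returned %d bytes\n", len(res))
}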
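The worker-pool change names a standard Go pattern: fan incoming block heights out to a fixed number of goroutines so diffs can be built and written concurrently. The sketch below is a generic illustration of that pattern, not the service's actual types; blockHeight and writeStateDiff are stand-ins:

package main

import (
	"fmt"
	"sync"
)

type blockHeight uint64 // stand-in for the payload a worker receives

// writeStateDiff is a stand-in for the direct-write step (build a diff, persist it).
func writeStateDiff(h blockHeight) {
	fmt.Println("wrote state diff for block", h)
}

func main() {
	const numWorkers = 4
	jobs := make(chan blockHeight)

	var wg sync.WaitGroup
	for w := 0; w < numWorkers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for h := range jobs {
				writeStateDiff(h)
			}
		}()
	}

	// In the real service these heights would come from chain events.
	for h := blockHeight(1); h <= 10; h++ {
		jobs <- h
	}
	close(jobs)
	wg.Wait()
}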
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipld

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ipfs/go-cid"
	node "github.com/ipfs/go-ipld-format"
)

const (
	extension = "extension"
	leaf      = "leaf"
	branch    = "branch"
)

// TrieNode is the general abstraction for
// ethereum IPLD trie nodes.
type TrieNode struct {
	// leaf, extension or branch
	nodeKind string

	// If leaf or extension: [0] is key, [1] is val.
	// If branch: [0] - [16] are children.
	elements []interface{}

	// IPLD block information
	cid     cid.Cid
	rawdata []byte
}

/*
  OUTPUT
*/

type trieNodeLeafDecoder func([]interface{}) ([]interface{}, error)

// decodeTrieNode returns a TrieNode object from an IPLD block's
// cid and rawdata.
func decodeTrieNode(c cid.Cid, b []byte,
	leafDecoder trieNodeLeafDecoder) (*TrieNode, error) {
	var (
		i, decoded, elements []interface{}
		nodeKind             string
		err                  error
	)

	if err = rlp.DecodeBytes(b, &i); err != nil {
		return nil, err
	}

	codec := c.Type()
	switch len(i) {
	case 2:
		nodeKind, decoded, err = decodeCompactKey(i)
		if err != nil {
			return nil, err
		}

		if nodeKind == extension {
			elements, err = parseTrieNodeExtension(decoded, codec)
			if err != nil {
				return nil, err
			}
		}
		if nodeKind == leaf {
			elements, err = leafDecoder(decoded)
			if err != nil {
				return nil, err
			}
		}
		if nodeKind != extension && nodeKind != leaf {
			return nil, fmt.Errorf("unexpected nodeKind returned from decoder")
		}
	case 17:
		nodeKind = branch
		elements, err = parseTrieNodeBranch(i, codec)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown trie node type")
	}

	return &TrieNode{
		nodeKind: nodeKind,
		elements: elements,
		rawdata:  b,
		cid:      c,
	}, nil
}

// decodeCompactKey takes a compact key, and returns its nodeKind and value.
func decodeCompactKey(i []interface{}) (string, []interface{}, error) {
	first := i[0].([]byte)
	last := i[1].([]byte)

	switch first[0] / 16 {
	case '\x00':
		return extension, []interface{}{
			nibbleToByte(first)[2:],
			last,
		}, nil
	case '\x01':
		return extension, []interface{}{
			nibbleToByte(first)[1:],
			last,
		}, nil
	case '\x02':
		return leaf, []interface{}{
			nibbleToByte(first)[2:],
			last,
		}, nil
	case '\x03':
		return leaf, []interface{}{
			nibbleToByte(first)[1:],
			last,
		}, nil
	default:
		return "", nil, fmt.Errorf("unknown hex prefix")
	}
}

// parseTrieNodeExtension helper improves readability
func parseTrieNodeExtension(i []interface{}, codec uint64) ([]interface{}, error) {
	return []interface{}{
		i[0].([]byte),
		keccak256ToCid(codec, i[1].([]byte)),
	}, nil
}

// parseTrieNodeBranch helper improves readability
func parseTrieNodeBranch(i []interface{}, codec uint64) ([]interface{}, error) {
	var out []interface{}

	for i, vi := range i {
		v, ok := vi.([]byte)
		// Sometimes this throws "panic: interface conversion: interface {} is []interface {}, not []uint8"
		// Figure out why, and if it is okay to continue
		if !ok {
			return nil, fmt.Errorf("unable to decode branch node entry into []byte at position: %d value: %+v", i, vi)
		}

		switch len(v) {
		case 0:
			out = append(out, nil)
		case 32:
			out = append(out, keccak256ToCid(codec, v))
		default:
			return nil, fmt.Errorf("unrecognized object: %v", v)
		}
	}

	return out, nil
}

/*
  Node INTERFACE
*/

// Resolve resolves a path through this node, stopping at any link boundary
// and returning the object found as well as the remaining path to traverse
func (t *TrieNode) Resolve(p []string) (interface{}, []string, error) {
	switch t.nodeKind {
	case extension:
		return t.resolveTrieNodeExtension(p)
	case leaf:
		return t.resolveTrieNodeLeaf(p)
	case branch:
		return t.resolveTrieNodeBranch(p)
	default:
		return nil, nil, fmt.Errorf("nodeKind case not implemented")
	}
}

// Tree lists all paths within the object under 'path', and up to the given depth.
// To list the entire object (similar to `find .`) pass "" and -1
func (t *TrieNode) Tree(p string, depth int) []string {
	if p != "" || depth == 0 {
		return nil
	}

	var out []string

	switch t.nodeKind {
	case extension:
		var val string
		for _, e := range t.elements[0].([]byte) {
			val += fmt.Sprintf("%x", e)
		}
		return []string{val}
	case branch:
		for i, elem := range t.elements {
			if _, ok := elem.(*cid.Cid); ok {
				out = append(out, fmt.Sprintf("%x", i))
			}
		}
		return out

	default:
		return nil
	}
}

// ResolveLink is a helper function that calls resolve and asserts the
// output is a link
func (t *TrieNode) ResolveLink(p []string) (*node.Link, []string, error) {
	obj, rest, err := t.Resolve(p)
	if err != nil {
		return nil, nil, err
	}

	lnk, ok := obj.(*node.Link)
	if !ok {
		return nil, nil, fmt.Errorf("was not a link")
	}

	return lnk, rest, nil
}

// Copy will go away. It is here to comply with the interface.
func (t *TrieNode) Copy() node.Node {
	panic("dont use this yet")
}

// Links is a helper function that returns all links within this object
func (t *TrieNode) Links() []*node.Link {
	var out []*node.Link

	for _, i := range t.elements {
		c, ok := i.(cid.Cid)
		if ok {
			out = append(out, &node.Link{Cid: c})
		}
	}

	return out
}

// Stat will go away. It is here to comply with the interface.
func (t *TrieNode) Stat() (*node.NodeStat, error) {
	return &node.NodeStat{}, nil
}

// Size will go away. It is here to comply with the interface.
func (t *TrieNode) Size() (uint64, error) {
	return 0, nil
}

/*
  TrieNode functions
*/

// MarshalJSON processes the transaction trie into readable JSON format.
func (t *TrieNode) MarshalJSON() ([]byte, error) {
	var out map[string]interface{}

	switch t.nodeKind {
	case extension:
		fallthrough
	case leaf:
		var hexPrefix string
		for _, e := range t.elements[0].([]byte) {
			hexPrefix += fmt.Sprintf("%x", e)
		}

		// if we got a byte we need to do this casting otherwise
		// it will be marshaled to a base64 encoded value
		if _, ok := t.elements[1].([]byte); ok {
			var hexVal string
			for _, e := range t.elements[1].([]byte) {
				hexVal += fmt.Sprintf("%x", e)
			}

			t.elements[1] = hexVal
		}

		out = map[string]interface{}{
			"type":    t.nodeKind,
			hexPrefix: t.elements[1],
		}

	case branch:
		out = map[string]interface{}{
			"type": branch,
			"0":    t.elements[0],
			"1":    t.elements[1],
			"2":    t.elements[2],
			"3":    t.elements[3],
			"4":    t.elements[4],
			"5":    t.elements[5],
			"6":    t.elements[6],
			"7":    t.elements[7],
			"8":    t.elements[8],
			"9":    t.elements[9],
			"a":    t.elements[10],
			"b":    t.elements[11],
			"c":    t.elements[12],
			"d":    t.elements[13],
			"e":    t.elements[14],
			"f":    t.elements[15],
		}
	default:
		return nil, fmt.Errorf("nodeKind %s not supported", t.nodeKind)
	}

	return json.Marshal(out)
}

// nibbleToByte expands the nibbles of a byte slice into their own bytes.
func nibbleToByte(k []byte) []byte {
	var out []byte

	for _, b := range k {
		out = append(out, b/16)
		out = append(out, b%16)
	}

	return out
}

// Resolve reading conveniences
func (t *TrieNode) resolveTrieNodeExtension(p []string) (interface{}, []string, error) {
	nibbles := t.elements[0].([]byte)
	idx, rest := shiftFromPath(p, len(nibbles))
	if len(idx) < len(nibbles) {
		return nil, nil, fmt.Errorf("not enough nibbles to traverse this extension")
	}

	for _, i := range idx {
		if getHexIndex(string(i)) == -1 {
			return nil, nil, fmt.Errorf("invalid path element")
		}
	}

	for i, n := range nibbles {
		if string(idx[i]) != fmt.Sprintf("%x", n) {
			return nil, nil, fmt.Errorf("no such link in this extension")
		}
	}

	return &node.Link{Cid: t.elements[1].(cid.Cid)}, rest, nil
}

func (t *TrieNode) resolveTrieNodeLeaf(p []string) (interface{}, []string, error) {
	nibbles := t.elements[0].([]byte)

	if len(nibbles) != 0 {
		idx, rest := shiftFromPath(p, len(nibbles))
		if len(idx) < len(nibbles) {
			return nil, nil, fmt.Errorf("not enough nibbles to traverse this leaf")
		}

		for _, i := range idx {
			if getHexIndex(string(i)) == -1 {
				return nil, nil, fmt.Errorf("invalid path element")
			}
		}

		for i, n := range nibbles {
			if string(idx[i]) != fmt.Sprintf("%x", n) {
				return nil, nil, fmt.Errorf("no such link in this extension")
			}
		}

		p = rest
	}

	link, ok := t.elements[1].(node.Node)
	if !ok {
		return nil, nil, fmt.Errorf("leaf children is not an IPLD node")
	}

	return link.Resolve(p)
}

func (t *TrieNode) resolveTrieNodeBranch(p []string) (interface{}, []string, error) {
	idx, rest := shiftFromPath(p, 1)
	hidx := getHexIndex(idx)
	if hidx == -1 {
		return nil, nil, fmt.Errorf("incorrect path")
	}

	child := t.elements[hidx]
	if child != nil {
		return &node.Link{Cid: child.(cid.Cid)}, rest, nil
	}
	return nil, nil, fmt.Errorf("no such link in this branch")
}

// shiftFromPath extracts from a given path (as a slice of strings)
// the given number of elements as a single string, returning whatever
// it has not taken.
//
// Examples:
// ["0", "a", "something"] and 1 -> "0" and ["a", "something"]
// ["ab", "c", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
// ["abc", "d", "1"] and 2 -> "ab" and ["c", "d", "1"]
func shiftFromPath(p []string, i int) (string, []string) {
	var (
		out  string
		rest []string
	)

	for _, pe := range p {
		re := ""
		for _, c := range pe {
			if len(out) < i {
				out += string(c)
			} else {
				re += string(c)
			}
		}

		if len(out) == i && re != "" {
			rest = append(rest, re)
		}
	}

	return out, rest
}

// getHexIndex returns to you the integer 0 - 15 equivalent to your
// string character if applicable, or -1 otherwise.
func getHexIndex(s string) int {
	if len(s) != 1 {
		return -1
	}

	c := s[0]
	switch {
	case '0' <= c && c <= '9':
		return int(c - '0')
	case 'a' <= c && c <= 'f':
		return int(c - 'a' + 10)
	}

	return -1
}