Fix the helper function to create a slice of required paths

Prathamesh Musale 2022-12-13 14:42:05 +05:30
parent 7cf38e742a
commit bc408ec680
2 changed files with 40 additions and 18 deletions


@@ -74,26 +74,37 @@ func getPaths(path string, depth int) ([]byte, [][]byte, [][]byte, error) {
 	return headPath, stemPaths, slicePaths, nil
 }
 
-// iterative function to generate the set of slice paths
+// An iterative function to generate the set of slice paths
 func makeSlicePaths(path []byte, depth int, slicePaths *[][]byte) {
-	depth-- // decrement the depth
-	nextPaths := make([][]byte, 16) // slice to hold the next 16 paths
-	for i, step := range pathSteps { // iterate through steps
-		nextPath := append(path, step) // create next paths by adding steps to current path
-		nextPaths[i] = nextPath
-		newSlicePaths := append(*slicePaths, nextPath) // add next paths to the collection of all slice paths
-		slicePaths = &newSlicePaths
-	}
-	if depth == 0 { // if depth has reach 0, return
+	// return if depth has reached 0
+	if depth <= 0 {
 		return
 	}
-	for _, nextPath := range nextPaths { // if not, then we iterate over the next paths
-		makeSlicePaths(nextPath, depth, slicePaths) // and repeat the process for each one
+
+	depth--
+
+	// slice to hold the next 16 paths
+	nextPaths := make([][]byte, 0, 16)
+	for _, step := range pathSteps {
+		// create next paths by adding steps to current path
+		nextPath := make([]byte, len(path))
+		copy(nextPath, path)
+		nextPath = append(nextPath, step)
+		nextPaths = append(nextPaths, nextPath)
+
+		// also add the next path to the collection of all slice paths
+		dst := make([]byte, len(nextPath))
+		copy(dst, nextPath)
+		*slicePaths = append(*slicePaths, dst)
+	}
+
+	// iterate over the next paths to repeat the process if not
+	for _, nextPath := range nextPaths {
+		makeSlicePaths(nextPath, depth, slicePaths)
 	}
 }
 
-// use to return timestamp in milliseconds
+// Timestamp in milliseconds
 func makeTimestamp() int64 {
 	return time.Now().UnixNano() / int64(time.Millisecond)
 }
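Note on the fix above: the core issue is a Go slice-aliasing pitfall. In the old code, append(path, step) can write into path's spare capacity, so the nextPath values produced across loop iterations may share (and overwrite) one backing array; and slicePaths = &newSlicePaths only rebinds the local pointer, so an append that reallocates is never visible to the caller. The new code copies before appending and appends through *slicePaths instead. Below is a minimal standalone sketch of the pitfall; it is illustrative only and not part of this repository.

package main

import "fmt"

// Minimal, standalone illustration (not from this repository) of why the old
// append(path, step) pattern is unsafe and why the fixed makeSlicePaths copies
// the path before appending a step.
func main() {
	prefix := make([]byte, 1, 4) // a shared prefix with spare capacity
	prefix[0] = 0x0

	var unsafePaths [][]byte
	for _, step := range []byte{0x1, 0x2, 0x3} {
		// All three results share prefix's backing array, so each
		// iteration overwrites the step written by the previous one.
		unsafePaths = append(unsafePaths, append(prefix, step))
	}
	fmt.Println(unsafePaths) // [[0 3] [0 3] [0 3]] -- clobbered

	var safePaths [][]byte
	for _, step := range []byte{0x1, 0x2, 0x3} {
		// Copy first, as the fixed code does, so every path owns
		// its own backing array.
		next := make([]byte, len(prefix), len(prefix)+1)
		copy(next, prefix)
		safePaths = append(safePaths, append(next, step))
	}
	fmt.Println(safePaths) // [[0 1] [0 2] [0 3]]
}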


@@ -17,6 +17,7 @@
 package eth
 
 import (
+	"database/sql"
 	"fmt"
 	"strconv"
@@ -239,7 +240,7 @@ const (
 	)
 	WHERE tx_hash = $1
 	AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
-	RetrieveStateByPathAndBlockNumberPgStr = `SELECT state_cids.cid, data
+	RetrieveStateByPathAndBlockNumberPgStr = `SELECT cid, data, node_type
 	FROM eth.state_cids
 	INNER JOIN public.blocks ON (
 		state_cids.mh_key = blocks.key
@@ -250,7 +251,7 @@ const (
 	AND node_type != 3
 	ORDER BY state_cids.block_number DESC
 	LIMIT 1`
-	RetrieveStorageByStateLeafKeyAndPathAndBlockNumberPgStr = `SELECT storage_cids.cid, data
+	RetrieveStorageByStateLeafKeyAndPathAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type
 	FROM eth.storage_cids
 	INNER JOIN eth.state_cids ON (
 		storage_cids.state_path = state_cids.state_path
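Both query constants above now also select node_type, so the returned row has three columns to scan. The diff does not show the nodeInfo definition it is scanned into; a hypothetical scan target compatible with these SELECTs would look roughly like the sketch below, where node type 3 marks a removed node (which is why the state query filters on node_type != 3).

package eth

// Hypothetical scan target for the widened SELECTs above; the actual nodeInfo
// struct is defined elsewhere in this package and may carry more fields.
type nodeInfoSketch struct {
	CID      string `db:"cid"`
	Data     []byte `db:"data"`
	NodeType int    `db:"node_type"` // 3 = removed node, per the node_type != 3 filter
}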
@@ -765,9 +766,14 @@ func (r *IPLDRetriever) RetrieveStatesByPathsAndBlockNumber(tx *sqlx.Tx, paths [
 	// TODO: fetch all nodes in a single query
 	for _, path := range paths {
-		// Create a result object, select: cid, data
+		// Create a result object, select: cid, data, node_type
 		res := new(nodeInfo)
 		if err := tx.Get(res, RetrieveStateByPathAndBlockNumberPgStr, path, number); err != nil {
+			// we will not find a node for each path
+			if err == sql.ErrNoRows {
+				continue
+			}
+
 			return nil, nil, nil, nil, 0, err
 		}
@@ -805,9 +811,14 @@ func (r *IPLDRetriever) RetrieveStorageByStateLeafKeyAndPathsAndBlockNumber(tx *
 	// TODO: fetch all nodes in a single query
 	for _, path := range paths {
-		// Create a result object, select: cid, data
+		// Create a result object, select: cid, data, node_type
 		res := new(nodeInfo)
 		if err := tx.Get(res, RetrieveStorageByStateLeafKeyAndPathAndBlockNumberPgStr, stateLeafKey, path, number); err != nil {
+			// we will not find a node for each path
+			if err == sql.ErrNoRows {
+				continue
+			}
+
 			return nil, nil, nil, nil, 0, err
 		}