First pass at decoding packed storage slots
This commit is contained in:
parent af6190ea09 · commit b0fff9a1dd
@@ -33,6 +33,8 @@ func Decode(row StorageDiffRow, metadata StorageValueMetadata) (interface{}, err
		return decodeAddress(row.StorageValue.Bytes()), nil
	case Bytes32:
		return row.StorageValue.Hex(), nil
	case PackedSlot:
		return decodePackedSlot(row.StorageValue.Bytes(), metadata.PackedTypes), nil
	default:
		panic(fmt.Sprintf("can't decode unknown type: %d", metadata.Type))
	}

@@ -51,3 +53,41 @@ func decodeUint48(raw []byte) string {
func decodeAddress(raw []byte) string {
	return common.BytesToAddress(raw).Hex()
}

// Returns a slice of decoded strings, one per packed item, ordered from the
// right-most (least significant) item in the slot to the left-most.
func decodePackedSlot(raw []byte, packedTypes map[int]ValueType) []string {
	storageSlot := raw
	var results []string
	// packedTypes is a map keyed by each item's position because ranging over a Go map
	// does not guarantee order; iterate by index so items are decoded in packed order.
	for index := 0; index < len(packedTypes); index++ {
		valueType := packedTypes[index]
		lengthOfStorageSlot := len(storageSlot)
		lengthOfItem := getLengthOfItem(valueType)
		itemStartingIndex := lengthOfStorageSlot - lengthOfItem
		value := storageSlot[itemStartingIndex:]
		decodedValue := decodeIndividualItems(value, valueType)
		results = append(results, decodedValue)

		// pop the decoded item off the slot before moving on
		storageSlot = storageSlot[0:itemStartingIndex]
	}

	return results
}

func decodeIndividualItems(itemBytes []byte, valueType ValueType) string {
	switch valueType {
	case Uint48:
		return decodeUint48(itemBytes)
	default:
		panic(fmt.Sprintf("can't decode unknown type: %d", valueType))
	}
}

func getLengthOfItem(valueType ValueType) int {
	switch valueType {
	case Uint48:
		return 6
	default:
		panic(fmt.Sprintf("ValueType %d not recognized", valueType))
	}
}

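For reference, the slicing above reads the 32-byte slot from its right-hand (least significant) end, peeling one item off per iteration. Below is a minimal standalone sketch of the same arithmetic, reusing the packed value from the test further down; it is illustrative only and not part of this commit.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Two uint48 values packed into the low 12 bytes of a 32-byte storage slot.
	slot := common.HexToHash("000000000000000000000000000000000000000000000002a300000000002a30").Bytes()

	const uint48Length = 6
	// First item: the last 6 bytes of the slot.
	first := slot[len(slot)-uint48Length:]
	// Pop that item off; the next 6 bytes hold the second item.
	rest := slot[:len(slot)-uint48Length]
	second := rest[len(rest)-uint48Length:]

	fmt.Println(big.NewInt(0).SetBytes(first).String())  // "10800"  (0x2a30)
	fmt.Println(big.NewInt(0).SetBytes(second).String()) // "172800" (0x2a300)
}
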
@@ -49,6 +49,31 @@ var _ = Describe("Storage decoder", func() {
		Expect(result).To(Equal(big.NewInt(0).SetBytes(fakeInt.Bytes()).String()))
	})

Describe("when there are multiple items packed in the storage slot", func() {
|
||||
It("decodes the first uint48 item packed in", func() {
|
||||
packedStorage := common.HexToHash("000000000000000000000000000000000000000000000002a300000000002a30")
|
||||
row := utils.StorageDiffRow{StorageValue: packedStorage}
|
||||
packedTypes := map[int]utils.ValueType{}
|
||||
packedTypes[0] = utils.Uint48
|
||||
packedTypes[1] = utils.Uint48
|
||||
|
||||
metadata := utils.StorageValueMetadata{
|
||||
Type: utils.PackedSlot,
|
||||
PackedTypes: packedTypes,
|
||||
}
|
||||
|
||||
result, err := utils.Decode(row, metadata)
|
||||
decodedValues := result.([]string)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectedResult1 := big.NewInt(0).SetBytes(common.HexToHash("2a30").Bytes()).String()
|
||||
expectedResult2 := big.NewInt(0).SetBytes(common.HexToHash("2a300").Bytes()).String()
|
||||
Expect(decodedValues[0]).To(Equal(expectedResult1))
|
||||
Expect(decodedValues[1]).To(Equal(expectedResult2))
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
It("decodes address", func() {
|
||||
fakeAddress := common.HexToAddress("0x12345")
|
||||
row := utils.StorageDiffRow{StorageValue: fakeAddress.Hash()}
|
||||
|
@@ -23,6 +23,7 @@ const (
	Uint48
	Bytes32
	Address
	PackedSlot
)

type Key string

@@ -31,6 +32,7 @@ type StorageValueMetadata struct {
	Name        string
	Keys        map[Key]string
	Type        ValueType
	PackedTypes map[int]ValueType // type of each packed item, keyed by its position (order) in the slot
}

func GetStorageValueMetadata(name string, keys map[Key]string, t ValueType) StorageValueMetadata {
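Since GetStorageValueMetadata still accepts only a name, keys, and a single ValueType, metadata for a packed slot is built by constructing the struct directly, as the test above does. A minimal sketch, written as if inside the same package; the slot name is a placeholder, not taken from this commit.

packedTypes := map[int]ValueType{
	0: Uint48, // right-most item in the slot
	1: Uint48, // next item to its left
}
metadata := StorageValueMetadata{
	Name:        "examplePackedSlot", // placeholder name
	Type:        PackedSlot,
	PackedTypes: packedTypes,
}
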
@@ -61,4 +61,3 @@ func RetrieveAndUpdateHeaders(blockChain core.BlockChain, headerRepository datas
	}
	return len(blockNumbers), nil
}