Merge remote-tracking branch 'upstream/master' into statediff-additions
commit 2bd23e1f3c
@@ -121,6 +121,7 @@ type StateDB interface {
 
 type RWStateDB interface {
 	StateDB
+	AddBalance(add Address, amount *big.Int)
 }
 
 type ScopeContext interface {
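For illustration only: a hedged sketch of how plugin code might use the AddBalance method this hunk adds to RWStateDB. It assumes the RWStateDB and Address types from the interfaces file above are in scope, that math/big is imported for *big.Int, and that the helper name creditAccount is made up for this example.

// creditAccount credits amount to addr through the read/write state interface.
// RWStateDB embeds StateDB, so all of the read-only accessors remain available
// alongside the new AddBalance method.
func creditAccount(db RWStateDB, addr Address, amount *big.Int) {
	db.AddBalance(addr, amount)
}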
@@ -14,7 +14,7 @@ Flags
 
 * **Name:** Flags
 * **Type:** `flag.FlagSet`_
-* **Behavior:** This FlagSet will be parsed and your plugin will be able to access the resulting flags. Flags will be passed to Geth from the command line and are intended to of the plugin. Note that if any flags are provided, certain checks are disabled within Geth to avoid failing due to unexpected flags.
+* **Behavior:** This FlagSet will be parsed and your plugin will be able to access the resulting flags. Flags will be passed to Geth from the command line and are intended to configure the behavior of the plugin. Passed flags must follow ``--`` to be parsed by this FlagSet, which is necessary to avoid Geth failing due to unexpected flags.
 
 Subcommands
 -----------
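As a rough sketch of the convention the updated entry describes (the package layout and the flag name are illustrative, not taken from this commit), a plugin exposes a package-level flag.FlagSet named Flags and reads the parsed values once Geth hands over everything that follows "--" on the command line.

package main

import "flag"

// Flags is the exported FlagSet the framework parses with the arguments that
// appear after "--" on the geth command line.
var Flags flag.FlagSet

// "statediff.url" is a hypothetical flag name used only for illustration.
var serverURL = Flags.String("statediff.url", "http://localhost:8000", "endpoint this plugin reports to")

// Example invocation: geth <geth flags> -- --statediff.url=http://example.com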
@@ -356,4 +356,4 @@ logging based on the interfaces of `Log15 <https://github.com/inconshreveable/lo
 .. _PluGeth-Utils: https://github.com/openrelayxyz/plugeth-utils
 .. _*cli.Context: https://pkg.go.dev/github.com/urfave/cli#Context
 .. _flag.FlagSet: https://pkg.go.dev/flag#FlagSet
 .. _Native Plugin System: https://pkg.go.dev/plugin
@@ -15,7 +15,7 @@ These plugins provide new json rpc methods to access several objects containing
 Subcommand
 ------------
 
-A subcommand redifines the total behavior of Geth and could stand on its own. In contrast with the other plugin types which, in general, are meant to capture and manipulate information, a subcommand is meant to change the overall behavior of Geth. It may do this in order to capture information but the primary fuctionality is a modulation of geth behaviour.
+A subcommand redefines the total behavior of Geth and could stand on its own. In contrast with the other plugin types which, in general, are meant to capture and manipulate information, a subcommand is meant to change the overall behavior of Geth. It may do this in order to capture information but the primary functionality is a modulation of Geth behavior.
 
 Tracers
 -------
go.mod (28 changed lines)
@@ -4,20 +4,32 @@ go 1.19
 
 require (
 	github.com/btcsuite/btcd/btcec/v2 v2.2.0
+	github.com/consensys/gnark-crypto v0.12.1
+	github.com/crate-crypto/go-kzg-4844 v0.7.0
 	github.com/davecgh/go-spew v1.1.1
-	github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
-	github.com/holiman/uint256 v1.2.0
-	github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
+	github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4
+	github.com/google/gofuzz v1.2.0
+	github.com/holiman/uint256 v1.2.4
+	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
 	github.com/kylelemons/godebug v1.1.0
-	github.com/stretchr/testify v1.7.2
-	golang.org/x/crypto v0.1.0
-	golang.org/x/sys v0.5.0
+	github.com/openrelayxyz/cardinal-types v1.1.1
+	github.com/stretchr/testify v1.8.4
+	golang.org/x/crypto v0.17.0
+	golang.org/x/sys v0.15.0
+	golang.org/x/tools v0.15.0
 )
 
 require (
+	github.com/bits-and-blooms/bitset v1.10.0 // indirect
+	github.com/consensys/bavard v0.1.13 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
-	github.com/kr/pretty v0.1.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/mmcloughlin/addchain v0.4.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
+	github.com/supranational/blst v0.3.11 // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/sync v0.5.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	rsc.io/tmplfunc v0.0.3 // indirect
 )
go.sum (94 changed lines)
@@ -1,6 +1,16 @@
+github.com/aws/aws-sdk-go v1.44.36/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
+github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
 github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
+github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
+github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
+github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -8,36 +18,70 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK
|
|||||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||||
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
|
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
|
||||||
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
|
||||||
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
|
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
|
||||||
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||||
|
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||||
|
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||||
|
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||||
|
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||||
|
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||||
|
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||||
|
github.com/openrelayxyz/cardinal-types v1.1.1 h1:Lw6Lr/eiHYCnLi851rciCzw/1S3UytUX7kj5zh3QS/Y=
|
||||||
|
github.com/openrelayxyz/cardinal-types v1.1.1/go.mod h1:8aaMg6i94V0hhWe3V6Fzc0RSggMx+/Kabsf5o7wMf/E=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b/go.mod h1:5UoZ1X6PWZWpPxwpR8qZ/qTN2BXIrrYTV9j+6TaQngA=
|
||||||
|
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||||
|
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||||
|
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||||
|
github.com/savaki/cloudmetrics v0.0.0-20160314183336-c82bfea3c09e/go.mod h1:KzTM/+pS9NbNPoC7/EBZq77Za7His7hp1NJhA0DrMns=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
|
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
|
||||||
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
golang.org/x/net v0.0.0-20220615171555-694bf12d69de/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||||
|
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
|
||||||
|
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||||
|
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||||
|
restricted/crypto/kzg4844/kzg4844.go (new file)
@@ -0,0 +1,110 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package kzg4844 implements the KZG crypto for EIP-4844.
|
||||||
|
package kzg4844
|
||||||
|
|
||||||
|
import (
|
||||||
|
"embed"
|
||||||
|
"errors"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:embed trusted_setup.json
|
||||||
|
var content embed.FS
|
||||||
|
|
||||||
|
// Blob represents a 4844 data blob.
|
||||||
|
type Blob [131072]byte
|
||||||
|
|
||||||
|
// Commitment is a serialized commitment to a polynomial.
|
||||||
|
type Commitment [48]byte
|
||||||
|
|
||||||
|
// Proof is a serialized commitment to the quotient polynomial.
|
||||||
|
type Proof [48]byte
|
||||||
|
|
||||||
|
// Point is a BLS field element.
|
||||||
|
type Point [32]byte
|
||||||
|
|
||||||
|
// Claim is a claimed evaluation value in a specific point.
|
||||||
|
type Claim [32]byte
|
||||||
|
|
||||||
|
// useCKZG controls whether the cryptography should use the Go or C backend.
|
||||||
|
var useCKZG atomic.Bool
|
||||||
|
|
||||||
|
// UseCKZG can be called to switch the default Go implementation of KZG to the C
|
||||||
|
// library if fo some reason the user wishes to do so (e.g. consensus bug in one
|
||||||
|
// or the other).
|
||||||
|
func UseCKZG(use bool) error {
|
||||||
|
if use && !ckzgAvailable {
|
||||||
|
return errors.New("CKZG unavailable on your platform")
|
||||||
|
}
|
||||||
|
useCKZG.Store(use)
|
||||||
|
|
||||||
|
// Initializing the library can take 2-4 seconds - and can potentially crash
|
||||||
|
// on CKZG and non-ADX CPUs - so might as well do it now and don't wait until
|
||||||
|
// a crypto operation is actually needed live.
|
||||||
|
if use {
|
||||||
|
ckzgIniter.Do(ckzgInit)
|
||||||
|
} else {
|
||||||
|
gokzgIniter.Do(gokzgInit)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlobToCommitment creates a small commitment out of a data blob.
|
||||||
|
func BlobToCommitment(blob Blob) (Commitment, error) {
|
||||||
|
if useCKZG.Load() {
|
||||||
|
return ckzgBlobToCommitment(blob)
|
||||||
|
}
|
||||||
|
return gokzgBlobToCommitment(blob)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputeProof computes the KZG proof at the given point for the polynomial
|
||||||
|
// represented by the blob.
|
||||||
|
func ComputeProof(blob Blob, point Point) (Proof, Claim, error) {
|
||||||
|
if useCKZG.Load() {
|
||||||
|
return ckzgComputeProof(blob, point)
|
||||||
|
}
|
||||||
|
return gokzgComputeProof(blob, point)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyProof verifies the KZG proof that the polynomial represented by the blob
|
||||||
|
// evaluated at the given point is the claimed value.
|
||||||
|
func VerifyProof(commitment Commitment, point Point, claim Claim, proof Proof) error {
|
||||||
|
if useCKZG.Load() {
|
||||||
|
return ckzgVerifyProof(commitment, point, claim, proof)
|
||||||
|
}
|
||||||
|
return gokzgVerifyProof(commitment, point, claim, proof)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputeBlobProof returns the KZG proof that is used to verify the blob against
|
||||||
|
// the commitment.
|
||||||
|
//
|
||||||
|
// This method does not verify that the commitment is correct with respect to blob.
|
||||||
|
func ComputeBlobProof(blob Blob, commitment Commitment) (Proof, error) {
|
||||||
|
if useCKZG.Load() {
|
||||||
|
return ckzgComputeBlobProof(blob, commitment)
|
||||||
|
}
|
||||||
|
return gokzgComputeBlobProof(blob, commitment)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyBlobProof verifies that the blob data corresponds to the provided commitment.
|
||||||
|
func VerifyBlobProof(blob Blob, commitment Commitment, proof Proof) error {
|
||||||
|
if useCKZG.Load() {
|
||||||
|
return ckzgVerifyBlobProof(blob, commitment, proof)
|
||||||
|
}
|
||||||
|
return gokzgVerifyBlobProof(blob, commitment, proof)
|
||||||
|
}
|
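The kzg4844 package introduced in the new file above exposes BlobToCommitment, ComputeProof and VerifyProof (plus the blob-level variants). A hedged usage sketch follows; the import path is inferred from this repository's layout, and the all-zero blob is used only because every zero 32-byte scalar is a valid field element. A real caller would fill the blob with canonical field elements.

package main

import (
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/restricted/crypto/kzg4844"
)

func main() {
	var blob kzg4844.Blob // zero blob: every 32-byte scalar is 0, which is in range

	commitment, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		panic(err)
	}

	var point kzg4844.Point // evaluate the blob polynomial at z = 0
	proof, claim, err := kzg4844.ComputeProof(blob, point)
	if err != nil {
		panic(err)
	}

	// VerifyProof returns nil when the claimed evaluation checks out.
	fmt.Println(kzg4844.VerifyProof(commitment, point, claim, proof))
}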
restricted/crypto/kzg4844/kzg4844_ckzg_cgo.go (new file)
@@ -0,0 +1,127 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build ckzg && !nacl && !js && cgo && !gofuzz
|
||||||
|
|
||||||
|
package kzg4844
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
gokzg4844 "github.com/crate-crypto/go-kzg-4844"
|
||||||
|
ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||||
|
"github.com/openrelayxyz/cardinal-types/hexutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ckzgAvailable signals whether the library was compiled into Geth.
|
||||||
|
const ckzgAvailable = true
|
||||||
|
|
||||||
|
// ckzgIniter ensures that we initialize the KZG library once before using it.
|
||||||
|
var ckzgIniter sync.Once
|
||||||
|
|
||||||
|
// ckzgInit initializes the KZG library with the provided trusted setup.
|
||||||
|
func ckzgInit() {
|
||||||
|
config, err := content.ReadFile("trusted_setup.json")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
params := new(gokzg4844.JSONTrustedSetup)
|
||||||
|
if err = json.Unmarshal(config, params); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if err = gokzg4844.CheckTrustedSetupIsWellFormed(params); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
g1s := make([]byte, len(params.SetupG1Lagrange)*(len(params.SetupG1Lagrange[0])-2)/2)
|
||||||
|
for i, g1 := range params.SetupG1Lagrange {
|
||||||
|
copy(g1s[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||||
|
}
|
||||||
|
g2s := make([]byte, len(params.SetupG2)*(len(params.SetupG2[0])-2)/2)
|
||||||
|
for i, g2 := range params.SetupG2 {
|
||||||
|
copy(g2s[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
|
||||||
|
}
|
||||||
|
if err = ckzg4844.LoadTrustedSetup(g1s, g2s); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgBlobToCommitment creates a small commitment out of a data blob.
|
||||||
|
func ckzgBlobToCommitment(blob Blob) (Commitment, error) {
|
||||||
|
ckzgIniter.Do(ckzgInit)
|
||||||
|
|
||||||
|
commitment, err := ckzg4844.BlobToKZGCommitment((ckzg4844.Blob)(blob))
|
||||||
|
if err != nil {
|
||||||
|
return Commitment{}, err
|
||||||
|
}
|
||||||
|
return (Commitment)(commitment), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgComputeProof computes the KZG proof at the given point for the polynomial
|
||||||
|
// represented by the blob.
|
||||||
|
func ckzgComputeProof(blob Blob, point Point) (Proof, Claim, error) {
|
||||||
|
ckzgIniter.Do(ckzgInit)
|
||||||
|
|
||||||
|
proof, claim, err := ckzg4844.ComputeKZGProof((ckzg4844.Blob)(blob), (ckzg4844.Bytes32)(point))
|
||||||
|
if err != nil {
|
||||||
|
return Proof{}, Claim{}, err
|
||||||
|
}
|
||||||
|
return (Proof)(proof), (Claim)(claim), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgVerifyProof verifies the KZG proof that the polynomial represented by the blob
|
||||||
|
// evaluated at the given point is the claimed value.
|
||||||
|
func ckzgVerifyProof(commitment Commitment, point Point, claim Claim, proof Proof) error {
|
||||||
|
ckzgIniter.Do(ckzgInit)
|
||||||
|
|
||||||
|
valid, err := ckzg4844.VerifyKZGProof((ckzg4844.Bytes48)(commitment), (ckzg4844.Bytes32)(point), (ckzg4844.Bytes32)(claim), (ckzg4844.Bytes48)(proof))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !valid {
|
||||||
|
return errors.New("invalid proof")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgComputeBlobProof returns the KZG proof that is used to verify the blob against
|
||||||
|
// the commitment.
|
||||||
|
//
|
||||||
|
// This method does not verify that the commitment is correct with respect to blob.
|
||||||
|
func ckzgComputeBlobProof(blob Blob, commitment Commitment) (Proof, error) {
|
||||||
|
ckzgIniter.Do(ckzgInit)
|
||||||
|
|
||||||
|
proof, err := ckzg4844.ComputeBlobKZGProof((ckzg4844.Blob)(blob), (ckzg4844.Bytes48)(commitment))
|
||||||
|
if err != nil {
|
||||||
|
return Proof{}, err
|
||||||
|
}
|
||||||
|
return (Proof)(proof), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgVerifyBlobProof verifies that the blob data corresponds to the provided commitment.
|
||||||
|
func ckzgVerifyBlobProof(blob Blob, commitment Commitment, proof Proof) error {
|
||||||
|
ckzgIniter.Do(ckzgInit)
|
||||||
|
|
||||||
|
valid, err := ckzg4844.VerifyBlobKZGProof((ckzg4844.Blob)(blob), (ckzg4844.Bytes48)(commitment), (ckzg4844.Bytes48)(proof))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !valid {
|
||||||
|
return errors.New("invalid proof")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
restricted/crypto/kzg4844/kzg4844_ckzg_nocgo.go (new file)
@@ -0,0 +1,62 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build !ckzg || nacl || js || !cgo || gofuzz
|
||||||
|
|
||||||
|
package kzg4844
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
// ckzgAvailable signals whether the library was compiled into Geth.
|
||||||
|
const ckzgAvailable = false
|
||||||
|
|
||||||
|
// ckzgIniter ensures that we initialize the KZG library once before using it.
|
||||||
|
var ckzgIniter sync.Once
|
||||||
|
|
||||||
|
// ckzgInit initializes the KZG library with the provided trusted setup.
|
||||||
|
func ckzgInit() {
|
||||||
|
panic("unsupported platform")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgBlobToCommitment creates a small commitment out of a data blob.
|
||||||
|
func ckzgBlobToCommitment(blob Blob) (Commitment, error) {
|
||||||
|
panic("unsupported platform")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgComputeProof computes the KZG proof at the given point for the polynomial
|
||||||
|
// represented by the blob.
|
||||||
|
func ckzgComputeProof(blob Blob, point Point) (Proof, Claim, error) {
|
||||||
|
panic("unsupported platform")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgVerifyProof verifies the KZG proof that the polynomial represented by the blob
|
||||||
|
// evaluated at the given point is the claimed value.
|
||||||
|
func ckzgVerifyProof(commitment Commitment, point Point, claim Claim, proof Proof) error {
|
||||||
|
panic("unsupported platform")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgComputeBlobProof returns the KZG proof that is used to verify the blob against
|
||||||
|
// the commitment.
|
||||||
|
//
|
||||||
|
// This method does not verify that the commitment is correct with respect to blob.
|
||||||
|
func ckzgComputeBlobProof(blob Blob, commitment Commitment) (Proof, error) {
|
||||||
|
panic("unsupported platform")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ckzgVerifyBlobProof verifies that the blob data corresponds to the provided commitment.
|
||||||
|
func ckzgVerifyBlobProof(blob Blob, commitment Commitment, proof Proof) error {
|
||||||
|
panic("unsupported platform")
|
||||||
|
}
|
restricted/crypto/kzg4844/kzg4844_gokzg.go (new file)
@@ -0,0 +1,98 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package kzg4844
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
gokzg4844 "github.com/crate-crypto/go-kzg-4844"
|
||||||
|
)
|
||||||
|
|
||||||
|
// context is the crypto primitive pre-seeded with the trusted setup parameters.
|
||||||
|
var context *gokzg4844.Context
|
||||||
|
|
||||||
|
// gokzgIniter ensures that we initialize the KZG library once before using it.
|
||||||
|
var gokzgIniter sync.Once
|
||||||
|
|
||||||
|
// gokzgInit initializes the KZG library with the provided trusted setup.
|
||||||
|
func gokzgInit() {
|
||||||
|
config, err := content.ReadFile("trusted_setup.json")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
params := new(gokzg4844.JSONTrustedSetup)
|
||||||
|
if err = json.Unmarshal(config, params); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
context, err = gokzg4844.NewContext4096(params)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// gokzgBlobToCommitment creates a small commitment out of a data blob.
|
||||||
|
func gokzgBlobToCommitment(blob Blob) (Commitment, error) {
|
||||||
|
gokzgIniter.Do(gokzgInit)
|
||||||
|
|
||||||
|
commitment, err := context.BlobToKZGCommitment((gokzg4844.Blob)(blob), 0)
|
||||||
|
if err != nil {
|
||||||
|
return Commitment{}, err
|
||||||
|
}
|
||||||
|
return (Commitment)(commitment), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// gokzgComputeProof computes the KZG proof at the given point for the polynomial
|
||||||
|
// represented by the blob.
|
||||||
|
func gokzgComputeProof(blob Blob, point Point) (Proof, Claim, error) {
|
||||||
|
gokzgIniter.Do(gokzgInit)
|
||||||
|
|
||||||
|
proof, claim, err := context.ComputeKZGProof((gokzg4844.Blob)(blob), (gokzg4844.Scalar)(point), 0)
|
||||||
|
if err != nil {
|
||||||
|
return Proof{}, Claim{}, err
|
||||||
|
}
|
||||||
|
return (Proof)(proof), (Claim)(claim), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// gokzgVerifyProof verifies the KZG proof that the polynomial represented by the blob
|
||||||
|
// evaluated at the given point is the claimed value.
|
||||||
|
func gokzgVerifyProof(commitment Commitment, point Point, claim Claim, proof Proof) error {
|
||||||
|
gokzgIniter.Do(gokzgInit)
|
||||||
|
|
||||||
|
return context.VerifyKZGProof((gokzg4844.KZGCommitment)(commitment), (gokzg4844.Scalar)(point), (gokzg4844.Scalar)(claim), (gokzg4844.KZGProof)(proof))
|
||||||
|
}
|
||||||
|
|
||||||
|
// gokzgComputeBlobProof returns the KZG proof that is used to verify the blob against
|
||||||
|
// the commitment.
|
||||||
|
//
|
||||||
|
// This method does not verify that the commitment is correct with respect to blob.
|
||||||
|
func gokzgComputeBlobProof(blob Blob, commitment Commitment) (Proof, error) {
|
||||||
|
gokzgIniter.Do(gokzgInit)
|
||||||
|
|
||||||
|
proof, err := context.ComputeBlobKZGProof((gokzg4844.Blob)(blob), (gokzg4844.KZGCommitment)(commitment), 0)
|
||||||
|
if err != nil {
|
||||||
|
return Proof{}, err
|
||||||
|
}
|
||||||
|
return (Proof)(proof), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// gokzgVerifyBlobProof verifies that the blob data corresponds to the provided commitment.
|
||||||
|
func gokzgVerifyBlobProof(blob Blob, commitment Commitment, proof Proof) error {
|
||||||
|
gokzgIniter.Do(gokzgInit)
|
||||||
|
|
||||||
|
return context.VerifyBlobKZGProof((gokzg4844.Blob)(blob), (gokzg4844.KZGCommitment)(commitment), (gokzg4844.KZGProof)(proof))
|
||||||
|
}
|
restricted/crypto/kzg4844/kzg4844_test.go (new file)
@@ -0,0 +1,195 @@
|
|||||||
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package kzg4844
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||||
|
gokzg4844 "github.com/crate-crypto/go-kzg-4844"
|
||||||
|
)
|
||||||
|
|
||||||
|
func randFieldElement() [32]byte {
|
||||||
|
bytes := make([]byte, 32)
|
||||||
|
_, err := rand.Read(bytes)
|
||||||
|
if err != nil {
|
||||||
|
panic("failed to get random field element")
|
||||||
|
}
|
||||||
|
var r fr.Element
|
||||||
|
r.SetBytes(bytes)
|
||||||
|
|
||||||
|
return gokzg4844.SerializeScalar(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func randBlob() Blob {
|
||||||
|
var blob Blob
|
||||||
|
for i := 0; i < len(blob); i += gokzg4844.SerializedScalarSize {
|
||||||
|
fieldElementBytes := randFieldElement()
|
||||||
|
copy(blob[i:i+gokzg4844.SerializedScalarSize], fieldElementBytes[:])
|
||||||
|
}
|
||||||
|
return blob
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCKZGWithPoint(t *testing.T) { testKZGWithPoint(t, true) }
|
||||||
|
func TestGoKZGWithPoint(t *testing.T) { testKZGWithPoint(t, false) }
|
||||||
|
func testKZGWithPoint(t *testing.T, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
t.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
blob := randBlob()
|
||||||
|
|
||||||
|
commitment, err := BlobToCommitment(blob)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create KZG commitment from blob: %v", err)
|
||||||
|
}
|
||||||
|
point := randFieldElement()
|
||||||
|
proof, claim, err := ComputeProof(blob, point)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create KZG proof at point: %v", err)
|
||||||
|
}
|
||||||
|
if err := VerifyProof(commitment, point, claim, proof); err != nil {
|
||||||
|
t.Fatalf("failed to verify KZG proof at point: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCKZGWithBlob(t *testing.T) { testKZGWithBlob(t, true) }
|
||||||
|
func TestGoKZGWithBlob(t *testing.T) { testKZGWithBlob(t, false) }
|
||||||
|
func testKZGWithBlob(t *testing.T, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
t.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
blob := randBlob()
|
||||||
|
|
||||||
|
commitment, err := BlobToCommitment(blob)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create KZG commitment from blob: %v", err)
|
||||||
|
}
|
||||||
|
proof, err := ComputeBlobProof(blob, commitment)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create KZG proof for blob: %v", err)
|
||||||
|
}
|
||||||
|
if err := VerifyBlobProof(blob, commitment, proof); err != nil {
|
||||||
|
t.Fatalf("failed to verify KZG proof for blob: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCKZGBlobToCommitment(b *testing.B) { benchmarkBlobToCommitment(b, true) }
|
||||||
|
func BenchmarkGoKZGBlobToCommitment(b *testing.B) { benchmarkBlobToCommitment(b, false) }
|
||||||
|
func benchmarkBlobToCommitment(b *testing.B, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
b.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
blob := randBlob()
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
BlobToCommitment(blob)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCKZGComputeProof(b *testing.B) { benchmarkComputeProof(b, true) }
|
||||||
|
func BenchmarkGoKZGComputeProof(b *testing.B) { benchmarkComputeProof(b, false) }
|
||||||
|
func benchmarkComputeProof(b *testing.B, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
b.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
var (
|
||||||
|
blob = randBlob()
|
||||||
|
point = randFieldElement()
|
||||||
|
)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ComputeProof(blob, point)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCKZGVerifyProof(b *testing.B) { benchmarkVerifyProof(b, true) }
|
||||||
|
func BenchmarkGoKZGVerifyProof(b *testing.B) { benchmarkVerifyProof(b, false) }
|
||||||
|
func benchmarkVerifyProof(b *testing.B, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
b.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
var (
|
||||||
|
blob = randBlob()
|
||||||
|
point = randFieldElement()
|
||||||
|
commitment, _ = BlobToCommitment(blob)
|
||||||
|
proof, claim, _ = ComputeProof(blob, point)
|
||||||
|
)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
VerifyProof(commitment, point, claim, proof)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCKZGComputeBlobProof(b *testing.B) { benchmarkComputeBlobProof(b, true) }
|
||||||
|
func BenchmarkGoKZGComputeBlobProof(b *testing.B) { benchmarkComputeBlobProof(b, false) }
|
||||||
|
func benchmarkComputeBlobProof(b *testing.B, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
b.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
var (
|
||||||
|
blob = randBlob()
|
||||||
|
commitment, _ = BlobToCommitment(blob)
|
||||||
|
)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ComputeBlobProof(blob, commitment)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCKZGVerifyBlobProof(b *testing.B) { benchmarkVerifyBlobProof(b, true) }
|
||||||
|
func BenchmarkGoKZGVerifyBlobProof(b *testing.B) { benchmarkVerifyBlobProof(b, false) }
|
||||||
|
func benchmarkVerifyBlobProof(b *testing.B, ckzg bool) {
|
||||||
|
if ckzg && !ckzgAvailable {
|
||||||
|
b.Skip("CKZG unavailable in this test build")
|
||||||
|
}
|
||||||
|
defer func(old bool) { useCKZG.Store(old) }(useCKZG.Load())
|
||||||
|
useCKZG.Store(ckzg)
|
||||||
|
|
||||||
|
var (
|
||||||
|
blob = randBlob()
|
||||||
|
commitment, _ = BlobToCommitment(blob)
|
||||||
|
proof, _ = ComputeBlobProof(blob, commitment)
|
||||||
|
)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
VerifyBlobProof(blob, commitment, proof)
|
||||||
|
}
|
||||||
|
}
|
restricted/crypto/kzg4844/trusted_setup.json (new file, 4167 lines; diff suppressed because it is too large)
@@ -21,7 +21,6 @@ import (
 	"bytes"
 	"encoding/gob"
 	"errors"
-	"fmt"
 	"io"
 	"sync"
 
@@ -213,9 +212,7 @@ func (st *StackTrie) TryUpdate(key, value []byte) error {
 }
 
 func (st *StackTrie) Update(key, value []byte) {
-	if err := st.TryUpdate(key, value); err != nil {
-		fmt.Println("Unhandled trie error in StackTrie.Update:", err)
-	}
+	st.TryUpdate(key, value)
 }
 
 func (st *StackTrie) Reset() {
@@ -152,6 +152,20 @@ const (
 	Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation
 	Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation
 
+	BlobTxHashVersion = 0x01 // Version byte of the commitment hash
+	BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
+	BlobTxMaxDataGasPerBlock = 1 << 19 // Maximum consumable data gas for data blobs per block
+	BlobTxTargetDataGasPerBlock = 1 << 18 // Target consumable data gas for data blobs per block (for 1559-like pricing)
+	BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
+	BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs
+	BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price
+	BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile.
+	BlobTxTargetBlobGasPerBlock = 1 << 18 // Target consumable blob gas for data blobs per block (for 1559-like pricing)
+
+	BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs
+	BlobTxBlobGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for blob gas price
+
+
 	// The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529,
 	// up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529
 	RefundQuotient uint64 = 2
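Taken together, the blob constants added above pin down the per-block blob capacity. A small standalone arithmetic sketch using local copies of the listed values:

package main

import "fmt"

func main() {
	// Local copies of the values introduced above:
	// BlobTxBlobGasPerBlob = 1 << 17, BlobTxMaxDataGasPerBlock = 1 << 19,
	// BlobTxTargetBlobGasPerBlock = 1 << 18.
	const (
		gasPerBlob     = 1 << 17
		maxGasPerBlock = 1 << 19
		targetGas      = 1 << 18
	)
	fmt.Println("max blobs per block:", maxGasPerBlock/gasPerBlob) // 4
	fmt.Println("target blobs per block:", targetGas/gasPerBlob)   // 2
}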
@@ -27,6 +27,9 @@ import (
 	"reflect"
 	"strings"
 	"sync"
+
+	"github.com/openrelayxyz/plugeth-utils/restricted/rlp/internal/rlpstruct"
+	"github.com/holiman/uint256"
 )
 
 //lint:ignore ST1012 EOL is not an error.
@@ -50,6 +53,7 @@ var (
 	errUintOverflow = errors.New("rlp: uint overflow")
 	errNoPointer = errors.New("rlp: interface given to Decode must be a pointer")
 	errDecodeIntoNil = errors.New("rlp: pointer given to Decode must not be nil")
+	errUint256Large = errors.New("rlp: value too large for uint256")
 
 	streamPool = sync.Pool{
 		New: func() interface{} { return new(Stream) },
@@ -74,7 +78,7 @@ type Decoder interface {
 // Note that Decode does not set an input limit for all readers and may be vulnerable to
 // panics cause by huge value sizes. If you need an input limit, use
 //
 //	NewStream(r, limit).Decode(val)
 func Decode(r io.Reader, val interface{}) error {
 	stream := streamPool.Get().(*Stream)
 	defer streamPool.Put(stream)
@@ -86,7 +90,7 @@ func Decode(r io.Reader, val interface{}) error {
 // DecodeBytes parses RLP data from b into val. Please see package-level documentation for
 // the decoding rules. The input must contain exactly one value and no trailing data.
 func DecodeBytes(b []byte, val interface{}) error {
-	r := bytes.NewReader(b)
+	r := (*sliceReader)(&b)
 
 	stream := streamPool.Get().(*Stream)
 	defer streamPool.Put(stream)
@@ -95,7 +99,7 @@ func DecodeBytes(b []byte, val interface{}) error {
 	if err := stream.Decode(val); err != nil {
 		return err
 	}
-	if r.Len() > 0 {
+	if len(b) > 0 {
 		return ErrMoreThanOneValue
 	}
 	return nil
@@ -146,9 +150,10 @@ func addErrorContext(err error, ctx string) error {
 var (
 	decoderInterface = reflect.TypeOf(new(Decoder)).Elem()
 	bigInt = reflect.TypeOf(big.Int{})
+	u256Int = reflect.TypeOf(uint256.Int{})
 )
 
-func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {
+func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) {
 	kind := typ.Kind()
 	switch {
 	case typ == rawValueType:
@@ -157,6 +162,10 @@ func makeDecoder(typ reflect.Type, tags tags) (dec decoder, err error) {
 		return decodeBigInt, nil
 	case typ.AssignableTo(bigInt):
 		return decodeBigIntNoPtr, nil
+	case typ == reflect.PtrTo(u256Int):
+		return decodeU256, nil
+	case typ == u256Int:
+		return decodeU256NoPtr, nil
 	case kind == reflect.Ptr:
 		return makePtrDecoder(typ, tags)
 	case reflect.PtrTo(typ).Implements(decoderInterface):
@@ -220,55 +229,38 @@ func decodeBigIntNoPtr(s *Stream, val reflect.Value) error {
 }
 
 func decodeBigInt(s *Stream, val reflect.Value) error {
-	var buffer []byte
-	kind, size, err := s.Kind()
-	switch {
-	case err != nil:
-		return wrapStreamError(err, val.Type())
-	case kind == List:
-		return wrapStreamError(ErrExpectedString, val.Type())
-	case kind == Byte:
-		buffer = s.uintbuf[:1]
-		buffer[0] = s.byteval
-		s.kind = -1 // re-arm Kind
-	case size == 0:
-		// Avoid zero-length read.
-		s.kind = -1
-	case size <= uint64(len(s.uintbuf)):
-		// For integers smaller than s.uintbuf, allocating a buffer
-		// can be avoided.
-		buffer = s.uintbuf[:size]
-		if err := s.readFull(buffer); err != nil {
-			return wrapStreamError(err, val.Type())
-		}
-		// Reject inputs where single byte encoding should have been used.
-		if size == 1 && buffer[0] < 128 {
-			return wrapStreamError(ErrCanonSize, val.Type())
-		}
-	default:
-		// For large integers, a temporary buffer is needed.
-		buffer = make([]byte, size)
-		if err := s.readFull(buffer); err != nil {
-			return wrapStreamError(err, val.Type())
-		}
-	}
-
-	// Reject leading zero bytes.
-	if len(buffer) > 0 && buffer[0] == 0 {
-		return wrapStreamError(ErrCanonInt, val.Type())
-	}
-
-	// Set the integer bytes.
 	i := val.Interface().(*big.Int)
 	if i == nil {
 		i = new(big.Int)
 		val.Set(reflect.ValueOf(i))
 	}
-	i.SetBytes(buffer)
+
+	err := s.decodeBigInt(i)
+	if err != nil {
+		return wrapStreamError(err, val.Type())
+	}
 	return nil
 }
 
-func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
+func decodeU256NoPtr(s *Stream, val reflect.Value) error {
+	return decodeU256(s, val.Addr())
+}
+
+func decodeU256(s *Stream, val reflect.Value) error {
+	i := val.Interface().(*uint256.Int)
+	if i == nil {
+		i = new(uint256.Int)
+		val.Set(reflect.ValueOf(i))
+	}
+
+	err := s.ReadUint256(i)
+	if err != nil {
+		return wrapStreamError(err, val.Type())
+	}
+	return nil
+}
+
+func makeListDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) {
 	etype := typ.Elem()
 	if etype.Kind() == reflect.Uint8 && !reflect.PtrTo(etype).Implements(decoderInterface) {
 		if typ.Kind() == reflect.Array {
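With the decodeU256 path added above, this rlp package can decode directly into uint256.Int values. A hedged usage sketch follows; the rlp import path is inferred from the internal/rlpstruct import that appears in this commit, and the three-byte input is the canonical RLP encoding of the integer 1024.

package main

import (
	"fmt"

	"github.com/holiman/uint256"
	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
)

func main() {
	// 0x82 0x04 0x00 encodes the two-byte string 0x0400, i.e. the integer 1024.
	// DecodeBytes routes *uint256.Int through the new decodeU256 decoder.
	var v uint256.Int
	if err := rlp.DecodeBytes([]byte{0x82, 0x04, 0x00}, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Uint64()) // 1024
}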
@@ -276,7 +268,7 @@ func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
 		}
 		return decodeByteSlice, nil
 	}
-	etypeinfo := theTC.infoWhileGenerating(etype, tags{})
+	etypeinfo := theTC.infoWhileGenerating(etype, rlpstruct.Tags{})
 	if etypeinfo.decoderErr != nil {
 		return nil, etypeinfo.decoderErr
 	}
|
|||||||
dec = func(s *Stream, val reflect.Value) error {
|
dec = func(s *Stream, val reflect.Value) error {
|
||||||
return decodeListArray(s, val, etypeinfo.decoder)
|
return decodeListArray(s, val, etypeinfo.decoder)
|
||||||
}
|
}
|
||||||
case tag.tail:
|
case tag.Tail:
|
||||||
// A slice with "tail" tag can occur as the last field
|
// A slice with "tail" tag can occur as the last field
|
||||||
// of a struct and is supposed to swallow all remaining
|
// of a struct and is supposed to swallow all remaining
|
||||||
// list elements. The struct decoder already called s.List,
|
// list elements. The struct decoder already called s.List,
|
||||||
@ -379,7 +371,7 @@ func decodeByteArray(s *Stream, val reflect.Value) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
slice := byteArrayBytes(val)
|
slice := byteArrayBytes(val, val.Len())
|
||||||
switch kind {
|
switch kind {
|
||||||
case Byte:
|
case Byte:
|
||||||
if len(slice) == 0 {
|
if len(slice) == 0 {
|
||||||
@@ -451,16 +443,16 @@ func zeroFields(structval reflect.Value, fields []field) {
 }
 
 // makePtrDecoder creates a decoder that decodes into the pointer's element type.
-func makePtrDecoder(typ reflect.Type, tag tags) (decoder, error) {
+func makePtrDecoder(typ reflect.Type, tag rlpstruct.Tags) (decoder, error) {
 	etype := typ.Elem()
-	etypeinfo := theTC.infoWhileGenerating(etype, tags{})
+	etypeinfo := theTC.infoWhileGenerating(etype, rlpstruct.Tags{})
 	switch {
 	case etypeinfo.decoderErr != nil:
 		return nil, etypeinfo.decoderErr
-	case !tag.nilOK:
+	case !tag.NilOK:
 		return makeSimplePtrDecoder(etype, etypeinfo), nil
 	default:
-		return makeNilPtrDecoder(etype, etypeinfo, tag.nilKind), nil
+		return makeNilPtrDecoder(etype, etypeinfo, tag), nil
 	}
 }
|
|
||||||
@@ -481,9 +473,13 @@ func makeSimplePtrDecoder(etype reflect.Type, etypeinfo *typeinfo) decoder {
 // values are decoded into a value of the element type, just like makePtrDecoder does.
 //
 // This decoder is used for pointer-typed struct fields with struct tag "nil".
-func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, nilKind Kind) decoder {
+func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, ts rlpstruct.Tags) decoder {
 	typ := reflect.PtrTo(etype)
 	nilPtr := reflect.Zero(typ)
+
+	// Determine the value kind that results in nil pointer.
+	nilKind := typeNilKind(etype, ts)
+
 	return func(s *Stream, val reflect.Value) (err error) {
 		kind, size, err := s.Kind()
 		if err != nil {
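For orientation, a minimal sketch of how the "nil" struct tag handled by makeNilPtrDecoder behaves from the caller's side (illustrative, not part of this change; the plugeth-utils import path is assumed):

	package main

	import (
		"fmt"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	type StructWithNilField struct {
		Field *[3]byte `rlp:"nil"`
	}

	func main() {
		// 0xC180 is a list containing an empty string; with the "nil" tag the
		// empty value decodes to a nil pointer instead of a size error.
		var v StructWithNilField
		if err := rlp.DecodeBytes([]byte{0xC1, 0x80}, &v); err != nil {
			panic(err)
		}
		fmt.Println(v.Field == nil) // true
	}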
@@ -659,6 +655,37 @@ func (s *Stream) Bytes() ([]byte, error) {
 	}
 }
 
+// ReadBytes decodes the next RLP value and stores the result in b.
+// The value size must match len(b) exactly.
+func (s *Stream) ReadBytes(b []byte) error {
+	kind, size, err := s.Kind()
+	if err != nil {
+		return err
+	}
+	switch kind {
+	case Byte:
+		if len(b) != 1 {
+			return fmt.Errorf("input value has wrong size 1, want %d", len(b))
+		}
+		b[0] = s.byteval
+		s.kind = -1 // rearm Kind
+		return nil
+	case String:
+		if uint64(len(b)) != size {
+			return fmt.Errorf("input value has wrong size %d, want %d", size, len(b))
+		}
+		if err = s.readFull(b); err != nil {
+			return err
+		}
+		if size == 1 && b[0] < 128 {
+			return ErrCanonSize
+		}
+		return nil
+	default:
+		return ErrExpectedString
+	}
+}
+
 // Raw reads a raw encoded value including RLP type information.
 func (s *Stream) Raw() ([]byte, error) {
 	kind, size, err := s.Kind()
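For orientation, a minimal usage sketch of the Stream.ReadBytes helper added above (illustrative, not part of this change; the import path and sample bytes are assumptions):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	func main() {
		// 0x84 followed by four bytes is the RLP encoding of the string "abcd".
		s := rlp.NewStream(bytes.NewReader([]byte{0x84, 'a', 'b', 'c', 'd'}), 0)
		buf := make([]byte, 4)
		// ReadBytes requires the value size to match len(buf) exactly.
		if err := s.ReadBytes(buf); err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", buf) // abcd
	}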
@@ -687,10 +714,31 @@ func (s *Stream) Raw() ([]byte, error) {
 // Uint reads an RLP string of up to 8 bytes and returns its contents
 // as an unsigned integer. If the input does not contain an RLP string, the
 // returned error will be ErrExpectedString.
+//
+// Deprecated: use s.Uint64 instead.
 func (s *Stream) Uint() (uint64, error) {
 	return s.uint(64)
 }
 
+func (s *Stream) Uint64() (uint64, error) {
+	return s.uint(64)
+}
+
+func (s *Stream) Uint32() (uint32, error) {
+	i, err := s.uint(32)
+	return uint32(i), err
+}
+
+func (s *Stream) Uint16() (uint16, error) {
+	i, err := s.uint(16)
+	return uint16(i), err
+}
+
+func (s *Stream) Uint8() (uint8, error) {
+	i, err := s.uint(8)
+	return uint8(i), err
+}
+
 func (s *Stream) uint(maxbits int) (uint64, error) {
 	kind, size, err := s.Kind()
 	if err != nil {
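The sized helpers simply delegate to s.uint with the matching bit width; a short sketch of the behaviour (illustrative, assumed inputs):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	func main() {
		// 0x820400 is the RLP encoding of 1024, which fits the 16-bit helper.
		s := rlp.NewStream(bytes.NewReader([]byte{0x82, 0x04, 0x00}), 0)
		v, err := s.Uint16()
		fmt.Println(v, err) // 1024 <nil>
		// Reading the same value with s.Uint8() should instead report an
		// overflow, since the accepted size is derived from the bit width.
	}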
@@ -781,6 +829,104 @@ func (s *Stream) ListEnd() error {
 	return nil
 }
 
+// MoreDataInList reports whether the current list context contains
+// more data to be read.
+func (s *Stream) MoreDataInList() bool {
+	_, listLimit := s.listLimit()
+	return listLimit > 0
+}
+
+// BigInt decodes an arbitrary-size integer value.
+func (s *Stream) BigInt() (*big.Int, error) {
+	i := new(big.Int)
+	if err := s.decodeBigInt(i); err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+func (s *Stream) decodeBigInt(dst *big.Int) error {
+	var buffer []byte
+	kind, size, err := s.Kind()
+	switch {
+	case err != nil:
+		return err
+	case kind == List:
+		return ErrExpectedString
+	case kind == Byte:
+		buffer = s.uintbuf[:1]
+		buffer[0] = s.byteval
+		s.kind = -1 // re-arm Kind
+	case size == 0:
+		// Avoid zero-length read.
+		s.kind = -1
+	case size <= uint64(len(s.uintbuf)):
+		// For integers smaller than s.uintbuf, allocating a buffer
+		// can be avoided.
+		buffer = s.uintbuf[:size]
+		if err := s.readFull(buffer); err != nil {
+			return err
+		}
+		// Reject inputs where single byte encoding should have been used.
+		if size == 1 && buffer[0] < 128 {
+			return ErrCanonSize
+		}
+	default:
+		// For large integers, a temporary buffer is needed.
+		buffer = make([]byte, size)
+		if err := s.readFull(buffer); err != nil {
+			return err
+		}
+	}
+
+	// Reject leading zero bytes.
+	if len(buffer) > 0 && buffer[0] == 0 {
+		return ErrCanonInt
+	}
+	// Set the integer bytes.
+	dst.SetBytes(buffer)
+	return nil
+}
+
+// ReadUint256 decodes the next value as a uint256.
+func (s *Stream) ReadUint256(dst *uint256.Int) error {
+	var buffer []byte
+	kind, size, err := s.Kind()
+	switch {
+	case err != nil:
+		return err
+	case kind == List:
+		return ErrExpectedString
+	case kind == Byte:
+		buffer = s.uintbuf[:1]
+		buffer[0] = s.byteval
+		s.kind = -1 // re-arm Kind
+	case size == 0:
+		// Avoid zero-length read.
+		s.kind = -1
+	case size <= uint64(len(s.uintbuf)):
+		// All possible uint256 values fit into s.uintbuf.
+		buffer = s.uintbuf[:size]
+		if err := s.readFull(buffer); err != nil {
+			return err
+		}
+		// Reject inputs where single byte encoding should have been used.
+		if size == 1 && buffer[0] < 128 {
+			return ErrCanonSize
+		}
+	default:
+		return errUint256Large
+	}
+
+	// Reject leading zero bytes.
+	if len(buffer) > 0 && buffer[0] == 0 {
+		return ErrCanonInt
+	}
+	// Set the integer bytes.
+	dst.SetBytes(buffer)
+	return nil
+}
+
 // Decode decodes a value and stores the result in the value pointed
 // to by val. Please see the documentation for the Decode function
 // to learn about the decoding rules.
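Both decoders above enforce canonical RLP integer encoding (no leading zero bytes, no multi-byte form for values below 128). A small sketch, with assumed inputs:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	func main() {
		// 0x830F4240 is the canonical RLP encoding of 1000000.
		s := rlp.NewStream(bytes.NewReader([]byte{0x83, 0x0F, 0x42, 0x40}), 0)
		i, err := s.BigInt()
		fmt.Println(i, err) // 1000000 <nil>

		// A leading zero byte is rejected as non-canonical.
		s = rlp.NewStream(bytes.NewReader([]byte{0x82, 0x00, 0x01}), 0)
		_, err = s.BigInt()
		fmt.Println(err != nil) // true
	}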
@@ -1036,3 +1182,23 @@ func (s *Stream) listLimit() (inList bool, limit uint64) {
 	}
 	return true, s.stack[len(s.stack)-1]
 }
+
+type sliceReader []byte
+
+func (sr *sliceReader) Read(b []byte) (int, error) {
+	if len(*sr) == 0 {
+		return 0, io.EOF
+	}
+	n := copy(b, *sr)
+	*sr = (*sr)[n:]
+	return n, nil
+}
+
+func (sr *sliceReader) ReadByte() (byte, error) {
+	if len(*sr) == 0 {
+		return 0, io.EOF
+	}
+	b := (*sr)[0]
+	*sr = (*sr)[1:]
+	return b, nil
+}
File diff suppressed because it is too large
@@ -27,8 +27,7 @@ value zero equivalent to the empty string).
 RLP values are distinguished by a type tag. The type tag precedes the value in the input
 stream and defines the size and kind of the bytes that follow.
 
-Encoding Rules
+# Encoding Rules
 
 Package rlp uses reflection and encodes RLP based on the Go type of the value.
 
@@ -37,7 +36,7 @@ call EncodeRLP on nil pointer values.
 
 To encode a pointer, the value being pointed to is encoded. A nil pointer to a struct
 type, slice or array always encodes as an empty RLP list unless the slice or array has
-elememt type byte. A nil pointer to any other value encodes as the empty string.
+element type byte. A nil pointer to any other value encodes as the empty string.
 
 Struct values are encoded as an RLP list of all their encoded public fields. Recursive
 struct types are supported.
@@ -58,8 +57,7 @@ An interface value encodes as the value contained in the interface.
 
 Floating point numbers, maps, channels and functions are not supported.
 
-Decoding Rules
+# Decoding Rules
 
 Decoding uses the following type-dependent rules:
 
@@ -93,30 +91,29 @@ or one (true).
 
 To decode into an interface value, one of these types is stored in the value:
 
 	[]interface{}, for RLP lists
 	[]byte, for RLP strings
 
 Non-empty interface types are not supported when decoding.
 Signed integers, floating point numbers, maps, channels and functions cannot be decoded into.
 
-Struct Tags
+# Struct Tags
 
 As with other encoding packages, the "-" tag ignores fields.
 
 	type StructWithIgnoredField struct{
 		Ignored uint `rlp:"-"`
 		Field   uint
 	}
 
 Go struct values encode/decode as RLP lists. There are two ways of influencing the mapping
 of fields to list elements. The "tail" tag, which may only be used on the last exported
 struct field, allows slurping up any excess list elements into a slice.
 
 	type StructWithTail struct{
 		Field   uint
 		Tail    []string `rlp:"tail"`
 	}
 
 The "optional" tag says that the field may be omitted if it is zero-valued. If this tag is
 used on a struct field, all subsequent public fields must also be declared optional.
@@ -128,11 +125,11 @@ When decoding into a struct, optional fields may be omitted from the end of the
 list. For the example below, this means input lists of one, two, or three elements are
 accepted.
 
 	type StructWithOptionalFields struct{
 		Required  uint
 		Optional1 uint `rlp:"optional"`
 		Optional2 uint `rlp:"optional"`
 	}
 
 The "nil", "nilList" and "nilString" tags apply to pointer-typed fields only, and change
 the decoding rules for the field type. For regular pointer fields without the "nil" tag,
@@ -140,9 +137,9 @@ input values must always match the required input length exactly and the decoder
 produce nil values. When the "nil" tag is set, input values of size zero decode as a nil
 pointer. This is especially useful for recursive types.
 
 	type StructWithNilField struct {
 		Field *[3]byte `rlp:"nil"`
 	}
 
 In the example above, Field allows two possible input sizes. For input 0xC180 (a list
 containing an empty string) Field is set to nil after decoding. For input 0xC483000000 (a
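As a concrete illustration of the "optional" tag described above (a sketch, not part of the change; the import path is assumed):

	package main

	import (
		"fmt"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	type StructWithOptionalFields struct {
		Required  uint
		Optional1 uint `rlp:"optional"`
		Optional2 uint `rlp:"optional"`
	}

	func main() {
		// Trailing zero-valued optional fields are dropped from the encoded list.
		enc, _ := rlp.EncodeToBytes(StructWithOptionalFields{Required: 1})
		fmt.Printf("%X\n", enc) // C101

		// A one-element input list decodes back with the optional fields left at zero.
		var dec StructWithOptionalFields
		if err := rlp.DecodeBytes(enc, &dec); err != nil {
			panic(err)
		}
		fmt.Println(dec) // {1 0 0}
	}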
@@ -20,7 +20,7 @@ import (
 	"encoding/binary"
 	"io"
 	"math/big"
-	// "reflect"
+	"reflect"
 	"sync"
 
 	"github.com/holiman/uint256"
@ -56,18 +56,18 @@ func (buf *encBuffer) size() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// makeBytes creates the encoder output.
|
// makeBytes creates the encoder output.
|
||||||
func (w *encBuffer) makeBytes() []byte {
|
func (buf *encBuffer) makeBytes() []byte {
|
||||||
out := make([]byte, w.size())
|
out := make([]byte, buf.size())
|
||||||
w.copyTo(out)
|
buf.copyTo(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *encBuffer) copyTo(dst []byte) {
|
func (buf *encBuffer) copyTo(dst []byte) {
|
||||||
strpos := 0
|
strpos := 0
|
||||||
pos := 0
|
pos := 0
|
||||||
for _, head := range w.lheads {
|
for _, head := range buf.lheads {
|
||||||
// write string data before header
|
// write string data before header
|
||||||
n := copy(dst[pos:], w.str[strpos:head.offset])
|
n := copy(dst[pos:], buf.str[strpos:head.offset])
|
||||||
pos += n
|
pos += n
|
||||||
strpos += n
|
strpos += n
|
||||||
// write the header
|
// write the header
|
||||||
@ -75,7 +75,7 @@ func (w *encBuffer) copyTo(dst []byte) {
|
|||||||
pos += len(enc)
|
pos += len(enc)
|
||||||
}
|
}
|
||||||
// copy string data after the last list header
|
// copy string data after the last list header
|
||||||
copy(dst[pos:], w.str[strpos:])
|
copy(dst[pos:], buf.str[strpos:])
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeTo writes the encoder output to w.
|
// writeTo writes the encoder output to w.
|
||||||
@ -145,38 +145,38 @@ func (buf *encBuffer) writeString(s string) {
|
|||||||
buf.writeBytes([]byte(s))
|
buf.writeBytes([]byte(s))
|
||||||
}
|
}
|
||||||
|
|
||||||
// // wordBytes is the number of bytes in a big.Word
|
// wordBytes is the number of bytes in a big.Word
|
||||||
// const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
|
const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
|
||||||
|
|
||||||
// writeBigInt writes i as an integer.
|
// writeBigInt writes i as an integer.
|
||||||
func (w *encBuffer) writeBigInt(i *big.Int) {
|
func (buf *encBuffer) writeBigInt(i *big.Int) {
|
||||||
bitlen := i.BitLen()
|
bitlen := i.BitLen()
|
||||||
if bitlen <= 64 {
|
if bitlen <= 64 {
|
||||||
w.writeUint64(i.Uint64())
|
buf.writeUint64(i.Uint64())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Integer is larger than 64 bits, encode from i.Bits().
|
// Integer is larger than 64 bits, encode from i.Bits().
|
||||||
// The minimal byte length is bitlen rounded up to the next
|
// The minimal byte length is bitlen rounded up to the next
|
||||||
// multiple of 8, divided by 8.
|
// multiple of 8, divided by 8.
|
||||||
length := ((bitlen + 7) & -8) >> 3
|
length := ((bitlen + 7) & -8) >> 3
|
||||||
w.encodeStringHeader(length)
|
buf.encodeStringHeader(length)
|
||||||
w.str = append(w.str, make([]byte, length)...)
|
buf.str = append(buf.str, make([]byte, length)...)
|
||||||
index := length
|
index := length
|
||||||
buf := w.str[len(w.str)-length:]
|
bytesBuf := buf.str[len(buf.str)-length:]
|
||||||
for _, d := range i.Bits() {
|
for _, d := range i.Bits() {
|
||||||
for j := 0; j < wordBytes && index > 0; j++ {
|
for j := 0; j < wordBytes && index > 0; j++ {
|
||||||
index--
|
index--
|
||||||
buf[index] = byte(d)
|
bytesBuf[index] = byte(d)
|
||||||
d >>= 8
|
d >>= 8
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeUint256 writes z as an integer.
|
// writeUint256 writes z as an integer.
|
||||||
func (w *encBuffer) writeUint256(z *uint256.Int) {
|
func (buf *encBuffer) writeUint256(z *uint256.Int) {
|
||||||
bitlen := z.BitLen()
|
bitlen := z.BitLen()
|
||||||
if bitlen <= 64 {
|
if bitlen <= 64 {
|
||||||
w.writeUint64(z.Uint64())
|
buf.writeUint64(z.Uint64())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
nBytes := byte((bitlen + 7) / 8)
|
nBytes := byte((bitlen + 7) / 8)
|
||||||
@ -186,7 +186,7 @@ func (w *encBuffer) writeUint256(z *uint256.Int) {
|
|||||||
binary.BigEndian.PutUint64(b[17:25], z[1])
|
binary.BigEndian.PutUint64(b[17:25], z[1])
|
||||||
binary.BigEndian.PutUint64(b[25:33], z[0])
|
binary.BigEndian.PutUint64(b[25:33], z[0])
|
||||||
b[32-nBytes] = 0x80 + nBytes
|
b[32-nBytes] = 0x80 + nBytes
|
||||||
w.str = append(w.str, b[32-nBytes:]...)
|
buf.str = append(buf.str, b[32-nBytes:]...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// list adds a new list header to the header stack. It returns the index of the header.
|
// list adds a new list header to the header stack. It returns the index of the header.
|
||||||
@@ -206,14 +206,14 @@ func (buf *encBuffer) listEnd(index int) {
 	}
 }
 
-// func (buf *encBuffer) encode(val interface{}) error {
-// 	rval := reflect.ValueOf(val)
-// 	writer, err := cachedWriter(rval.Type())
-// 	if err != nil {
-// 		return err
-// 	}
-// 	return writer(rval, buf)
-// }
+func (buf *encBuffer) encode(val interface{}) error {
+	rval := reflect.ValueOf(val)
+	writer, err := cachedWriter(rval.Type())
+	if err != nil {
+		return err
+	}
+	return writer(rval, buf)
+}
 
 func (buf *encBuffer) encodeStringHeader(size int) {
 	if size < 56 {
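encBuffer.encode is what ultimately runs when a type's EncodeRLP calls back into the package. A sketch of a custom Encoder that benefits from the buffer reuse (illustrative; the type name and values are assumptions):

	package main

	import (
		"fmt"
		"io"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	// MyPair encodes itself as a two-element list. When EncodeRLP calls
	// rlp.Encode with the writer it was given, the surrounding encBuffer is
	// reused instead of copied (see encBufferFromWriter in encode.go).
	type MyPair struct{ A, B uint }

	func (p *MyPair) EncodeRLP(w io.Writer) error {
		return rlp.Encode(w, []uint{p.A, p.B})
	}

	func main() {
		out, _ := rlp.EncodeToBytes(&MyPair{A: 1, B: 2})
		fmt.Printf("%X\n", out) // C20102
	}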
@ -225,72 +225,72 @@ func (buf *encBuffer) encodeStringHeader(size int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// // encReader is the io.Reader returned by EncodeToReader.
|
// encReader is the io.Reader returned by EncodeToReader.
|
||||||
// // It releases its encbuf at EOF.
|
// It releases its encbuf at EOF.
|
||||||
// type encReader struct {
|
type encReader struct {
|
||||||
// buf *encBuffer // the buffer we're reading from. this is nil when we're at EOF.
|
buf *encBuffer // the buffer we're reading from. this is nil when we're at EOF.
|
||||||
// lhpos int // index of list header that we're reading
|
lhpos int // index of list header that we're reading
|
||||||
// strpos int // current position in string buffer
|
strpos int // current position in string buffer
|
||||||
// piece []byte // next piece to be read
|
piece []byte // next piece to be read
|
||||||
// }
|
}
|
||||||
|
|
||||||
// func (r *encReader) Read(b []byte) (n int, err error) {
|
func (r *encReader) Read(b []byte) (n int, err error) {
|
||||||
// for {
|
for {
|
||||||
// if r.piece = r.next(); r.piece == nil {
|
if r.piece = r.next(); r.piece == nil {
|
||||||
// // Put the encode buffer back into the pool at EOF when it
|
// Put the encode buffer back into the pool at EOF when it
|
||||||
// // is first encountered. Subsequent calls still return EOF
|
// is first encountered. Subsequent calls still return EOF
|
||||||
// // as the error but the buffer is no longer valid.
|
// as the error but the buffer is no longer valid.
|
||||||
// if r.buf != nil {
|
if r.buf != nil {
|
||||||
// encBufferPool.Put(r.buf)
|
encBufferPool.Put(r.buf)
|
||||||
// r.buf = nil
|
r.buf = nil
|
||||||
// }
|
}
|
||||||
// return n, io.EOF
|
return n, io.EOF
|
||||||
// }
|
}
|
||||||
// nn := copy(b[n:], r.piece)
|
nn := copy(b[n:], r.piece)
|
||||||
// n += nn
|
n += nn
|
||||||
// if nn < len(r.piece) {
|
if nn < len(r.piece) {
|
||||||
// // piece didn't fit, see you next time.
|
// piece didn't fit, see you next time.
|
||||||
// r.piece = r.piece[nn:]
|
r.piece = r.piece[nn:]
|
||||||
// return n, nil
|
return n, nil
|
||||||
// }
|
}
|
||||||
// r.piece = nil
|
r.piece = nil
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
// // next returns the next piece of data to be read.
|
// next returns the next piece of data to be read.
|
||||||
// // it returns nil at EOF.
|
// it returns nil at EOF.
|
||||||
// func (r *encReader) next() []byte {
|
func (r *encReader) next() []byte {
|
||||||
// switch {
|
switch {
|
||||||
// case r.buf == nil:
|
case r.buf == nil:
|
||||||
// return nil
|
return nil
|
||||||
|
|
||||||
// case r.piece != nil:
|
case r.piece != nil:
|
||||||
// // There is still data available for reading.
|
// There is still data available for reading.
|
||||||
// return r.piece
|
return r.piece
|
||||||
|
|
||||||
// case r.lhpos < len(r.buf.lheads):
|
case r.lhpos < len(r.buf.lheads):
|
||||||
// // We're before the last list header.
|
// We're before the last list header.
|
||||||
// head := r.buf.lheads[r.lhpos]
|
head := r.buf.lheads[r.lhpos]
|
||||||
// sizebefore := head.offset - r.strpos
|
sizebefore := head.offset - r.strpos
|
||||||
// if sizebefore > 0 {
|
if sizebefore > 0 {
|
||||||
// // String data before header.
|
// String data before header.
|
||||||
// p := r.buf.str[r.strpos:head.offset]
|
p := r.buf.str[r.strpos:head.offset]
|
||||||
// r.strpos += sizebefore
|
r.strpos += sizebefore
|
||||||
// return p
|
return p
|
||||||
// }
|
}
|
||||||
// r.lhpos++
|
r.lhpos++
|
||||||
// return head.encode(r.buf.sizebuf[:])
|
return head.encode(r.buf.sizebuf[:])
|
||||||
|
|
||||||
// case r.strpos < len(r.buf.str):
|
case r.strpos < len(r.buf.str):
|
||||||
// // String data at the end, after all list headers.
|
// String data at the end, after all list headers.
|
||||||
// p := r.buf.str[r.strpos:]
|
p := r.buf.str[r.strpos:]
|
||||||
// r.strpos = len(r.buf.str)
|
r.strpos = len(r.buf.str)
|
||||||
// return p
|
return p
|
||||||
|
|
||||||
// default:
|
default:
|
||||||
// return nil
|
return nil
|
||||||
// }
|
}
|
||||||
// }
|
}
|
||||||
|
|
||||||
func encBufferFromWriter(w io.Writer) *encBuffer {
|
func encBufferFromWriter(w io.Writer) *encBuffer {
|
||||||
switch w := w.(type) {
|
switch w := w.(type) {
|
||||||
|
@@ -17,20 +17,28 @@
 package rlp
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"math/big"
 	"reflect"
-	"sync"
+
+	"github.com/openrelayxyz/plugeth-utils/restricted/rlp/internal/rlpstruct"
+
+	"github.com/holiman/uint256"
 )
 
 var (
 	// Common encoded values.
 	// These are useful when implementing EncodeRLP.
 
+	// EmptyString is the encoding of an empty string.
 	EmptyString = []byte{0x80}
-	EmptyList = []byte{0xC0}
+	// EmptyList is the encoding of an empty list.
+	EmptyList = []byte{0xC0}
 )
 
+var ErrNegativeBigInt = errors.New("rlp: cannot encode negative big.Int")
+
 // Encoder is implemented by types that require custom
 // encoding rules or want to encode private fields.
 type Encoder interface {
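With ErrNegativeBigInt exported, callers can check for the negative-integer case directly (a sketch; the import path is assumed):

	package main

	import (
		"errors"
		"fmt"
		"math/big"

		"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	)

	func main() {
		// Negative integers have no RLP encoding.
		_, err := rlp.EncodeToBytes(big.NewInt(-1))
		fmt.Println(errors.Is(err, rlp.ErrNegativeBigInt)) // expected: true
	}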
@@ -51,30 +59,29 @@ type Encoder interface {
 //
 // Please see package-level documentation of encoding rules.
 func Encode(w io.Writer, val interface{}) error {
-	if outer, ok := w.(*encbuf); ok {
-		// Encode was called by some type's EncodeRLP.
-		// Avoid copying by writing to the outer encbuf directly.
-		return outer.encode(val)
+	// Optimization: reuse *encBuffer when called by EncodeRLP.
+	if buf := encBufferFromWriter(w); buf != nil {
+		return buf.encode(val)
 	}
-	eb := encbufPool.Get().(*encbuf)
-	defer encbufPool.Put(eb)
-	eb.reset()
-	if err := eb.encode(val); err != nil {
+
+	buf := getEncBuffer()
+	defer encBufferPool.Put(buf)
+	if err := buf.encode(val); err != nil {
 		return err
 	}
-	return eb.toWriter(w)
+	return buf.writeTo(w)
 }
 
 // EncodeToBytes returns the RLP encoding of val.
 // Please see package-level documentation for the encoding rules.
 func EncodeToBytes(val interface{}) ([]byte, error) {
-	eb := encbufPool.Get().(*encbuf)
-	defer encbufPool.Put(eb)
-	eb.reset()
-	if err := eb.encode(val); err != nil {
+	buf := getEncBuffer()
+	defer encBufferPool.Put(buf)
+
+	if err := buf.encode(val); err != nil {
 		return nil, err
 	}
-	return eb.toBytes(), nil
+	return buf.makeBytes(), nil
 }
 
 // EncodeToReader returns a reader from which the RLP encoding of val
@@ -83,12 +90,15 @@ func EncodeToBytes(val interface{}) ([]byte, error) {
 //
 // Please see the documentation of Encode for the encoding rules.
 func EncodeToReader(val interface{}) (size int, r io.Reader, err error) {
-	eb := encbufPool.Get().(*encbuf)
-	eb.reset()
-	if err := eb.encode(val); err != nil {
+	buf := getEncBuffer()
+	if err := buf.encode(val); err != nil {
+		encBufferPool.Put(buf)
 		return 0, nil, err
 	}
-	return eb.size(), &encReader{buf: eb}, nil
+	// Note: can't put the reader back into the pool here
+	// because it is held by encReader. The reader puts it
+	// back when it has been fully consumed.
+	return buf.size(), &encReader{buf: buf}, nil
 }
 
 type listhead struct {
|
|||||||
return sizesize + 1
|
return sizesize + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
type encbuf struct {
|
|
||||||
str []byte // string data, contains everything except list headers
|
|
||||||
lheads []listhead // all list headers
|
|
||||||
lhsize int // sum of sizes of all encoded list headers
|
|
||||||
sizebuf [9]byte // auxiliary buffer for uint encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
// encbufs are pooled.
|
|
||||||
var encbufPool = sync.Pool{
|
|
||||||
New: func() interface{} { return new(encbuf) },
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) reset() {
|
|
||||||
w.lhsize = 0
|
|
||||||
w.str = w.str[:0]
|
|
||||||
w.lheads = w.lheads[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// encbuf implements io.Writer so it can be passed it into EncodeRLP.
|
|
||||||
func (w *encbuf) Write(b []byte) (int, error) {
|
|
||||||
w.str = append(w.str, b...)
|
|
||||||
return len(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) encode(val interface{}) error {
|
|
||||||
rval := reflect.ValueOf(val)
|
|
||||||
writer, err := cachedWriter(rval.Type())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return writer(rval, w)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) encodeStringHeader(size int) {
|
|
||||||
if size < 56 {
|
|
||||||
w.str = append(w.str, 0x80+byte(size))
|
|
||||||
} else {
|
|
||||||
sizesize := putint(w.sizebuf[1:], uint64(size))
|
|
||||||
w.sizebuf[0] = 0xB7 + byte(sizesize)
|
|
||||||
w.str = append(w.str, w.sizebuf[:sizesize+1]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) encodeString(b []byte) {
|
|
||||||
if len(b) == 1 && b[0] <= 0x7F {
|
|
||||||
// fits single byte, no string header
|
|
||||||
w.str = append(w.str, b[0])
|
|
||||||
} else {
|
|
||||||
w.encodeStringHeader(len(b))
|
|
||||||
w.str = append(w.str, b...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) encodeUint(i uint64) {
|
|
||||||
if i == 0 {
|
|
||||||
w.str = append(w.str, 0x80)
|
|
||||||
} else if i < 128 {
|
|
||||||
// fits single byte
|
|
||||||
w.str = append(w.str, byte(i))
|
|
||||||
} else {
|
|
||||||
s := putint(w.sizebuf[1:], i)
|
|
||||||
w.sizebuf[0] = 0x80 + byte(s)
|
|
||||||
w.str = append(w.str, w.sizebuf[:s+1]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// list adds a new list header to the header stack. It returns the index
|
|
||||||
// of the header. The caller must call listEnd with this index after encoding
|
|
||||||
// the content of the list.
|
|
||||||
func (w *encbuf) list() int {
|
|
||||||
w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize})
|
|
||||||
return len(w.lheads) - 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) listEnd(index int) {
|
|
||||||
lh := &w.lheads[index]
|
|
||||||
lh.size = w.size() - lh.offset - lh.size
|
|
||||||
if lh.size < 56 {
|
|
||||||
w.lhsize++ // length encoded into kind tag
|
|
||||||
} else {
|
|
||||||
w.lhsize += 1 + intsize(uint64(lh.size))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) size() int {
|
|
||||||
return len(w.str) + w.lhsize
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) toBytes() []byte {
|
|
||||||
out := make([]byte, w.size())
|
|
||||||
strpos := 0
|
|
||||||
pos := 0
|
|
||||||
for _, head := range w.lheads {
|
|
||||||
// write string data before header
|
|
||||||
n := copy(out[pos:], w.str[strpos:head.offset])
|
|
||||||
pos += n
|
|
||||||
strpos += n
|
|
||||||
// write the header
|
|
||||||
enc := head.encode(out[pos:])
|
|
||||||
pos += len(enc)
|
|
||||||
}
|
|
||||||
// copy string data after the last list header
|
|
||||||
copy(out[pos:], w.str[strpos:])
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *encbuf) toWriter(out io.Writer) (err error) {
|
|
||||||
strpos := 0
|
|
||||||
for _, head := range w.lheads {
|
|
||||||
// write string data before header
|
|
||||||
if head.offset-strpos > 0 {
|
|
||||||
n, err := out.Write(w.str[strpos:head.offset])
|
|
||||||
strpos += n
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// write the header
|
|
||||||
enc := head.encode(w.sizebuf[:])
|
|
||||||
if _, err = out.Write(enc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if strpos < len(w.str) {
|
|
||||||
// write string data after the last list header
|
|
||||||
_, err = out.Write(w.str[strpos:])
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// encReader is the io.Reader returned by EncodeToReader.
|
|
||||||
// It releases its encbuf at EOF.
|
|
||||||
type encReader struct {
|
|
||||||
buf *encbuf // the buffer we're reading from. this is nil when we're at EOF.
|
|
||||||
lhpos int // index of list header that we're reading
|
|
||||||
strpos int // current position in string buffer
|
|
||||||
piece []byte // next piece to be read
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *encReader) Read(b []byte) (n int, err error) {
|
|
||||||
for {
|
|
||||||
if r.piece = r.next(); r.piece == nil {
|
|
||||||
// Put the encode buffer back into the pool at EOF when it
|
|
||||||
// is first encountered. Subsequent calls still return EOF
|
|
||||||
// as the error but the buffer is no longer valid.
|
|
||||||
if r.buf != nil {
|
|
||||||
encbufPool.Put(r.buf)
|
|
||||||
r.buf = nil
|
|
||||||
}
|
|
||||||
return n, io.EOF
|
|
||||||
}
|
|
||||||
nn := copy(b[n:], r.piece)
|
|
||||||
n += nn
|
|
||||||
if nn < len(r.piece) {
|
|
||||||
// piece didn't fit, see you next time.
|
|
||||||
r.piece = r.piece[nn:]
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
r.piece = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// next returns the next piece of data to be read.
|
|
||||||
// it returns nil at EOF.
|
|
||||||
func (r *encReader) next() []byte {
|
|
||||||
switch {
|
|
||||||
case r.buf == nil:
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case r.piece != nil:
|
|
||||||
// There is still data available for reading.
|
|
||||||
return r.piece
|
|
||||||
|
|
||||||
case r.lhpos < len(r.buf.lheads):
|
|
||||||
// We're before the last list header.
|
|
||||||
head := r.buf.lheads[r.lhpos]
|
|
||||||
sizebefore := head.offset - r.strpos
|
|
||||||
if sizebefore > 0 {
|
|
||||||
// String data before header.
|
|
||||||
p := r.buf.str[r.strpos:head.offset]
|
|
||||||
r.strpos += sizebefore
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
r.lhpos++
|
|
||||||
return head.encode(r.buf.sizebuf[:])
|
|
||||||
|
|
||||||
case r.strpos < len(r.buf.str):
|
|
||||||
// String data at the end, after all list headers.
|
|
||||||
p := r.buf.str[r.strpos:]
|
|
||||||
r.strpos = len(r.buf.str)
|
|
||||||
return p
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
|
var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
|
||||||
|
|
||||||
// makeWriter creates a writer function for the given type.
|
// makeWriter creates a writer function for the given type.
|
||||||
func makeWriter(typ reflect.Type, ts tags) (writer, error) {
|
func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
|
||||||
kind := typ.Kind()
|
kind := typ.Kind()
|
||||||
switch {
|
switch {
|
||||||
case typ == rawValueType:
|
case typ == rawValueType:
|
||||||
@ -332,6 +145,10 @@ func makeWriter(typ reflect.Type, ts tags) (writer, error) {
|
|||||||
return writeBigIntPtr, nil
|
return writeBigIntPtr, nil
|
||||||
case typ.AssignableTo(bigInt):
|
case typ.AssignableTo(bigInt):
|
||||||
return writeBigIntNoPtr, nil
|
return writeBigIntNoPtr, nil
|
||||||
|
case typ == reflect.PtrTo(u256Int):
|
||||||
|
return writeU256IntPtr, nil
|
||||||
|
case typ == u256Int:
|
||||||
|
return writeU256IntNoPtr, nil
|
||||||
case kind == reflect.Ptr:
|
case kind == reflect.Ptr:
|
||||||
return makePtrWriter(typ, ts)
|
return makePtrWriter(typ, ts)
|
||||||
case reflect.PtrTo(typ).Implements(encoderInterface):
|
case reflect.PtrTo(typ).Implements(encoderInterface):
|
||||||
@ -357,71 +174,61 @@ func makeWriter(typ reflect.Type, ts tags) (writer, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeRawValue(val reflect.Value, w *encbuf) error {
|
func writeRawValue(val reflect.Value, w *encBuffer) error {
|
||||||
w.str = append(w.str, val.Bytes()...)
|
w.str = append(w.str, val.Bytes()...)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeUint(val reflect.Value, w *encbuf) error {
|
func writeUint(val reflect.Value, w *encBuffer) error {
|
||||||
w.encodeUint(val.Uint())
|
w.writeUint64(val.Uint())
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeBool(val reflect.Value, w *encbuf) error {
|
func writeBool(val reflect.Value, w *encBuffer) error {
|
||||||
if val.Bool() {
|
w.writeBool(val.Bool())
|
||||||
w.str = append(w.str, 0x01)
|
|
||||||
} else {
|
|
||||||
w.str = append(w.str, 0x80)
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeBigIntPtr(val reflect.Value, w *encbuf) error {
|
func writeBigIntPtr(val reflect.Value, w *encBuffer) error {
|
||||||
ptr := val.Interface().(*big.Int)
|
ptr := val.Interface().(*big.Int)
|
||||||
if ptr == nil {
|
if ptr == nil {
|
||||||
w.str = append(w.str, 0x80)
|
w.str = append(w.str, 0x80)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return writeBigInt(ptr, w)
|
if ptr.Sign() == -1 {
|
||||||
}
|
return ErrNegativeBigInt
|
||||||
|
|
||||||
func writeBigIntNoPtr(val reflect.Value, w *encbuf) error {
|
|
||||||
i := val.Interface().(big.Int)
|
|
||||||
return writeBigInt(&i, w)
|
|
||||||
}
|
|
||||||
|
|
||||||
// wordBytes is the number of bytes in a big.Word
|
|
||||||
const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
|
|
||||||
|
|
||||||
func writeBigInt(i *big.Int, w *encbuf) error {
|
|
||||||
if i.Sign() == -1 {
|
|
||||||
return fmt.Errorf("rlp: cannot encode negative *big.Int")
|
|
||||||
}
|
|
||||||
bitlen := i.BitLen()
|
|
||||||
if bitlen <= 64 {
|
|
||||||
w.encodeUint(i.Uint64())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Integer is larger than 64 bits, encode from i.Bits().
|
|
||||||
// The minimal byte length is bitlen rounded up to the next
|
|
||||||
// multiple of 8, divided by 8.
|
|
||||||
length := ((bitlen + 7) & -8) >> 3
|
|
||||||
w.encodeStringHeader(length)
|
|
||||||
w.str = append(w.str, make([]byte, length)...)
|
|
||||||
index := length
|
|
||||||
buf := w.str[len(w.str)-length:]
|
|
||||||
for _, d := range i.Bits() {
|
|
||||||
for j := 0; j < wordBytes && index > 0; j++ {
|
|
||||||
index--
|
|
||||||
buf[index] = byte(d)
|
|
||||||
d >>= 8
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
w.writeBigInt(ptr)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeBytes(val reflect.Value, w *encbuf) error {
|
func writeBigIntNoPtr(val reflect.Value, w *encBuffer) error {
|
||||||
w.encodeString(val.Bytes())
|
i := val.Interface().(big.Int)
|
||||||
|
if i.Sign() == -1 {
|
||||||
|
return ErrNegativeBigInt
|
||||||
|
}
|
||||||
|
w.writeBigInt(&i)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeU256IntPtr(val reflect.Value, w *encBuffer) error {
|
||||||
|
ptr := val.Interface().(*uint256.Int)
|
||||||
|
if ptr == nil {
|
||||||
|
w.str = append(w.str, 0x80)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
w.writeUint256(ptr)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeU256IntNoPtr(val reflect.Value, w *encBuffer) error {
|
||||||
|
i := val.Interface().(uint256.Int)
|
||||||
|
w.writeUint256(&i)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeBytes(val reflect.Value, w *encBuffer) error {
|
||||||
|
w.writeBytes(val.Bytes())
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -432,16 +239,29 @@ func makeByteArrayWriter(typ reflect.Type) writer {
|
|||||||
case 1:
|
case 1:
|
||||||
return writeLengthOneByteArray
|
return writeLengthOneByteArray
|
||||||
default:
|
default:
|
||||||
return writeByteArray
|
length := typ.Len()
|
||||||
|
return func(val reflect.Value, w *encBuffer) error {
|
||||||
|
if !val.CanAddr() {
|
||||||
|
// Getting the byte slice of val requires it to be addressable. Make it
|
||||||
|
// addressable by copying.
|
||||||
|
copy := reflect.New(val.Type()).Elem()
|
||||||
|
copy.Set(val)
|
||||||
|
val = copy
|
||||||
|
}
|
||||||
|
slice := byteArrayBytes(val, length)
|
||||||
|
w.encodeStringHeader(len(slice))
|
||||||
|
w.str = append(w.str, slice...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeLengthZeroByteArray(val reflect.Value, w *encbuf) error {
|
func writeLengthZeroByteArray(val reflect.Value, w *encBuffer) error {
|
||||||
w.str = append(w.str, 0x80)
|
w.str = append(w.str, 0x80)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
|
func writeLengthOneByteArray(val reflect.Value, w *encBuffer) error {
|
||||||
b := byte(val.Index(0).Uint())
|
b := byte(val.Index(0).Uint())
|
||||||
if b <= 0x7f {
|
if b <= 0x7f {
|
||||||
w.str = append(w.str, b)
|
w.str = append(w.str, b)
|
||||||
@ -451,22 +271,7 @@ func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeByteArray(val reflect.Value, w *encbuf) error {
|
func writeString(val reflect.Value, w *encBuffer) error {
|
||||||
if !val.CanAddr() {
|
|
||||||
// Getting the byte slice of val requires it to be addressable. Make it
|
|
||||||
// addressable by copying.
|
|
||||||
copy := reflect.New(val.Type()).Elem()
|
|
||||||
copy.Set(val)
|
|
||||||
val = copy
|
|
||||||
}
|
|
||||||
|
|
||||||
slice := byteArrayBytes(val)
|
|
||||||
w.encodeStringHeader(len(slice))
|
|
||||||
w.str = append(w.str, slice...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeString(val reflect.Value, w *encbuf) error {
|
|
||||||
s := val.String()
|
s := val.String()
|
||||||
if len(s) == 1 && s[0] <= 0x7f {
|
if len(s) == 1 && s[0] <= 0x7f {
|
||||||
// fits single byte, no string header
|
// fits single byte, no string header
|
||||||
@ -478,7 +283,7 @@ func writeString(val reflect.Value, w *encbuf) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeInterface(val reflect.Value, w *encbuf) error {
|
func writeInterface(val reflect.Value, w *encBuffer) error {
|
||||||
if val.IsNil() {
|
if val.IsNil() {
|
||||||
// Write empty list. This is consistent with the previous RLP
|
// Write empty list. This is consistent with the previous RLP
|
||||||
// encoder that we had and should therefore avoid any
|
// encoder that we had and should therefore avoid any
|
||||||
@ -494,24 +299,44 @@ func writeInterface(val reflect.Value, w *encbuf) error {
|
|||||||
return writer(eval, w)
|
return writer(eval, w)
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
|
func makeSliceWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
|
||||||
etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
|
etypeinfo := theTC.infoWhileGenerating(typ.Elem(), rlpstruct.Tags{})
|
||||||
if etypeinfo.writerErr != nil {
|
if etypeinfo.writerErr != nil {
|
||||||
return nil, etypeinfo.writerErr
|
return nil, etypeinfo.writerErr
|
||||||
}
|
}
|
||||||
writer := func(val reflect.Value, w *encbuf) error {
|
|
||||||
if !ts.tail {
|
var wfn writer
|
||||||
defer w.listEnd(w.list())
|
if ts.Tail {
|
||||||
}
|
// This is for struct tail slices.
|
||||||
vlen := val.Len()
|
// w.list is not called for them.
|
||||||
for i := 0; i < vlen; i++ {
|
wfn = func(val reflect.Value, w *encBuffer) error {
|
||||||
if err := etypeinfo.writer(val.Index(i), w); err != nil {
|
vlen := val.Len()
|
||||||
return err
|
for i := 0; i < vlen; i++ {
|
||||||
|
if err := etypeinfo.writer(val.Index(i), w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// This is for regular slices and arrays.
|
||||||
|
wfn = func(val reflect.Value, w *encBuffer) error {
|
||||||
|
vlen := val.Len()
|
||||||
|
if vlen == 0 {
|
||||||
|
w.str = append(w.str, 0xC0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
listOffset := w.list()
|
||||||
|
for i := 0; i < vlen; i++ {
|
||||||
|
if err := etypeinfo.writer(val.Index(i), w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.listEnd(listOffset)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
return writer, nil
|
return wfn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeStructWriter(typ reflect.Type) (writer, error) {
|
func makeStructWriter(typ reflect.Type) (writer, error) {
|
||||||
@ -529,7 +354,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
|
|||||||
firstOptionalField := firstOptionalField(fields)
|
firstOptionalField := firstOptionalField(fields)
|
||||||
if firstOptionalField == len(fields) {
|
if firstOptionalField == len(fields) {
|
||||||
// This is the writer function for structs without any optional fields.
|
// This is the writer function for structs without any optional fields.
|
||||||
writer = func(val reflect.Value, w *encbuf) error {
|
writer = func(val reflect.Value, w *encBuffer) error {
|
||||||
lh := w.list()
|
lh := w.list()
|
||||||
for _, f := range fields {
|
for _, f := range fields {
|
||||||
if err := f.info.writer(val.Field(f.index), w); err != nil {
|
if err := f.info.writer(val.Field(f.index), w); err != nil {
|
||||||
@ -542,7 +367,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
|
|||||||
} else {
|
} else {
|
||||||
// If there are any "optional" fields, the writer needs to perform additional
|
// If there are any "optional" fields, the writer needs to perform additional
|
||||||
// checks to determine the output list length.
|
// checks to determine the output list length.
|
||||||
writer = func(val reflect.Value, w *encbuf) error {
|
writer = func(val reflect.Value, w *encBuffer) error {
|
||||||
lastField := len(fields) - 1
|
lastField := len(fields) - 1
|
||||||
for ; lastField >= firstOptionalField; lastField-- {
|
for ; lastField >= firstOptionalField; lastField-- {
|
||||||
if !val.Field(fields[lastField].index).IsZero() {
|
if !val.Field(fields[lastField].index).IsZero() {
|
||||||
@ -562,45 +387,39 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
|
|||||||
return writer, nil
|
return writer, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
|
func makePtrWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
|
||||||
etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
|
nilEncoding := byte(0xC0)
|
||||||
|
if typeNilKind(typ.Elem(), ts) == String {
|
||||||
|
nilEncoding = 0x80
|
||||||
|
}
|
||||||
|
|
||||||
|
etypeinfo := theTC.infoWhileGenerating(typ.Elem(), rlpstruct.Tags{})
|
||||||
if etypeinfo.writerErr != nil {
|
if etypeinfo.writerErr != nil {
|
||||||
return nil, etypeinfo.writerErr
|
return nil, etypeinfo.writerErr
|
||||||
}
|
}
|
||||||
// Determine how to encode nil pointers.
|
|
||||||
var nilKind Kind
|
|
||||||
if ts.nilOK {
|
|
||||||
nilKind = ts.nilKind // use struct tag if provided
|
|
||||||
} else {
|
|
||||||
nilKind = defaultNilKind(typ.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
writer := func(val reflect.Value, w *encbuf) error {
|
writer := func(val reflect.Value, w *encBuffer) error {
|
||||||
if val.IsNil() {
|
if ev := val.Elem(); ev.IsValid() {
|
||||||
if nilKind == String {
|
return etypeinfo.writer(ev, w)
|
||||||
w.str = append(w.str, 0x80)
|
|
||||||
} else {
|
|
||||||
w.listEnd(w.list())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
return etypeinfo.writer(val.Elem(), w)
|
w.str = append(w.str, nilEncoding)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
return writer, nil
|
return writer, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeEncoderWriter(typ reflect.Type) writer {
|
func makeEncoderWriter(typ reflect.Type) writer {
|
||||||
if typ.Implements(encoderInterface) {
|
if typ.Implements(encoderInterface) {
|
||||||
return func(val reflect.Value, w *encbuf) error {
|
return func(val reflect.Value, w *encBuffer) error {
|
||||||
return val.Interface().(Encoder).EncodeRLP(w)
|
return val.Interface().(Encoder).EncodeRLP(w)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
w := func(val reflect.Value, w *encbuf) error {
|
w := func(val reflect.Value, w *encBuffer) error {
|
||||||
if !val.CanAddr() {
|
if !val.CanAddr() {
|
||||||
// package json simply doesn't call MarshalJSON for this case, but encodes the
|
// package json simply doesn't call MarshalJSON for this case, but encodes the
|
||||||
// value as if it didn't implement the interface. We don't want to handle it that
|
// value as if it didn't implement the interface. We don't want to handle it that
|
||||||
// way.
|
// way.
|
||||||
return fmt.Errorf("rlp: unadressable value of type %v, EncodeRLP is pointer method", val.Type())
|
return fmt.Errorf("rlp: unaddressable value of type %v, EncodeRLP is pointer method", val.Type())
|
||||||
}
|
}
|
||||||
return val.Addr().Interface().(Encoder).EncodeRLP(w)
|
return val.Addr().Interface().(Encoder).EncodeRLP(w)
|
||||||
}
|
}
|
||||||
|
@ -1,540 +0,0 @@
|
|||||||
// Copyright 2014 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package rlp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/big"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
type testEncoder struct {
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *testEncoder) EncodeRLP(w io.Writer) error {
|
|
||||||
if e == nil {
|
|
||||||
panic("EncodeRLP called on nil value")
|
|
||||||
}
|
|
||||||
if e.err != nil {
|
|
||||||
return e.err
|
|
||||||
}
|
|
||||||
w.Write([]byte{0, 1, 0, 1, 0, 1, 0, 1, 0, 1})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type testEncoderValueMethod struct{}
|
|
||||||
|
|
||||||
func (e testEncoderValueMethod) EncodeRLP(w io.Writer) error {
|
|
||||||
w.Write([]byte{0xFA, 0xFE, 0xF0})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type byteEncoder byte
|
|
||||||
|
|
||||||
func (e byteEncoder) EncodeRLP(w io.Writer) error {
|
|
||||||
w.Write(EmptyList)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type undecodableEncoder func()
|
|
||||||
|
|
||||||
func (f undecodableEncoder) EncodeRLP(w io.Writer) error {
|
|
||||||
w.Write([]byte{0xF5, 0xF5, 0xF5})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type encodableReader struct {
|
|
||||||
A, B uint
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encodableReader) Read(b []byte) (int, error) {
|
|
||||||
panic("called")
|
|
||||||
}
|
|
||||||
|
|
||||||
type namedByteType byte
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ = Encoder(&testEncoder{})
|
|
||||||
_ = Encoder(byteEncoder(0))
|
|
||||||
|
|
||||||
reader io.Reader = &encodableReader{1, 2}
|
|
||||||
)
|
|
||||||
|
|
||||||
type encTest struct {
|
|
||||||
val interface{}
|
|
||||||
output, error string
|
|
||||||
}
|
|
||||||
|
|
||||||
var encTests = []encTest{
|
|
||||||
// booleans
|
|
||||||
{val: true, output: "01"},
|
|
||||||
{val: false, output: "80"},
|
|
||||||
|
|
||||||
// integers
|
|
||||||
{val: uint32(0), output: "80"},
|
|
||||||
{val: uint32(127), output: "7F"},
|
|
||||||
{val: uint32(128), output: "8180"},
|
|
||||||
{val: uint32(256), output: "820100"},
|
|
||||||
{val: uint32(1024), output: "820400"},
|
|
||||||
{val: uint32(0xFFFFFF), output: "83FFFFFF"},
|
|
||||||
{val: uint32(0xFFFFFFFF), output: "84FFFFFFFF"},
|
|
||||||
{val: uint64(0xFFFFFFFF), output: "84FFFFFFFF"},
|
|
||||||
{val: uint64(0xFFFFFFFFFF), output: "85FFFFFFFFFF"},
|
|
||||||
{val: uint64(0xFFFFFFFFFFFF), output: "86FFFFFFFFFFFF"},
|
|
||||||
{val: uint64(0xFFFFFFFFFFFFFF), output: "87FFFFFFFFFFFFFF"},
|
|
||||||
{val: uint64(0xFFFFFFFFFFFFFFFF), output: "88FFFFFFFFFFFFFFFF"},
|
|
||||||
|
|
||||||
// big integers (should match uint for small values)
|
|
||||||
{val: big.NewInt(0), output: "80"},
|
|
||||||
{val: big.NewInt(1), output: "01"},
|
|
||||||
{val: big.NewInt(127), output: "7F"},
|
|
||||||
{val: big.NewInt(128), output: "8180"},
|
|
||||||
{val: big.NewInt(256), output: "820100"},
|
|
||||||
{val: big.NewInt(1024), output: "820400"},
|
|
||||||
{val: big.NewInt(0xFFFFFF), output: "83FFFFFF"},
|
|
||||||
{val: big.NewInt(0xFFFFFFFF), output: "84FFFFFFFF"},
|
|
||||||
{val: big.NewInt(0xFFFFFFFFFF), output: "85FFFFFFFFFF"},
|
|
||||||
{val: big.NewInt(0xFFFFFFFFFFFF), output: "86FFFFFFFFFFFF"},
|
|
||||||
{val: big.NewInt(0xFFFFFFFFFFFFFF), output: "87FFFFFFFFFFFFFF"},
|
|
||||||
{
|
|
||||||
val: big.NewInt(0).SetBytes(unhex("102030405060708090A0B0C0D0E0F2")),
|
|
||||||
output: "8F102030405060708090A0B0C0D0E0F2",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: big.NewInt(0).SetBytes(unhex("0100020003000400050006000700080009000A000B000C000D000E01")),
|
|
||||||
output: "9C0100020003000400050006000700080009000A000B000C000D000E01",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: big.NewInt(0).SetBytes(unhex("010000000000000000000000000000000000000000000000000000000000000000")),
|
|
||||||
output: "A1010000000000000000000000000000000000000000000000000000000000000000",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: veryBigInt,
|
|
||||||
output: "89FFFFFFFFFFFFFFFFFF",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: veryVeryBigInt,
|
|
||||||
output: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001",
|
|
||||||
},
|
|
||||||
|
|
||||||
// non-pointer big.Int
|
|
||||||
{val: *big.NewInt(0), output: "80"},
|
|
||||||
{val: *big.NewInt(0xFFFFFF), output: "83FFFFFF"},
|
|
||||||
|
|
||||||
// negative ints are not supported
|
|
||||||
{val: big.NewInt(-1), error: "rlp: cannot encode negative *big.Int"},
|
|
||||||
|
|
||||||
// byte arrays
|
|
||||||
{val: [0]byte{}, output: "80"},
|
|
||||||
{val: [1]byte{0}, output: "00"},
|
|
||||||
{val: [1]byte{1}, output: "01"},
|
|
||||||
{val: [1]byte{0x7F}, output: "7F"},
|
|
||||||
{val: [1]byte{0x80}, output: "8180"},
|
|
||||||
{val: [1]byte{0xFF}, output: "81FF"},
|
|
||||||
{val: [3]byte{1, 2, 3}, output: "83010203"},
|
|
||||||
{val: [57]byte{1, 2, 3}, output: "B839010203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
|
|
||||||
|
|
||||||
// named byte type arrays
|
|
||||||
{val: [0]namedByteType{}, output: "80"},
|
|
||||||
{val: [1]namedByteType{0}, output: "00"},
|
|
||||||
{val: [1]namedByteType{1}, output: "01"},
|
|
||||||
{val: [1]namedByteType{0x7F}, output: "7F"},
|
|
||||||
{val: [1]namedByteType{0x80}, output: "8180"},
|
|
||||||
{val: [1]namedByteType{0xFF}, output: "81FF"},
|
|
||||||
{val: [3]namedByteType{1, 2, 3}, output: "83010203"},
|
|
||||||
{val: [57]namedByteType{1, 2, 3}, output: "B839010203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
|
|
||||||
|
|
||||||
// byte slices
|
|
||||||
{val: []byte{}, output: "80"},
|
|
||||||
{val: []byte{0}, output: "00"},
|
|
||||||
{val: []byte{0x7E}, output: "7E"},
|
|
||||||
{val: []byte{0x7F}, output: "7F"},
|
|
||||||
{val: []byte{0x80}, output: "8180"},
|
|
||||||
{val: []byte{1, 2, 3}, output: "83010203"},
|
|
||||||
|
|
||||||
// named byte type slices
|
|
||||||
{val: []namedByteType{}, output: "80"},
|
|
||||||
{val: []namedByteType{0}, output: "00"},
|
|
||||||
{val: []namedByteType{0x7E}, output: "7E"},
|
|
||||||
{val: []namedByteType{0x7F}, output: "7F"},
|
|
||||||
{val: []namedByteType{0x80}, output: "8180"},
|
|
||||||
{val: []namedByteType{1, 2, 3}, output: "83010203"},
|
|
||||||
|
|
||||||
// strings
|
|
||||||
{val: "", output: "80"},
|
|
||||||
{val: "\x7E", output: "7E"},
|
|
||||||
{val: "\x7F", output: "7F"},
|
|
||||||
{val: "\x80", output: "8180"},
|
|
||||||
{val: "dog", output: "83646F67"},
|
|
||||||
{
|
|
||||||
val: "Lorem ipsum dolor sit amet, consectetur adipisicing eli",
|
|
||||||
output: "B74C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E7365637465747572206164697069736963696E6720656C69",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: "Lorem ipsum dolor sit amet, consectetur adipisicing elit",
|
|
||||||
output: "B8384C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E7365637465747572206164697069736963696E6720656C6974",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur mauris magna, suscipit sed vehicula non, iaculis faucibus tortor. Proin suscipit ultricies malesuada. Duis tortor elit, dictum quis tristique eu, ultrices at risus. Morbi a est imperdiet mi ullamcorper aliquet suscipit nec lorem. Aenean quis leo mollis, vulputate elit varius, consequat enim. Nulla ultrices turpis justo, et posuere urna consectetur nec. Proin non convallis metus. Donec tempor ipsum in mauris congue sollicitudin. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse convallis sem vel massa faucibus, eget lacinia lacus tempor. Nulla quis ultricies purus. Proin auctor rhoncus nibh condimentum mollis. Aliquam consequat enim at metus luctus, a eleifend purus egestas. Curabitur at nibh metus. Nam bibendum, neque at auctor tristique, lorem libero aliquet arcu, non interdum tellus lectus sit amet eros. Cras rhoncus, metus ac ornare cursus, dolor justo ultrices metus, at ullamcorper volutpat",
|
|
||||||
output: "B904004C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E73656374657475722061646970697363696E6720656C69742E20437572616269747572206D6175726973206D61676E612C20737573636970697420736564207665686963756C61206E6F6E2C20696163756C697320666175636962757320746F72746F722E2050726F696E20737573636970697420756C74726963696573206D616C6573756164612E204475697320746F72746F7220656C69742C2064696374756D2071756973207472697374697175652065752C20756C7472696365732061742072697375732E204D6F72626920612065737420696D70657264696574206D6920756C6C616D636F7270657220616C6971756574207375736369706974206E6563206C6F72656D2E2041656E65616E2071756973206C656F206D6F6C6C69732C2076756C70757461746520656C6974207661726975732C20636F6E73657175617420656E696D2E204E756C6C6120756C74726963657320747572706973206A7573746F2C20657420706F73756572652075726E6120636F6E7365637465747572206E65632E2050726F696E206E6F6E20636F6E76616C6C6973206D657475732E20446F6E65632074656D706F7220697073756D20696E206D617572697320636F6E67756520736F6C6C696369747564696E2E20566573746962756C756D20616E746520697073756D207072696D697320696E206661756369627573206F726369206C756374757320657420756C74726963657320706F737565726520637562696C69612043757261653B2053757370656E646973736520636F6E76616C6C69732073656D2076656C206D617373612066617563696275732C2065676574206C6163696E6961206C616375732074656D706F722E204E756C6C61207175697320756C747269636965732070757275732E2050726F696E20617563746F722072686F6E637573206E69626820636F6E64696D656E74756D206D6F6C6C69732E20416C697175616D20636F6E73657175617420656E696D206174206D65747573206C75637475732C206120656C656966656E6420707572757320656765737461732E20437572616269747572206174206E696268206D657475732E204E616D20626962656E64756D2C206E6571756520617420617563746F72207472697374697175652C206C6F72656D206C696265726F20616C697175657420617263752C206E6F6E20696E74657264756D2074656C6C7573206C65637475732073697420616D65742065726F732E20437261732072686F6E6375732C206D65747573206163206F726E617265206375727375732C20646F6C6F72206A7573746F20756C747269636573206D657475732C20617420756C6C616D636F7270657220766F6C7574706174",
|
|
||||||
},
|
|
||||||
|
|
||||||
// slices
|
|
||||||
{val: []uint{}, output: "C0"},
|
|
||||||
{val: []uint{1, 2, 3}, output: "C3010203"},
|
|
||||||
{
|
|
||||||
// [ [], [[]], [ [], [[]] ] ]
|
|
||||||
val: []interface{}{[]interface{}{}, [][]interface{}{{}}, []interface{}{[]interface{}{}, [][]interface{}{{}}}},
|
|
||||||
output: "C7C0C1C0C3C0C1C0",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: []string{"aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo"},
|
|
||||||
output: "F83C836161618362626283636363836464648365656583666666836767678368686883696969836A6A6A836B6B6B836C6C6C836D6D6D836E6E6E836F6F6F",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: []interface{}{uint(1), uint(0xFFFFFF), []interface{}{[]uint{4, 5, 5}}, "abc"},
|
|
||||||
output: "CE0183FFFFFFC4C304050583616263",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: [][]string{
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
{"asdf", "qwer", "zxcv"},
|
|
||||||
},
|
|
||||||
output: "F90200CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376CF84617364668471776572847A786376",
|
|
||||||
},
|
|
||||||
|
|
||||||
// RawValue
|
|
||||||
{val: RawValue(unhex("01")), output: "01"},
|
|
||||||
{val: RawValue(unhex("82FFFF")), output: "82FFFF"},
|
|
||||||
{val: []RawValue{unhex("01"), unhex("02")}, output: "C20102"},
|
|
||||||
|
|
||||||
// structs
|
|
||||||
{val: simplestruct{}, output: "C28080"},
|
|
||||||
{val: simplestruct{A: 3, B: "foo"}, output: "C50383666F6F"},
|
|
||||||
{val: &recstruct{5, nil}, output: "C205C0"},
|
|
||||||
{val: &recstruct{5, &recstruct{4, &recstruct{3, nil}}}, output: "C605C404C203C0"},
|
|
||||||
{val: &intField{X: 3}, error: "rlp: type int is not RLP-serializable (struct field rlp.intField.X)"},
|
|
||||||
|
|
||||||
// struct tag "-"
|
|
||||||
{val: &ignoredField{A: 1, B: 2, C: 3}, output: "C20103"},
|
|
||||||
|
|
||||||
// struct tag "tail"
|
|
||||||
{val: &tailRaw{A: 1, Tail: []RawValue{unhex("02"), unhex("03")}}, output: "C3010203"},
|
|
||||||
{val: &tailRaw{A: 1, Tail: []RawValue{unhex("02")}}, output: "C20102"},
|
|
||||||
{val: &tailRaw{A: 1, Tail: []RawValue{}}, output: "C101"},
|
|
||||||
{val: &tailRaw{A: 1, Tail: nil}, output: "C101"},
|
|
||||||
|
|
||||||
// struct tag "optional"
|
|
||||||
{val: &optionalFields{}, output: "C180"},
|
|
||||||
{val: &optionalFields{A: 1}, output: "C101"},
|
|
||||||
{val: &optionalFields{A: 1, B: 2}, output: "C20102"},
|
|
||||||
{val: &optionalFields{A: 1, B: 2, C: 3}, output: "C3010203"},
|
|
||||||
{val: &optionalFields{A: 1, B: 0, C: 3}, output: "C3018003"},
|
|
||||||
{val: &optionalAndTailField{A: 1}, output: "C101"},
|
|
||||||
{val: &optionalAndTailField{A: 1, B: 2}, output: "C20102"},
|
|
||||||
{val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"},
|
|
||||||
{val: &optionalAndTailField{A: 1, Tail: []uint{5, 6}}, output: "C401800506"},
|
|
||||||
{val: &optionalBigIntField{A: 1}, output: "C101"},
|
|
||||||
{val: &optionalPtrField{A: 1}, output: "C101"},
|
|
||||||
{val: &optionalPtrFieldNil{A: 1}, output: "C101"},
|
|
||||||
|
|
||||||
// nil
|
|
||||||
{val: (*uint)(nil), output: "80"},
|
|
||||||
{val: (*string)(nil), output: "80"},
|
|
||||||
{val: (*[]byte)(nil), output: "80"},
|
|
||||||
{val: (*[10]byte)(nil), output: "80"},
|
|
||||||
{val: (*big.Int)(nil), output: "80"},
|
|
||||||
{val: (*[]string)(nil), output: "C0"},
|
|
||||||
{val: (*[10]string)(nil), output: "C0"},
|
|
||||||
{val: (*[]interface{})(nil), output: "C0"},
|
|
||||||
{val: (*[]struct{ uint })(nil), output: "C0"},
|
|
||||||
{val: (*interface{})(nil), output: "C0"},
|
|
||||||
|
|
||||||
// nil struct fields
|
|
||||||
{
|
|
||||||
val: struct {
|
|
||||||
X *[]byte
|
|
||||||
}{},
|
|
||||||
output: "C180",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: struct {
|
|
||||||
X *[2]byte
|
|
||||||
}{},
|
|
||||||
output: "C180",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: struct {
|
|
||||||
X *uint64
|
|
||||||
}{},
|
|
||||||
output: "C180",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: struct {
|
|
||||||
X *uint64 `rlp:"nilList"`
|
|
||||||
}{},
|
|
||||||
output: "C1C0",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: struct {
|
|
||||||
X *[]uint64
|
|
||||||
}{},
|
|
||||||
output: "C1C0",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
val: struct {
|
|
||||||
X *[]uint64 `rlp:"nilString"`
|
|
||||||
}{},
|
|
||||||
output: "C180",
|
|
||||||
},
|
|
||||||
|
|
||||||
// interfaces
|
|
||||||
{val: []io.Reader{reader}, output: "C3C20102"}, // the contained value is a struct
|
|
||||||
|
|
||||||
// Encoder
|
|
||||||
{val: (*testEncoder)(nil), output: "C0"},
|
|
||||||
{val: &testEncoder{}, output: "00010001000100010001"},
|
|
||||||
{val: &testEncoder{errors.New("test error")}, error: "test error"},
|
|
||||||
{val: struct{ E testEncoderValueMethod }{}, output: "C3FAFEF0"},
|
|
||||||
{val: struct{ E *testEncoderValueMethod }{}, output: "C1C0"},
|
|
||||||
|
|
||||||
// Verify that the Encoder interface works for unsupported types like func().
|
|
||||||
{val: undecodableEncoder(func() {}), output: "F5F5F5"},
|
|
||||||
|
|
||||||
// Verify that pointer method testEncoder.EncodeRLP is called for
|
|
||||||
// addressable non-pointer values.
|
|
||||||
{val: &struct{ TE testEncoder }{testEncoder{}}, output: "CA00010001000100010001"},
|
|
||||||
{val: &struct{ TE testEncoder }{testEncoder{errors.New("test error")}}, error: "test error"},
|
|
||||||
|
|
||||||
// Verify the error for non-addressable non-pointer Encoder.
|
|
||||||
{val: testEncoder{}, error: "rlp: unadressable value of type rlp.testEncoder, EncodeRLP is pointer method"},
|
|
||||||
|
|
||||||
// Verify Encoder takes precedence over []byte.
|
|
||||||
{val: []byteEncoder{0, 1, 2, 3, 4}, output: "C5C0C0C0C0C0"},
|
|
||||||
}
|
|
||||||
|
|
||||||
func runEncTests(t *testing.T, f func(val interface{}) ([]byte, error)) {
	for i, test := range encTests {
		output, err := f(test.val)
		if err != nil && test.error == "" {
			t.Errorf("test %d: unexpected error: %v\nvalue %#v\ntype %T",
				i, err, test.val, test.val)
			continue
		}
		if test.error != "" && fmt.Sprint(err) != test.error {
			t.Errorf("test %d: error mismatch\ngot %v\nwant %v\nvalue %#v\ntype %T",
				i, err, test.error, test.val, test.val)
			continue
		}
		if err == nil && !bytes.Equal(output, unhex(test.output)) {
			t.Errorf("test %d: output mismatch:\ngot %X\nwant %s\nvalue %#v\ntype %T",
				i, output, test.output, test.val, test.val)
		}
	}
}

func TestEncode(t *testing.T) {
	runEncTests(t, func(val interface{}) ([]byte, error) {
		b := new(bytes.Buffer)
		err := Encode(b, val)
		return b.Bytes(), err
	})
}

func TestEncodeToBytes(t *testing.T) {
	runEncTests(t, EncodeToBytes)
}

func TestEncodeToReader(t *testing.T) {
	runEncTests(t, func(val interface{}) ([]byte, error) {
		_, r, err := EncodeToReader(val)
		if err != nil {
			return nil, err
		}
		return ioutil.ReadAll(r)
	})
}

func TestEncodeToReaderPiecewise(t *testing.T) {
	runEncTests(t, func(val interface{}) ([]byte, error) {
		size, r, err := EncodeToReader(val)
		if err != nil {
			return nil, err
		}

		// read output piecewise
		output := make([]byte, size)
		for start, end := 0, 0; start < size; start = end {
			if remaining := size - start; remaining < 3 {
				end += remaining
			} else {
				end = start + 3
			}
			n, err := r.Read(output[start:end])
			end = start + n
			if err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			}
		}
		return output, nil
	})
}

// This is a regression test verifying that encReader
// returns its encbuf to the pool only once.
func TestEncodeToReaderReturnToPool(t *testing.T) {
	buf := make([]byte, 50)
	wg := new(sync.WaitGroup)
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			for i := 0; i < 1000; i++ {
				_, r, _ := EncodeToReader("foo")
				ioutil.ReadAll(r)
				r.Read(buf)
				r.Read(buf)
				r.Read(buf)
				r.Read(buf)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

var sink interface{}

func BenchmarkIntsize(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = intsize(0x12345678)
	}
}

func BenchmarkPutint(b *testing.B) {
	buf := make([]byte, 8)
	for i := 0; i < b.N; i++ {
		putint(buf, 0x12345678)
		sink = buf
	}
}

func BenchmarkEncodeBigInts(b *testing.B) {
	ints := make([]*big.Int, 200)
	for i := range ints {
		ints[i] = new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(i)), nil)
	}
	out := bytes.NewBuffer(make([]byte, 0, 4096))
	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		out.Reset()
		if err := Encode(out, ints); err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkEncodeConcurrentInterface(b *testing.B) {
	type struct1 struct {
		A string
		B *big.Int
		C [20]byte
	}
	value := []interface{}{
		uint(999),
		&struct1{A: "hello", B: big.NewInt(0xFFFFFFFF)},
		[10]byte{1, 2, 3, 4, 5, 6},
		[]string{"yeah", "yeah", "yeah"},
	}

	var wg sync.WaitGroup
	for cpu := 0; cpu < runtime.NumCPU(); cpu++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			var buffer bytes.Buffer
			for i := 0; i < b.N; i++ {
				buffer.Reset()
				err := Encode(&buffer, value)
				if err != nil {
					panic(err)
				}
			}
		}()
	}
	wg.Wait()
}

type byteArrayStruct struct {
	A [20]byte
	B [32]byte
	C [32]byte
}

func BenchmarkEncodeByteArrayStruct(b *testing.B) {
	var out bytes.Buffer
	var value byteArrayStruct

	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		out.Reset()
		if err := Encode(&out, &value); err != nil {
			b.Fatal(err)
		}
	}
}
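Usage note (a hedged sketch, not part of the diff): EncodeToReader returns the total encoded size together with an io.Reader, which is what TestEncodeToReaderPiecewise exercises above. A minimal consumer could stream an encoding without buffering it; writeEncoded is a hypothetical helper name and assumes only the exported API shown in this file.

// Sketch: stream an encoded value to any io.Writer instead of buffering it.
func writeEncoded(dst io.Writer, val interface{}) (int64, error) {
	size, r, err := EncodeToReader(val)
	if err != nil {
		return 0, err
	}
	// size is known up front, e.g. for a Content-Length style header.
	_ = size
	return io.Copy(dst, r)
}
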
@ -1,46 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rlp

import (
	"fmt"
	"io"
)

type MyCoolType struct {
	Name string
	a, b uint
}

// EncodeRLP writes x as RLP list [a, b] that omits the Name field.
func (x *MyCoolType) EncodeRLP(w io.Writer) (err error) {
	return Encode(w, []uint{x.a, x.b})
}

func ExampleEncoder() {
	var t *MyCoolType // t is nil pointer to MyCoolType
	bytes, _ := EncodeToBytes(t)
	fmt.Printf("%v → %X\n", t, bytes)

	t = &MyCoolType{Name: "foobar", a: 5, b: 6}
	bytes, _ = EncodeToBytes(t)
	fmt.Printf("%v → %X\n", t, bytes)

	// Output:
	// <nil> → C0
	// &{foobar 5 6} → C20506
}
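The removed example only covers the encoding side. A decoding counterpart would implement the package's Decoder interface; the following is a hedged sketch (not part of the commit) and assumes the Stream methods List, Uint and ListEnd exposed by this rlp package.

// Sketch: DecodeRLP reads the [a, b] list written by EncodeRLP above.
func (x *MyCoolType) DecodeRLP(s *Stream) error {
	if _, err := s.List(); err != nil {
		return err
	}
	a, err := s.Uint()
	if err != nil {
		return err
	}
	b, err := s.Uint()
	if err != nil {
		return err
	}
	x.a, x.b = uint(a), uint(b)
	return s.ListEnd()
}
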
213	restricted/rlp/internal/rlpstruct/rlpstruct.go	Normal file
@ -0,0 +1,213 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package rlpstruct implements struct processing for RLP encoding/decoding.
//
// In particular, this package handles all rules around field filtering,
// struct tags and nil value determination.
package rlpstruct

import (
	"fmt"
	"reflect"
	"strings"
)

// Field represents a struct field.
type Field struct {
	Name     string
	Index    int
	Exported bool
	Type     Type
	Tag      string
}

// Type represents the attributes of a Go type.
type Type struct {
	Name      string
	Kind      reflect.Kind
	IsEncoder bool  // whether type implements rlp.Encoder
	IsDecoder bool  // whether type implements rlp.Decoder
	Elem      *Type // non-nil for Kind values of Ptr, Slice, Array
}

// DefaultNilValue determines whether a nil pointer to t encodes/decodes
// as an empty string or empty list.
func (t Type) DefaultNilValue() NilKind {
	k := t.Kind
	if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(t) {
		return NilKindString
	}
	return NilKindList
}

// NilKind is the RLP value encoded in place of nil pointers.
type NilKind uint8

const (
	NilKindString NilKind = 0x80
	NilKindList   NilKind = 0xC0
)

// Tags represents struct tags.
type Tags struct {
	// rlp:"nil" controls whether empty input results in a nil pointer.
	// nilKind is the kind of empty value allowed for the field.
	NilKind NilKind
	NilOK   bool

	// rlp:"optional" allows for a field to be missing in the input list.
	// If this is set, all subsequent fields must also be optional.
	Optional bool

	// rlp:"tail" controls whether this field swallows additional list elements. It can
	// only be set for the last field, which must be of slice type.
	Tail bool

	// rlp:"-" ignores fields.
	Ignored bool
}

// TagError is raised for invalid struct tags.
type TagError struct {
	StructType string

	// These are set by this package.
	Field string
	Tag   string
	Err   string
}

func (e TagError) Error() string {
	field := "field " + e.Field
	if e.StructType != "" {
		field = e.StructType + "." + e.Field
	}
	return fmt.Sprintf("rlp: invalid struct tag %q for %s (%s)", e.Tag, field, e.Err)
}

// ProcessFields filters the given struct fields, returning only fields
// that should be considered for encoding/decoding.
func ProcessFields(allFields []Field) ([]Field, []Tags, error) {
	lastPublic := lastPublicField(allFields)

	// Gather all exported fields and their tags.
	var fields []Field
	var tags []Tags
	for _, field := range allFields {
		if !field.Exported {
			continue
		}
		ts, err := parseTag(field, lastPublic)
		if err != nil {
			return nil, nil, err
		}
		if ts.Ignored {
			continue
		}
		fields = append(fields, field)
		tags = append(tags, ts)
	}

	// Verify optional field consistency. If any optional field exists,
	// all fields after it must also be optional. Note: optional + tail
	// is supported.
	var anyOptional bool
	var firstOptionalName string
	for i, ts := range tags {
		name := fields[i].Name
		if ts.Optional || ts.Tail {
			if !anyOptional {
				firstOptionalName = name
			}
			anyOptional = true
		} else {
			if anyOptional {
				msg := fmt.Sprintf("must be optional because preceding field %q is optional", firstOptionalName)
				return nil, nil, TagError{Field: name, Err: msg}
			}
		}
	}
	return fields, tags, nil
}

func parseTag(field Field, lastPublic int) (Tags, error) {
	name := field.Name
	tag := reflect.StructTag(field.Tag)
	var ts Tags
	for _, t := range strings.Split(tag.Get("rlp"), ",") {
		switch t = strings.TrimSpace(t); t {
		case "":
			// empty tag is allowed for some reason
		case "-":
			ts.Ignored = true
		case "nil", "nilString", "nilList":
			ts.NilOK = true
			if field.Type.Kind != reflect.Ptr {
				return ts, TagError{Field: name, Tag: t, Err: "field is not a pointer"}
			}
			switch t {
			case "nil":
				ts.NilKind = field.Type.Elem.DefaultNilValue()
			case "nilString":
				ts.NilKind = NilKindString
			case "nilList":
				ts.NilKind = NilKindList
			}
		case "optional":
			ts.Optional = true
			if ts.Tail {
				return ts, TagError{Field: name, Tag: t, Err: `also has "tail" tag`}
			}
		case "tail":
			ts.Tail = true
			if field.Index != lastPublic {
				return ts, TagError{Field: name, Tag: t, Err: "must be on last field"}
			}
			if ts.Optional {
				return ts, TagError{Field: name, Tag: t, Err: `also has "optional" tag`}
			}
			if field.Type.Kind != reflect.Slice {
				return ts, TagError{Field: name, Tag: t, Err: "field type is not slice"}
			}
		default:
			return ts, TagError{Field: name, Tag: t, Err: "unknown tag"}
		}
	}
	return ts, nil
}

func lastPublicField(fields []Field) int {
	last := 0
	for _, f := range fields {
		if f.Exported {
			last = f.Index
		}
	}
	return last
}

func isUint(k reflect.Kind) bool {
	return k >= reflect.Uint && k <= reflect.Uintptr
}

func isByte(typ Type) bool {
	return typ.Kind == reflect.Uint8 && !typ.IsEncoder
}

func isByteArray(typ Type) bool {
	return (typ.Kind == reflect.Slice || typ.Kind == reflect.Array) && isByte(*typ.Elem)
}
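To make the tag rules above concrete, here is a hedged sketch of a hypothetical struct (not from this commit) whose fields exercise each tag that parseTag accepts. ProcessFields would keep Count, Name, Extra, Details and Rest, drop Hidden and note, and enforce the optional/tail ordering.

// Hypothetical example type illustrating the rlp struct tags.
type example struct {
	Count   uint64
	Name    string
	Extra   *[]byte  `rlp:"nil"`      // pointer required; nil encodes as an empty string here
	Hidden  string   `rlp:"-"`        // ignored entirely
	Details []uint64 `rlp:"optional"` // may be absent; later exported fields must be optional or tail
	Rest    []string `rlp:"tail"`     // must be the last exported field and a slice
	note    string                    // unexported, filtered out before tag validation
}
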
@ -1,4 +1,4 @@
|
|||||||
// Copyright 2019 The go-ethereum Authors
|
// Copyright 2020 The go-ethereum Authors
|
||||||
// This file is part of the go-ethereum library.
|
// This file is part of the go-ethereum library.
|
||||||
//
|
//
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
@ -36,7 +36,6 @@ func NewListIterator(data RawValue) (*listIterator, error) {
|
|||||||
data: data[t : t+c],
|
data: data[t : t+c],
|
||||||
}
|
}
|
||||||
return it, nil
|
return it, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next forwards the iterator one step, returns true if it was not at end yet
|
// Next forwards the iterator one step, returns true if it was not at end yet
|
||||||
|
@ -1,59 +0,0 @@
|
|||||||
// Copyright 2019 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package rlp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/openrelayxyz/plugeth-utils/restricted/hexutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIterator tests some basic things about the ListIterator. A more
|
|
||||||
// comprehensive test can be found in core/rlp_test.go, where we can
|
|
||||||
// use both types and rlp without dependency cycles
|
|
||||||
func TestIterator(t *testing.T) {
|
|
||||||
bodyRlpHex := "0xf902cbf8d6f869800182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ba01025c66fad28b4ce3370222624d952c35529e602af7cbe04f667371f61b0e3b3a00ab8813514d1217059748fd903288ace1b4001a4bc5fbde2790debdc8167de2ff869010182c35094000000000000000000000000000000000000aaaa808a000000000000000000001ca05ac4cf1d19be06f3742c21df6c49a7e929ceb3dbaf6a09f3cfb56ff6828bd9a7a06875970133a35e63ac06d360aa166d228cc013e9b96e0a2cae7f55b22e1ee2e8f901f0f901eda0c75448377c0e426b8017b23c5f77379ecf69abc1d5c224284ad3ba1c46c59adaa00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000808080808080a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
|
|
||||||
bodyRlp := hexutil.MustDecode(bodyRlpHex)
|
|
||||||
|
|
||||||
it, err := NewListIterator(bodyRlp)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// Check that txs exist
|
|
||||||
if !it.Next() {
|
|
||||||
t.Fatal("expected two elems, got zero")
|
|
||||||
}
|
|
||||||
txs := it.Value()
|
|
||||||
// Check that uncles exist
|
|
||||||
if !it.Next() {
|
|
||||||
t.Fatal("expected two elems, got one")
|
|
||||||
}
|
|
||||||
txit, err := NewListIterator(txs)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
var i = 0
|
|
||||||
for txit.Next() {
|
|
||||||
if txit.err != nil {
|
|
||||||
t.Fatal(txit.err)
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if exp := 2; i != exp {
|
|
||||||
t.Errorf("count wrong, expected %d got %d", i, exp)
|
|
||||||
}
|
|
||||||
}
|
|
@ -28,13 +28,46 @@ type RawValue []byte

var rawValueType = reflect.TypeOf(RawValue{})

// StringSize returns the encoded size of a string.
func StringSize(s string) uint64 {
	switch {
	case len(s) == 0:
		return 1
	case len(s) == 1:
		if s[0] <= 0x7f {
			return 1
		} else {
			return 2
		}
	default:
		return uint64(headsize(uint64(len(s))) + len(s))
	}
}

// BytesSize returns the encoded size of a byte slice.
func BytesSize(b []byte) uint64 {
	switch {
	case len(b) == 0:
		return 1
	case len(b) == 1:
		if b[0] <= 0x7f {
			return 1
		} else {
			return 2
		}
	default:
		return uint64(headsize(uint64(len(b))) + len(b))
	}
}

// ListSize returns the encoded size of an RLP list with the given
// content size.
func ListSize(contentSize uint64) uint64 {
	return uint64(headsize(contentSize)) + contentSize
}

// IntSize returns the encoded size of the integer x. Note: The return type of this
// function is 'int' for backwards-compatibility reasons. The result is always positive.
func IntSize(x uint64) int {
	if x < 0x80 {
		return 1
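The new size helpers restate the RLP header rules: a single byte below 0x80 encodes as itself, any other string of length n below 56 costs one header byte plus n, and longer strings pay an extra length-of-length prefix. A self-contained sketch of that arithmetic, independent of this package's headsize helper:

// Sketch of the string-size rule implemented by StringSize/BytesSize.
func encodedStringSize(n int, firstByte byte) int {
	switch {
	case n == 1 && firstByte <= 0x7f:
		return 1 // the byte is its own encoding
	case n < 56:
		return 1 + n // 0x80+n header, then the payload
	default:
		// 0xB7+len(len(n)) header, the big-endian length, then the payload
		lenOfLen := 0
		for v := n; v > 0; v >>= 8 {
			lenOfLen++
		}
		return 1 + lenOfLen + n
	}
}
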
|
@ -18,8 +18,8 @@ package rlp
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"reflect"
|
|
||||||
"testing"
|
"testing"
|
||||||
"testing/quick"
|
"testing/quick"
|
||||||
)
|
)
|
||||||
@ -54,21 +54,41 @@ func TestCountValues(t *testing.T) {
|
|||||||
if count != test.count {
|
if count != test.count {
|
||||||
t.Errorf("test %d: count mismatch, got %d want %d\ninput: %s", i, count, test.count, test.input)
|
t.Errorf("test %d: count mismatch, got %d want %d\ninput: %s", i, count, test.count, test.input)
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(err, test.err) {
|
if !errors.Is(err, test.err) {
|
||||||
t.Errorf("test %d: err mismatch, got %q want %q\ninput: %s", i, err, test.err, test.input)
|
t.Errorf("test %d: err mismatch, got %q want %q\ninput: %s", i, err, test.err, test.input)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSplitTypes(t *testing.T) {
|
func TestSplitString(t *testing.T) {
|
||||||
if _, _, err := SplitString(unhex("C100")); err != ErrExpectedString {
|
for i, test := range []string{
|
||||||
t.Errorf("SplitString returned %q, want %q", err, ErrExpectedString)
|
"C0",
|
||||||
|
"C100",
|
||||||
|
"C3010203",
|
||||||
|
"C88363617483646F67",
|
||||||
|
"F8384C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E7365637465747572206164697069736963696E6720656C6974",
|
||||||
|
} {
|
||||||
|
if _, _, err := SplitString(unhex(test)); !errors.Is(err, ErrExpectedString) {
|
||||||
|
t.Errorf("test %d: error mismatch: have %q, want %q", i, err, ErrExpectedString)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if _, _, err := SplitList(unhex("01")); err != ErrExpectedList {
|
}
|
||||||
t.Errorf("SplitString returned %q, want %q", err, ErrExpectedList)
|
|
||||||
}
|
func TestSplitList(t *testing.T) {
|
||||||
if _, _, err := SplitList(unhex("81FF")); err != ErrExpectedList {
|
for i, test := range []string{
|
||||||
t.Errorf("SplitString returned %q, want %q", err, ErrExpectedList)
|
"80",
|
||||||
|
"00",
|
||||||
|
"01",
|
||||||
|
"8180",
|
||||||
|
"81FF",
|
||||||
|
"820400",
|
||||||
|
"83636174",
|
||||||
|
"83646F67",
|
||||||
|
"B8384C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E7365637465747572206164697069736963696E6720656C6974",
|
||||||
|
} {
|
||||||
|
if _, _, err := SplitList(unhex(test)); !errors.Is(err, ErrExpectedList) {
|
||||||
|
t.Errorf("test %d: error mismatch: have %q, want %q", i, err, ErrExpectedList)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -283,3 +303,36 @@ func TestAppendUint64Random(t *testing.T) {
		t.Fatal(err)
	}
}

func TestBytesSize(t *testing.T) {
	tests := []struct {
		v    []byte
		size uint64
	}{
		{v: []byte{}, size: 1},
		{v: []byte{0x1}, size: 1},
		{v: []byte{0x7E}, size: 1},
		{v: []byte{0x7F}, size: 1},
		{v: []byte{0x80}, size: 2},
		{v: []byte{0xFF}, size: 2},
		{v: []byte{0xFF, 0xF0}, size: 3},
		{v: make([]byte, 55), size: 56},
		{v: make([]byte, 56), size: 58},
	}

	for _, test := range tests {
		s := BytesSize(test.v)
		if s != test.size {
			t.Errorf("BytesSize(%#x) -> %d, want %d", test.v, s, test.size)
		}
		s = StringSize(string(test.v))
		if s != test.size {
			t.Errorf("StringSize(%#x) -> %d, want %d", test.v, s, test.size)
		}
		// Sanity check:
		enc, _ := EncodeToBytes(test.v)
		if uint64(len(enc)) != test.size {
			t.Errorf("len(EncodeToBytes(%#x)) -> %d, test says %d", test.v, len(enc), test.size)
		}
	}
}
800
restricted/rlp/rlpgen/gen.go
Normal file
800
restricted/rlp/rlpgen/gen.go
Normal file
@ -0,0 +1,800 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/format"
|
||||||
|
"go/types"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/openrelayxyz/plugeth-utils/restricted/rlp/internal/rlpstruct"
|
||||||
|
)
|
||||||
|
|
||||||
|
// buildContext keeps the data needed for make*Op.
|
||||||
|
type buildContext struct {
|
||||||
|
topType *types.Named // the type we're creating methods for
|
||||||
|
|
||||||
|
encoderIface *types.Interface
|
||||||
|
decoderIface *types.Interface
|
||||||
|
rawValueType *types.Named
|
||||||
|
|
||||||
|
typeToStructCache map[types.Type]*rlpstruct.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBuildContext(packageRLP *types.Package) *buildContext {
|
||||||
|
enc := packageRLP.Scope().Lookup("Encoder").Type().Underlying()
|
||||||
|
dec := packageRLP.Scope().Lookup("Decoder").Type().Underlying()
|
||||||
|
rawv := packageRLP.Scope().Lookup("RawValue").Type()
|
||||||
|
return &buildContext{
|
||||||
|
typeToStructCache: make(map[types.Type]*rlpstruct.Type),
|
||||||
|
encoderIface: enc.(*types.Interface),
|
||||||
|
decoderIface: dec.(*types.Interface),
|
||||||
|
rawValueType: rawv.(*types.Named),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) isEncoder(typ types.Type) bool {
|
||||||
|
return types.Implements(typ, bctx.encoderIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) isDecoder(typ types.Type) bool {
|
||||||
|
return types.Implements(typ, bctx.decoderIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeToStructType converts typ to rlpstruct.Type.
|
||||||
|
func (bctx *buildContext) typeToStructType(typ types.Type) *rlpstruct.Type {
|
||||||
|
if prev := bctx.typeToStructCache[typ]; prev != nil {
|
||||||
|
return prev // short-circuit for recursive types.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve named types to their underlying type, but keep the name.
|
||||||
|
name := types.TypeString(typ, nil)
|
||||||
|
for {
|
||||||
|
utype := typ.Underlying()
|
||||||
|
if utype == typ {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
typ = utype
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the type and store it in cache.
|
||||||
|
t := &rlpstruct.Type{
|
||||||
|
Name: name,
|
||||||
|
Kind: typeReflectKind(typ),
|
||||||
|
IsEncoder: bctx.isEncoder(typ),
|
||||||
|
IsDecoder: bctx.isDecoder(typ),
|
||||||
|
}
|
||||||
|
bctx.typeToStructCache[typ] = t
|
||||||
|
|
||||||
|
// Assign element type.
|
||||||
|
switch typ.(type) {
|
||||||
|
case *types.Array, *types.Slice, *types.Pointer:
|
||||||
|
etype := typ.(interface{ Elem() types.Type }).Elem()
|
||||||
|
t.Elem = bctx.typeToStructType(etype)
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// genContext is passed to the gen* methods of op when generating
|
||||||
|
// the output code. It tracks packages to be imported by the output
|
||||||
|
// file and assigns unique names of temporary variables.
|
||||||
|
type genContext struct {
|
||||||
|
inPackage *types.Package
|
||||||
|
imports map[string]struct{}
|
||||||
|
tempCounter int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newGenContext(inPackage *types.Package) *genContext {
|
||||||
|
return &genContext{
|
||||||
|
inPackage: inPackage,
|
||||||
|
imports: make(map[string]struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *genContext) temp() string {
|
||||||
|
v := fmt.Sprintf("_tmp%d", ctx.tempCounter)
|
||||||
|
ctx.tempCounter++
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *genContext) resetTemp() {
|
||||||
|
ctx.tempCounter = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *genContext) addImport(path string) {
|
||||||
|
if path == ctx.inPackage.Path() {
|
||||||
|
return // avoid importing the package that we're generating in.
|
||||||
|
}
|
||||||
|
// TODO: renaming?
|
||||||
|
ctx.imports[path] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// importsList returns all packages that need to be imported.
|
||||||
|
func (ctx *genContext) importsList() []string {
|
||||||
|
imp := make([]string, 0, len(ctx.imports))
|
||||||
|
for k := range ctx.imports {
|
||||||
|
imp = append(imp, k)
|
||||||
|
}
|
||||||
|
sort.Strings(imp)
|
||||||
|
return imp
|
||||||
|
}
|
||||||
|
|
||||||
|
// qualify is the types.Qualifier used for printing types.
|
||||||
|
func (ctx *genContext) qualify(pkg *types.Package) string {
|
||||||
|
if pkg.Path() == ctx.inPackage.Path() {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
ctx.addImport(pkg.Path())
|
||||||
|
// TODO: renaming?
|
||||||
|
return pkg.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
type op interface {
|
||||||
|
// genWrite creates the encoder. The generated code should write v,
|
||||||
|
// which is any Go expression, to the rlp.EncoderBuffer 'w'.
|
||||||
|
genWrite(ctx *genContext, v string) string
|
||||||
|
|
||||||
|
// genDecode creates the decoder. The generated code should read
|
||||||
|
// a value from the rlp.Stream 'dec' and store it to dst.
|
||||||
|
genDecode(ctx *genContext) (string, string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// basicOp handles basic types bool, uint*, string.
|
||||||
|
type basicOp struct {
|
||||||
|
typ types.Type
|
||||||
|
writeMethod string // method called to write the value
|
||||||
|
writeArgType types.Type // parameter type of writeMethod
|
||||||
|
decMethod string
|
||||||
|
decResultType types.Type // return type of decMethod
|
||||||
|
decUseBitSize bool // if true, result bit size is appended to decMethod
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*buildContext) makeBasicOp(typ *types.Basic) (op, error) {
|
||||||
|
op := basicOp{typ: typ}
|
||||||
|
kind := typ.Kind()
|
||||||
|
switch {
|
||||||
|
case kind == types.Bool:
|
||||||
|
op.writeMethod = "WriteBool"
|
||||||
|
op.writeArgType = types.Typ[types.Bool]
|
||||||
|
op.decMethod = "Bool"
|
||||||
|
op.decResultType = types.Typ[types.Bool]
|
||||||
|
case kind >= types.Uint8 && kind <= types.Uint64:
|
||||||
|
op.writeMethod = "WriteUint64"
|
||||||
|
op.writeArgType = types.Typ[types.Uint64]
|
||||||
|
op.decMethod = "Uint"
|
||||||
|
op.decResultType = typ
|
||||||
|
op.decUseBitSize = true
|
||||||
|
case kind == types.String:
|
||||||
|
op.writeMethod = "WriteString"
|
||||||
|
op.writeArgType = types.Typ[types.String]
|
||||||
|
op.decMethod = "String"
|
||||||
|
op.decResultType = types.Typ[types.String]
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unhandled basic type: %v", typ)
|
||||||
|
}
|
||||||
|
return op, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*buildContext) makeByteSliceOp(typ *types.Slice) op {
|
||||||
|
if !isByte(typ.Elem()) {
|
||||||
|
panic("non-byte slice type in makeByteSliceOp")
|
||||||
|
}
|
||||||
|
bslice := types.NewSlice(types.Typ[types.Uint8])
|
||||||
|
return basicOp{
|
||||||
|
typ: typ,
|
||||||
|
writeMethod: "WriteBytes",
|
||||||
|
writeArgType: bslice,
|
||||||
|
decMethod: "Bytes",
|
||||||
|
decResultType: bslice,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) makeRawValueOp() op {
|
||||||
|
bslice := types.NewSlice(types.Typ[types.Uint8])
|
||||||
|
return basicOp{
|
||||||
|
typ: bctx.rawValueType,
|
||||||
|
writeMethod: "Write",
|
||||||
|
writeArgType: bslice,
|
||||||
|
decMethod: "Raw",
|
||||||
|
decResultType: bslice,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op basicOp) writeNeedsConversion() bool {
|
||||||
|
return !types.AssignableTo(op.typ, op.writeArgType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op basicOp) decodeNeedsConversion() bool {
|
||||||
|
return !types.AssignableTo(op.decResultType, op.typ)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op basicOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
if op.writeNeedsConversion() {
|
||||||
|
v = fmt.Sprintf("%s(%s)", op.writeArgType, v)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("w.%s(%s)\n", op.writeMethod, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op basicOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
var (
|
||||||
|
resultV = ctx.temp()
|
||||||
|
result = resultV
|
||||||
|
method = op.decMethod
|
||||||
|
)
|
||||||
|
if op.decUseBitSize {
|
||||||
|
// Note: For now, this only works for platform-independent integer
|
||||||
|
// sizes. makeBasicOp forbids the platform-dependent types.
|
||||||
|
var sizes types.StdSizes
|
||||||
|
method = fmt.Sprintf("%s%d", op.decMethod, sizes.Sizeof(op.typ)*8)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call the decoder method.
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "%s, err := dec.%s()\n", resultV, method)
|
||||||
|
fmt.Fprintf(&b, "if err != nil { return err }\n")
|
||||||
|
if op.decodeNeedsConversion() {
|
||||||
|
conv := ctx.temp()
|
||||||
|
fmt.Fprintf(&b, "%s := %s(%s)\n", conv, types.TypeString(op.typ, ctx.qualify), resultV)
|
||||||
|
result = conv
|
||||||
|
}
|
||||||
|
return result, b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// byteArrayOp handles [...]byte.
|
||||||
|
type byteArrayOp struct {
|
||||||
|
typ types.Type
|
||||||
|
name types.Type // name != typ for named byte array types (e.g. common.Address)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) makeByteArrayOp(name *types.Named, typ *types.Array) byteArrayOp {
|
||||||
|
nt := types.Type(name)
|
||||||
|
if name == nil {
|
||||||
|
nt = typ
|
||||||
|
}
|
||||||
|
return byteArrayOp{typ, nt}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op byteArrayOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
return fmt.Sprintf("w.WriteBytes(%s[:])\n", v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op byteArrayOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
var resultV = ctx.temp()
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(op.name, ctx.qualify))
|
||||||
|
fmt.Fprintf(&b, "if err := dec.ReadBytes(%s[:]); err != nil { return err }\n", resultV)
|
||||||
|
return resultV, b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// bigIntOp handles big.Int.
|
||||||
|
// This exists because big.Int has its own decoder operation on rlp.Stream,
|
||||||
|
// but the decode method returns *big.Int, so it needs to be dereferenced.
|
||||||
|
type bigIntOp struct {
|
||||||
|
pointer bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op bigIntOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
var b bytes.Buffer
|
||||||
|
|
||||||
|
fmt.Fprintf(&b, "if %s.Sign() == -1 {\n", v)
|
||||||
|
fmt.Fprintf(&b, " return rlp.ErrNegativeBigInt\n")
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
dst := v
|
||||||
|
if !op.pointer {
|
||||||
|
dst = "&" + v
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&b, "w.WriteBigInt(%s)\n", dst)
|
||||||
|
|
||||||
|
// Wrap with nil check.
|
||||||
|
if op.pointer {
|
||||||
|
code := b.String()
|
||||||
|
b.Reset()
|
||||||
|
fmt.Fprintf(&b, "if %s == nil {\n", v)
|
||||||
|
fmt.Fprintf(&b, " w.Write(rlp.EmptyString)")
|
||||||
|
fmt.Fprintf(&b, "} else {\n")
|
||||||
|
fmt.Fprint(&b, code)
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op bigIntOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
var resultV = ctx.temp()
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "%s, err := dec.BigInt()\n", resultV)
|
||||||
|
fmt.Fprintf(&b, "if err != nil { return err }\n")
|
||||||
|
|
||||||
|
result := resultV
|
||||||
|
if !op.pointer {
|
||||||
|
result = "(*" + resultV + ")"
|
||||||
|
}
|
||||||
|
return result, b.String()
|
||||||
|
}
|
||||||
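As a concrete illustration of what bigIntOp emits (hand-written here from the Fprintf calls above, not taken from actual rlpgen output, with obj.Difficulty as a hypothetical field), the generated writer code for a *big.Int field would look roughly like:

// Approximate shape of the generated encoder code for a *big.Int field.
if obj.Difficulty == nil {
	w.Write(rlp.EmptyString)
} else {
	if obj.Difficulty.Sign() == -1 {
		return rlp.ErrNegativeBigInt
	}
	w.WriteBigInt(obj.Difficulty)
}
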
|
|
||||||
|
// uint256Op handles "github.com/holiman/uint256".Int
|
||||||
|
type uint256Op struct {
|
||||||
|
pointer bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op uint256Op) genWrite(ctx *genContext, v string) string {
|
||||||
|
var b bytes.Buffer
|
||||||
|
|
||||||
|
dst := v
|
||||||
|
if !op.pointer {
|
||||||
|
dst = "&" + v
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&b, "w.WriteUint256(%s)\n", dst)
|
||||||
|
|
||||||
|
// Wrap with nil check.
|
||||||
|
if op.pointer {
|
||||||
|
code := b.String()
|
||||||
|
b.Reset()
|
||||||
|
fmt.Fprintf(&b, "if %s == nil {\n", v)
|
||||||
|
fmt.Fprintf(&b, " w.Write(rlp.EmptyString)")
|
||||||
|
fmt.Fprintf(&b, "} else {\n")
|
||||||
|
fmt.Fprint(&b, code)
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op uint256Op) genDecode(ctx *genContext) (string, string) {
|
||||||
|
ctx.addImport("github.com/holiman/uint256")
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
resultV := ctx.temp()
|
||||||
|
fmt.Fprintf(&b, "var %s uint256.Int\n", resultV)
|
||||||
|
fmt.Fprintf(&b, "if err := dec.ReadUint256(&%s); err != nil { return err }\n", resultV)
|
||||||
|
|
||||||
|
result := resultV
|
||||||
|
if op.pointer {
|
||||||
|
result = "&" + resultV
|
||||||
|
}
|
||||||
|
return result, b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// encoderDecoderOp handles rlp.Encoder and rlp.Decoder.
|
||||||
|
// In order to be used with this, the type must implement both interfaces.
|
||||||
|
// This restriction may be lifted in the future by creating separate ops for
|
||||||
|
// encoding and decoding.
|
||||||
|
type encoderDecoderOp struct {
|
||||||
|
typ types.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op encoderDecoderOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
return fmt.Sprintf("if err := %s.EncodeRLP(w); err != nil { return err }\n", v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op encoderDecoderOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
// DecodeRLP must have pointer receiver, and this is verified in makeOp.
|
||||||
|
etyp := op.typ.(*types.Pointer).Elem()
|
||||||
|
var resultV = ctx.temp()
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "%s := new(%s)\n", resultV, types.TypeString(etyp, ctx.qualify))
|
||||||
|
fmt.Fprintf(&b, "if err := %s.DecodeRLP(dec); err != nil { return err }\n", resultV)
|
||||||
|
return resultV, b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ptrOp handles pointer types.
|
||||||
|
type ptrOp struct {
|
||||||
|
elemTyp types.Type
|
||||||
|
elem op
|
||||||
|
nilOK bool
|
||||||
|
nilValue rlpstruct.NilKind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) makePtrOp(elemTyp types.Type, tags rlpstruct.Tags) (op, error) {
|
||||||
|
elemOp, err := bctx.makeOp(nil, elemTyp, rlpstruct.Tags{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
op := ptrOp{elemTyp: elemTyp, elem: elemOp}
|
||||||
|
|
||||||
|
// Determine nil value.
|
||||||
|
if tags.NilOK {
|
||||||
|
op.nilOK = true
|
||||||
|
op.nilValue = tags.NilKind
|
||||||
|
} else {
|
||||||
|
styp := bctx.typeToStructType(elemTyp)
|
||||||
|
op.nilValue = styp.DefaultNilValue()
|
||||||
|
}
|
||||||
|
return op, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op ptrOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
// Note: in writer functions, accesses to v are read-only, i.e. v is any Go
|
||||||
|
// expression. To make all accesses work through the pointer, we substitute
|
||||||
|
// v with (*v). This is required for most accesses including `v`, `call(v)`,
|
||||||
|
// and `v[index]` on slices.
|
||||||
|
//
|
||||||
|
// For `v.field` and `v[:]` on arrays, the dereference operation is not required.
|
||||||
|
var vv string
|
||||||
|
_, isStruct := op.elem.(structOp)
|
||||||
|
_, isByteArray := op.elem.(byteArrayOp)
|
||||||
|
if isStruct || isByteArray {
|
||||||
|
vv = v
|
||||||
|
} else {
|
||||||
|
vv = fmt.Sprintf("(*%s)", v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "if %s == nil {\n", v)
|
||||||
|
fmt.Fprintf(&b, " w.Write([]byte{0x%X})\n", op.nilValue)
|
||||||
|
fmt.Fprintf(&b, "} else {\n")
|
||||||
|
fmt.Fprintf(&b, " %s", op.elem.genWrite(ctx, vv))
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op ptrOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
result, code := op.elem.genDecode(ctx)
|
||||||
|
if !op.nilOK {
|
||||||
|
// If nil pointers are not allowed, we can just decode the element.
|
||||||
|
return "&" + result, code
|
||||||
|
}
|
||||||
|
|
||||||
|
// nil is allowed, so check the kind and size first.
|
||||||
|
// If size is zero and kind matches the nilKind of the type,
|
||||||
|
// the value decodes as a nil pointer.
|
||||||
|
var (
|
||||||
|
resultV = ctx.temp()
|
||||||
|
kindV = ctx.temp()
|
||||||
|
sizeV = ctx.temp()
|
||||||
|
wantKind string
|
||||||
|
)
|
||||||
|
if op.nilValue == rlpstruct.NilKindList {
|
||||||
|
wantKind = "rlp.List"
|
||||||
|
} else {
|
||||||
|
wantKind = "rlp.String"
|
||||||
|
}
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(types.NewPointer(op.elemTyp), ctx.qualify))
|
||||||
|
fmt.Fprintf(&b, "if %s, %s, err := dec.Kind(); err != nil {\n", kindV, sizeV)
|
||||||
|
fmt.Fprintf(&b, " return err\n")
|
||||||
|
fmt.Fprintf(&b, "} else if %s != 0 || %s != %s {\n", sizeV, kindV, wantKind)
|
||||||
|
fmt.Fprint(&b, code)
|
||||||
|
fmt.Fprintf(&b, " %s = &%s\n", resultV, result)
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
return resultV, b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// structOp handles struct types.
|
||||||
|
type structOp struct {
|
||||||
|
named *types.Named
|
||||||
|
typ *types.Struct
|
||||||
|
fields []*structField
|
||||||
|
optionalFields []*structField
|
||||||
|
}
|
||||||
|
|
||||||
|
type structField struct {
|
||||||
|
name string
|
||||||
|
typ types.Type
|
||||||
|
elem op
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) makeStructOp(named *types.Named, typ *types.Struct) (op, error) {
|
||||||
|
// Convert fields to []rlpstruct.Field.
|
||||||
|
var allStructFields []rlpstruct.Field
|
||||||
|
for i := 0; i < typ.NumFields(); i++ {
|
||||||
|
f := typ.Field(i)
|
||||||
|
allStructFields = append(allStructFields, rlpstruct.Field{
|
||||||
|
Name: f.Name(),
|
||||||
|
Exported: f.Exported(),
|
||||||
|
Index: i,
|
||||||
|
Tag: typ.Tag(i),
|
||||||
|
Type: *bctx.typeToStructType(f.Type()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter/validate fields.
|
||||||
|
fields, tags, err := rlpstruct.ProcessFields(allStructFields)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create field ops.
|
||||||
|
var op = structOp{named: named, typ: typ}
|
||||||
|
for i, field := range fields {
|
||||||
|
// Advanced struct tags are not supported yet.
|
||||||
|
tag := tags[i]
|
||||||
|
if err := checkUnsupportedTags(field.Name, tag); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
typ := typ.Field(field.Index).Type()
|
||||||
|
elem, err := bctx.makeOp(nil, typ, tags[i])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("field %s: %v", field.Name, err)
|
||||||
|
}
|
||||||
|
f := &structField{name: field.Name, typ: typ, elem: elem}
|
||||||
|
if tag.Optional {
|
||||||
|
op.optionalFields = append(op.optionalFields, f)
|
||||||
|
} else {
|
||||||
|
op.fields = append(op.fields, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return op, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkUnsupportedTags(field string, tag rlpstruct.Tags) error {
|
||||||
|
if tag.Tail {
|
||||||
|
return fmt.Errorf(`field %s has unsupported struct tag "tail"`, field)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op structOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
var b bytes.Buffer
|
||||||
|
var listMarker = ctx.temp()
|
||||||
|
fmt.Fprintf(&b, "%s := w.List()\n", listMarker)
|
||||||
|
for _, field := range op.fields {
|
||||||
|
selector := v + "." + field.name
|
||||||
|
fmt.Fprint(&b, field.elem.genWrite(ctx, selector))
|
||||||
|
}
|
||||||
|
op.writeOptionalFields(&b, ctx, v)
|
||||||
|
fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker)
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op structOp) writeOptionalFields(b *bytes.Buffer, ctx *genContext, v string) {
|
||||||
|
if len(op.optionalFields) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// First check zero-ness of all optional fields.
|
||||||
|
var zeroV = make([]string, len(op.optionalFields))
|
||||||
|
for i, field := range op.optionalFields {
|
||||||
|
selector := v + "." + field.name
|
||||||
|
zeroV[i] = ctx.temp()
|
||||||
|
fmt.Fprintf(b, "%s := %s\n", zeroV[i], nonZeroCheck(selector, field.typ, ctx.qualify))
|
||||||
|
}
|
||||||
|
// Now write the fields.
|
||||||
|
for i, field := range op.optionalFields {
|
||||||
|
selector := v + "." + field.name
|
||||||
|
cond := ""
|
||||||
|
for j := i; j < len(op.optionalFields); j++ {
|
||||||
|
if j > i {
|
||||||
|
cond += " || "
|
||||||
|
}
|
||||||
|
cond += zeroV[j]
|
||||||
|
}
|
||||||
|
fmt.Fprintf(b, "if %s {\n", cond)
|
||||||
|
fmt.Fprint(b, field.elem.genWrite(ctx, selector))
|
||||||
|
fmt.Fprintf(b, "}\n")
|
||||||
|
}
|
||||||
|
}
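// For illustration only: an optional field is written when it, or any later
// optional field, is non-zero, so the zero checks are combined into an OR
// chain that shrinks with each field. For the optional.in.txt test input
// below this becomes roughly (see optional.out.txt):
//
//	_tmp1 := obj.Uint64 != 0
//	_tmp2 := obj.Pointer != nil
//	_tmp3 := obj.String != ""
//	if _tmp1 || _tmp2 || _tmp3 {
//		w.WriteUint64(obj.Uint64)
//	}
//	if _tmp2 || _tmp3 {
//		if obj.Pointer == nil {
//			w.Write([]byte{0x80})
//		} else {
//			w.WriteUint64((*obj.Pointer))
//		}
//	}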
|
||||||
|
|
||||||
|
func (op structOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
// Get the string representation of the type.
|
||||||
|
// Here, named types are handled separately because the output
|
||||||
|
// would contain a copy of the struct definition otherwise.
|
||||||
|
var typeName string
|
||||||
|
if op.named != nil {
|
||||||
|
typeName = types.TypeString(op.named, ctx.qualify)
|
||||||
|
} else {
|
||||||
|
typeName = types.TypeString(op.typ, ctx.qualify)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create struct object.
|
||||||
|
var resultV = ctx.temp()
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "var %s %s\n", resultV, typeName)
|
||||||
|
|
||||||
|
// Decode fields.
|
||||||
|
fmt.Fprintf(&b, "{\n")
|
||||||
|
fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n")
|
||||||
|
for _, field := range op.fields {
|
||||||
|
result, code := field.elem.genDecode(ctx)
|
||||||
|
fmt.Fprintf(&b, "// %s:\n", field.name)
|
||||||
|
fmt.Fprint(&b, code)
|
||||||
|
fmt.Fprintf(&b, "%s.%s = %s\n", resultV, field.name, result)
|
||||||
|
}
|
||||||
|
op.decodeOptionalFields(&b, ctx, resultV)
|
||||||
|
fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n")
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
return resultV, b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op structOp) decodeOptionalFields(b *bytes.Buffer, ctx *genContext, resultV string) {
|
||||||
|
var suffix bytes.Buffer
|
||||||
|
for _, field := range op.optionalFields {
|
||||||
|
result, code := field.elem.genDecode(ctx)
|
||||||
|
fmt.Fprintf(b, "// %s:\n", field.name)
|
||||||
|
fmt.Fprintf(b, "if dec.MoreDataInList() {\n")
|
||||||
|
fmt.Fprint(b, code)
|
||||||
|
fmt.Fprintf(b, "%s.%s = %s\n", resultV, field.name, result)
|
||||||
|
fmt.Fprintf(&suffix, "}\n")
|
||||||
|
}
|
||||||
|
suffix.WriteTo(b)
|
||||||
|
}
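// For illustration only: each optional field is decoded inside the previous
// field's dec.MoreDataInList() check, and the matching closing braces are
// collected in 'suffix' so they are all emitted after the last field. The
// generated code therefore nests like this (see optional.out.txt below):
//
//	if dec.MoreDataInList() {
//		// decode first optional field
//		if dec.MoreDataInList() {
//			// decode second optional field
//		}
//	}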
|
||||||
|
|
||||||
|
// sliceOp handles slice types.
|
||||||
|
type sliceOp struct {
|
||||||
|
typ *types.Slice
|
||||||
|
elemOp op
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) makeSliceOp(typ *types.Slice) (op, error) {
|
||||||
|
elemOp, err := bctx.makeOp(nil, typ.Elem(), rlpstruct.Tags{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return sliceOp{typ: typ, elemOp: elemOp}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op sliceOp) genWrite(ctx *genContext, v string) string {
|
||||||
|
var (
|
||||||
|
listMarker = ctx.temp() // holds return value of w.List()
|
||||||
|
iterElemV = ctx.temp() // iteration variable
|
||||||
|
elemCode = op.elemOp.genWrite(ctx, iterElemV)
|
||||||
|
)
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "%s := w.List()\n", listMarker)
|
||||||
|
fmt.Fprintf(&b, "for _, %s := range %s {\n", iterElemV, v)
|
||||||
|
fmt.Fprint(&b, elemCode)
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker)
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (op sliceOp) genDecode(ctx *genContext) (string, string) {
|
||||||
|
var sliceV = ctx.temp() // holds the output slice
|
||||||
|
elemResult, elemCode := op.elemOp.genDecode(ctx)
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "var %s %s\n", sliceV, types.TypeString(op.typ, ctx.qualify))
|
||||||
|
fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n")
|
||||||
|
fmt.Fprintf(&b, "for dec.MoreDataInList() {\n")
|
||||||
|
fmt.Fprintf(&b, " %s", elemCode)
|
||||||
|
fmt.Fprintf(&b, " %s = append(%s, %s)\n", sliceV, sliceV, elemResult)
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n")
|
||||||
|
return sliceV, b.String()
|
||||||
|
}
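// For illustration only: for a field `Slice []uint64` the decoder emitted
// above appends elements until the list is exhausted (compare
// optional.out.txt below):
//
//	var _tmp4 []uint64
//	if _, err := dec.List(); err != nil {
//		return err
//	}
//	for dec.MoreDataInList() {
//		_tmp5, err := dec.Uint64()
//		if err != nil {
//			return err
//		}
//		_tmp4 = append(_tmp4, _tmp5)
//	}
//	if err := dec.ListEnd(); err != nil {
//		return err
//	}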
|
||||||
|
|
||||||
|
func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstruct.Tags) (op, error) {
|
||||||
|
switch typ := typ.(type) {
|
||||||
|
case *types.Named:
|
||||||
|
if isBigInt(typ) {
|
||||||
|
return bigIntOp{}, nil
|
||||||
|
}
|
||||||
|
if isUint256(typ) {
|
||||||
|
return uint256Op{}, nil
|
||||||
|
}
|
||||||
|
if typ == bctx.rawValueType {
|
||||||
|
return bctx.makeRawValueOp(), nil
|
||||||
|
}
|
||||||
|
if bctx.isDecoder(typ) {
|
||||||
|
return nil, fmt.Errorf("type %v implements rlp.Decoder with non-pointer receiver", typ)
|
||||||
|
}
|
||||||
|
// TODO: same check for encoder?
|
||||||
|
return bctx.makeOp(typ, typ.Underlying(), tags)
|
||||||
|
case *types.Pointer:
|
||||||
|
if isBigInt(typ.Elem()) {
|
||||||
|
return bigIntOp{pointer: true}, nil
|
||||||
|
}
|
||||||
|
if isUint256(typ.Elem()) {
|
||||||
|
return uint256Op{pointer: true}, nil
|
||||||
|
}
|
||||||
|
// Encoder/Decoder interfaces.
|
||||||
|
if bctx.isEncoder(typ) {
|
||||||
|
if bctx.isDecoder(typ) {
|
||||||
|
return encoderDecoderOp{typ}, nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("type %v implements rlp.Encoder but not rlp.Decoder", typ)
|
||||||
|
}
|
||||||
|
if bctx.isDecoder(typ) {
|
||||||
|
return nil, fmt.Errorf("type %v implements rlp.Decoder but not rlp.Encoder", typ)
|
||||||
|
}
|
||||||
|
// Default pointer handling.
|
||||||
|
return bctx.makePtrOp(typ.Elem(), tags)
|
||||||
|
case *types.Basic:
|
||||||
|
return bctx.makeBasicOp(typ)
|
||||||
|
case *types.Struct:
|
||||||
|
return bctx.makeStructOp(name, typ)
|
||||||
|
case *types.Slice:
|
||||||
|
etyp := typ.Elem()
|
||||||
|
if isByte(etyp) && !bctx.isEncoder(etyp) {
|
||||||
|
return bctx.makeByteSliceOp(typ), nil
|
||||||
|
}
|
||||||
|
return bctx.makeSliceOp(typ)
|
||||||
|
case *types.Array:
|
||||||
|
etyp := typ.Elem()
|
||||||
|
if isByte(etyp) && !bctx.isEncoder(etyp) {
|
||||||
|
return bctx.makeByteArrayOp(name, typ), nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unhandled array type: %v", typ)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unhandled type: %v", typ)
|
||||||
|
}
|
||||||
|
}
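// For illustration only: a hypothetical struct and the op each of its fields
// is dispatched to by makeOp above (field and type names invented for this
// sketch):
//
//	type Example struct {
//		Balance *big.Int     // pointer to big.Int       -> bigIntOp{pointer: true}
//		Price   *uint256.Int // pointer to uint256.Int   -> uint256Op{pointer: true}
//		Extra   []byte       // slice with byte elements -> makeByteSliceOp
//		Nonce   uint64       // basic type               -> makeBasicOp
//		Uncles  []Example    // other slice              -> makeSliceOp (elements recursive)
//	}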
|
||||||
|
|
||||||
|
// generateDecoder generates the DecodeRLP method on 'typ'.
|
||||||
|
func generateDecoder(ctx *genContext, typ string, op op) []byte {
|
||||||
|
ctx.resetTemp()
|
||||||
|
ctx.addImport(pathOfPackageRLP)
|
||||||
|
|
||||||
|
result, code := op.genDecode(ctx)
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "func (obj *%s) DecodeRLP(dec *rlp.Stream) error {\n", typ)
|
||||||
|
fmt.Fprint(&b, code)
|
||||||
|
fmt.Fprintf(&b, " *obj = %s\n", result)
|
||||||
|
fmt.Fprintf(&b, " return nil\n")
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
return b.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateEncoder generates the EncodeRLP method on 'typ'.
|
||||||
|
func generateEncoder(ctx *genContext, typ string, op op) []byte {
|
||||||
|
ctx.resetTemp()
|
||||||
|
ctx.addImport("io")
|
||||||
|
ctx.addImport(pathOfPackageRLP)
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ)
|
||||||
|
fmt.Fprintf(&b, " w := rlp.NewEncoderBuffer(_w)\n")
|
||||||
|
fmt.Fprint(&b, op.genWrite(ctx, "obj"))
|
||||||
|
fmt.Fprintf(&b, " return w.Flush()\n")
|
||||||
|
fmt.Fprintf(&b, "}\n")
|
||||||
|
return b.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bctx *buildContext) generate(typ *types.Named, encoder, decoder bool) ([]byte, error) {
|
||||||
|
bctx.topType = typ
|
||||||
|
|
||||||
|
pkg := typ.Obj().Pkg()
|
||||||
|
op, err := bctx.makeOp(nil, typ, rlpstruct.Tags{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ctx = newGenContext(pkg)
|
||||||
|
encSource []byte
|
||||||
|
decSource []byte
|
||||||
|
)
|
||||||
|
if encoder {
|
||||||
|
encSource = generateEncoder(ctx, typ.Obj().Name(), op)
|
||||||
|
}
|
||||||
|
if decoder {
|
||||||
|
decSource = generateDecoder(ctx, typ.Obj().Name(), op)
|
||||||
|
}
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
fmt.Fprintf(&b, "package %s\n\n", pkg.Name())
|
||||||
|
for _, imp := range ctx.importsList() {
|
||||||
|
fmt.Fprintf(&b, "import %q\n", imp)
|
||||||
|
}
|
||||||
|
if encoder {
|
||||||
|
fmt.Fprintln(&b)
|
||||||
|
b.Write(encSource)
|
||||||
|
}
|
||||||
|
if decoder {
|
||||||
|
fmt.Fprintln(&b)
|
||||||
|
b.Write(decSource)
|
||||||
|
}
|
||||||
|
|
||||||
|
source := b.Bytes()
|
||||||
|
// fmt.Println(string(source))
|
||||||
|
return format.Source(source)
|
||||||
|
}
|
107
restricted/rlp/rlpgen/gen_test.go
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"go/ast"
|
||||||
|
"go/importer"
|
||||||
|
"go/parser"
|
||||||
|
"go/token"
|
||||||
|
"go/types"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Package RLP is loaded only once and reused for all tests.
|
||||||
|
var (
|
||||||
|
testFset = token.NewFileSet()
|
||||||
|
testImporter = importer.ForCompiler(testFset, "source", nil).(types.ImporterFrom)
|
||||||
|
testPackageRLP *types.Package
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
cwd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
testPackageRLP, err = testImporter.ImportFrom(pathOfPackageRLP, cwd, 0)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("can't load package RLP: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"}
|
||||||
|
|
||||||
|
func TestOutput(t *testing.T) {
|
||||||
|
for _, test := range tests {
|
||||||
|
test := test
|
||||||
|
t.Run(test, func(t *testing.T) {
|
||||||
|
inputFile := filepath.Join("testdata", test+".in.txt")
|
||||||
|
outputFile := filepath.Join("testdata", test+".out.txt")
|
||||||
|
bctx, typ, err := loadTestSource(inputFile, "Test")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("error loading test source:", err)
|
||||||
|
}
|
||||||
|
output, err := bctx.generate(typ, true, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("error in generate:", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set this environment variable to regenerate the test outputs.
|
||||||
|
if os.Getenv("WRITE_TEST_FILES") != "" {
|
||||||
|
os.WriteFile(outputFile, output, 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if output matches.
|
||||||
|
wantOutput, err := os.ReadFile(outputFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("error loading expected test output:", err)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(output, wantOutput) {
|
||||||
|
t.Fatalf("output mismatch, want: %v got %v", string(wantOutput), string(output))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
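// Note: as the WRITE_TEST_FILES check above implies, running the tests with
// that environment variable set to a non-empty value (for example
// WRITE_TEST_FILES=1 go test) regenerates the .out.txt files in testdata
// before the comparison runs.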
|
||||||
|
|
||||||
|
func loadTestSource(file string, typeName string) (*buildContext, *types.Named, error) {
|
||||||
|
// Load the test input.
|
||||||
|
content, err := os.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
f, err := parser.ParseFile(testFset, file, content, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
conf := types.Config{Importer: testImporter}
|
||||||
|
pkg, err := conf.Check("test", testFset, []*ast.File{f}, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the test struct.
|
||||||
|
bctx := newBuildContext(testPackageRLP)
|
||||||
|
typ, err := lookupStructType(pkg.Scope(), typeName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("can't find type %s: %v", typeName, err)
|
||||||
|
}
|
||||||
|
return bctx, typ, nil
|
||||||
|
}
|
144
restricted/rlp/rlpgen/main.go
Normal file
@ -0,0 +1,144 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"go/types"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/tools/go/packages"
|
||||||
|
)
|
||||||
|
|
||||||
|
const pathOfPackageRLP = "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var (
|
||||||
|
pkgdir = flag.String("dir", ".", "input package")
|
||||||
|
output = flag.String("out", "-", "output file (default is stdout)")
|
||||||
|
genEncoder = flag.Bool("encoder", true, "generate EncodeRLP?")
|
||||||
|
genDecoder = flag.Bool("decoder", false, "generate DecodeRLP?")
|
||||||
|
typename = flag.String("type", "", "type to generate methods for")
|
||||||
|
)
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
cfg := Config{
|
||||||
|
Dir: *pkgdir,
|
||||||
|
Type: *typename,
|
||||||
|
GenerateEncoder: *genEncoder,
|
||||||
|
GenerateDecoder: *genDecoder,
|
||||||
|
}
|
||||||
|
code, err := cfg.process()
|
||||||
|
if err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
if *output == "-" {
|
||||||
|
os.Stdout.Write(code)
|
||||||
|
} else if err := os.WriteFile(*output, code, 0600); err != nil {
|
||||||
|
fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatal(args ...interface{}) {
|
||||||
|
fmt.Fprintln(os.Stderr, args...)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
Dir string // input package directory
|
||||||
|
Type string
|
||||||
|
|
||||||
|
GenerateEncoder bool
|
||||||
|
GenerateDecoder bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// process generates the Go code.
|
||||||
|
func (cfg *Config) process() (code []byte, err error) {
|
||||||
|
// Load packages.
|
||||||
|
pcfg := &packages.Config{
|
||||||
|
Mode: packages.NeedName | packages.NeedTypes,
|
||||||
|
Dir: cfg.Dir,
|
||||||
|
}
|
||||||
|
ps, err := packages.Load(pcfg, pathOfPackageRLP, ".")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(ps) == 0 {
|
||||||
|
return nil, fmt.Errorf("no Go package found in %s", cfg.Dir)
|
||||||
|
}
|
||||||
|
packages.PrintErrors(ps)
|
||||||
|
|
||||||
|
// Find the packages that were loaded.
|
||||||
|
var (
|
||||||
|
pkg *types.Package
|
||||||
|
packageRLP *types.Package
|
||||||
|
)
|
||||||
|
for _, p := range ps {
|
||||||
|
if len(p.Errors) > 0 {
|
||||||
|
return nil, fmt.Errorf("package %s has errors", p.PkgPath)
|
||||||
|
}
|
||||||
|
if p.PkgPath == pathOfPackageRLP {
|
||||||
|
packageRLP = p.Types
|
||||||
|
} else {
|
||||||
|
pkg = p.Types
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bctx := newBuildContext(packageRLP)
|
||||||
|
|
||||||
|
// Find the type and generate.
|
||||||
|
typ, err := lookupStructType(pkg.Scope(), cfg.Type)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can't find %s in %s: %v", cfg.Type, pkg, err)
|
||||||
|
}
|
||||||
|
code, err = bctx.generate(typ, cfg.GenerateEncoder, cfg.GenerateDecoder)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add build comments.
|
||||||
|
// This is done here to avoid processing these lines with gofmt.
|
||||||
|
var header bytes.Buffer
|
||||||
|
fmt.Fprint(&header, "// Code generated by rlpgen. DO NOT EDIT.\n\n")
|
||||||
|
return append(header.Bytes(), code...), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupStructType(scope *types.Scope, name string) (*types.Named, error) {
|
||||||
|
typ, err := lookupType(scope, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_, ok := typ.Underlying().(*types.Struct)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("not a struct type")
|
||||||
|
}
|
||||||
|
return typ, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func lookupType(scope *types.Scope, name string) (*types.Named, error) {
|
||||||
|
obj := scope.Lookup(name)
|
||||||
|
if obj == nil {
|
||||||
|
return nil, errors.New("no such identifier")
|
||||||
|
}
|
||||||
|
typ, ok := obj.(*types.TypeName)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("not a type")
|
||||||
|
}
|
||||||
|
return typ.Type().(*types.Named), nil
|
||||||
|
}
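// For illustration only: a hypothetical consumer package showing how the
// flags parsed in main() above (-type, -decoder, -out; -encoder defaults to
// true) might be wired up through go:generate. The package name, type, output
// file, and the generator's import path (inferred from the file layout above)
// are assumptions of this sketch, not part of the original change.
package blocks

//go:generate go run github.com/openrelayxyz/plugeth-utils/restricted/rlp/rlpgen -type Header -decoder -out gen_header_rlp.go

// Header is an invented example type for the generator to process.
type Header struct {
	Number uint64
	Extra  []byte
}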
|
10
restricted/rlp/rlpgen/testdata/bigint.in.txt
vendored
Normal file
@ -0,0 +1,10 @@
// -*- mode: go -*-

package test

import "math/big"

type Test struct {
	Int      *big.Int
	IntNoPtr big.Int
}
49
restricted/rlp/rlpgen/testdata/bigint.out.txt
vendored
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
func (obj *Test) EncodeRLP(_w io.Writer) error {
|
||||||
|
w := rlp.NewEncoderBuffer(_w)
|
||||||
|
_tmp0 := w.List()
|
||||||
|
if obj.Int == nil {
|
||||||
|
w.Write(rlp.EmptyString)
|
||||||
|
} else {
|
||||||
|
if obj.Int.Sign() == -1 {
|
||||||
|
return rlp.ErrNegativeBigInt
|
||||||
|
}
|
||||||
|
w.WriteBigInt(obj.Int)
|
||||||
|
}
|
||||||
|
if obj.IntNoPtr.Sign() == -1 {
|
||||||
|
return rlp.ErrNegativeBigInt
|
||||||
|
}
|
||||||
|
w.WriteBigInt(&obj.IntNoPtr)
|
||||||
|
w.ListEnd(_tmp0)
|
||||||
|
return w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
|
||||||
|
var _tmp0 Test
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Int:
|
||||||
|
_tmp1, err := dec.BigInt()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.Int = _tmp1
|
||||||
|
// IntNoPtr:
|
||||||
|
_tmp2, err := dec.BigInt()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.IntNoPtr = (*_tmp2)
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*obj = _tmp0
|
||||||
|
return nil
|
||||||
|
}
|
30
restricted/rlp/rlpgen/testdata/nil.in.txt
vendored
Normal file
@ -0,0 +1,30 @@
// -*- mode: go -*-

package test

type Aux struct{
	A uint32
}

type Test struct{
	Uint8     *byte `rlp:"nil"`
	Uint8List *byte `rlp:"nilList"`

	Uint32     *uint32 `rlp:"nil"`
	Uint32List *uint32 `rlp:"nilList"`

	Uint64     *uint64 `rlp:"nil"`
	Uint64List *uint64 `rlp:"nilList"`

	String     *string `rlp:"nil"`
	StringList *string `rlp:"nilList"`

	ByteArray     *[3]byte `rlp:"nil"`
	ByteArrayList *[3]byte `rlp:"nilList"`

	ByteSlice     *[]byte `rlp:"nil"`
	ByteSliceList *[]byte `rlp:"nilList"`

	Struct       *Aux `rlp:"nil"`
	StructString *Aux `rlp:"nilString"`
}
289
restricted/rlp/rlpgen/testdata/nil.out.txt
vendored
Normal file
@ -0,0 +1,289 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
func (obj *Test) EncodeRLP(_w io.Writer) error {
|
||||||
|
w := rlp.NewEncoderBuffer(_w)
|
||||||
|
_tmp0 := w.List()
|
||||||
|
if obj.Uint8 == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64(uint64((*obj.Uint8)))
|
||||||
|
}
|
||||||
|
if obj.Uint8List == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64(uint64((*obj.Uint8List)))
|
||||||
|
}
|
||||||
|
if obj.Uint32 == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64(uint64((*obj.Uint32)))
|
||||||
|
}
|
||||||
|
if obj.Uint32List == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64(uint64((*obj.Uint32List)))
|
||||||
|
}
|
||||||
|
if obj.Uint64 == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64((*obj.Uint64))
|
||||||
|
}
|
||||||
|
if obj.Uint64List == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64((*obj.Uint64List))
|
||||||
|
}
|
||||||
|
if obj.String == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteString((*obj.String))
|
||||||
|
}
|
||||||
|
if obj.StringList == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
w.WriteString((*obj.StringList))
|
||||||
|
}
|
||||||
|
if obj.ByteArray == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteBytes(obj.ByteArray[:])
|
||||||
|
}
|
||||||
|
if obj.ByteArrayList == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
w.WriteBytes(obj.ByteArrayList[:])
|
||||||
|
}
|
||||||
|
if obj.ByteSlice == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteBytes((*obj.ByteSlice))
|
||||||
|
}
|
||||||
|
if obj.ByteSliceList == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
w.WriteBytes((*obj.ByteSliceList))
|
||||||
|
}
|
||||||
|
if obj.Struct == nil {
|
||||||
|
w.Write([]byte{0xC0})
|
||||||
|
} else {
|
||||||
|
_tmp1 := w.List()
|
||||||
|
w.WriteUint64(uint64(obj.Struct.A))
|
||||||
|
w.ListEnd(_tmp1)
|
||||||
|
}
|
||||||
|
if obj.StructString == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
_tmp2 := w.List()
|
||||||
|
w.WriteUint64(uint64(obj.StructString.A))
|
||||||
|
w.ListEnd(_tmp2)
|
||||||
|
}
|
||||||
|
w.ListEnd(_tmp0)
|
||||||
|
return w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
|
||||||
|
var _tmp0 Test
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Uint8:
|
||||||
|
var _tmp2 *byte
|
||||||
|
if _tmp3, _tmp4, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp4 != 0 || _tmp3 != rlp.String {
|
||||||
|
_tmp1, err := dec.Uint8()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp2 = &_tmp1
|
||||||
|
}
|
||||||
|
_tmp0.Uint8 = _tmp2
|
||||||
|
// Uint8List:
|
||||||
|
var _tmp6 *byte
|
||||||
|
if _tmp7, _tmp8, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp8 != 0 || _tmp7 != rlp.List {
|
||||||
|
_tmp5, err := dec.Uint8()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp6 = &_tmp5
|
||||||
|
}
|
||||||
|
_tmp0.Uint8List = _tmp6
|
||||||
|
// Uint32:
|
||||||
|
var _tmp10 *uint32
|
||||||
|
if _tmp11, _tmp12, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp12 != 0 || _tmp11 != rlp.String {
|
||||||
|
_tmp9, err := dec.Uint32()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp10 = &_tmp9
|
||||||
|
}
|
||||||
|
_tmp0.Uint32 = _tmp10
|
||||||
|
// Uint32List:
|
||||||
|
var _tmp14 *uint32
|
||||||
|
if _tmp15, _tmp16, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp16 != 0 || _tmp15 != rlp.List {
|
||||||
|
_tmp13, err := dec.Uint32()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp14 = &_tmp13
|
||||||
|
}
|
||||||
|
_tmp0.Uint32List = _tmp14
|
||||||
|
// Uint64:
|
||||||
|
var _tmp18 *uint64
|
||||||
|
if _tmp19, _tmp20, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp20 != 0 || _tmp19 != rlp.String {
|
||||||
|
_tmp17, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp18 = &_tmp17
|
||||||
|
}
|
||||||
|
_tmp0.Uint64 = _tmp18
|
||||||
|
// Uint64List:
|
||||||
|
var _tmp22 *uint64
|
||||||
|
if _tmp23, _tmp24, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp24 != 0 || _tmp23 != rlp.List {
|
||||||
|
_tmp21, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp22 = &_tmp21
|
||||||
|
}
|
||||||
|
_tmp0.Uint64List = _tmp22
|
||||||
|
// String:
|
||||||
|
var _tmp26 *string
|
||||||
|
if _tmp27, _tmp28, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp28 != 0 || _tmp27 != rlp.String {
|
||||||
|
_tmp25, err := dec.String()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp26 = &_tmp25
|
||||||
|
}
|
||||||
|
_tmp0.String = _tmp26
|
||||||
|
// StringList:
|
||||||
|
var _tmp30 *string
|
||||||
|
if _tmp31, _tmp32, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp32 != 0 || _tmp31 != rlp.List {
|
||||||
|
_tmp29, err := dec.String()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp30 = &_tmp29
|
||||||
|
}
|
||||||
|
_tmp0.StringList = _tmp30
|
||||||
|
// ByteArray:
|
||||||
|
var _tmp34 *[3]byte
|
||||||
|
if _tmp35, _tmp36, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp36 != 0 || _tmp35 != rlp.String {
|
||||||
|
var _tmp33 [3]byte
|
||||||
|
if err := dec.ReadBytes(_tmp33[:]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp34 = &_tmp33
|
||||||
|
}
|
||||||
|
_tmp0.ByteArray = _tmp34
|
||||||
|
// ByteArrayList:
|
||||||
|
var _tmp38 *[3]byte
|
||||||
|
if _tmp39, _tmp40, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp40 != 0 || _tmp39 != rlp.List {
|
||||||
|
var _tmp37 [3]byte
|
||||||
|
if err := dec.ReadBytes(_tmp37[:]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp38 = &_tmp37
|
||||||
|
}
|
||||||
|
_tmp0.ByteArrayList = _tmp38
|
||||||
|
// ByteSlice:
|
||||||
|
var _tmp42 *[]byte
|
||||||
|
if _tmp43, _tmp44, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp44 != 0 || _tmp43 != rlp.String {
|
||||||
|
_tmp41, err := dec.Bytes()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp42 = &_tmp41
|
||||||
|
}
|
||||||
|
_tmp0.ByteSlice = _tmp42
|
||||||
|
// ByteSliceList:
|
||||||
|
var _tmp46 *[]byte
|
||||||
|
if _tmp47, _tmp48, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp48 != 0 || _tmp47 != rlp.List {
|
||||||
|
_tmp45, err := dec.Bytes()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp46 = &_tmp45
|
||||||
|
}
|
||||||
|
_tmp0.ByteSliceList = _tmp46
|
||||||
|
// Struct:
|
||||||
|
var _tmp51 *Aux
|
||||||
|
if _tmp52, _tmp53, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp53 != 0 || _tmp52 != rlp.List {
|
||||||
|
var _tmp49 Aux
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// A:
|
||||||
|
_tmp50, err := dec.Uint32()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp49.A = _tmp50
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_tmp51 = &_tmp49
|
||||||
|
}
|
||||||
|
_tmp0.Struct = _tmp51
|
||||||
|
// StructString:
|
||||||
|
var _tmp56 *Aux
|
||||||
|
if _tmp57, _tmp58, err := dec.Kind(); err != nil {
|
||||||
|
return err
|
||||||
|
} else if _tmp58 != 0 || _tmp57 != rlp.String {
|
||||||
|
var _tmp54 Aux
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// A:
|
||||||
|
_tmp55, err := dec.Uint32()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp54.A = _tmp55
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_tmp56 = &_tmp54
|
||||||
|
}
|
||||||
|
_tmp0.StructString = _tmp56
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*obj = _tmp0
|
||||||
|
return nil
|
||||||
|
}
|
17
restricted/rlp/rlpgen/testdata/optional.in.txt
vendored
Normal file
@ -0,0 +1,17 @@
// -*- mode: go -*-

package test

type Aux struct {
	A uint64
}

type Test struct {
	Uint64      uint64   `rlp:"optional"`
	Pointer     *uint64  `rlp:"optional"`
	String      string   `rlp:"optional"`
	Slice       []uint64 `rlp:"optional"`
	Array       [3]byte  `rlp:"optional"`
	NamedStruct Aux      `rlp:"optional"`
	AnonStruct  struct{ A string } `rlp:"optional"`
}
153
restricted/rlp/rlpgen/testdata/optional.out.txt
vendored
Normal file
@ -0,0 +1,153 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
func (obj *Test) EncodeRLP(_w io.Writer) error {
|
||||||
|
w := rlp.NewEncoderBuffer(_w)
|
||||||
|
_tmp0 := w.List()
|
||||||
|
_tmp1 := obj.Uint64 != 0
|
||||||
|
_tmp2 := obj.Pointer != nil
|
||||||
|
_tmp3 := obj.String != ""
|
||||||
|
_tmp4 := len(obj.Slice) > 0
|
||||||
|
_tmp5 := obj.Array != ([3]byte{})
|
||||||
|
_tmp6 := obj.NamedStruct != (Aux{})
|
||||||
|
_tmp7 := obj.AnonStruct != (struct{ A string }{})
|
||||||
|
if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
|
||||||
|
w.WriteUint64(obj.Uint64)
|
||||||
|
}
|
||||||
|
if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
|
||||||
|
if obj.Pointer == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.WriteUint64((*obj.Pointer))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
|
||||||
|
w.WriteString(obj.String)
|
||||||
|
}
|
||||||
|
if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
|
||||||
|
_tmp8 := w.List()
|
||||||
|
for _, _tmp9 := range obj.Slice {
|
||||||
|
w.WriteUint64(_tmp9)
|
||||||
|
}
|
||||||
|
w.ListEnd(_tmp8)
|
||||||
|
}
|
||||||
|
if _tmp5 || _tmp6 || _tmp7 {
|
||||||
|
w.WriteBytes(obj.Array[:])
|
||||||
|
}
|
||||||
|
if _tmp6 || _tmp7 {
|
||||||
|
_tmp10 := w.List()
|
||||||
|
w.WriteUint64(obj.NamedStruct.A)
|
||||||
|
w.ListEnd(_tmp10)
|
||||||
|
}
|
||||||
|
if _tmp7 {
|
||||||
|
_tmp11 := w.List()
|
||||||
|
w.WriteString(obj.AnonStruct.A)
|
||||||
|
w.ListEnd(_tmp11)
|
||||||
|
}
|
||||||
|
w.ListEnd(_tmp0)
|
||||||
|
return w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
|
||||||
|
var _tmp0 Test
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Uint64:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
_tmp1, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.Uint64 = _tmp1
|
||||||
|
// Pointer:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
_tmp2, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.Pointer = &_tmp2
|
||||||
|
// String:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
_tmp3, err := dec.String()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.String = _tmp3
|
||||||
|
// Slice:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
var _tmp4 []uint64
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for dec.MoreDataInList() {
|
||||||
|
_tmp5, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp4 = append(_tmp4, _tmp5)
|
||||||
|
}
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.Slice = _tmp4
|
||||||
|
// Array:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
var _tmp6 [3]byte
|
||||||
|
if err := dec.ReadBytes(_tmp6[:]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.Array = _tmp6
|
||||||
|
// NamedStruct:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
var _tmp7 Aux
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// A:
|
||||||
|
_tmp8, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp7.A = _tmp8
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_tmp0.NamedStruct = _tmp7
|
||||||
|
// AnonStruct:
|
||||||
|
if dec.MoreDataInList() {
|
||||||
|
var _tmp9 struct{ A string }
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// A:
|
||||||
|
_tmp10, err := dec.String()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp9.A = _tmp10
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_tmp0.AnonStruct = _tmp9
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*obj = _tmp0
|
||||||
|
return nil
|
||||||
|
}
|
11
restricted/rlp/rlpgen/testdata/rawvalue.in.txt
vendored
Normal file
@ -0,0 +1,11 @@
// -*- mode: go -*-

package test

import "github.com/ethereum/go-ethereum/rlp"

type Test struct {
	RawValue          rlp.RawValue
	PointerToRawValue *rlp.RawValue
	SliceOfRawValue   []rlp.RawValue
}
64
restricted/rlp/rlpgen/testdata/rawvalue.out.txt
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
func (obj *Test) EncodeRLP(_w io.Writer) error {
|
||||||
|
w := rlp.NewEncoderBuffer(_w)
|
||||||
|
_tmp0 := w.List()
|
||||||
|
w.Write(obj.RawValue)
|
||||||
|
if obj.PointerToRawValue == nil {
|
||||||
|
w.Write([]byte{0x80})
|
||||||
|
} else {
|
||||||
|
w.Write((*obj.PointerToRawValue))
|
||||||
|
}
|
||||||
|
_tmp1 := w.List()
|
||||||
|
for _, _tmp2 := range obj.SliceOfRawValue {
|
||||||
|
w.Write(_tmp2)
|
||||||
|
}
|
||||||
|
w.ListEnd(_tmp1)
|
||||||
|
w.ListEnd(_tmp0)
|
||||||
|
return w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
|
||||||
|
var _tmp0 Test
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// RawValue:
|
||||||
|
_tmp1, err := dec.Raw()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.RawValue = _tmp1
|
||||||
|
// PointerToRawValue:
|
||||||
|
_tmp2, err := dec.Raw()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.PointerToRawValue = &_tmp2
|
||||||
|
// SliceOfRawValue:
|
||||||
|
var _tmp3 []rlp.RawValue
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for dec.MoreDataInList() {
|
||||||
|
_tmp4, err := dec.Raw()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp3 = append(_tmp3, _tmp4)
|
||||||
|
}
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.SliceOfRawValue = _tmp3
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*obj = _tmp0
|
||||||
|
return nil
|
||||||
|
}
|
10
restricted/rlp/rlpgen/testdata/uint256.in.txt
vendored
Normal file
@ -0,0 +1,10 @@
// -*- mode: go -*-

package test

import "github.com/holiman/uint256"

type Test struct {
	Int      *uint256.Int
	IntNoPtr uint256.Int
}
44
restricted/rlp/rlpgen/testdata/uint256.out.txt
vendored
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
import "github.com/holiman/uint256"
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
func (obj *Test) EncodeRLP(_w io.Writer) error {
|
||||||
|
w := rlp.NewEncoderBuffer(_w)
|
||||||
|
_tmp0 := w.List()
|
||||||
|
if obj.Int == nil {
|
||||||
|
w.Write(rlp.EmptyString)
|
||||||
|
} else {
|
||||||
|
w.WriteUint256(obj.Int)
|
||||||
|
}
|
||||||
|
w.WriteUint256(&obj.IntNoPtr)
|
||||||
|
w.ListEnd(_tmp0)
|
||||||
|
return w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
|
||||||
|
var _tmp0 Test
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Int:
|
||||||
|
var _tmp1 uint256.Int
|
||||||
|
if err := dec.ReadUint256(&_tmp1); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.Int = &_tmp1
|
||||||
|
// IntNoPtr:
|
||||||
|
var _tmp2 uint256.Int
|
||||||
|
if err := dec.ReadUint256(&_tmp2); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.IntNoPtr = _tmp2
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*obj = _tmp0
|
||||||
|
return nil
|
||||||
|
}
|
10
restricted/rlp/rlpgen/testdata/uints.in.txt
vendored
Normal file
@ -0,0 +1,10 @@
// -*- mode: go -*-

package test

type Test struct{
	A uint8
	B uint16
	C uint32
	D uint64
}
53
restricted/rlp/rlpgen/testdata/uints.out.txt
vendored
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rlp"
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
func (obj *Test) EncodeRLP(_w io.Writer) error {
|
||||||
|
w := rlp.NewEncoderBuffer(_w)
|
||||||
|
_tmp0 := w.List()
|
||||||
|
w.WriteUint64(uint64(obj.A))
|
||||||
|
w.WriteUint64(uint64(obj.B))
|
||||||
|
w.WriteUint64(uint64(obj.C))
|
||||||
|
w.WriteUint64(obj.D)
|
||||||
|
w.ListEnd(_tmp0)
|
||||||
|
return w.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
|
||||||
|
var _tmp0 Test
|
||||||
|
{
|
||||||
|
if _, err := dec.List(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// A:
|
||||||
|
_tmp1, err := dec.Uint8()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.A = _tmp1
|
||||||
|
// B:
|
||||||
|
_tmp2, err := dec.Uint16()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.B = _tmp2
|
||||||
|
// C:
|
||||||
|
_tmp3, err := dec.Uint32()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.C = _tmp3
|
||||||
|
// D:
|
||||||
|
_tmp4, err := dec.Uint64()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_tmp0.D = _tmp4
|
||||||
|
if err := dec.ListEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*obj = _tmp0
|
||||||
|
return nil
|
||||||
|
}
|
124
restricted/rlp/rlpgen/types.go
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"go/types"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// typeReflectKind gives the reflect.Kind that represents typ.
|
||||||
|
func typeReflectKind(typ types.Type) reflect.Kind {
|
||||||
|
switch typ := typ.(type) {
|
||||||
|
case *types.Basic:
|
||||||
|
k := typ.Kind()
|
||||||
|
if k >= types.Bool && k <= types.Complex128 {
|
||||||
|
// value order matches for Bool..Complex128
|
||||||
|
return reflect.Bool + reflect.Kind(k-types.Bool)
|
||||||
|
}
|
||||||
|
if k == types.String {
|
||||||
|
return reflect.String
|
||||||
|
}
|
||||||
|
if k == types.UnsafePointer {
|
||||||
|
return reflect.UnsafePointer
|
||||||
|
}
|
||||||
|
panic(fmt.Errorf("unhandled BasicKind %v", k))
|
||||||
|
case *types.Array:
|
||||||
|
return reflect.Array
|
||||||
|
case *types.Chan:
|
||||||
|
return reflect.Chan
|
||||||
|
case *types.Interface:
|
||||||
|
return reflect.Interface
|
||||||
|
case *types.Map:
|
||||||
|
return reflect.Map
|
||||||
|
case *types.Pointer:
|
||||||
|
return reflect.Ptr
|
||||||
|
case *types.Signature:
|
||||||
|
return reflect.Func
|
||||||
|
case *types.Slice:
|
||||||
|
return reflect.Slice
|
||||||
|
case *types.Struct:
|
||||||
|
return reflect.Struct
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("unhandled type %T", typ))
|
||||||
|
}
|
||||||
|
}
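// Note: the arithmetic in the *types.Basic case relies on go/types and
// reflect declaring their kind constants in the same order from Bool through
// Complex128, so for example types.Uint64 maps to reflect.Uint64 and
// types.Float32 to reflect.Float32.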
|
||||||
|
|
||||||
|
// nonZeroCheck returns the expression that checks whether 'v' is a non-zero value of type 'vtyp'.
|
||||||
|
func nonZeroCheck(v string, vtyp types.Type, qualify types.Qualifier) string {
|
||||||
|
// Resolve type name.
|
||||||
|
typ := resolveUnderlying(vtyp)
|
||||||
|
switch typ := typ.(type) {
|
||||||
|
case *types.Basic:
|
||||||
|
k := typ.Kind()
|
||||||
|
switch {
|
||||||
|
case k == types.Bool:
|
||||||
|
return v
|
||||||
|
case k >= types.Uint && k <= types.Complex128:
|
||||||
|
return fmt.Sprintf("%s != 0", v)
|
||||||
|
case k == types.String:
|
||||||
|
return fmt.Sprintf(`%s != ""`, v)
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("unhandled BasicKind %v", k))
|
||||||
|
}
|
||||||
|
case *types.Array, *types.Struct:
|
||||||
|
return fmt.Sprintf("%s != (%s{})", v, types.TypeString(vtyp, qualify))
|
||||||
|
case *types.Interface, *types.Pointer, *types.Signature:
|
||||||
|
return fmt.Sprintf("%s != nil", v)
|
||||||
|
case *types.Slice, *types.Map:
|
||||||
|
return fmt.Sprintf("len(%s) > 0", v)
|
||||||
|
default:
|
||||||
|
panic(fmt.Errorf("unhandled type %T", typ))
|
||||||
|
}
|
||||||
|
}
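// For illustration only: the expressions nonZeroCheck produces for the fields
// of the optional.in.txt test input above are
//
//	obj.Uint64 != 0            // basic unsigned integer
//	obj.Pointer != nil         // pointer
//	obj.String != ""           // string
//	len(obj.Slice) > 0         // slice
//	obj.Array != ([3]byte{})   // array
//	obj.NamedStruct != (Aux{}) // struct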
|
||||||
|
|
||||||
|
// isBigInt checks whether 'typ' is "math/big".Int.
|
||||||
|
func isBigInt(typ types.Type) bool {
|
||||||
|
named, ok := typ.(*types.Named)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
name := named.Obj()
|
||||||
|
return name.Pkg().Path() == "math/big" && name.Name() == "Int"
|
||||||
|
}
|
||||||
|
|
||||||
|
// isUint256 checks whether 'typ' is "github.com/holiman/uint256".Int.
|
||||||
|
func isUint256(typ types.Type) bool {
|
||||||
|
named, ok := typ.(*types.Named)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
name := named.Obj()
|
||||||
|
return name.Pkg().Path() == "github.com/holiman/uint256" && name.Name() == "Int"
|
||||||
|
}
|
||||||
|
|
||||||
|
// isByte checks whether the underlying type of 'typ' is uint8.
|
||||||
|
func isByte(typ types.Type) bool {
|
||||||
|
basic, ok := resolveUnderlying(typ).(*types.Basic)
|
||||||
|
return ok && basic.Kind() == types.Uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveUnderlying(typ types.Type) types.Type {
|
||||||
|
for {
|
||||||
|
t := typ.Underlying()
|
||||||
|
if t == typ {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
typ = t
|
||||||
|
}
|
||||||
|
}
|
@ -14,6 +14,7 @@
|
|||||||
// You should have received a copy of the GNU Lesser General Public License
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//go:build nacl || js || !cgo
|
||||||
// +build nacl js !cgo
|
// +build nacl js !cgo
|
||||||
|
|
||||||
package rlp
|
package rlp
|
||||||
@ -21,6 +22,6 @@ package rlp
|
|||||||
import "reflect"
|
import "reflect"
|
||||||
|
|
||||||
// byteArrayBytes returns a slice of the byte array v.
|
// byteArrayBytes returns a slice of the byte array v.
|
||||||
func byteArrayBytes(v reflect.Value) []byte {
|
func byteArrayBytes(v reflect.Value, length int) []byte {
|
||||||
return v.Slice(0, v.Len()).Bytes()
|
return v.Slice(0, length).Bytes()
|
||||||
}
|
}
|
||||||
|
@ -19,9 +19,10 @@ package rlp
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/openrelayxyz/plugeth-utils/restricted/rlp/internal/rlpstruct"
|
||||||
)
|
)
|
||||||
|
|
||||||
// typeinfo is an entry in the type cache.
|
// typeinfo is an entry in the type cache.
|
||||||
@ -32,35 +33,16 @@ type typeinfo struct {
|
|||||||
writerErr error // error from makeWriter
|
writerErr error // error from makeWriter
|
||||||
}
|
}
|
||||||
|
|
||||||
// tags represents struct tags.
|
|
||||||
type tags struct {
|
|
||||||
// rlp:"nil" controls whether empty input results in a nil pointer.
|
|
||||||
// nilKind is the kind of empty value allowed for the field.
|
|
||||||
nilKind Kind
|
|
||||||
nilOK bool
|
|
||||||
|
|
||||||
// rlp:"optional" allows for a field to be missing in the input list.
|
|
||||||
// If this is set, all subsequent fields must also be optional.
|
|
||||||
optional bool
|
|
||||||
|
|
||||||
// rlp:"tail" controls whether this field swallows additional list elements. It can
|
|
||||||
// only be set for the last field, which must be of slice type.
|
|
||||||
tail bool
|
|
||||||
|
|
||||||
// rlp:"-" ignores fields.
|
|
||||||
ignored bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// typekey is the key of a type in typeCache. It includes the struct tags because
|
// typekey is the key of a type in typeCache. It includes the struct tags because
|
||||||
// they might generate a different decoder.
|
// they might generate a different decoder.
|
||||||
type typekey struct {
|
type typekey struct {
|
||||||
reflect.Type
|
reflect.Type
|
||||||
tags
|
rlpstruct.Tags
|
||||||
}
|
}
|
||||||
|
|
||||||
type decoder func(*Stream, reflect.Value) error
|
type decoder func(*Stream, reflect.Value) error
|
||||||
|
|
||||||
type writer func(reflect.Value, *encbuf) error
|
type writer func(reflect.Value, *encBuffer) error
|
||||||
|
|
||||||
var theTC = newTypeCache()
|
var theTC = newTypeCache()
|
||||||
|
|
||||||
@ -95,10 +77,10 @@ func (c *typeCache) info(typ reflect.Type) *typeinfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Not in the cache, need to generate info for this type.
|
// Not in the cache, need to generate info for this type.
|
||||||
return c.generate(typ, tags{})
|
return c.generate(typ, rlpstruct.Tags{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo {
|
func (c *typeCache) generate(typ reflect.Type, tags rlpstruct.Tags) *typeinfo {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
@ -122,7 +104,7 @@ func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo {
|
|||||||
return info
|
return info
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags tags) *typeinfo {
|
func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags rlpstruct.Tags) *typeinfo {
|
||||||
key := typekey{typ, tags}
|
key := typekey{typ, tags}
|
||||||
if info := c.next[key]; info != nil {
|
if info := c.next[key]; info != nil {
|
||||||
return info
|
return info
|
||||||
@ -144,35 +126,40 @@ type field struct {
|
|||||||
|
|
||||||
// structFields resolves the typeinfo of all public fields in a struct type.
|
// structFields resolves the typeinfo of all public fields in a struct type.
|
||||||
func structFields(typ reflect.Type) (fields []field, err error) {
|
func structFields(typ reflect.Type) (fields []field, err error) {
|
||||||
var (
|
// Convert fields to rlpstruct.Field.
|
||||||
lastPublic = lastPublicField(typ)
|
var allStructFields []rlpstruct.Field
|
||||||
anyOptional = false
|
|
||||||
)
|
|
||||||
for i := 0; i < typ.NumField(); i++ {
|
for i := 0; i < typ.NumField(); i++ {
|
||||||
if f := typ.Field(i); f.PkgPath == "" { // exported
|
rf := typ.Field(i)
|
||||||
tags, err := parseStructTag(typ, i, lastPublic)
|
allStructFields = append(allStructFields, rlpstruct.Field{
|
||||||
if err != nil {
|
Name: rf.Name,
|
||||||
return nil, err
|
Index: i,
|
||||||
}
|
Exported: rf.PkgPath == "",
|
||||||
|
Tag: string(rf.Tag),
|
||||||
|
Type: *rtypeToStructType(rf.Type, nil),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Skip rlp:"-" fields.
|
// Filter/validate fields.
|
||||||
if tags.ignored {
|
structFields, structTags, err := rlpstruct.ProcessFields(allStructFields)
|
||||||
continue
|
if err != nil {
|
||||||
}
|
if tagErr, ok := err.(rlpstruct.TagError); ok {
|
||||||
// If any field has the "optional" tag, subsequent fields must also have it.
|
tagErr.StructType = typ.String()
|
||||||
if tags.optional || tags.tail {
|
return nil, tagErr
|
||||||
anyOptional = true
|
|
||||||
} else if anyOptional {
|
|
||||||
return nil, fmt.Errorf(`rlp: struct field %v.%s needs "optional" tag`, typ, f.Name)
|
|
||||||
}
|
|
||||||
info := theTC.infoWhileGenerating(f.Type, tags)
|
|
||||||
fields = append(fields, field{i, info, tags.optional})
|
|
||||||
}
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve typeinfo.
|
||||||
|
for i, sf := range structFields {
|
||||||
|
typ := typ.Field(sf.Index).Type
|
||||||
|
tags := structTags[i]
|
||||||
|
info := theTC.infoWhileGenerating(typ, tags)
|
||||||
|
fields = append(fields, field{sf.Index, info, tags.Optional})
|
||||||
}
|
}
|
||||||
return fields, nil
|
return fields, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// anyOptionalFields returns the index of the first field with "optional" tag.
|
// firstOptionalField returns the index of the first field with "optional" tag.
|
||||||
func firstOptionalField(fields []field) int {
|
func firstOptionalField(fields []field) int {
|
||||||
for i, f := range fields {
|
for i, f := range fields {
|
||||||
if f.optional {
|
if f.optional {
|
||||||
@@ -192,82 +179,56 @@ func (e structFieldError) Error() string {
 	return fmt.Sprintf("%v (struct field %v.%s)", e.err, e.typ, e.typ.Field(e.field).Name)
 }
 
-type structTagError struct {
-	typ             reflect.Type
-	field, tag, err string
-}
-
-func (e structTagError) Error() string {
-	return fmt.Sprintf("rlp: invalid struct tag %q for %v.%s (%s)", e.tag, e.typ, e.field, e.err)
-}
-
-func parseStructTag(typ reflect.Type, fi, lastPublic int) (tags, error) {
-	f := typ.Field(fi)
-	var ts tags
-	for _, t := range strings.Split(f.Tag.Get("rlp"), ",") {
-		switch t = strings.TrimSpace(t); t {
-		case "":
-		case "-":
-			ts.ignored = true
-		case "nil", "nilString", "nilList":
-			ts.nilOK = true
-			if f.Type.Kind() != reflect.Ptr {
-				return ts, structTagError{typ, f.Name, t, "field is not a pointer"}
-			}
-			switch t {
-			case "nil":
-				ts.nilKind = defaultNilKind(f.Type.Elem())
-			case "nilString":
-				ts.nilKind = String
-			case "nilList":
-				ts.nilKind = List
-			}
-		case "optional":
-			ts.optional = true
-			if ts.tail {
-				return ts, structTagError{typ, f.Name, t, `also has "tail" tag`}
-			}
-		case "tail":
-			ts.tail = true
-			if fi != lastPublic {
-				return ts, structTagError{typ, f.Name, t, "must be on last field"}
-			}
-			if ts.optional {
-				return ts, structTagError{typ, f.Name, t, `also has "optional" tag`}
-			}
-			if f.Type.Kind() != reflect.Slice {
-				return ts, structTagError{typ, f.Name, t, "field type is not slice"}
-			}
-		default:
-			return ts, fmt.Errorf("rlp: unknown struct tag %q on %v.%s", t, typ, f.Name)
-		}
-	}
-	return ts, nil
-}
-
-func lastPublicField(typ reflect.Type) int {
-	last := 0
-	for i := 0; i < typ.NumField(); i++ {
-		if typ.Field(i).PkgPath == "" {
-			last = i
-		}
-	}
-	return last
-}
-
-func (i *typeinfo) generate(typ reflect.Type, tags tags) {
+func (i *typeinfo) generate(typ reflect.Type, tags rlpstruct.Tags) {
 	i.decoder, i.decoderErr = makeDecoder(typ, tags)
 	i.writer, i.writerErr = makeWriter(typ, tags)
 }
 
-// defaultNilKind determines whether a nil pointer to typ encodes/decodes
-// as an empty string or empty list.
-func defaultNilKind(typ reflect.Type) Kind {
+// rtypeToStructType converts typ to rlpstruct.Type.
+func rtypeToStructType(typ reflect.Type, rec map[reflect.Type]*rlpstruct.Type) *rlpstruct.Type {
 	k := typ.Kind()
-	if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(typ) {
-		return String
+	if k == reflect.Invalid {
+		panic("invalid kind")
 	}
-	return List
+
+	if prev := rec[typ]; prev != nil {
+		return prev // short-circuit for recursive types
+	}
+	if rec == nil {
+		rec = make(map[reflect.Type]*rlpstruct.Type)
+	}
+
+	t := &rlpstruct.Type{
+		Name:      typ.String(),
+		Kind:      k,
+		IsEncoder: typ.Implements(encoderInterface),
+		IsDecoder: typ.Implements(decoderInterface),
+	}
+	rec[typ] = t
+	if k == reflect.Array || k == reflect.Slice || k == reflect.Ptr {
+		t.Elem = rtypeToStructType(typ.Elem(), rec)
+	}
+	return t
+}
+
+// typeNilKind gives the RLP value kind for nil pointers to 'typ'.
+func typeNilKind(typ reflect.Type, tags rlpstruct.Tags) Kind {
+	styp := rtypeToStructType(typ, nil)
+
+	var nk rlpstruct.NilKind
+	if tags.NilOK {
+		nk = tags.NilKind
+	} else {
+		nk = styp.DefaultNilValue()
+	}
+	switch nk {
+	case rlpstruct.NilKindString:
+		return String
+	case rlpstruct.NilKindList:
+		return List
+	default:
+		panic("invalid nil kind value")
+	}
 }
 
 func isUint(k reflect.Kind) bool {
@@ -277,7 +238,3 @@ func isUint(k reflect.Kind) bool {
 func isByte(typ reflect.Type) bool {
 	return typ.Kind() == reflect.Uint8 && !typ.Implements(encoderInterface)
 }
-
-func isByteArray(typ reflect.Type) bool {
-	return (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Array) && isByte(typ.Elem())
-}
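The struct tags validated here ("-", "nil", "optional", "tail") are the knobs this refactor moves into rlpstruct.ProcessFields. Below is a minimal sketch of the "optional" tag from a caller's point of view, assuming the upstream github.com/ethereum/go-ethereum/rlp package (which this restricted rlp package mirrors); the demoHeader type is illustrative only, not part of this repository.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// demoHeader is a hypothetical type: trailing fields tagged rlp:"optional"
// may be omitted from the encoded list, and a final slice field tagged
// rlp:"tail" would collect any remaining list elements.
type demoHeader struct {
	Number uint64
	Extra  []byte `rlp:"optional"`
}

func main() {
	enc, err := rlp.EncodeToBytes(&demoHeader{Number: 7, Extra: []byte{1}})
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %x\n", enc)

	var dec demoHeader
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println(dec.Number, dec.Extra)
}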
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+//go:build !nacl && !js && cgo
 // +build !nacl,!js,cgo
 
 package rlp
@@ -24,12 +25,11 @@ import (
 )
 
 // byteArrayBytes returns a slice of the byte array v.
-func byteArrayBytes(v reflect.Value) []byte {
-	len := v.Len()
+func byteArrayBytes(v reflect.Value, length int) []byte {
 	var s []byte
 	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s))
 	hdr.Data = v.UnsafeAddr()
-	hdr.Cap = len
-	hdr.Len = len
+	hdr.Cap = length
+	hdr.Len = length
 	return s
 }
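byteArrayBytes builds the slice header by hand with unsafe. The sketch below is a hypothetical safe equivalent using only reflect, shown just to illustrate what the helper produces; the function name byteArrayBytesSafe is an assumption and not part of this package.

package main

import (
	"fmt"
	"reflect"
)

// byteArrayBytesSafe views an addressable byte array value as a []byte
// without copying, using reflect.Value.Slice instead of unsafe.
func byteArrayBytesSafe(v reflect.Value, length int) []byte {
	return v.Slice(0, length).Bytes()
}

func main() {
	arr := [4]byte{0xde, 0xad, 0xbe, 0xef}
	v := reflect.ValueOf(&arr).Elem() // addressable array value
	fmt.Printf("%x\n", byteArrayBytesSafe(v, v.Len()))
}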
@@ -18,8 +18,10 @@ package types
 
 import (
 	"math/big"
+	"bytes"
 
 	"github.com/openrelayxyz/plugeth-utils/core"
+	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
 )
 
 //go:generate go run github.com/fjl/gencodec -type AccessTuple -out gen_access_tuple.go
@@ -105,6 +107,9 @@ func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice }
 func (tx *AccessListTx) value() *big.Int   { return tx.Value }
 func (tx *AccessListTx) nonce() uint64     { return tx.Nonce }
 func (tx *AccessListTx) to() *core.Address { return tx.To }
+func (tx *AccessListTx) blobGas() uint64          { return 0 }
+func (tx *AccessListTx) blobGasFeeCap() *big.Int  { return nil }
+func (tx *AccessListTx) blobHashes() []core.Hash  { return nil }
 
 func (tx *AccessListTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
 	return dst.Set(tx.GasPrice)
@@ -117,3 +122,10 @@ func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) {
 func (tx *AccessListTx) setSignatureValues(chainID, v, r, s *big.Int) {
 	tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s
 }
+
+func (tx *AccessListTx) encode(b *bytes.Buffer) error {
+	return rlp.Encode(b, tx)
+}
+
+func (tx *AccessListTx) decode(input []byte) error {
+	return rlp.DecodeBytes(input, tx)
+}
restricted/types/blob_tx.go (new file)
@@ -0,0 +1,254 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package types

import (
	"bytes"
	"math/big"
	"crypto/sha256"
	"fmt"

	"github.com/openrelayxyz/plugeth-utils/core"
	"github.com/openrelayxyz/plugeth-utils/restricted/params"
	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
	"github.com/openrelayxyz/plugeth-utils/restricted/crypto/kzg4844"
	"github.com/holiman/uint256"
)

// BlobTx represents an EIP-4844 transaction.
type BlobTx struct {
	ChainID    *uint256.Int
	Nonce      uint64
	GasTipCap  *uint256.Int // a.k.a. maxPriorityFeePerGas
	GasFeeCap  *uint256.Int // a.k.a. maxFeePerGas
	Gas        uint64
	To         core.Address
	Value      *uint256.Int
	Data       []byte
	AccessList AccessList
	BlobFeeCap *uint256.Int // a.k.a. maxFeePerBlobGas
	BlobHashes []core.Hash

	// A blob transaction can optionally contain blobs. This field must be set when BlobTx
	// is used to create a transaction for signing.
	Sidecar *BlobTxSidecar `rlp:"-"`

	// Signature values
	V *uint256.Int `json:"v" gencodec:"required"`
	R *uint256.Int `json:"r" gencodec:"required"`
	S *uint256.Int `json:"s" gencodec:"required"`
}

// BlobTxSidecar contains the blobs of a blob transaction.
type BlobTxSidecar struct {
	Blobs       []kzg4844.Blob       // Blobs needed by the blob pool
	Commitments []kzg4844.Commitment // Commitments needed by the blob pool
	Proofs      []kzg4844.Proof      // Proofs needed by the blob pool
}

// BlobHashes computes the blob hashes of the given blobs.
func (sc *BlobTxSidecar) BlobHashes() []core.Hash {
	h := make([]core.Hash, len(sc.Commitments))
	for i := range sc.Blobs {
		h[i] = blobHash(&sc.Commitments[i])
	}
	return h
}

// encodedSize computes the RLP size of the sidecar elements. This does NOT return the
// encoded size of the BlobTxSidecar, it's just a helper for tx.Size().
func (sc *BlobTxSidecar) encodedSize() uint64 {
	var blobs, commitments, proofs uint64
	for i := range sc.Blobs {
		blobs += rlp.BytesSize(sc.Blobs[i][:])
	}
	for i := range sc.Commitments {
		commitments += rlp.BytesSize(sc.Commitments[i][:])
	}
	for i := range sc.Proofs {
		proofs += rlp.BytesSize(sc.Proofs[i][:])
	}
	return rlp.ListSize(blobs) + rlp.ListSize(commitments) + rlp.ListSize(proofs)
}

// blobTxWithBlobs is used for encoding of transactions when blobs are present.
type blobTxWithBlobs struct {
	BlobTx      *BlobTx
	Blobs       []kzg4844.Blob
	Commitments []kzg4844.Commitment
	Proofs      []kzg4844.Proof
}

// copy creates a deep copy of the transaction data and initializes all fields.
func (tx *BlobTx) copy() TxData {
	cpy := &BlobTx{
		Nonce: tx.Nonce,
		To:    tx.To,
		Data:  core.CopyBytes(tx.Data),
		Gas:   tx.Gas,
		// These are copied below.
		AccessList: make(AccessList, len(tx.AccessList)),
		BlobHashes: make([]core.Hash, len(tx.BlobHashes)),
		Value:      new(uint256.Int),
		ChainID:    new(uint256.Int),
		GasTipCap:  new(uint256.Int),
		GasFeeCap:  new(uint256.Int),
		BlobFeeCap: new(uint256.Int),
		V:          new(uint256.Int),
		R:          new(uint256.Int),
		S:          new(uint256.Int),
	}
	copy(cpy.AccessList, tx.AccessList)
	copy(cpy.BlobHashes, tx.BlobHashes)

	if tx.Value != nil {
		cpy.Value.Set(tx.Value)
	}
	if tx.ChainID != nil {
		cpy.ChainID.Set(tx.ChainID)
	}
	if tx.GasTipCap != nil {
		cpy.GasTipCap.Set(tx.GasTipCap)
	}
	if tx.GasFeeCap != nil {
		cpy.GasFeeCap.Set(tx.GasFeeCap)
	}
	if tx.BlobFeeCap != nil {
		cpy.BlobFeeCap.Set(tx.BlobFeeCap)
	}
	if tx.V != nil {
		cpy.V.Set(tx.V)
	}
	if tx.R != nil {
		cpy.R.Set(tx.R)
	}
	if tx.S != nil {
		cpy.S.Set(tx.S)
	}
	if tx.Sidecar != nil {
		cpy.Sidecar = &BlobTxSidecar{
			Blobs:       append([]kzg4844.Blob(nil), tx.Sidecar.Blobs...),
			Commitments: append([]kzg4844.Commitment(nil), tx.Sidecar.Commitments...),
			Proofs:      append([]kzg4844.Proof(nil), tx.Sidecar.Proofs...),
		}
	}
	return cpy
}

// accessors for innerTx.
func (tx *BlobTx) txType() byte           { return BlobTxType }
func (tx *BlobTx) chainID() *big.Int      { return tx.ChainID.ToBig() }
func (tx *BlobTx) accessList() AccessList { return tx.AccessList }
func (tx *BlobTx) data() []byte           { return tx.Data }
func (tx *BlobTx) gas() uint64            { return tx.Gas }
func (tx *BlobTx) gasFeeCap() *big.Int    { return tx.GasFeeCap.ToBig() }
func (tx *BlobTx) gasTipCap() *big.Int    { return tx.GasTipCap.ToBig() }
func (tx *BlobTx) gasPrice() *big.Int     { return tx.GasFeeCap.ToBig() }
func (tx *BlobTx) value() *big.Int        { return tx.Value.ToBig() }
func (tx *BlobTx) nonce() uint64          { return tx.Nonce }
func (tx *BlobTx) to() *core.Address      { tmp := tx.To; return &tmp }
func (tx *BlobTx) blobGas() uint64        { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) }
func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() }
func (tx *BlobTx) blobHashes() []core.Hash { return tx.BlobHashes }

func (tx *BlobTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
	if baseFee == nil {
		return dst.Set(tx.GasFeeCap.ToBig())
	}
	tip := dst.Sub(tx.GasFeeCap.ToBig(), baseFee)
	if tip.Cmp(tx.GasTipCap.ToBig()) > 0 {
		tip.Set(tx.GasTipCap.ToBig())
	}
	return tip.Add(tip, baseFee)
}

func (tx *BlobTx) rawSignatureValues() (v, r, s *big.Int) {
	return tx.V.ToBig(), tx.R.ToBig(), tx.S.ToBig()
}

func (tx *BlobTx) setSignatureValues(chainID, v, r, s *big.Int) {
	tx.ChainID.SetFromBig(chainID)
	tx.V.SetFromBig(v)
	tx.R.SetFromBig(r)
	tx.S.SetFromBig(s)
}

func (tx *BlobTx) withoutSidecar() *BlobTx {
	cpy := *tx
	cpy.Sidecar = nil
	return &cpy
}

func (tx *BlobTx) encode(b *bytes.Buffer) error {
	if tx.Sidecar == nil {
		return rlp.Encode(b, tx)
	}
	inner := &blobTxWithBlobs{
		BlobTx:      tx,
		Blobs:       tx.Sidecar.Blobs,
		Commitments: tx.Sidecar.Commitments,
		Proofs:      tx.Sidecar.Proofs,
	}
	return rlp.Encode(b, inner)
}

func (tx *BlobTx) decode(input []byte) error {
	// Here we need to support two formats: the network protocol encoding of the tx (with
	// blobs) or the canonical encoding without blobs.
	//
	// The two encodings can be distinguished by checking whether the first element of the
	// input list is itself a list.

	outerList, _, err := rlp.SplitList(input)
	if err != nil {
		return err
	}
	firstElemKind, _, _, err := rlp.Split(outerList)
	if err != nil {
		return err
	}

	if firstElemKind != rlp.List {
		if err := rlp.DecodeBytes(input, tx); err != nil {
			// 0xf8b6053d850165a0bc00858bb2c97000825208944f56ffc63c28b72f79b02e91f11a4707bac4043c8080c0858bb2c97000f842a00155acead2ea1da3a7f6e9c743f4d81b1cd9f5a3382e657aa4025837e2b1651fa0019db7c3b9244969140c36c20cdcc71a821e2a995714c1ec2169f3ebd63c648280a00567aaceaed2303b577178069e5f4bfdd70d445e9eff8e63f9f0a33de98dd782a060e39ff4d29ce28325e3b48707bb8dd170ed3671428a62efcab9812555eb6db1
			return fmt.Errorf("Decode error: %v - %v", err, firstElemKind)
		} else {
			return nil
		}
	}
	// It's a tx with blobs.
	var inner blobTxWithBlobs
	if err := rlp.DecodeBytes(input, &inner); err != nil {
		return err
	}
	*tx = *inner.BlobTx
	tx.Sidecar = &BlobTxSidecar{
		Blobs:       inner.Blobs,
		Commitments: inner.Commitments,
		Proofs:      inner.Proofs,
	}
	return nil
}

func blobHash(commit *kzg4844.Commitment) core.Hash {
	hasher := sha256.New()
	hasher.Write(commit[:])
	var vhash core.Hash
	hasher.Sum(vhash[:0])
	vhash[0] = params.BlobTxHashVersion
	return vhash
}
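For reference, a self-contained sketch of the versioned hash that blobHash computes: sha256 of the KZG commitment with the first byte overwritten by the blob-hash version. The local kzgCommitment type and the 0x01 constant stand in for kzg4844.Commitment and params.BlobTxHashVersion (assumed to be 0x01, per EIP-4844).

package main

import (
	"crypto/sha256"
	"fmt"
)

// kzgCommitment stands in for kzg4844.Commitment (a 48-byte KZG commitment).
type kzgCommitment [48]byte

// versionedBlobHash mirrors blobHash: sha256 of the commitment with the
// first byte replaced by the blob hash version.
func versionedBlobHash(commit kzgCommitment) [32]byte {
	h := sha256.Sum256(commit[:])
	h[0] = 0x01 // assumed params.BlobTxHashVersion
	return h
}

func main() {
	var c kzgCommitment // zero commitment, just to exercise the helper
	fmt.Printf("%x\n", versionedBlobHash(c))
}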
@@ -90,11 +90,14 @@ type Header struct {
 	// WithdrawalsHash was added by EIP-4895 and is ignored in legacy headers.
 	WithdrawalsHash *core.Hash `json:"withdrawalsRoot" rlp:"optional"`
 
-	/*
-		TODO (MariusVanDerWijden) Add this field once needed
-		// Random was added during the merge and contains the BeaconState randomness
-		Random core.Hash `json:"random" rlp:"optional"`
-	*/
+	// BlobGasUsed was added by EIP-4844 and is ignored in legacy headers.
+	BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
+
+	// ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers.
+	ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
+
+	// ParentBeaconRoot was added by EIP-4788 and is ignored in legacy headers.
+	ParentBeaconRoot *core.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
 }
 
 // field type overrides for gencodec
@@ -283,6 +286,18 @@ func CopyHeader(h *Header) *Header {
 	if h.WithdrawalsHash != nil {
 		*cpy.WithdrawalsHash = *h.WithdrawalsHash
 	}
+	if h.ExcessBlobGas != nil {
+		cpy.ExcessBlobGas = new(uint64)
+		*cpy.ExcessBlobGas = *h.ExcessBlobGas
+	}
+	if h.BlobGasUsed != nil {
+		cpy.BlobGasUsed = new(uint64)
+		*cpy.BlobGasUsed = *h.BlobGasUsed
+	}
+	if h.ParentBeaconRoot != nil {
+		cpy.ParentBeaconRoot = new(core.Hash)
+		*cpy.ParentBeaconRoot = *h.ParentBeaconRoot
+	}
 	return &cpy
 }
@@ -347,6 +362,26 @@ func (b *Block) BaseFee() *big.Int {
 	return new(big.Int).Set(b.header.BaseFee)
 }
 
+func (b *Block) BeaconRoot() *core.Hash { return b.header.ParentBeaconRoot }
+
+func (b *Block) ExcessBlobGas() *uint64 {
+	var excessBlobGas *uint64
+	if b.header.ExcessBlobGas != nil {
+		excessBlobGas = new(uint64)
+		*excessBlobGas = *b.header.ExcessBlobGas
+	}
+	return excessBlobGas
+}
+
+func (b *Block) BlobGasUsed() *uint64 {
+	var blobGasUsed *uint64
+	if b.header.BlobGasUsed != nil {
+		blobGasUsed = new(uint64)
+		*blobGasUsed = *b.header.BlobGasUsed
+	}
+	return blobGasUsed
+}
+
 func (b *Block) Withdrawals() Withdrawals {
 	return b.withdrawals
 }
@@ -391,10 +426,8 @@ func CalcUncleHash(uncles []*Header) core.Hash {
 // WithSeal returns a new block with the data from b but the header replaced with
 // the sealed one.
 func (b *Block) WithSeal(header *Header) *Block {
-	cpy := *header
-
 	return &Block{
-		header:       &cpy,
+		header:       CopyHeader(header),
 		transactions: b.transactions,
 		uncles:       b.uncles,
 		withdrawals:  b.withdrawals,
@@ -407,6 +440,7 @@ func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
 		header:       CopyHeader(b.header),
 		transactions: make([]*Transaction, len(transactions)),
 		uncles:       make([]*Header, len(uncles)),
+		withdrawals:  b.withdrawals,
 	}
 	copy(block.transactions, transactions)
 	for i := range uncles {
@@ -417,11 +451,16 @@ func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
 
 // WithWithdrawals sets the withdrawal contents of a block, does not return a new block.
 func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block {
-	if withdrawals != nil {
-		b.withdrawals = make([]*Withdrawal, len(withdrawals))
-		copy(b.withdrawals, withdrawals)
+	block := &Block{
+		header:       b.header,
+		transactions: b.transactions,
+		uncles:       b.uncles,
 	}
-	return b
+	if withdrawals != nil {
+		block.withdrawals = make([]*Withdrawal, len(withdrawals))
+		copy(block.withdrawals, withdrawals)
+	}
+	return block
 }
 
 // Hash returns the keccak256 hash of b's header.
@@ -17,9 +17,11 @@
 package types
 
 import (
+	"bytes"
 	"math/big"
 
 	"github.com/openrelayxyz/plugeth-utils/core"
+	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
 )
 
 type DynamicFeeTx struct {
@@ -93,6 +95,9 @@ func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap }
 func (tx *DynamicFeeTx) value() *big.Int   { return tx.Value }
 func (tx *DynamicFeeTx) nonce() uint64     { return tx.Nonce }
 func (tx *DynamicFeeTx) to() *core.Address { return tx.To }
+func (tx *DynamicFeeTx) blobGas() uint64          { return 0 }
+func (tx *DynamicFeeTx) blobGasFeeCap() *big.Int  { return nil }
+func (tx *DynamicFeeTx) blobHashes() []core.Hash  { return nil }
 
 func (tx *DynamicFeeTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
 	if baseFee == nil {
@@ -112,3 +117,11 @@ func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) {
 func (tx *DynamicFeeTx) setSignatureValues(chainID, v, r, s *big.Int) {
 	tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s
 }
+
+func (tx *DynamicFeeTx) encode(b *bytes.Buffer) error {
+	return rlp.Encode(b, tx)
+}
+
+func (tx *DynamicFeeTx) decode(input []byte) error {
+	return rlp.DecodeBytes(input, tx)
+}
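The effectiveGasPrice implementations above (dynamic-fee and blob transactions) apply the EIP-1559 rule min(gasFeeCap, gasTipCap + baseFee). A small standalone sketch with illustrative numbers only; like the real code, it assumes gasFeeCap >= baseFee once a transaction has been validated.

package main

import (
	"fmt"
	"math/big"
)

// effectiveGasPrice mirrors the rule used by the transaction types above:
// the price paid per gas is min(gasFeeCap, gasTipCap+baseFee).
func effectiveGasPrice(gasFeeCap, gasTipCap, baseFee *big.Int) *big.Int {
	tip := new(big.Int).Sub(gasFeeCap, baseFee)
	if tip.Cmp(gasTipCap) > 0 {
		tip.Set(gasTipCap)
	}
	return tip.Add(tip, baseFee)
}

func main() {
	feeCap := big.NewInt(100) // wei per gas, illustrative
	tipCap := big.NewInt(2)
	baseFee := big.NewInt(90)
	// 90 + min(2, 100-90) = 92
	fmt.Println(effectiveGasPrice(feeCap, tipCap, baseFee))
}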
@@ -25,7 +25,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
 		TxHash            core.Hash      `json:"transactionHash" gencodec:"required"`
 		ContractAddress   core.Address   `json:"contractAddress"`
 		GasUsed           hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
-		EffectiveGasPrice *hexutil.Big   `json:"effectiveGasPrice,omitempty"`
+		EffectiveGasPrice *hexutil.Big   `json:"effectiveGasPrice"`
 		BlockHash         core.Hash      `json:"blockHash,omitempty"`
 		BlockNumber       *hexutil.Big   `json:"blockNumber,omitempty"`
 		TransactionIndex  hexutil.Uint   `json:"transactionIndex"`
@@ -59,7 +59,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
 		TxHash            *core.Hash      `json:"transactionHash" gencodec:"required"`
 		ContractAddress   *core.Address   `json:"contractAddress"`
 		GasUsed           *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
-		EffectiveGasPrice *hexutil.Big    `json:"effectiveGasPrice,omitempty"`
+		EffectiveGasPrice *hexutil.Big    `json:"effectiveGasPrice"`
 		BlockHash         *core.Hash      `json:"blockHash,omitempty"`
 		BlockNumber       *hexutil.Big    `json:"blockNumber,omitempty"`
 		TransactionIndex  *hexutil.Uint   `json:"transactionIndex"`
@@ -19,6 +19,8 @@ package types
 import (
 	"bytes"
 	"sync"
+	"math"
+	"fmt"
 
 	"github.com/openrelayxyz/plugeth-utils/core"
 	"github.com/openrelayxyz/plugeth-utils/restricted/crypto"
@@ -111,3 +113,14 @@ func DeriveSha(list DerivableList, hasher TrieHasher) core.Hash {
 	}
 	return hasher.Hash()
 }
+
+func getPooledBuffer(size uint64) ([]byte, *bytes.Buffer, error) {
+	if size > math.MaxInt {
+		return nil, nil, fmt.Errorf("can't get buffer of size %d", size)
+	}
+	buf := encodeBufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	buf.Grow(int(size))
+	b := buf.Bytes()[:int(size)]
+	return b, buf, nil
+}
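getPooledBuffer relies on a package-level encodeBufferPool that is not shown in this diff. Below is a minimal sketch of the same pattern with an assumed sync.Pool and without the overflow check, just to illustrate how the pooled buffer and the size-limited byte slice relate.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool plays the role of the encodeBufferPool assumed above.
var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

// getPooledBuffer returns a byte slice of the requested size backed by a
// pooled buffer; the caller hands the buffer back when done.
func getPooledBuffer(size int) ([]byte, *bytes.Buffer) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	buf.Grow(size)
	return buf.Bytes()[:size], buf
}

func main() {
	b, buf := getPooledBuffer(8)
	copy(b, "pluGeth!")
	fmt.Printf("%s\n", b)
	bufPool.Put(buf) // return the buffer for reuse
}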
@@ -18,8 +18,10 @@ package types
 
 import (
 	"math/big"
+	"bytes"
 
 	"github.com/openrelayxyz/plugeth-utils/core"
+	"github.com/openrelayxyz/plugeth-utils/restricted/rlp"
 )
 
 // LegacyTx is the transaction data of regular Ethereum transactions.
@@ -102,6 +104,9 @@ func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice }
 func (tx *LegacyTx) value() *big.Int   { return tx.Value }
 func (tx *LegacyTx) nonce() uint64     { return tx.Nonce }
 func (tx *LegacyTx) to() *core.Address { return tx.To }
+func (tx *LegacyTx) blobGas() uint64          { return 0 }
+func (tx *LegacyTx) blobGasFeeCap() *big.Int  { return nil }
+func (tx *LegacyTx) blobHashes() []core.Hash  { return nil }
 
 func (tx *LegacyTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
 	return dst.Set(tx.GasPrice)
@@ -114,3 +119,11 @@ func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) {
 func (tx *LegacyTx) setSignatureValues(chainID, v, r, s *big.Int) {
 	tx.V, tx.R, tx.S = v, r, s
 }
+
+func (tx *LegacyTx) encode(b *bytes.Buffer) error {
+	return rlp.Encode(b, tx)
+}
+
+func (tx *LegacyTx) decode(input []byte) error {
+	return rlp.DecodeBytes(input, tx)
+}
@@ -63,7 +63,7 @@ type Receipt struct {
 	TxHash            core.Hash    `json:"transactionHash" gencodec:"required"`
 	ContractAddress   core.Address `json:"contractAddress"`
 	GasUsed           uint64       `json:"gasUsed" gencodec:"required"`
-	EffectiveGasPrice *big.Int     `json:"effectiveGasPrice"`
+	EffectiveGasPrice *big.Int     `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility
 
 	// Inclusion information: These fields provide information about the inclusion of the
 	// transaction corresponding to this receipt.
@@ -78,6 +78,7 @@ type receiptMarshaling struct {
 	Status            hexutil.Uint64
 	CumulativeGasUsed hexutil.Uint64
 	GasUsed           hexutil.Uint64
+	EffectiveGasPrice *hexutil.Big
 	BlockNumber       *hexutil.Big
 	TransactionIndex  hexutil.Uint
 }
@@ -83,25 +83,15 @@ var (
 		},
 		Type: DynamicFeeTxType,
 	}
-)
-
-func TestDecodeEmptyTypedReceipt(t *testing.T) {
-	input := []byte{0x80}
-	var r Receipt
-	err := rlp.DecodeBytes(input, &r)
-	if err != errEmptyTypedReceipt {
-		t.Fatal("wrong error:", err)
-	}
-}
-
-// Tests that receipt data can be correctly derived from the contextual infos
-func TestDeriveFields(t *testing.T) {
 	// Create a few transactions to have receipts for
-	to2 := core.HexToAddress("0x2")
-	to3 := core.HexToAddress("0x3")
-	to4 := core.HexToAddress("0x4")
-	to5 := core.HexToAddress("0x5")
-	txs := Transactions{
+	to2 = core.HexToAddress("0x2")
+	to3 = core.HexToAddress("0x3")
+	to4 = core.HexToAddress("0x4")
+	to5 = core.HexToAddress("0x5")
+	to6 = core.HexToAddress("0x6")
+	to7 = core.HexToAddress("0x7")
+	txs = Transactions{
 		NewTx(&LegacyTx{
 			Nonce: 1,
 			Value: big.NewInt(1),
@@ -141,17 +131,18 @@ func TestDeriveFields(t *testing.T) {
 		}),
 	}
 
-	blockNumber := big.NewInt(1)
-	blockHash := core.BytesToHash([]byte{0x03, 0x14})
+	blockNumber = big.NewInt(1)
+	blockHash = core.BytesToHash([]byte{0x03, 0x14})
 
 	// Create the corresponding receipts
-	receipts := Receipts{
+	receipts = Receipts{
 		&Receipt{
 			Status:            ReceiptStatusFailed,
 			CumulativeGasUsed: 1,
 			Logs: []*Log{
 				{
 					Address: core.BytesToAddress([]byte{0x11}),
+					Topics:  []core.Hash{core.HexToHash("dead"), core.HexToHash("beef")},
 					// derived fields:
 					BlockNumber: blockNumber.Uint64(),
 					TxHash:      txs[0].Hash(),
@@ -161,6 +152,7 @@ func TestDeriveFields(t *testing.T) {
 				},
 				{
 					Address: core.BytesToAddress([]byte{0x01, 0x11}),
+					Topics:  []core.Hash{core.HexToHash("dead"), core.HexToHash("beef")},
 					// derived fields:
 					BlockNumber: blockNumber.Uint64(),
 					TxHash:      txs[0].Hash(),
@@ -184,6 +176,7 @@ func TestDeriveFields(t *testing.T) {
 			Logs: []*Log{
 				{
 					Address: core.BytesToAddress([]byte{0x22}),
+					Topics:  []core.Hash{core.HexToHash("dead"), core.HexToHash("beef")},
 					// derived fields:
 					BlockNumber: blockNumber.Uint64(),
 					TxHash:      txs[1].Hash(),
@@ -193,6 +186,7 @@ func TestDeriveFields(t *testing.T) {
 				},
 				{
 					Address: core.BytesToAddress([]byte{0x02, 0x22}),
+					Topics:  []core.Hash{core.HexToHash("dead"), core.HexToHash("beef")},
 					// derived fields:
 					BlockNumber: blockNumber.Uint64(),
 					TxHash:      txs[1].Hash(),
@@ -249,7 +243,19 @@ func TestDeriveFields(t *testing.T) {
 			TransactionIndex: 4,
 		},
 	}
+)
+
+func TestDecodeEmptyTypedReceipt(t *testing.T) {
+	input := []byte{0x80}
+	var r Receipt
+	err := rlp.DecodeBytes(input, &r)
+	if err != errEmptyTypedReceipt {
+		t.Fatal("wrong error:", err)
+	}
+}
 
+// Tests that receipt data can be correctly derived from the contextual infos
+func TestDeriveFields(t *testing.T) {
 	// Re-derive receipts.
 	basefee := big.NewInt(1000)
 	derivedReceipts := clearComputedFieldsOnReceipts(receipts)
@@ -263,6 +269,7 @@ func TestDeriveFields(t *testing.T) {
 	if err != nil {
 		t.Fatal("error marshaling input receipts:", err)
 	}
+
 	r2, err := json.MarshalIndent(derivedReceipts, "", " ")
 	if err != nil {
 		t.Fatal("error marshaling derived receipts:", err)
@@ -273,6 +280,38 @@ func TestDeriveFields(t *testing.T) {
 	}
 }
 
+// Test that we can marshal/unmarshal receipts to/from json without errors.
+// This also confirms that our test receipts contain all the required fields.
+func TestReceiptJSON(t *testing.T) {
+	for i := range receipts {
+		b, err := receipts[i].MarshalJSON()
+		if err != nil {
+			t.Fatal("error marshaling receipt to json:", err)
+		}
+		r := Receipt{}
+		err = r.UnmarshalJSON(b)
+		if err != nil {
+			t.Fatal("error unmarshaling receipt from json:", err)
+		}
+	}
+}
+
+// Test we can still parse receipt without EffectiveGasPrice for backwards compatibility, even
+// though it is required per the spec.
+func TestEffectiveGasPriceNotRequired(t *testing.T) {
+	r := *receipts[0]
+	r.EffectiveGasPrice = nil
+	b, err := r.MarshalJSON()
+	if err != nil {
+		t.Fatal("error marshaling receipt to json:", err)
+	}
+	r2 := Receipt{}
+	err = r2.UnmarshalJSON(b)
+	if err != nil {
+		t.Fatal("error unmarshaling receipt from json:", err)
+	}
+}
+
 // TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt
 // rlp decoder, which failed due to a shadowing error.
 func TestTypedReceiptEncodingDecoding(t *testing.T) {
@@ -36,6 +36,7 @@ var (
 	ErrInvalidTxType      = errors.New("transaction type not valid in this context")
 	ErrTxTypeNotSupported = errors.New("transaction type not supported")
 	ErrGasFeeCapTooLow    = errors.New("fee cap less than base fee")
+	errShortTypedTx       = errors.New("typed transaction too short")
 	errEmptyTypedTx       = errors.New("empty typed transaction bytes")
 )
 
@@ -44,6 +45,7 @@ const (
 	LegacyTxType = iota
 	AccessListTxType
 	DynamicFeeTxType
+	BlobTxType
 )
 
 // Transaction is an Ethereum transaction.
@@ -86,6 +88,9 @@ type TxData interface {
 	value() *big.Int
 	nonce() uint64
 	to() *core.Address
+	blobGas() uint64
+	blobGasFeeCap() *big.Int
+	blobHashes() []core.Hash
 
 	rawSignatureValues() (v, r, s *big.Int)
 	setSignatureValues(chainID, v, r, s *big.Int)
@@ -97,6 +102,9 @@ type TxData interface {
 	// copy of the computed value, i.e. callers are allowed to mutate the result.
 	// Method implementations can use 'dst' to store the result.
 	effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int
+
+	encode(b *bytes.Buffer) error
+	decode([]byte) error
 }
 
 // EncodeRLP implements rlp.Encoder
@@ -120,7 +128,7 @@ func (tx *Transaction) EncodeRLP(w io.Writer) error {
 // encodeTyped writes the canonical encoding of a typed transaction to w.
 func (tx *Transaction) encodeTyped(w *bytes.Buffer) error {
 	w.WriteByte(tx.Type())
-	return rlp.Encode(w, tx.inner)
+	return tx.inner.encode(w)
 }
 
 // MarshalBinary returns the canonical consensus encoding of the transaction.
@@ -189,22 +197,24 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
 // decodeTyped decodes a typed transaction from the canonical format.
 func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
 	if len(b) <= 1 {
-		return nil, errEmptyTypedTx
+		return nil, errShortTypedTx
 	}
+	var inner TxData
 	switch b[0] {
 	case AccessListTxType:
-		var inner AccessListTx
-		err := rlp.DecodeBytes(b[1:], &inner)
-		return &inner, err
+		inner = new(AccessListTx)
 	case DynamicFeeTxType:
-		var inner DynamicFeeTx
-		err := rlp.DecodeBytes(b[1:], &inner)
-		return &inner, err
+		inner = new(DynamicFeeTx)
+	case BlobTxType:
+		inner = new(BlobTx)
 	default:
		return nil, ErrTxTypeNotSupported
 	}
+	err := inner.decode(b[1:])
+	return inner, err
 }
 
 // setDecoded sets the inner transaction and size after decoding.
 func (tx *Transaction) setDecoded(inner TxData, size uint64) {
 	tx.inner = inner
@@ -295,15 +305,26 @@ func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value
 // Nonce returns the sender account nonce of the transaction.
 func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() }
 
+// BlobGasFeeCap returns the fee cap per gas of the transaction.
+func (tx *Transaction) BlobGasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.blobGasFeeCap()) }
+
+// BlobGas returns the blob gas of the transaction.
+func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() }
+
+func (tx *Transaction) BlobHashes() []core.Hash { return tx.inner.blobHashes() }
+
 // To returns the recipient address of the transaction.
 // For contract-creation transactions, To returns nil.
 func (tx *Transaction) To() *core.Address {
 	return copyAddressPtr(tx.inner.to())
 }
 
-// Cost returns gas * gasPrice + value.
+// Cost returns (gas * gasPrice) + (blobGas * blobGasPrice) + value.
 func (tx *Transaction) Cost() *big.Int {
 	total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas()))
+	if tx.Type() == BlobTxType {
+		total.Add(total, new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas())))
+	}
 	total.Add(total, tx.Value())
 	return total
 }
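A worked example of the new Cost rule for a blob transaction, using illustrative numbers only (two blobs at 131072 blob gas each, matching the upstream params.BlobTxBlobGasPerBlob constant):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Cost for a blob transaction: gas*gasPrice + blobGas*blobGasFeeCap + value.
	gas := big.NewInt(21000)
	gasPrice := big.NewInt(10)        // wei per gas
	blobGas := big.NewInt(2 * 131072) // two blobs
	blobGasFeeCap := big.NewInt(1)    // wei per blob gas
	value := big.NewInt(5)

	total := new(big.Int).Mul(gas, gasPrice)
	total.Add(total, new(big.Int).Mul(blobGas, blobGasFeeCap))
	total.Add(total, value)
	fmt.Println(total) // 210000 + 262144 + 5 = 472149
}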
File diff suppressed because one or more lines are too long